| text (stringlengths 12–1.05M) | repo_name (stringlengths 5–86) | path (stringlengths 4–191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 12–1.05M) | keyword (listlengths 1–23) | text_hash (stringlengths 64) |
|---|---|---|---|---|---|---|---|
# for rgenetics - lped to fbat
# recode to numeric fbat version
# much slower so best to always
# use numeric alleles internally
import sys,os,time
prog = os.path.split(sys.argv[0])[-1]
myversion = 'Oct 10 2009'
galhtmlprefix = """<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Galaxy %s tool output - see http://getgalaxy.org" />
<title></title>
<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
</head>
<body>
<div class="document">
"""
def timenow():
"""return current time as a string
"""
return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))
def rgConv(inpedfilepath,outhtmlname,outfilepath):
"""convert linkage ped/map to fbat"""
recode={'A':'1','C':'2','G':'3','T':'4','N':'0','0':'0','1':'1','2':'2','3':'3','4':'4'}
basename = os.path.split(inpedfilepath)[-1] # get basename
inmap = '%s.map' % inpedfilepath
inped = '%s.ped' % inpedfilepath
outf = '%s.ped' % basename # note the fbat exe insists that this is the extension for the ped data
outfpath = os.path.join(outfilepath,outf) # where to write the fbat format file to
try:
mf = file(inmap,'r')
except:
sys.stderr.write('%s cannot open inmap file %s - do you have permission?\n' % (prog,inmap))
sys.exit(1)
try:
rsl = [x.split()[1] for x in mf]
except:
sys.stderr.write('## cannot parse %s' % inmap)
sys.exit(1)
try:
os.makedirs(outfilepath)
except:
pass # already exists
head = ' '.join(rsl) # list of rs numbers
# TODO: add annotation to the rs ids, but fbat will probably barf?
pedf = file(inped,'r')
o = file(outfpath,'w',2**20)
o.write(head)
o.write('\n')
dorecode = 0 # assume alleles are already numeric until non-numeric codes are found in the first row
for i,row in enumerate(pedf):
if i == 0:
lrow = row.split()
try:
x = [int(x) for x in lrow[10:50]] # look for non numeric codes
except:
dorecode = 1
if dorecode:
lrow = row.strip().split()
p = lrow[:6]
g = lrow[6:]
gc = [recode.get(x,'0') for x in g]
lrow = p+gc
row = '%s\n' % ' '.join(lrow)
o.write(row)
o.close()
def main():
"""call fbater
need to work with rgenetics composite datatypes
so in and out are html files with data in extrafiles path
<command interpreter="python">rg_convert_lped_fped.py '$input1/$input1.metadata.base_name'
'$output1' '$output1.extra_files_path'
</command>
"""
nparm = 3
if len(sys.argv) <= nparm: # sys.argv[0] is the script name, so nparm arguments must follow it
sys.stderr.write('## %s called with %s - needs %d parameters \n' % (prog,sys.argv,nparm))
sys.exit(1)
inpedfilepath = sys.argv[1]
outhtmlname = sys.argv[2]
outfilepath = sys.argv[3]
try:
os.makedirs(outfilepath)
except:
pass
rgConv(inpedfilepath,outhtmlname,outfilepath)
f = file(outhtmlname,'w')
f.write(galhtmlprefix % prog)
flist = os.listdir(outfilepath)
print '## Rgenetics: http://rgenetics.org Galaxy Tools %s %s' % (prog,timenow()) # becomes info
f.write('<div>## Rgenetics: http://rgenetics.org Galaxy Tools %s %s\n<ol>' % (prog,timenow()))
for i, data in enumerate( flist ):
f.write('<li><a href="%s">%s</a></li>\n' % (os.path.split(data)[-1],os.path.split(data)[-1]))
f.write("</div></body></html>")
f.close()
if __name__ == "__main__":
main()
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/datatypes/converters/lped_to_fped_converter.py | Python | gpl-3.0 | 3,564 | ["Galaxy"] | b5615cbfe98821bcd5404b7e39803ec07158aad6f40fbb60e66c237b4d5bb8e7 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
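# place n_nonzeros standard-normal values at uniformly random (row, column) positions; return both dense and CSR forms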
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# Every random matrix should produce a transformation matrix
# with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
# Check some statistical properties of Gaussian random matrix
# Check that the random matrix follows the proper distribution.
# Each element a_ij of A is drawn from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
# Check some statistical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follows the proper distribution.
# Each element a_ij of A is drawn from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/sklearn/tests/test_random_projection.py | Python | gpl-2.0 | 14,035 | ["Gaussian"] | e914be00fec6da3e31cf605ecaa0f38e474a2ee35a1b78c244f8bf0600318661 |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 14 01:34:41 2014
@author: deokwoo
"""
from __future__ import division # To force floating point division
import numpy as np
from numpy.linalg import norm
from scipy.interpolate import interp1d
from shared_constants import *
from data_tools import *
from scipy.stats import stats
import time
import multiprocessing as mp
""""
def verify_data_format(key_list,data_dict,time_slots):
# Verify there is no [] or N/A in the list
# Only FLoat or Int format is allowed
print 'Checking any inconsisent data format.....'
print '---------------------------------'
list_of_wrong_data_format=[]
for key in key_list:
print 'checking ', key, '...'
for i,samples in enumerate(data_dict[key][1]):
for j,each_sample in enumerate(samples):
if each_sample==[]:
list_of_wrong_data_format.append([key,i,j])
print each_sample, 'at', time_slots[j], 'in', key
elif (isinstance(each_sample,int)==False and isinstance(each_sample,float)==False):
list_of_wrong_data_format.append([key,i,j])
print each_sample, 'at', time_slots[j], 'in', key
print '---------------------------------'
if len(list_of_wrong_data_format)>0:
raise NameError('Inconsistent data format in the list of data_used')
return list_of_wrong_data_format
"""
def verify_sensor_data_format(tup):
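# parallel worker used by verify_data_format: tup = (key, data_list, time_slots, queue); offending samples are reported through the shared queue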
key = tup[0]
data_list = tup[1]
time_slots = tup[2]
q = tup[3]
print 'checking ', key, '...'
for i,samples in enumerate(data_list):
for j,each_sample in enumerate(samples):
if each_sample==[]:
q.put([key,i,j])
print each_sample, 'at', time_slots[i], 'in', key
elif (isinstance(each_sample,int)==False and isinstance(each_sample,float)==False):
q.put([key,i,j])
print each_sample, 'at', time_slots[i], 'in', key
def verify_data_format(data_dict,PARALLEL=False):
# Verify there is no [] or N/A in the list
# Only Float or Int format is allowed
print 'Checking any inconsistent data format.....'
print '---------------------------------'
list_of_wrong_data_format=[]
time_slots=data_dict['time_slots']
weather_list_used = [data_dict['weather_list'][i] for i in [1,2,3,10,11]]
key_list=weather_list_used+ data_dict['sensor_list']
if not PARALLEL:
for key in key_list:
print 'checking ', key, '...'
for i,samples in enumerate(data_dict[key][1]):
for j,each_sample in enumerate(samples):
if each_sample==[]:
list_of_wrong_data_format.append([key,i,j])
print each_sample, 'at', time_slots[i], 'in', key
elif (isinstance(each_sample,int)==False and isinstance(each_sample,float)==False):
list_of_wrong_data_format.append([key,i,j])
print each_sample, 'at', time_slots[i], 'in', key
print '---------------------------------'
# PARALLEL
else:
manager = mp.Manager()
q = manager.Queue()
p = mp.Pool(CPU_CORE_NUM)
param_list = [(key,data_dict[key][1],time_slots,q) for key in key_list]
p.map(verify_sensor_data_format,param_list)
p.close()
p.join()
while not q.empty():
item = q.get()
print 'queue item: ' + str(item)
list_of_wrong_data_format.append(item)
if len(list_of_wrong_data_format)>0:
raise NameError('Inconsistent data format in the list of data_used')
return list_of_wrong_data_format
def verify_data_mat(X):
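# per-sample counts of [nan entries, inf entries, zero-variance flags]; summed over samples below and checked column by column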
num_err_temp=np.array([[len(np.nonzero(np.isnan(sample))[0]),len(np.nonzero(sample==np.inf)[0]),len(np.nonzero(np.var(sample)==0)[0])] for sample in X])
num_err=np.sum(num_err_temp,axis=0)
for err_idx in np.argwhere(num_err>0):
if err_idx==0:
raise NameError('nan entry found')
if err_idx==1:
raise NameError('inf entry found')
if err_idx==2:
raise NameError('zero var found')
print 'all entry values of the data matrix are verified ok'
def normalize_data(data_input):
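# robust normalization: ignore inf entries, estimate variance on the central 5%-95% of sorted values, then mean-center and scale to unit norm (status -1 flags a near-constant signal)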
y_pred=data_input.copy()
y_temp=np.delete(y_pred,np.nonzero(y_pred==np.infty), axis=0)
y_temp_sort=np.sort(y_temp)[np.ceil(len(y_temp)*0.05):np.floor(len(y_temp)*0.95)]
var_temp=np.var(y_temp_sort)
#import pdb;pdb.set_trace()
if var_temp>0: # At least 2 non-infty elements in y_pred
no_inf_idx=np.nonzero(y_pred!=np.infty)
y_pred[no_inf_idx]=y_pred[no_inf_idx]-np.mean(y_pred[no_inf_idx])
temp_val=y_pred/norm(y_pred[no_inf_idx])
temp_status=0
else:
temp_val=list(set(y_temp_sort))
temp_status=-1
return temp_val,temp_status
def interploate_data(x_temp,num_type,max_num_succ_idx_for_itpl):
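# fill missing (inf) samples by interpolation: boundary slots and gaps longer than max_num_succ_idx_for_itpl are skipped; nearest-neighbour is used for int data, linear for float data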
num_of_samples=x_temp.shape[0]
inf_idx=np.nonzero(x_temp==np.inf)[0]
noinf_idx=np.nonzero(x_temp!=np.inf)[0]
# Dont interploate the values on bondary.
inter_idx=np.delete(inf_idx,np.nonzero(inf_idx==0))
inter_idx=np.delete(inter_idx,np.nonzero(inter_idx==num_of_samples-1))
#############################################################################################
# Don't interpolate values that are missing for more than max_num_succ_idx_for_itpl successive slots.
# Delete any index that meets the condition above:
# inter_idx=np.delete(inter_idx, those indices)
# Need to be completed .....
#############################################################################################
# Find successive inf indices
succ_inf_idx = []
for i in range(0, len(noinf_idx) - 1):
# number of successive inf between two non-inf indices
num_succ_inf = noinf_idx[i+1] - noinf_idx[i] - 1
if (num_succ_inf > max_num_succ_idx_for_itpl):
succ_inf_idx = succ_inf_idx + range(noinf_idx[i]+1,noinf_idx[i+1])
# Remove successive inf indices
inter_idx=list(set(inter_idx)-set(succ_inf_idx))
if num_type==FLOAT_TYPE:
#f = interp1d(noinf_idx,x_temp[noinf_idx,0],'linear')
val_new=np.interp(inter_idx,noinf_idx, x_temp[noinf_idx,0])
#val_new = np.interp(t_new, t_,val_)
elif num_type==INT_TYPE:
#f = interp1d(noinf_idx,x_temp[noinf_idx,0],'nearest')
val_new=fast_nearest_interp(inter_idx,noinf_idx, x_temp[noinf_idx,0])
else:
raise NameError('Sample type must either INT or FLOAT type')
#x_temp[inter_idx,0]=f(inter_idx)
x_temp[inter_idx,0]=val_new
print 'No sample in time slots',inf_idx
print len(inter_idx),'/',len(inf_idx), ' time slots are interpolated'
return x_temp
def get_feature(data_dict_samples,num_type):
x_temp=[]
for i,sample in enumerate(data_dict_samples):
# If sample=[], np.std returns 0. To avoid a zero std, add an infinitesimal number
if len(sample)==0: # Set infty if no sample is available
x_temp.append(np.inf)
else:
if num_type==INT_TYPE:
x_temp.append(int(stats.mode(sample)[0]))
elif num_type==FLOAT_TYPE:
x_temp.append(np.mean(sample))
else:
raise NameError('Sample type must either INT or FLOAT type')
x_temp=np.array(x_temp)[:,np.newaxis]
return x_temp
# Mean value measure
def build_feature_matrix(data_dict,sensor_list,weather_list,time_slots,DO_INTERPOLATE=1,max_num_succ_idx_for_itpl=4):
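# summarize each sensor/weather stream into one value per time slot, drop outliers, optionally interpolate short gaps, normalize, and stack the results into the feature matrix X (plus type/index bookkeeping lists)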
data_used=sensor_list+weather_list
print 'Build data feature matrix now.....'
if DO_INTERPOLATE==1:
print 'Missing samples will be interpolated up to', max_num_succ_idx_for_itpl, 'successive time slots'
else:
print 'All time slots with any missing sample will be removed without interpolation '
start_proc_t=time.time()
num_of_data=len(data_used);
num_of_samples=len(time_slots)
# Declare as 2-d list for exception.
X=[];INT_type_list=[]; FLOAT_type_list=[];input_names=[]
weather_type_idx=[];sensor_type_idx=[];
INT_type_idx=[]; FLOAT_type_idx=[]
zero_var_list=[];zero_var_val=[] # whose variance is zero, hence carry no information,
# Construct X matrix by summarizing hourly samples
for j,key in enumerate(data_used):
print '----------------------------------------------'
print 'building for ',key
try:
num_type=check_data_type(data_dict[key][2][1])
# Avg. value feature
x_temp=get_feature(data_dict[key][1],num_type)
non_inf_idx=np.nonzero(x_temp<np.inf)[0]
#if non_inf_idx <len(time_slots):measurement_point_set
# import pdb;pdb.set_trace()
# Outlier removal, different parameters for sensors and weather data
if len(sensor_list)<=j:
# weather data
is_weather_data=True
outlier_idx=outlier_detect(x_temp[non_inf_idx],5,10)
else:
is_weather_data=False
outlier_idx=outlier_detect(x_temp[non_inf_idx],1,20)
if len(outlier_idx)>0:
print 'outlier samples are detected: ', 'outlier_idx:', outlier_idx
x_temp[non_inf_idx[outlier_idx]]=np.inf
# interpolate data: use nearest for int type, linear for float type
if DO_INTERPOLATE==1:
x_temp=interploate_data(x_temp,num_type,max_num_succ_idx_for_itpl)
norm_data_vec,output_status=normalize_data(x_temp[:,0])
if len(np.nonzero(norm_data_vec==np.inf)[0])>num_of_samples/5:
raise
except Exception:
print ' Error in processing data feature, excluded from analysis'
output_status=-1
norm_data_vec=None
if output_status==-1:
zero_var_list.append(key);
zero_var_val.append(norm_data_vec)
print 'too small variance for float type, added to zero var list'
else:
input_names.append(key)
print j, 'th sensor update'
if (num_type==FLOAT_TYPE) and (is_weather_data==False):
X.append(norm_data_vec);
FLOAT_type_idx.append(len(X)-1);FLOAT_type_list.append(key)
elif (num_type==INT_TYPE) or (is_weather_data==True):
X.append(x_temp[:,0])
INT_type_idx.append(len(X)-1);INT_type_list.append(key);
else:
raise NameError('Sample type must either INT or FLOAT type')
if key in weather_list: weather_type_idx.append(len(X)-1)
elif key in sensor_list:sensor_type_idx.append(len(X)-1)
else: raise NameError('Sample type must either Weather or Sensor type')
# Linear Interpolate
X=np.array(X).T
if X.shape[0]!=num_of_samples:
raise NameError('The number of rows in the feature matrix and the number of time slots are different ')
if X.shape[1]+len(zero_var_list)!= num_of_data:
raise NameError('The sum of the number of columns in the feature matrix and the number of zero-var columns differs from the number of input measurements ')
deleted_timeslot_idx=[]
print '---------------------------------------------------'
print 'removing time slots having no sample...'
inf_idx_set=[]
for col_vec in X.T:
inf_idx=np.nonzero(col_vec==np.infty)[0]
inf_idx_set=np.r_[inf_idx_set,inf_idx]
inf_col_idx=list(set(list(inf_idx_set)))
deleted_timeslot_idx=np.array([int(x) for x in inf_col_idx])
#import pdb;pdb.set_trace()
print 'time slots', deleted_timeslot_idx, ' removed...'
print '---------------------------------------------------'
X=np.delete(X,deleted_timeslot_idx,axis=0)
new_time_slot=np.delete(time_slots,deleted_timeslot_idx)
# Checking whether it has any ill entry value
verify_data_mat(X)
end_proc_t=time.time()
print 'job completed spending ', end_proc_t-start_proc_t, ' sec'
return X,new_time_slot,input_names\
,zero_var_list,zero_var_val,\
INT_type_list,INT_type_idx,FLOAT_type_list,FLOAT_type_idx,weather_type_idx,sensor_type_idx
# Abs Diff value measure
def build_diff(tup):
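# parallel worker for build_diff_matrix: tup = (index, time_slots, conf_lev, set_val, set_name, num_type); returns (index, [output_status, normalized diff vector])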
k = tup[0]
time_slots = tup[1]
conf_lev = tup[2]
set_val = tup[3]
set_name = tup[4]
num_type = tup[5]
print set_name
try:
diff_mean=get_diff(set_val,time_slots,num_type,conf_lev)
if num_type==FLOAT_TYPE:
#norm_diff_mean,output_status=normalize_data(diff_mean[:,0])
norm_diff_mean,output_status=normalize_data(diff_mean)
elif num_type==INT_TYPE:
#num_discrete_vals=len(set(list(diff_mean[:,0])))
num_discrete_vals=len(set(list(diff_mean)))
print 'num_discrete_vals :', num_discrete_vals
if num_discrete_vals>1:
output_status=0
norm_diff_mean=diff_mean
else:
output_status=-1
norm_diff_mean=list(set(diff_mean))
#norm_diff_mean=list(set(diff_mean[:,0]))
else:
pass
except Exception:
print ' Error in processing data feature, excluded from analysis'
output_status=-1
norm_diff_mean=None
return (k,[output_status,norm_diff_mean])
def get_diff(set_val,time_slots,num_type,conf_lev):
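# per time slot: select samples whose unix timestamps fall in [t, t + TIMELET_INV), then summarize the top (1 - conf_lev) fraction of absolute successive differences (mean for floats, mode for ints); slots with fewer than 2 samples get inf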
time_slots_utc=dtime_to_unix(time_slots)
TIMELET_INV_seconds=(time_slots[1]-time_slots[0]).seconds
diff_mean=[]
for r,utc_t in enumerate(time_slots_utc):
utc_t_s=utc_t
utc_t_e=utc_t+TIMELET_INV_seconds
idx=np.nonzero((set_val[0]>=utc_t_s) & (set_val[0]<utc_t_e))[0]
if len(idx)<2:
diff_val=np.inf
else:
temp_val=abs(np.diff(set_val[1][idx]))
upper_val=np.sort(temp_val)[np.floor(len(temp_val)*conf_lev):]
if len(upper_val)==0:
diff_val=np.inf
else:
if num_type==FLOAT_TYPE:
diff_val=np.mean(upper_val)
#print 'float type'
elif num_type==INT_TYPE:
diff_val=int(stats.mode(upper_val)[0])
#print 'int type'
else:
raise NameError('Sample type must either INT or FLOAT type')
#diff_val=max(abs(diff(set_val[1][idx])))
#sort(abs(diff(set_val[1][idx])))[::-1]
diff_mean.append(diff_val)
#diff_mean=np.array(diff_mean)[:,np.newaxis]
diff_mean=np.array(diff_mean)
return diff_mean
# Abs Diff value measure
def build_diff_matrix(measurement_point_set,time_slots,num_type_set,irr_data_name,conf_lev=0.5,PARALLEL=False):
#time_slots_utc=dtime_to_unix(time_slots)
Xdiff=[];
input_names=[];
INT_type_list=[]; FLOAT_type_list=[];
INT_type_idx=[]; FLOAT_type_idx=[]
zero_var_list=[];zero_var_val=[] # whose variance is zero, hence carry no information,
num_of_samples=len(time_slots)
#TIMELET_INV_seconds=(time_slots[1]-time_slots[0]).seconds
print '==========================================================='
if not PARALLEL:
for k,(set_val,set_name) in enumerate(zip(measurement_point_set,irr_data_name)):
print irr_data_name[k]
try:
num_type=num_type_set[k]
diff_mean=get_diff(set_val,time_slots,num_type,conf_lev)
if num_type==FLOAT_TYPE:
#norm_diff_mean,output_status=normalize_data(diff_mean[:,0])
norm_diff_mean,output_status=normalize_data(diff_mean)
elif num_type==INT_TYPE:
#num_discrete_vals=len(set(list(diff_mean[:,0])))
num_discrete_vals=len(set(list(diff_mean)))
print 'num_discrete_vals :', num_discrete_vals
if num_discrete_vals>1:
output_status=0
norm_diff_mean=diff_mean
else:
output_status=-1
#norm_diff_mean=list(set(diff_mean[:,0]))
norm_diff_mean=list(set(diff_mean))
else:
pass
if len(np.nonzero(norm_diff_mean==np.inf)[0])>num_of_samples/5:
raise
except Exception:
print ' Error in processing data feature, excluded from analysis'
output_status=-1
norm_diff_mean=None
if output_status==-1:
zero_var_list.append(set_name);#zero_var_flag=1
zero_var_val.append(norm_diff_mean)
print 'too small variance for float type or a single value for int type, added to zero var list'
else:
input_names.append(set_name)
Xdiff.append(norm_diff_mean)
if num_type==FLOAT_TYPE:
FLOAT_type_list.append(set_name)
FLOAT_type_idx.append(len(Xdiff)-1)
elif num_type==INT_TYPE:
INT_type_list.append(set_name)
INT_type_idx.append(len(Xdiff)-1)
print '----------------------------------------'
print '==========================================================='
# PARALLEL ENABLED
else:
print 'Build diff matrix: Parallel enabled...'
# Construct param list for workers
param_list = []
for k,(set_val,set_name) in enumerate(zip(measurement_point_set,irr_data_name)):
param_list.append((k,time_slots,conf_lev,set_val,set_name,num_type_set[k]))
p = mp.Pool(CPU_CORE_NUM)
ret_dict = dict(p.map(build_diff,param_list))
p.close()
p.join()
for k in sorted(ret_dict.keys()):
v = ret_dict[k]
output_status = v[0]
norm_diff_mean = v[1]
set_name = irr_data_name[k]
num_type = num_type_set[k]
if output_status==-1:
zero_var_list.append(set_name);#zero_var_flag=1
zero_var_val.append(norm_diff_mean)
print 'too small variance for float type or a single value for int type, added to zero var list'
else:
input_names.append(set_name)
try:
Xdiff.append(norm_diff_mean)
except:
import pdb;pdb.set_trace()
if num_type==FLOAT_TYPE:
FLOAT_type_list.append(set_name)
FLOAT_type_idx.append(len(Xdiff)-1)
elif num_type==INT_TYPE:
INT_type_list.append(set_name)
INT_type_idx.append(len(Xdiff)-1)
print '----------------------------------------'
Xdiff=np.array(Xdiff).T
deleted_timeslot_idx=[]
print '---------------------------------------------------'
print 'removing time slots having no sample...'
inf_idx_set=[]
for col_vec in Xdiff.T:
inf_idx=np.nonzero(col_vec==np.infty)[0]
inf_idx_set=np.r_[inf_idx_set,inf_idx]
inf_col_idx=list(set(list(inf_idx_set)))
deleted_timeslot_idx=np.array([int(x) for x in inf_col_idx])
print 'time slots', deleted_timeslot_idx, ' removed...'
print '---------------------------------------------------'
Xdiff=np.delete(Xdiff,deleted_timeslot_idx,axis=0)
new_time_slot=np.delete(time_slots,deleted_timeslot_idx)
# Checking whether it has any ill entry value
verify_data_mat(Xdiff)
return Xdiff,new_time_slot,input_names\
,zero_var_list,zero_var_val,\
INT_type_list,INT_type_idx,FLOAT_type_list,FLOAT_type_idx
"""
# Mean value measure
def build_feature_matrix_2(data_dict,sensor_list,weather_list,time_slots,DO_INTERPOLATE=1,max_num_succ_idx_for_itpl=4):
data_used=sensor_list+weather_list
print 'Build data feature matrix now.....'
if DO_INTERPOLATE==1:
print 'Missing samples will be interpolated upto', max_num_succ_idx_for_itpl, 'successive time slots'
else:
print 'All time slots with any missing sample will be removed without interpolatoin '
start_proc_t=time.time()
num_of_data=len(data_used);
num_of_samples=len(time_slots)
# Declare as 2-d list for exception.
X=[];INT_type_list=[]; FLOAT_type_list=[];input_names=[]
weather_type_idx=[];sensor_type_idx=[];
INT_type_idx=[]; FLOAT_type_idx=[]
zero_var_list=[];zero_var_val=[] # whose variance is zero, hence carry no information,
# Construct X matrix by summarizing hourly samples
for j,key in enumerate(data_used):
print '----------------------------------------------'
print 'building for ',key
start_time = time.time()
try:
v = mt.loadObjectBinaryFast(str(key)+'.bin')
#start_time = time.time()
num_type=check_data_type(v[2][1])
#num_type=check_data_type(data_dict[key][2][1])
# Avg. value feature
x_temp=get_feature(v[1],num_type)
#x_temp=get_feature(data_dict[key][1],num_type)
non_inf_idx=np.nonzero(x_temp<np.inf)[0]
# Outlier removal, different parameters for sensors and weather data
if len(sensor_list)<=j:
is_weather_data=True
outlier_idx=outlier_detect(x_temp[non_inf_idx],5,10)
else:
is_weather_data=False
outlier_idx=outlier_detect(x_temp[non_inf_idx],1,20)
if len(outlier_idx)>0:
print 'outlier samples are detected: ', 'outlier_idx:', outlier_idx
x_temp[non_inf_idx[outlier_idx]]=np.inf
# interplolation data, use nearest for int type, use linear for float type
if DO_INTERPOLATE==1:
x_temp=interploate_data(x_temp,num_type,max_num_succ_idx_for_itpl)
norm_data_vec,output_status=normalize_data(x_temp[:,0])
except Exception:
print ' Error in processing data feature, excluded from analysis'
output_status=-1
norm_data_vec=None
if output_status==-1:
zero_var_list.append(key);
zero_var_val.append(norm_data_vec)
print 'too small variance for float type, added to zero var list'
else:
input_names.append(key)
print j, 'th sensor update'
if (num_type==FLOAT_TYPE) and (is_weather_data==False):
X.append(norm_data_vec);
FLOAT_type_idx.append(len(X)-1);FLOAT_type_list.append(key)
elif (num_type==INT_TYPE) or (is_weather_data==True):
X.append(x_temp[:,0])
INT_type_idx.append(len(X)-1);INT_type_list.append(key);
else:
raise NameError('Sample type must either INT or FLOAT type')
if key in weather_list: weather_type_idx.append(len(X)-1)
elif key in sensor_list:sensor_type_idx.append(len(X)-1)
else: raise NameError('Sample type must either Weather or Sensor type')
print 'End iteration for ' + str(j) + " " + str(key)
mt.print_report(start_time)
# Linear Interpolate
X=np.array(X).T
if X.shape[0]!=num_of_samples:
raise NameError('The numeber of rows in feature matrix and the number of the time slots are different ')
if X.shape[1]+len(zero_var_list)!= num_of_data:
raise NameError('The sume of the numeber of column in feature matrix and the number of zero var column are different from the number of input measurements ')
deleted_timeslot_idx=[]
print '---------------------------------------------------'
print 'removing time slots having no sample...'
inf_idx_set=[]
for col_vec in X.T:
inf_idx=np.nonzero(col_vec==np.infty)[0]
inf_idx_set=np.r_[inf_idx_set,inf_idx]
inf_col_idx=list(set(list(inf_idx_set)))
deleted_timeslot_idx=np.array([int(x) for x in inf_col_idx])
print 'time slots', deleted_timeslot_idx, ' removed...'
print '---------------------------------------------------'
X=np.delete(X,deleted_timeslot_idx,axis=0)
new_time_slot=np.delete(time_slots,deleted_timeslot_idx)
# Checking whether it has any ill entry value
verify_data_mat(X)
end_proc_t=time.time()
print 'job completed spending ', end_proc_t-start_proc_t, ' sec'
return X,new_time_slot,input_names\
,zero_var_list,zero_var_val,\
INT_type_list,INT_type_idx,FLOAT_type_list,FLOAT_type_idx,weather_type_idx,sensor_type_idx
def build_diff_matrix(measurement_point_set,time_slots,num_type_set,irr_data_name,conf_lev=0.5,PARALLEL=False):
#time_slots_utc=dtime_to_unix(time_slots)
Xdiff=[];
input_names=[];
INT_type_list=[]; FLOAT_type_list=[];
INT_type_idx=[]; FLOAT_type_idx=[]
zero_var_list=[];zero_var_val=[] # whose variance is zero, hence carry no information,
#TIMELET_INV_seconds=(time_slots[1]-time_slots[0]).seconds
print '==========================================================='
if not PARALLEL:
for k,(set_val,set_name) in enumerate(zip(measurement_point_set,irr_data_name)):
print irr_data_name[k]
num_type=num_type_set[k]
diff_mean=get_diff(set_val,time_slots,num_type,conf_lev)
if num_type==FLOAT_TYPE:
norm_diff_mean,output_status=normalize_data(diff_mean[:,0])
elif num_type==INT_TYPE:
num_discrete_vals=len(set(list(diff_mean[:,0])))
print 'num_discrete_vals :', num_discrete_vals
if num_discrete_vals>1:
output_status=0
norm_diff_mean=diff_mean
else:
output_status=-1
norm_diff_mean=list(set(diff_mean[:,0]))
else:
pass
if output_status==-1:
zero_var_list.append(set_name);#zero_var_flag=1
zero_var_val.append(norm_diff_mean)
print 'too small variance for float type or a single value for int type, added to zero var list'
else:
input_names.append(set_name)
try:
Xdiff.append(norm_diff_mean)
except:
import pdb;pdb.set_trace()
if num_type==FLOAT_TYPE:
FLOAT_type_list.append(set_name)
FLOAT_type_idx.append(len(Xdiff)-1)
elif num_type==INT_TYPE:
INT_type_list.append(set_name)
INT_type_idx.append(len(Xdiff)-1)
print '----------------------------------------'
print '==========================================================='
# PARALLEL ENABLED
else:
print 'Build diff matrix: Parallel enabled...'
# Construct param list for workers
param_list = []
for k,(set_val,set_name) in enumerate(zip(measurement_point_set,irr_data_name)):
param_list.append((k,time_slots_utc,conf_lev,set_val,set_name))
p = mp.Pool(CPU_CORE_NUM)
ret_dict = dict(p.map(build_diff,param_list))
p.close()
p.join()
for k in sorted(ret_dict.keys()):
v = ret_dict[k]
output_status = v[0]
norm_diff_mean = v[1]
set_name = irr_data_name[k]
if output_status == -1:
zero_var_list.append(set_name);#zero_var_flag=1
zero_var_val.append(norm_diff_mean)
zero_var_idx.append(k)
print 'too small variance for float type, added to zero var list'
else:
non_zero_var_idx.append(k)
non_zero_var_list.append(set_name)
if len(diff_mean_set)==0:
diff_mean_set=norm_diff_mean
else:
#import pdb;pdb.set_trace()
#try:except ValueError:
diff_mean_set=np.vstack((diff_mean_set,norm_diff_mean))
Xdiff=np.array(Xdiff).T
deleted_timeslot_idx=[]
print '---------------------------------------------------'
print 'removing time slots having no sample...'
inf_idx_set=[]
for col_vec in Xdiff.T:
inf_idx=np.nonzero(col_vec==np.infty)[0]
inf_idx_set=np.r_[inf_idx_set,inf_idx]
inf_col_idx=list(set(list(inf_idx_set)))
deleted_timeslot_idx=np.array([int(x) for x in inf_col_idx])
print 'time slots', deleted_timeslot_idx, ' removed...'
print '---------------------------------------------------'
Xdiff=np.delete(Xdiff,deleted_timeslot_idx,axis=0)
new_time_slot=np.delete(time_slots,deleted_timeslot_idx)
# Checking whether it has any ill entry value
verify_data_mat(Xdiff)
return Xdiff,new_time_slot,input_names\
,zero_var_list,zero_var_val,\
INT_type_list,INT_type_idx,FLOAT_type_list,FLOAT_type_idx
"""
##############################################################################
# Obsolete library files
##############################################################################
"""
def build_feature_matrix(data_dict,data_used,time_slots,DO_INTERPOLATE=1,max_num_succ_idx_for_itpl=4):
#old_settings = np.seterr()
#np.seterr(all='raise')
print 'Build data feature matrix now.....'
if DO_INTERPOLATE==1:
print 'Missing samples will be interpolated upto', max_num_succ_idx_for_itpl, 'successive time slots'
start_proc_t=time.time()
num_of_data=len(data_used)
num_of_samples=len(time_slots)
# Declare as 2-d list for exception.
X=np.zeros([num_of_samples,num_of_data])
# Number of samples
NS=np.zeros([num_of_samples,num_of_data])
# Standard devicatoin of samples at each time slot.
STD=np.zeros([num_of_samples,num_of_data])
#X=[[] for i in range(num_of_samples)]
INT_type_cols=[]
FLOAT_type_cols=[]
#print 'build data matrix'
# Construct X matrix by summarizing hourly samples
for j,key in enumerate(data_used):
x_temp=np.zeros([num_of_samples,1])
std_temp=np.zeros([num_of_samples,1])
num_type=0
for i,sample in enumerate(data_dict[key][1]):
NS[i,j]=len(sample)
# If sample=[], np.std returns 0. Avoid zero std, add a infitestimal number
if len(sample)==0: # Set infty if no sample is availble
x_temp[i,0]=np.inf
else:
std_temp[i,0]=np.std(sample,ddof=0)+10**-10
if isinstance(sample[0],int):
num_type='int'
#X[i,j]=int(stats.mode(sample)[0])
x_temp[i,0]=int(stats.mode(sample)[0])
if i==0: INT_type_cols.append(j)
elif isinstance(sample[0],float):
num_type='float'
#X[i,j]=np.mean(sample)
x_temp[i,0]=np.mean(sample)
if i==0: FLOAT_type_cols.append(j)
else:
num_type='nan'
raise NameError('Sample type must either INT or FLOAT type')
# Linear Interpolate
if DO_INTERPOLATE==1:
#try:
if isinstance(x_temp[0,0],float):
#print 'interpolate'
inf_idx=np.nonzero(x_temp==np.inf)[0]
noinf_idx=np.nonzero(x_temp!=np.inf)[0]
# Dont interploate the values on bondary.
inter_idx=np.delete(inf_idx,np.nonzero(inf_idx==0))
inter_idx=np.delete(inter_idx,np.nonzero(inter_idx==num_of_samples-1))
#############################################################################################
# Dont interploate the values unknown successively more than num_succ_idx_no_interploate
# Then deletea any index that meet the condition above,
# inter_idx=np.delete(inter_idx,those index)
# Need to be completed .....
#############################################################################################
# Find successive inf indices
succ_inf_idx = []
for i in range(0, len(noinf_idx) - 1):
# number of successive inf between two non-inf indices
num_succ_inf = noinf_idx[i+1] - noinf_idx[i] - 1
if (num_succ_inf > max_num_succ_idx_for_itpl):
succ_inf_idx = succ_inf_idx + range(noinf_idx[i]+1,noinf_idx[i+1])
# Remove successive inf indices
#import pdb;pdb.set_trace()
#list(set([1,2,3]))
inter_idx=list(set(inter_idx)-set(succ_inf_idx))
#inter_idx=np.delete(inter_idx,succ_inf_idx)
if num_type=='float':
f = interp1d(noinf_idx,x_temp[noinf_idx,0],'linear')
elif num_type=='int':
f = interp1d(noinf_idx,x_temp[noinf_idx,0],'nearest')
else:
raise NameError('Sample type must either INT or FLOAT type')
x_temp[inter_idx,0]=f(inter_idx)
print 'No sample in time slot',inf_idx, ' for', key
print len(inter_idx),'/',len(inf_idx), ' time slots are interplated'
#except:
# pass
# Linear Interpolate
X[:,j]=x_temp[:,0]
STD[:,j]=std_temp[:,0]
end_proc_t=time.time()
print 'job completed spending ', end_proc_t-start_proc_t, ' sec'
#np.seterr(**old_settings)
return X,STD,NS,INT_type_cols,FLOAT_type_cols
def reg_feature_matrix(X,STD,NS,data_used,col_selected,REMOVE_INF_COL=1):
print 'normalize data feature matrix now.....'
start_proc_t=time.time()
X_INPUT=[]
input_names=[]
zero_var_list=[] # whose variance is zero, hence carry no information,
# Here we test both float type and int type of sensor data
for i,test_idx in enumerate(col_selected):
#print '---------------------'
y_pred=X[:,test_idx].copy()
y_temp=np.delete(y_pred,np.nonzero(y_pred==np.infty), axis=0)
var_temp=np.var(np.sort(y_temp)[np.ceil(len(y_temp)*0.05):np.floor(len(y_temp)*0.95)])
if var_temp>0: # At least 2 non-infty elements in y_pred
no_inf_idx=np.nonzero(y_pred!=np.infty)
inf_idx=np.nonzero(y_pred==np.infty)
y_pred[no_inf_idx]=y_pred[no_inf_idx]-np.mean(y_pred[no_inf_idx])
temp_val=y_pred/norm(y_pred[no_inf_idx])
X_INPUT.append(temp_val)
input_names.append(data_used[test_idx])
else:
zero_var_list.append(data_used[test_idx])
#except:
# pass
#print '---------------------'
X_INPUT=np.asanyarray(X_INPUT).T
#import pdb;pdb.set_trace()
#inf_col_idx=[]
if REMOVE_INF_COL==1:
inf_idx_set=[]
for col_vec in X_INPUT.T:
inf_idx=np.nonzero(col_vec==np.infty)[0]
inf_idx_set=np.r_[inf_idx_set,inf_idx]
#inf_idx_set.append(inf_idx)
#X_INPUT_ORG=X_INPUT.copy() # Let preserve original copy of it for future use
inf_col_idx=list(set(list(inf_idx_set)))
X_INPUT=np.delete(X_INPUT,inf_col_idx, axis=0)
input_names=np.array(input_names)
deleted_timeslot_idx=np.array([int(x) for x in inf_col_idx])
#deleted_timeslot_idx=np.array(inf_col_idx)
end_proc_t=time.time()
print 'job completed spending ', end_proc_t-start_proc_t, ' sec'
return X_INPUT,input_names,zero_var_list,deleted_timeslot_idx
def gp_interpol_test(x,y,num_data_loss,y_label=[]):
#x -input variable, y- observed variable
#x=np.atleast_2d(x);x= x.reshape((max(x.shape),1))
#y=np.atleast_2d(y);y= y.reshape((max(y.shape),1))
x=make_colvec(x);y=make_colvec(y)
y_org=y.copy()
infty_indice=np.nonzero(y==np.infty)[0]
noinfty_indice=np.nonzero(y!=np.infty)[0]
num_infty_add=np.max([0, num_data_loss-len(infty_indice)])
y[random.sample(noinfty_indice,num_infty_add)]=np.infty
#for row_idx,row_val in enumerate(col_val):
# print 'X',[row_idx, col_idx], ': ', row_val,':', X[row_idx,col_idx]
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1, \
random_start=100)
input_var=[];obs_var=[]
for (x_val,y_val) in zip(x,y):
if y_val!=np.infty:
input_var.append(x_val)
obs_var.append(y_val)
#input_var=np.atleast_2d(input_var);input_var=input_var.reshape((max(input_var.shape),1))
#obs_var=np.atleast_2d(obs_var);obs_var=obs_var.reshape((max(obs_var.shape),1))
input_var=make_colvec(input_var);obs_var=make_colvec(obs_var)
# Instanciate a Gaussian Process model
#import pdb;pdb.set_trace()
gp.fit(input_var,obs_var)
#new_input_var=np.atleast_2d(np.r_[input_var[0]:input_var[-1]]).T
new_input_var=x
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(new_input_var, eval_MSE=True)
sigma = np.sqrt(MSE)
if len(y_label)>0:
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
plt.plot(input_var, obs_var, 'r.', markersize=20, label=u'Observations')
plt.plot(new_input_var,y_org,'s',label=u'Actual')
plt.plot(new_input_var, y_pred, 'bx-', label=u'Prediction')
plt.fill(np.concatenate([new_input_var, new_input_var[::-1]]), \
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]), \
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel(y_label)
#plt.ylim(-10, 20)
plt.legend(loc='upper right')
return y_pred,sigma,infty_indice
def rglr_ref_timelet(data_dict,data_used,time_slots):
time_mat=ref_time_matrix(time_slots)
min_set=time_mat[:,MIN_IDX].astype(int)
hr_set=time_mat[:,HR_IDX].astype(int)
wd_set=time_mat[:,WD_IDX].astype(int)
day_set=time_mat[:,MD_IDX].astype(int)
mn_set=time_mat[:,MN_IDX].astype(int)
cumnum_days_mn=np.r_[0,np.array([calendar.monthrange(2013, i)[1] for i in np.r_[1:12]]).cumsum()]
daycount_set=[int(day+cumnum_days_mn[mn-1]) for i,(day,mn) in enumerate(zip(day_set,mn_set))]
#np.atleast_2d(np.arange(len(hr_set))).T
hrcount_set=make_colvec(np.arange(len(hr_set)))
print 'Adding second time stamps ....'
print '--------------------------------------'
for key_id in data_used:
print key_id
utc_t=[];val=[]
for i,(min_t, sample) in enumerate(zip(data_dict[key_id][0],data_dict[key_id][1])):
if len(sample)>0:
num_samples_per_hr=len(min_t)
sec_t_ar=minidx_to_secs(min_t)
data_dict[key_id][0][i]=sec_t_ar
tt = dtime_to_unix(dt.datetime(2013, mn_set[i], day_set[i], hr_set[i]))
utc_temp=tt+sec_t_ar
for a,b in zip(utc_temp,sample):
utc_t.append(a);val.append(b)
#if len(np.nonzero(diff(utc_temp)<=0))>0:
# import pdb;pdb.set_trace()
# print 'err'
data_dict[key_id].append([utc_t,val])
print '--------------------------------------'
return time_mat
def minidx_to_secs(min_t):
sec_t_ar=[]
#sec_a=[]
sec_t=np.array(min_t)*60
sec_ar=np.zeros(len(sec_t))
dup_min_cnt=0
prv_min_idx=-1;cur_min_idx=-1
pt_str=0;
#pt_end=0
for j,min_idx in enumerate(min_t):
cur_min_idx=min_idx
if cur_min_idx==prv_min_idx:
dup_min_cnt=dup_min_cnt+1
sec_ar[j]=dup_min_cnt
else:
if sec_ar[j-1]==0:
sec_ar[j-1]=60/2
else:
sec_ar[pt_str:j]=sec_ar[pt_str:j]*(60/sec_ar[j-1])
sec_ar[pt_str]=dup_min_cnt
dup_min_cnt=0;pt_str=j
prv_min_idx=cur_min_idx
sec_t_ar=sec_t+sec_ar
return sec_t_ar
def make_colvec(x):
x=np.atleast_2d(x)
return x.reshape((max(x.shape),1))
def ref_time_matrix(t_slots):
# Return reference time matrix for time_slots
# Minute,Hour, Weekday, Day, Month - 5 column matrix
time_mat=np.zeros([len(t_slots),5])
for i, time_sample in enumerate(t_slots):
time_mat[i,MIN_IDX]=time_sample.minute
time_mat[i,HR_IDX]=time_sample.hour
time_mat[i,WD_IDX]=time_sample.weekday()
time_mat[i,MD_IDX]=time_sample.day
time_mat[i,MN_IDX]=time_sample.month
return time_mat
def verify_data_format_2(key_list,data_dict,time_slots,PARALLEL=False):
# Verify there is no [] or N/A in the list
# Only FLoat or Int format is allowed
print 'Checking any inconsisent data format.....'
print '---------------------------------'
list_of_wrong_data_format=[]
if not PARALLEL:
for key in key_list:
print 'checking ', key, '...'
v = mt.loadObjectBinaryFast(key+FL_EXT)
#for i,samples in enumerate(data_dict[key][1]):
#import pdb;pdb.set_trace()
for i,samples in enumerate(v[1]):
for j,each_sample in enumerate(samples):
if each_sample==[]:
list_of_wrong_data_format.append([key,i,j])
print each_sample, 'at', time_slots[i], 'in', key
elif (isinstance(each_sample,int)==False and isinstance(each_sample,float)==False):
list_of_wrong_data_format.append([key,i,j])
print each_sample, 'at', time_slots[i], 'in', key
print '---------------------------------'
# PARALLEL
else:
manager = mp.Manager()
q = manager.Queue()
p = mp.Pool(CPU_CORE_NUM)
#param_list = [(key,data_dict[key][1],time_slots,q) for key in key_list]
param_list = []
for key in key_list:
v = mt.loadObjectBinaryFast(key+FL_EXT)
param_list.append((key,v[1],time_slots,q))
p.map(verify_sensor_data_format,param_list)
p.close()
p.join()
while not q.empty():
item = q.get()
print 'queue item: ' + str(item)
list_of_wrong_data_format.append(item)
if len(list_of_wrong_data_format)>0:
raise NameError('Inconsistent data format in the list of data_used')
return list_of_wrong_data_format
"""
| TinyOS-Camp/DDEA-DEV | Archive/[14_10_10] DDEA sample code/data_preprocess.py | Python | gpl-2.0 | 43,193 | ["Gaussian"] | beda84e5a15b81b2cdf6cc9743d1862a3d67510d8e9bf082563b1e547a1d70aa |
"""
3D Starfield Simulation
Developed by Leonel Machava <leonelmachava@gmail.com>
http://codeNtronix.com
http://twitter.com/codentronix
"""
import pygame, math, sys
from random import randrange
from operator import itemgetter
from BitmapFont.BitmapFont import BitmapFont
from BitmapFont.BitmapFont import BitmapFontScroller
class Point3D:
def __init__(self, x = 0, y = 0, z = 0):
self.x, self.y, self.z = float(x), float(y), float(z)
def rotateX(self, angle):
""" Rotates the point around the X axis by the given angle in degrees. """
rad = angle * math.pi / 180
cosa = math.cos(rad)
sina = math.sin(rad)
y = self.y * cosa - self.z * sina
z = self.y * sina + self.z * cosa
return Point3D(self.x, y, z)
def rotateY(self, angle):
""" Rotates the point around the Y axis by the given angle in degrees. """
rad = angle * math.pi / 180
cosa = math.cos(rad)
sina = math.sin(rad)
z = self.z * cosa - self.x * sina
x = self.z * sina + self.x * cosa
return Point3D(x, self.y, z)
def rotateZ(self, angle):
""" Rotates the point around the Z axis by the given angle in degrees. """
rad = angle * math.pi / 180
cosa = math.cos(rad)
sina = math.sin(rad)
x = self.x * cosa - self.y * sina
y = self.x * sina + self.y * cosa
return Point3D(x, y, self.z)
def project(self, win_width, win_height, fov, viewer_distance):
""" Transforms this 3D point to 2D using a perspective projection. """
factor = fov / (viewer_distance + self.z)
x = self.x * factor + win_width / 2
y = -self.y * factor + win_height / 2
return Point3D(x, y, self.z)
class Simulation:
def __init__(self, num_stars, max_depth):
pygame.init()
self.screen = pygame.display.set_mode((640, 480))
pygame.display.set_caption("3D Starfield Simulation (visit codeNtronix.com)")
self.clock = pygame.time.Clock()
self.num_stars = num_stars
self.max_depth = max_depth
self.init_stars()
self.init_3dcube()
def init_scroller(self):
pass
def move_and_draw_scroller(self):
pass
def init_stars(self):
""" Create the starfield """
self.stars = []
for i in range(self.num_stars):
# A star is represented as a list with this format: [X,Y,Z]
star = [randrange(-25,25), randrange(-25,25), randrange(1, self.max_depth)]
self.stars.append(star)
def move_and_draw_stars(self):
""" Move and draw the stars """
origin_x = self.screen.get_width() / 2
origin_y = self.screen.get_height() / 2
for star in self.stars:
# The Z component is decreased on each frame.
star[2] -= 0.19
# If the star has passed the screen (I mean Z<=0) then we
# reposition it far away from the screen (Z=max_depth)
# with random X and Y coordinates.
if star[2] <= 0:
star[0] = randrange(-25,25)
star[1] = randrange(-25,25)
star[2] = self.max_depth
# Convert the 3D coordinates to 2D using perspective projection.
k = 128.0 / star[2]
x = int(star[0] * k + origin_x)
y = int(star[1] * k + origin_y)
# Draw the star (if it is visible in the screen).
# We calculate the size such that distant stars are smaller than
# closer stars. Similarly, we make sure that distant stars are
# darker than closer stars. This is done using Linear Interpolation.
if 0 <= x < self.screen.get_width() and 0 <= y < self.screen.get_height():
size = (1 - float(star[2]) / self.max_depth) * 5
shade = (1 - float(star[2]) / self.max_depth) * 255
self.screen.fill((shade,shade,shade),(x,y,size,size))
# 3D cube
def init_3dcube(self):
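# eight corners of an axis-aligned cube of side 2 centred at the origin; the face tuples below index into this list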
self.vertices = [
Point3D(-1,1,-1),
Point3D(1,1,-1),
Point3D(1,-1,-1),
Point3D(-1,-1,-1),
Point3D(-1,1,1),
Point3D(1,1,1),
Point3D(1,-1,1),
Point3D(-1,-1,1)
]
# Define the vertices that compose each of the 6 faces. These numbers are
# indices to the vertices list defined above.
self.faces = [(0,1,2,3),(1,5,6,2),(5,4,7,6),(4,0,3,7),(0,4,5,1),(3,2,6,7)]
# Define colors for each face
self.colors = [(255,0,255),(255,0,0),(0,255,0),(0,0,255),(0,255,255),(255,255,0)]
self.angle = 0
def run(self):
""" Main Loop """
bmfs = BitmapFontScroller(self.screen, "fonts/1/bubsy.bmp", 400, 300)
bmfs.set_text("COON")
bmfs._drop_char("I")
while 1:
# Lock the framerate at 50 FPS.
self.clock.tick(50)
# Handle events.
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
return
self.screen.fill((0,0,0))
self.move_and_draw_stars()
#self.move_and_draw_3dcube()
bmfs.tick()
pygame.display.flip()
def move_and_draw_3dcube(self):
# It will hold transformed vertices.
t = []
cube_width = 200
cube_height = 200
for v in self.vertices:
# Rotate the point around X axis, then around Y axis, and finally around Z axis.
r = v.rotateX(self.angle).rotateY(self.angle).rotateZ(self.angle)
# Transform the point from 3D to 2D
p = r.project(cube_width, cube_height, 128, 4)
# Put the point in the list of transformed vertices
t.append(p)
# Calculate the average Z values of each face.
avg_z = []
i = 0
for f in self.faces:
z = (t[f[0]].z + t[f[1]].z + t[f[2]].z + t[f[3]].z) / 4.0
avg_z.append([i,z])
i = i + 1
# Draw the faces using the Painter's algorithm:
# Distant faces are drawn before the closer ones.
for tmp in sorted(avg_z,key=itemgetter(1),reverse=True):
face_index = tmp[0]
f = self.faces[face_index]
pointlist = [(t[f[0]].x, t[f[0]].y), (t[f[1]].x, t[f[1]].y),
(t[f[1]].x, t[f[1]].y), (t[f[2]].x, t[f[2]].y),
(t[f[2]].x, t[f[2]].y), (t[f[3]].x, t[f[3]].y),
(t[f[3]].x, t[f[3]].y), (t[f[0]].x, t[f[0]].y)]
pygame.draw.polygon(self.screen,self.colors[face_index],pointlist)
self.angle += 1
if __name__ == "__main__":
Simulation(512, 32).run()
| coon42/FlyFi | pygame_spielereien/pifi_menu.py | Python | gpl-3.0 | 6,868 | ["VisIt"] | 9fd5656f8f91ce6321c234ada508a9959e51dcbbcfbe382dffd6d758b87b5c0b |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008,2010 Gary Burton
# Copyright (C) 2008 Craig J. Anderson
# Copyright (C) 2009 Nick Hall
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Adam Stein <adam@csh.rit.edu>
# Copyright (C) 2011-2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Specific option handling for a GUI.
"""
from __future__ import unicode_literals
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
import os
import sys
#-------------------------------------------------------------------------
#
# gtk modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.utils.file import get_unicode_path_from_file_chooser
from ..utils import ProgressMeter
from ..pluginmanager import GuiPluginManager
from .. import widgets
from ..managedwindow import ManagedWindow
from ..dialog import OptionDialog
from ..selectors import SelectorFactory
from gramps.gen.display.name import displayer as _nd
from gramps.gen.filters import GenericFilterFactory, GenericFilter, rules
from gramps.gen.constfunc import cuni, STRTYPE
#------------------------------------------------------------------------
#
# Dialog window used to select a surname
#
#------------------------------------------------------------------------
class LastNameDialog(ManagedWindow):
"""
A dialog that allows the selection of a surname from the database.
"""
def __init__(self, database, uistate, track, surnames, skip_list=set()):
ManagedWindow.__init__(self, uistate, track, self)
flags = Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT
buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT, Gtk.STOCK_OK,
Gtk.ResponseType.ACCEPT)
self.__dlg = Gtk.Dialog(None, uistate.window, flags, buttons)
self.__dlg.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
self.set_window(self.__dlg, None, _('Select surname'))
self.window.set_default_size(400, 400)
# build up a container to display all of the people of interest
self.__model = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_INT)
self.__tree_view = Gtk.TreeView(self.__model)
col1 = Gtk.TreeViewColumn(_('Surname'), Gtk.CellRendererText(), text=0)
col2 = Gtk.TreeViewColumn(_('Count'), Gtk.CellRendererText(), text=1)
col1.set_resizable(True)
col2.set_resizable(True)
col1.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col2.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col1.set_sort_column_id(0)
col2.set_sort_column_id(1)
self.__tree_view.append_column(col1)
self.__tree_view.append_column(col2)
scrolled_window = Gtk.ScrolledWindow()
scrolled_window.add(self.__tree_view)
scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
scrolled_window.set_shadow_type(Gtk.ShadowType.OUT)
self.__dlg.vbox.pack_start(scrolled_window, True, True, 0)
scrolled_window.show_all()
if len(surnames) == 0:
# we could use database.get_surname_list(), but if we do that
# all we get is a list of names without a count...therefore
# we'll traverse the entire database ourself and build up a
# list that we can use
# for name in database.get_surname_list():
# self.__model.append([name, 0])
# build up the list of surnames, keeping track of the count for each
# name (this can be a lengthy process, so by passing in the
# dictionary we can be certain we only do this once)
progress = ProgressMeter(_('Finding Surnames'))
progress.set_pass(_('Finding surnames'),
database.get_number_of_people())
for person in database.iter_people():
progress.step()
key = person.get_primary_name().get_surname()
count = 0
if key in surnames:
count = surnames[key]
surnames[key] = count + 1
progress.close()
# insert the names and count into the model
for key in surnames:
if key.encode('iso-8859-1','xmlcharrefreplace') not in skip_list:
self.__model.append([key, surnames[key]])
# keep the list sorted starting with the most popular last name
self.__model.set_sort_column_id(1, Gtk.SortType.DESCENDING)
# the "OK" button should be enabled/disabled based on the selection of
# a row
self.__tree_selection = self.__tree_view.get_selection()
self.__tree_selection.set_mode(Gtk.SelectionMode.MULTIPLE)
self.__tree_selection.select_path(0)
def run(self):
"""
Display the dialog and return the selected surnames when done.
"""
response = self.__dlg.run()
surname_set = set()
if response == Gtk.ResponseType.ACCEPT:
(mode, paths) = self.__tree_selection.get_selected_rows()
for path in paths:
i = self.__model.get_iter(path)
surname = self.__model.get_value(i, 0)
surname_set.add(surname)
self.__dlg.destroy()
return surname_set
#-------------------------------------------------------------------------
#
# GuiStringOption class
#
#-------------------------------------------------------------------------
class GuiStringOption(Gtk.Entry):
"""
This class displays an option that is a simple one-line string.
"""
def __init__(self, option, dbstate, uistate, track, override):
"""
@param option: The option to display.
@type option: gen.plug.menu.StringOption
@return: nothing
"""
GObject.GObject.__init__(self)
self.__option = option
self.set_text( self.__option.get_value() )
# Set up signal handlers when the widget value is changed
# from user interaction or programmatically. When handling
# a specific signal, we need to temporarily block the signal
# that would call the other signal handler.
self.changekey = self.connect('changed', self.__text_changed)
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
self.conkey = self.__option.connect('avail-changed', self.__update_avail)
self.__update_avail()
self.set_tooltip_text(self.__option.get_help())
def __text_changed(self, obj): # IGNORE:W0613 - obj is unused
"""
Handle the change of the value made by the user.
"""
self.__option.disable_signals()
self.__option.set_value( self.get_text() )
self.__option.enable_signals()
def __update_avail(self):
"""
Update the availability (sensitivity) of this widget.
"""
avail = self.__option.get_available()
self.set_sensitive(avail)
def __value_changed(self):
"""
Handle the change made programmatically
"""
self.handler_block(self.changekey)
self.set_text(self.__option.get_value())
self.handler_unblock(self.changekey)
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option.disconnect(self.conkey)
self.__option = None
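# A minimal sketch (not part of Gramps) of the block/unblock pattern used by
# the option widgets in this module: when a value is pushed into the widget
# programmatically, the 'changed' handler registered for user edits is blocked
# so the two handlers cannot keep re-triggering each other.  Gtk and GObject
# are the modules already imported at the top of this file.
class _BlockingEntryExample(Gtk.Entry):
    def __init__(self):
        GObject.GObject.__init__(self)
        self.changekey = self.connect('changed', self.__user_changed)
    def __user_changed(self, _entry):
        # A real widget would copy self.get_text() into the underlying option.
        pass
    def set_programmatically(self, text):
        # Block the user-edit handler, update the widget, then unblock.
        self.handler_block(self.changekey)
        self.set_text(text)
        self.handler_unblock(self.changekey)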
#-------------------------------------------------------------------------
#
# GuiColorOption class
#
#-------------------------------------------------------------------------
class GuiColorOption(Gtk.ColorButton):
"""
This class displays an option that allows the selection of a colour.
"""
def __init__(self, option, dbstate, uistate, track, override):
self.__option = option
value = self.__option.get_value()
GObject.GObject.__init__(self)
self.set_color(Gdk.color_parse(self.__option.get_value()))
# Set up signal handlers when the widget value is changed
# from user interaction or programmatically. When handling
# a specific signal, we need to temporarily block the signal
# that would call the other signal handler.
self.changekey = self.connect('color-set', self.__color_changed)
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
self.set_tooltip_text(self.__option.get_help())
def __color_changed(self, obj): # IGNORE:W0613 - obj is unused
"""
Handle the change of color made by the user.
"""
colour = self.get_color()
value = '#%02x%02x%02x' % (
int(colour.red * 256 / 65536),
int(colour.green * 256 / 65536),
int(colour.blue * 256 / 65536))
self.__option.disable_signals()
self.__option.set_value(value)
self.__option.enable_signals()
def __value_changed(self):
"""
Handle the change made programmatically
"""
self.handler_block(self.changekey)
self.set_color(Gdk.color_parse(self.__option.get_value()))
self.handler_unblock(self.changekey)
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option = None
#-------------------------------------------------------------------------
#
# GuiNumberOption class
#
#-------------------------------------------------------------------------
class GuiNumberOption(Gtk.SpinButton):
"""
This class displays an option that is a simple number with defined maximum
and minimum values.
"""
def __init__(self, option, dbstate, uistate, track, override):
self.__option = option
decimals = 0
step = self.__option.get_step()
adj = Gtk.Adjustment(1,
self.__option.get_min(),
self.__option.get_max(),
step)
# Calculate the number of decimal places if necessary
if step < 1:
import math
decimals = int(math.log10(step) * -1)
GObject.GObject.__init__(self, adjustment=adj, climb_rate=1, digits=decimals)
Gtk.SpinButton.set_numeric(self, True)
self.set_value(self.__option.get_value())
# Set up signal handlers when the widget value is changed
# from user interaction or programmatically. When handling
# a specific signal, we need to temporarily block the signal
# that would call the other signal handler.
self.changekey = self.connect('value_changed', self.__number_changed)
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
self.conkey = self.__option.connect('avail-changed', self.__update_avail)
self.__update_avail()
self.set_tooltip_text(self.__option.get_help())
def __number_changed(self, obj): # IGNORE:W0613 - obj is unused
"""
Handle the change of the value made by the user.
"""
vtype = type(self.__option.get_value())
self.__option.set_value( vtype(self.get_value()) )
def __update_avail(self):
"""
Update the availability (sensitivity) of this widget.
"""
avail = self.__option.get_available()
self.set_sensitive(avail)
def __value_changed(self):
"""
Handle the change made programmatically
"""
self.handler_block(self.changekey)
self.set_value(self.__option.get_value())
self.handler_unblock(self.changekey)
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option.disconnect(self.conkey)
self.__option = None
#-------------------------------------------------------------------------
#
# GuiTextOption class
#
#-------------------------------------------------------------------------
class GuiTextOption(Gtk.ScrolledWindow):
"""
This class displays an option that is a multi-line string.
"""
def __init__(self, option, dbstate, uistate, track, override):
self.__option = option
GObject.GObject.__init__(self)
self.set_shadow_type(Gtk.ShadowType.IN)
self.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
# Add a TextView
value = self.__option.get_value()
gtext = Gtk.TextView()
gtext.set_size_request(-1, 70)
gtext.get_buffer().set_text("\n".join(value))
gtext.set_editable(1)
self.add(gtext)
self.__buff = gtext.get_buffer()
# Set up signal handlers when the widget value is changed
# from user interaction or programmatically. When handling
# a specific signal, we need to temporarily block the signal
# that would call the other signal handler.
self.bufcon = self.__buff.connect('changed', self.__text_changed)
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
# Required for tooltip
gtext.add_events(Gdk.EventMask.ENTER_NOTIFY_MASK)
gtext.add_events(Gdk.EventMask.LEAVE_NOTIFY_MASK)
gtext.set_tooltip_text(self.__option.get_help())
def __text_changed(self, obj): # IGNORE:W0613 - obj is unused
"""
Handle the change of the value made by the user.
"""
text_val = cuni( self.__buff.get_text( self.__buff.get_start_iter(),
self.__buff.get_end_iter(),
False) )
self.__option.disable_signals()
self.__option.set_value( text_val.split('\n') )
self.__option.enable_signals()
def __value_changed(self):
"""
Handle the change made programmatically
"""
self.__buff.handler_block(self.bufcon)
value = self.__option.get_value()
# Can only set using a string. If we have a string value,
# we'll use that. If not, we'll assume a list and convert
# it into a single string by assuming each list element
# is separated by a newline.
if isinstance(value, STRTYPE):
self.__buff.set_text(value)
# Need to manually call the other handler so that the option
# value is changed to be a list. If left as a string,
# it would be treated as a list, meaning each character
# becomes a list element -- not what we want.
self.__text_changed(None)
else:
self.__buff.set_text("\n".join(value))
self.__buff.handler_unblock(self.bufcon)
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option = None
self.__buff.disconnect(self.bufcon)
self.__buff = None
#-------------------------------------------------------------------------
#
# GuiBooleanOption class
#
#-------------------------------------------------------------------------
class GuiBooleanOption(Gtk.CheckButton):
"""
This class displays an option that is a boolean (True or False).
"""
def __init__(self, option, dbstate, uistate, track, override):
self.__option = option
GObject.GObject.__init__(self)
self.set_label(self.__option.get_label())
self.set_active(self.__option.get_value())
# Set up signal handlers when the widget value is changed
# from user interaction or programmatically. When handling
# a specific signal, we need to temporarily block the signal
# that would call the other signal handler.
self.changekey = self.connect('toggled', self.__state_changed)
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
self.conkey = self.__option.connect('avail-changed', self.__update_avail)
self.__update_avail()
self.set_tooltip_text(self.__option.get_help())
def __state_changed(self, obj): # IGNORE:W0613 - obj is unused
"""
Handle the change of the value made by the user.
"""
self.__option.set_value( self.get_active() )
def __update_avail(self):
"""
Update the availability (sensitivity) of this widget.
"""
avail = self.__option.get_available()
self.set_sensitive(avail)
def __value_changed(self):
"""
Handle the change made programmatically
"""
self.handler_block(self.changekey)
self.set_active(self.__option.get_value())
self.handler_unblock(self.changekey)
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option.disconnect(self.conkey)
self.__option = None
#-------------------------------------------------------------------------
#
# GuiEnumeratedListOption class
#
#-------------------------------------------------------------------------
class GuiEnumeratedListOption(Gtk.HBox):
"""
This class displays an option that provides a finite number of values.
Each possible value is assigned a value and a description.
"""
def __init__(self, option, dbstate, uistate, track, override):
GObject.GObject.__init__(self)
evtBox = Gtk.EventBox()
self.__option = option
self.__combo = Gtk.ComboBoxText()
evtBox.add(self.__combo)
self.pack_start(evtBox, True, True, 0)
self.__update_options()
# Set up signal handlers when the widget value is changed
# from user interaction or programmatically. When handling
# a specific signal, we need to temporarily block the signal
# that would call the other signal handler.
self.changekey = self.__combo.connect('changed', self.__selection_changed)
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
self.conkey1 = self.__option.connect('options-changed', self.__update_options)
self.conkey2 = self.__option.connect('avail-changed', self.__update_avail)
self.__update_avail()
self.set_tooltip_text(self.__option.get_help())
def __selection_changed(self, obj): # IGNORE:W0613 - obj is unused
"""
Handle the change of the value made by the user.
"""
index = self.__combo.get_active()
if index < 0:
return
items = self.__option.get_items()
value, description = items[index] # IGNORE:W0612 - description is unused
# Don't disable the __option signals as is normally done for
# the other widgets or bad things happen (like other needed
# signals don't fire)
self.__option.set_value( value )
self.value_changed() # Allow overriding so that another class
# can add functionality
def value_changed(self):
pass
def __update_options(self):
"""
Handle the change of the available options.
"""
self.__combo.get_model().clear()
cur_val = self.__option.get_value()
active_index = 0
current_index = 0
for (value, description) in self.__option.get_items():
self.__combo.append_text(description)
if value == cur_val:
active_index = current_index
current_index += 1
self.__combo.set_active( active_index )
def __update_avail(self):
"""
Update the availability (sensitivity) of this widget.
"""
avail = self.__option.get_available()
self.set_sensitive(avail)
def __value_changed(self):
"""
Handle the change made programmatically
"""
self.__combo.handler_block(self.changekey)
self.__update_options()
self.__combo.handler_unblock(self.changekey)
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option.disconnect(self.conkey1)
self.__option.disconnect(self.conkey2)
self.__option = None
#-------------------------------------------------------------------------
#
# GuiPersonOption class
#
#-------------------------------------------------------------------------
class GuiPersonOption(Gtk.HBox):
"""
This class displays an option that allows a person from the
database to be selected.
"""
def __init__(self, option, dbstate, uistate, track, override):
"""
@param option: The option to display.
@type option: gen.plug.menu.PersonOption
@return: nothing
"""
GObject.GObject.__init__(self)
self.__option = option
self.__dbstate = dbstate
self.__db = dbstate.get_database()
self.__uistate = uistate
self.__track = track
self.__person_label = Gtk.Label()
self.__person_label.set_alignment(0.0, 0.5)
pevt = Gtk.EventBox()
pevt.add(self.__person_label)
person_button = widgets.SimpleButton(Gtk.STOCK_INDEX,
self.__get_person_clicked)
person_button.set_relief(Gtk.ReliefStyle.NORMAL)
self.pack_start(pevt, False, True, 0)
self.pack_end(person_button, False, True, 0)
gid = self.__option.get_value()
# Pick up the active person
person_handle = self.__uistate.get_active('Person')
person = self.__dbstate.db.get_person_from_handle(person_handle)
if override or not person:
# Pick up the stored option value if there is one
person = self.__db.get_person_from_gramps_id(gid)
if not person:
# If all else fails, get the default person to avoid bad values
person = self.__db.get_default_person()
if not person:
person = self.__db.find_initial_person()
self.__update_person(person)
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
self.conkey = self.__option.connect('avail-changed', self.__update_avail)
self.__update_avail()
pevt.set_tooltip_text(self.__option.get_help())
person_button.set_tooltip_text(_('Select a different person'))
def __get_person_clicked(self, obj): # IGNORE:W0613 - obj is unused
"""
Handle the button to choose a different person.
"""
# Create a filter for the person selector.
rfilter = GenericFilter()
rfilter.set_logical_op('or')
rfilter.add_rule(rules.person.IsBookmarked([]))
rfilter.add_rule(rules.person.HasIdOf([self.__option.get_value()]))
# Add the database home person if one exists.
default_person = self.__db.get_default_person()
if default_person:
gid = default_person.get_gramps_id()
rfilter.add_rule(rules.person.HasIdOf([gid]))
# Add the selected person if one exists.
person_handle = self.__uistate.get_active('Person')
active_person = self.__dbstate.db.get_person_from_handle(person_handle)
if active_person:
gid = active_person.get_gramps_id()
rfilter.add_rule(rules.person.HasIdOf([gid]))
select_class = SelectorFactory('Person')
sel = select_class(self.__dbstate, self.__uistate, self.__track,
title=_('Select a person for the report'),
filter=rfilter )
person = sel.run()
self.__update_person(person)
def __update_person(self, person):
"""
Update the currently selected person.
"""
if person:
name = _nd.display(person)
gid = person.get_gramps_id()
self.__person_label.set_text( "%s (%s)" % (name, gid) )
self.__option.set_value(gid)
def __update_avail(self):
"""
Update the availability (sensitivity) of this widget.
"""
avail = self.__option.get_available()
self.set_sensitive(avail)
def __value_changed(self):
"""
Handle the change made programmatically
"""
gid = self.__option.get_value()
name = _nd.display(self.__db.get_person_from_gramps_id(gid))
self.__person_label.set_text("%s (%s)" % (name, gid))
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option.disconnect(self.conkey)
self.__option = None
#-------------------------------------------------------------------------
#
# GuiFamilyOption class
#
#-------------------------------------------------------------------------
class GuiFamilyOption(Gtk.HBox):
"""
This class displays an option that allows a family from the
database to be selected.
"""
def __init__(self, option, dbstate, uistate, track, override):
"""
@param option: The option to display.
@type option: gen.plug.menu.FamilyOption
@return: nothing
"""
GObject.GObject.__init__(self)
self.__option = option
self.__dbstate = dbstate
self.__db = dbstate.get_database()
self.__uistate = uistate
self.__track = track
self.__family_label = Gtk.Label()
self.__family_label.set_alignment(0.0, 0.5)
pevt = Gtk.EventBox()
pevt.add(self.__family_label)
family_button = widgets.SimpleButton(Gtk.STOCK_INDEX,
self.__get_family_clicked)
family_button.set_relief(Gtk.ReliefStyle.NORMAL)
self.pack_start(pevt, False, True, 0)
self.pack_end(family_button, False, True, 0)
self.__initialize_family(override)
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
self.conkey = self.__option.connect('avail-changed', self.__update_avail)
self.__update_avail()
pevt.set_tooltip_text(self.__option.get_help())
family_button.set_tooltip_text(_('Select a different family'))
def __initialize_family(self, override):
"""
Find a family to initialize the option with. If there is no saved
family option, use the active family. If there is no active
family, try to find a family that the user is likely interested in.
"""
family_list = []
fid = self.__option.get_value()
# Use the active family if one is selected
family = self.__uistate.get_active('Family')
if family and not override:
family_list = [family]
else:
# Use the stored option value
family = self.__db.get_family_from_gramps_id(fid)
if family:
family_list = [family.get_handle()]
if not family_list:
# Next try the family of the active person
person_handle = self.__uistate.get_active('Person')
person = self.__dbstate.db.get_person_from_handle(person_handle)
if person:
family_list = person.get_family_handle_list()
if not family_list:
# Next try the family of the default person in the database.
person = self.__db.get_default_person()
if person:
family_list = person.get_family_handle_list()
if not family_list:
# Finally, take any family you can find.
for family in self.__db.iter_family_handles():
self.__update_family(family)
break
else:
self.__update_family(family_list[0])
def __get_family_clicked(self, obj): # IGNORE:W0613 - obj is unused
"""
Handle the button to choose a different family.
"""
        # Create a filter for the family selector.
rfilter = GenericFilterFactory('Family')()
rfilter.set_logical_op('or')
# Add the current family
rfilter.add_rule(rules.family.HasIdOf([self.__option.get_value()]))
# Add all bookmarked families
rfilter.add_rule(rules.family.IsBookmarked([]))
# Add the families of the database home person if one exists.
default_person = self.__db.get_default_person()
if default_person:
family_list = default_person.get_family_handle_list()
for family_handle in family_list:
family = self.__db.get_family_from_handle(family_handle)
gid = family.get_gramps_id()
rfilter.add_rule(rules.family.HasIdOf([gid]))
# Add the families of the selected person if one exists.
# Same code as above one ! See bug #5032 feature request #5038
### active_person = self.__uistate.get_active('Person') ###
#active_person = self.__db.get_default_person()
#if active_person:
#family_list = active_person.get_family_handle_list()
#for family_handle in family_list:
#family = self.__db.get_family_from_handle(family_handle)
#gid = family.get_gramps_id()
#rfilter.add_rule(rules.family.HasIdOf([gid]))
select_class = SelectorFactory('Family')
sel = select_class(self.__dbstate, self.__uistate, self.__track,
filter=rfilter )
family = sel.run()
if family:
self.__update_family(family.get_handle())
def __update_family(self, handle):
"""
Update the currently selected family.
"""
if handle:
family = self.__dbstate.db.get_family_from_handle(handle)
family_id = family.get_gramps_id()
fhandle = family.get_father_handle()
mhandle = family.get_mother_handle()
if fhandle:
father = self.__db.get_person_from_handle(fhandle)
father_name = _nd.display(father)
else:
father_name = _("unknown father")
if mhandle:
mother = self.__db.get_person_from_handle(mhandle)
mother_name = _nd.display(mother)
else:
mother_name = _("unknown mother")
name = _("%(father_name)s and %(mother_name)s (%(family_id)s)") % {
'father_name': father_name,
'mother_name': mother_name,
'family_id': family_id}
self.__family_label.set_text( name )
self.__option.set_value(family_id)
def __update_avail(self):
"""
Update the availability (sensitivity) of this widget.
"""
avail = self.__option.get_available()
self.set_sensitive(avail)
def __value_changed(self):
"""
Handle the change made programmatically
"""
fid = self.__option.get_value()
handle = self.__db.get_family_from_gramps_id(fid).get_handle()
# Need to disable signals as __update_family() calls set_value()
# which would launch the 'value-changed' signal which is what
# we are reacting to here in the first place (don't need the
# signal repeated)
self.__option.disable_signals()
self.__update_family(handle)
self.__option.enable_signals()
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option.disconnect(self.conkey)
self.__option = None
#-------------------------------------------------------------------------
#
# GuiNoteOption class
#
#-------------------------------------------------------------------------
class GuiNoteOption(Gtk.HBox):
"""
This class displays an option that allows a note from the
database to be selected.
"""
def __init__(self, option, dbstate, uistate, track, override):
"""
@param option: The option to display.
@type option: gen.plug.menu.NoteOption
@return: nothing
"""
GObject.GObject.__init__(self)
self.__option = option
self.__dbstate = dbstate
self.__db = dbstate.get_database()
self.__uistate = uistate
self.__track = track
self.__note_label = Gtk.Label()
self.__note_label.set_alignment(0.0, 0.5)
pevt = Gtk.EventBox()
pevt.add(self.__note_label)
note_button = widgets.SimpleButton(Gtk.STOCK_INDEX,
self.__get_note_clicked)
note_button.set_relief(Gtk.ReliefStyle.NORMAL)
self.pack_start(pevt, False, True, 0)
self.pack_end(note_button, False, True, 0)
# Initialize to the current value
nid = self.__option.get_value()
note = self.__db.get_note_from_gramps_id(nid)
self.__update_note(note)
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
self.__option.connect('avail-changed', self.__update_avail)
self.__update_avail()
pevt.set_tooltip_text(self.__option.get_help())
note_button.set_tooltip_text(_('Select an existing note'))
def __get_note_clicked(self, obj): # IGNORE:W0613 - obj is unused
"""
Handle the button to choose a different note.
"""
select_class = SelectorFactory('Note')
sel = select_class(self.__dbstate, self.__uistate, self.__track)
note = sel.run()
self.__update_note(note)
def __update_note(self, note):
"""
Update the currently selected note.
"""
if note:
note_id = note.get_gramps_id()
txt = " ".join(note.get().split())
if len(txt) > 35:
txt = txt[:35] + "..."
txt = "%s [%s]" % (txt, note_id)
self.__note_label.set_text( txt )
self.__option.set_value(note_id)
else:
txt = "<i>%s</i>" % _('No note given, click button to select one')
self.__note_label.set_text( txt )
self.__note_label.set_use_markup(True)
self.__option.set_value("")
def __update_avail(self):
"""
Update the availability (sensitivity) of this widget.
"""
avail = self.__option.get_available()
self.set_sensitive(avail)
def __value_changed(self):
"""
Handle the change made programmatically
"""
nid = self.__option.get_value()
note = self.__db.get_note_from_gramps_id(nid)
# Need to disable signals as __update_note() calls set_value()
# which would launch the 'value-changed' signal which is what
# we are reacting to here in the first place (don't need the
# signal repeated)
self.__option.disable_signals()
self.__update_note(note)
self.__option.enable_signals()
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option = None
#-------------------------------------------------------------------------
#
# GuiMediaOption class
#
#-------------------------------------------------------------------------
class GuiMediaOption(Gtk.HBox):
"""
This class displays an option that allows a media object from the
database to be selected.
"""
def __init__(self, option, dbstate, uistate, track, override):
"""
@param option: The option to display.
@type option: gen.plug.menu.MediaOption
@return: nothing
"""
GObject.GObject.__init__(self)
self.__option = option
self.__dbstate = dbstate
self.__db = dbstate.get_database()
self.__uistate = uistate
self.__track = track
self.__media_label = Gtk.Label()
self.__media_label.set_alignment(0.0, 0.5)
pevt = Gtk.EventBox()
pevt.add(self.__media_label)
media_button = widgets.SimpleButton(Gtk.STOCK_INDEX,
self.__get_media_clicked)
media_button.set_relief(Gtk.ReliefStyle.NORMAL)
self.pack_start(pevt, False, True, 0)
self.pack_end(media_button, False, True, 0)
# Initialize to the current value
mid = self.__option.get_value()
media = self.__db.get_object_from_gramps_id(mid)
self.__update_media(media)
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
self.__option.connect('avail-changed', self.__update_avail)
self.__update_avail()
pevt.set_tooltip_text(self.__option.get_help())
media_button.set_tooltip_text(_('Select an existing media object'))
def __get_media_clicked(self, obj): # IGNORE:W0613 - obj is unused
"""
        Handle the button to choose a different media object.
"""
select_class = SelectorFactory('MediaObject')
sel = select_class(self.__dbstate, self.__uistate, self.__track)
media = sel.run()
self.__update_media(media)
def __update_media(self, media):
"""
Update the currently selected media.
"""
if media:
media_id = media.get_gramps_id()
txt = "%s [%s]" % (media.get_description(), media_id)
self.__media_label.set_text( txt )
self.__option.set_value(media_id)
else:
txt = "<i>%s</i>" % _('No image given, click button to select one')
self.__media_label.set_text( txt )
self.__media_label.set_use_markup(True)
self.__option.set_value("")
def __update_avail(self):
"""
Update the availability (sensitivity) of this widget.
"""
avail = self.__option.get_available()
self.set_sensitive(avail)
def __value_changed(self):
"""
Handle the change made programmatically
"""
mid = self.__option.get_value()
media = self.__db.get_object_from_gramps_id(mid)
# Need to disable signals as __update_media() calls set_value()
# which would launch the 'value-changed' signal which is what
# we are reacting to here in the first place (don't need the
# signal repeated)
self.__option.disable_signals()
self.__update_media(media)
self.__option.enable_signals()
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option = None
#-------------------------------------------------------------------------
#
# GuiPersonListOption class
#
#-------------------------------------------------------------------------
class GuiPersonListOption(Gtk.HBox):
"""
This class displays a widget that allows multiple people from the
database to be selected.
"""
def __init__(self, option, dbstate, uistate, track, override):
"""
@param option: The option to display.
@type option: gen.plug.menu.PersonListOption
@return: nothing
"""
GObject.GObject.__init__(self)
self.__option = option
self.__dbstate = dbstate
self.__db = dbstate.get_database()
self.__uistate = uistate
self.__track = track
self.set_size_request(150, 150)
self.__model = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING)
self.__tree_view = Gtk.TreeView(self.__model)
col1 = Gtk.TreeViewColumn(_('Name' ), Gtk.CellRendererText(), text=0)
col2 = Gtk.TreeViewColumn(_('ID' ), Gtk.CellRendererText(), text=1)
col1.set_resizable(True)
col2.set_resizable(True)
col1.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col2.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col1.set_sort_column_id(0)
col2.set_sort_column_id(1)
self.__tree_view.append_column(col1)
self.__tree_view.append_column(col2)
self.__scrolled_window = Gtk.ScrolledWindow()
self.__scrolled_window.add(self.__tree_view)
self.__scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,
Gtk.PolicyType.AUTOMATIC)
self.__scrolled_window.set_shadow_type(Gtk.ShadowType.OUT)
self.pack_start(self.__scrolled_window, True, True, 0)
self.__value_changed()
# now setup the '+' and '-' pushbutton for adding/removing people from
# the container
self.__add_person = widgets.SimpleButton(Gtk.STOCK_ADD,
self.__add_person_clicked)
self.__del_person = widgets.SimpleButton(Gtk.STOCK_REMOVE,
self.__del_person_clicked)
self.__vbbox = Gtk.VButtonBox()
self.__vbbox.add(self.__add_person)
self.__vbbox.add(self.__del_person)
self.__vbbox.set_layout(Gtk.ButtonBoxStyle.SPREAD)
self.pack_end(self.__vbbox, False, False, 0)
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
self.__tree_view.set_tooltip_text(self.__option.get_help())
def __add_person_clicked(self, obj): # IGNORE:W0613 - obj is unused
"""
Handle the add person button.
"""
# people we already have must be excluded
# so we don't list them multiple times
skip_list = set()
i = self.__model.get_iter_first()
while (i):
gid = self.__model.get_value(i, 1) # get the GID stored in column #1
person = self.__db.get_person_from_gramps_id(gid)
skip_list.add(person.get_handle())
i = self.__model.iter_next(i)
select_class = SelectorFactory('Person')
sel = select_class(self.__dbstate, self.__uistate,
self.__track, skip=skip_list)
person = sel.run()
if person:
name = _nd.display(person)
gid = person.get_gramps_id()
self.__model.append([name, gid])
# if this person has a spouse, ask if we should include the spouse
# in the list of "people of interest"
#
# NOTE: we may want to make this an optional thing, determined
            # by the use of a parameter at the time this class is instantiated
family_list = person.get_family_handle_list()
for family_handle in family_list:
family = self.__db.get_family_from_handle(family_handle)
if person.get_handle() == family.get_father_handle():
spouse_handle = family.get_mother_handle()
else:
spouse_handle = family.get_father_handle()
if spouse_handle and (spouse_handle not in skip_list):
spouse = self.__db.get_person_from_handle(
spouse_handle)
spouse_name = _nd.display(spouse)
text = _('Also include %s?') % spouse_name
prompt = OptionDialog(_('Select Person'),
text,
_('No'), None,
_('Yes'), None)
if prompt.get_response() == Gtk.ResponseType.YES:
gid = spouse.get_gramps_id()
self.__model.append([spouse_name, gid])
self.__update_value()
def __del_person_clicked(self, obj): # IGNORE:W0613 - obj is unused
"""
Handle the delete person button.
"""
(path, column) = self.__tree_view.get_cursor()
if (path):
i = self.__model.get_iter(path)
self.__model.remove(i)
self.__update_value()
def __update_value(self):
"""
Parse the object and return.
"""
gidlist = ''
i = self.__model.get_iter_first()
while (i):
gid = self.__model.get_value(i, 1)
gidlist = gidlist + gid + ' '
i = self.__model.iter_next(i)
        # Suppress signals so that the set_value() handler
# (__value_changed()) doesn't get called
self.__option.disable_signals()
self.__option.set_value(gidlist)
self.__option.enable_signals()
def __value_changed(self):
"""
Handle the change made programmatically
"""
value = self.__option.get_value()
if not isinstance(value, STRTYPE):
# Convert array into a string
            # (convenience so that programmers can
# set value using a list)
value = " ".join(value)
# Need to change __option value to be the string
self.__option.disable_signals()
self.__option.set_value(value)
self.__option.enable_signals()
# Remove all entries (the new values will REPLACE
# rather than APPEND)
self.__model.clear()
for gid in value.split():
person = self.__db.get_person_from_gramps_id(gid)
if person:
name = _nd.display(person)
self.__model.append([name, gid])
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option = None
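# A small sketch of the value format used by GuiPersonListOption above: the
# option stores the selected people as a single space-separated string of
# Gramps IDs, but accepts either that string or a list when set
# programmatically.  The helper names and the IDs in the docstrings are
# illustrative only, not part of the Gramps API.
def _gids_to_option_string(gids):
    """['I0001', 'I0002'] -> 'I0001 I0002 ' (trailing space, as built above)."""
    return ''.join(gid + ' ' for gid in gids)
def _option_string_to_gids(value):
    """Accept a space-separated string or a list and return a list of IDs."""
    if not isinstance(value, STRTYPE):
        value = " ".join(value)
    return value.split()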
#-------------------------------------------------------------------------
#
# GuiPlaceListOption class
#
#-------------------------------------------------------------------------
class GuiPlaceListOption(Gtk.HBox):
"""
This class displays a widget that allows multiple places from the
database to be selected.
"""
def __init__(self, option, dbstate, uistate, track, override):
"""
@param option: The option to display.
@type option: gen.plug.menu.PlaceListOption
@return: nothing
"""
GObject.GObject.__init__(self)
self.__option = option
self.__dbstate = dbstate
self.__db = dbstate.get_database()
self.__uistate = uistate
self.__track = track
self.set_size_request(150, 150)
self.__model = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING)
self.__tree_view = Gtk.TreeView(self.__model)
col1 = Gtk.TreeViewColumn(_('Place' ), Gtk.CellRendererText(), text=0)
col2 = Gtk.TreeViewColumn(_('ID' ), Gtk.CellRendererText(), text=1)
col1.set_resizable(True)
col2.set_resizable(True)
col1.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col2.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col1.set_sort_column_id(0)
col2.set_sort_column_id(1)
self.__tree_view.append_column(col1)
self.__tree_view.append_column(col2)
self.__scrolled_window = Gtk.ScrolledWindow()
self.__scrolled_window.add(self.__tree_view)
self.__scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,
Gtk.PolicyType.AUTOMATIC)
self.__scrolled_window.set_shadow_type(Gtk.ShadowType.OUT)
self.pack_start(self.__scrolled_window, True, True, 0)
self.__value_changed()
# now setup the '+' and '-' pushbutton for adding/removing places from
# the container
self.__add_place = widgets.SimpleButton(Gtk.STOCK_ADD,
self.__add_place_clicked)
self.__del_place = widgets.SimpleButton(Gtk.STOCK_REMOVE,
self.__del_place_clicked)
self.__vbbox = Gtk.VButtonBox()
self.__vbbox.add(self.__add_place)
self.__vbbox.add(self.__del_place)
self.__vbbox.set_layout(Gtk.ButtonBoxStyle.SPREAD)
self.pack_end(self.__vbbox, False, False, 0)
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
self.__tree_view.set_tooltip_text(self.__option.get_help())
def __add_place_clicked(self, obj): # IGNORE:W0613 - obj is unused
"""
Handle the add place button.
"""
# places we already have must be excluded
# so we don't list them multiple times
skip_list = set()
i = self.__model.get_iter_first()
while (i):
gid = self.__model.get_value(i, 1) # get the GID stored in column #1
place = self.__db.get_place_from_gramps_id(gid)
skip_list.add(place.get_handle())
i = self.__model.iter_next(i)
select_class = SelectorFactory('Place')
sel = select_class(self.__dbstate, self.__uistate,
self.__track, skip=skip_list)
place = sel.run()
if place:
place_name = place.get_title()
gid = place.get_gramps_id()
self.__model.append([place_name, gid])
self.__update_value()
def __del_place_clicked(self, obj): # IGNORE:W0613 - obj is unused
"""
Handle the delete place button.
"""
(path, column) = self.__tree_view.get_cursor()
if (path):
i = self.__model.get_iter(path)
self.__model.remove(i)
self.__update_value()
def __update_value(self):
"""
Parse the object and return.
"""
gidlist = ''
i = self.__model.get_iter_first()
while (i):
gid = self.__model.get_value(i, 1)
gidlist = gidlist + gid + ' '
i = self.__model.iter_next(i)
self.__option.set_value(gidlist)
def __value_changed(self):
"""
Handle the change made programmatically
"""
value = self.__option.get_value()
if not isinstance(value, STRTYPE):
# Convert array into a string
            # (convenience so that programmers can
# set value using a list)
value = " ".join(value)
# Need to change __option value to be the string
self.__option.disable_signals()
self.__option.set_value(value)
self.__option.enable_signals()
# Remove all entries (the new values will REPLACE
# rather than APPEND)
self.__model.clear()
for gid in value.split():
place = self.__db.get_place_from_gramps_id(gid)
if place:
place_name = place.get_title()
self.__model.append([place_name, gid])
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option = None
#-------------------------------------------------------------------------
#
# GuiSurnameColorOption class
#
#-------------------------------------------------------------------------
class GuiSurnameColorOption(Gtk.HBox):
"""
This class displays a widget that allows multiple surnames to be
selected from the database, and to assign a colour (not necessarily
unique) to each one.
"""
def __init__(self, option, dbstate, uistate, track, override):
"""
@param option: The option to display.
@type option: gen.plug.menu.SurnameColorOption
@return: nothing
"""
GObject.GObject.__init__(self)
self.__option = option
self.__dbstate = dbstate
self.__db = dbstate.get_database()
self.__uistate = uistate
self.__track = track
self.set_size_request(150, 150)
# This will get populated the first time the dialog is run,
# and used each time after.
        self.__surnames = {} # dict of surnames and their counts
self.__model = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING)
self.__tree_view = Gtk.TreeView(self.__model)
self.__tree_view.connect('row-activated', self.__row_clicked)
col1 = Gtk.TreeViewColumn(_('Surname'), Gtk.CellRendererText(), text=0)
col2 = Gtk.TreeViewColumn(_('Color'), Gtk.CellRendererText(), text=1)
col1.set_resizable(True)
col2.set_resizable(True)
col1.set_sort_column_id(0)
col1.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col2.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
self.__tree_view.append_column(col1)
self.__tree_view.append_column(col2)
self.scrolled_window = Gtk.ScrolledWindow()
self.scrolled_window.add(self.__tree_view)
self.scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,
Gtk.PolicyType.AUTOMATIC)
self.scrolled_window.set_shadow_type(Gtk.ShadowType.OUT)
self.pack_start(self.scrolled_window, True, True, 0)
self.add_surname = widgets.SimpleButton(Gtk.STOCK_ADD,
self.__add_clicked)
self.del_surname = widgets.SimpleButton(Gtk.STOCK_REMOVE,
self.__del_clicked)
self.vbbox = Gtk.VButtonBox()
self.vbbox.add(self.add_surname)
self.vbbox.add(self.del_surname)
self.vbbox.set_layout(Gtk.ButtonBoxStyle.SPREAD)
self.pack_end(self.vbbox, False, False, 0)
self.__value_changed()
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
self.__tree_view.set_tooltip_text(self.__option.get_help())
def __add_clicked(self, obj): # IGNORE:W0613 - obj is unused
"""
        Handle the add surname button.
"""
skip_list = set()
i = self.__model.get_iter_first()
while (i):
surname = self.__model.get_value(i, 0)
skip_list.add(surname.encode('iso-8859-1','xmlcharrefreplace'))
i = self.__model.iter_next(i)
ln_dialog = LastNameDialog(self.__db, self.__uistate,
self.__track, self.__surnames, skip_list)
surname_set = ln_dialog.run()
for surname in surname_set:
self.__model.append([surname, '#ffffff'])
self.__update_value()
def __del_clicked(self, obj): # IGNORE:W0613 - obj is unused
"""
        Handle the delete surname button.
"""
(path, column) = self.__tree_view.get_cursor()
if (path):
i = self.__model.get_iter(path)
self.__model.remove(i)
self.__update_value()
def __row_clicked(self, treeview, path, column):
"""
Handle the case of a row being clicked on.
"""
# get the surname and colour value for this family
i = self.__model.get_iter(path)
surname = self.__model.get_value(i, 0)
colour = Gdk.color_parse(self.__model.get_value(i, 1))
title = _('Select color for %s') % surname
colour_dialog = Gtk.ColorSelectionDialog(title)
colorsel = colour_dialog.colorsel
colorsel.set_current_color(colour)
response = colour_dialog.run()
if response == Gtk.ResponseType.OK:
colour = colorsel.get_current_color()
colour_name = '#%02x%02x%02x' % (
int(colour.red *256/65536),
int(colour.green*256/65536),
int(colour.blue *256/65536))
self.__model.set_value(i, 1, colour_name)
colour_dialog.destroy()
self.__update_value()
def __update_value(self):
"""
Parse the object and return.
"""
surname_colours = ''
i = self.__model.get_iter_first()
while (i):
surname = self.__model.get_value(i, 0)
#surname = surname.encode('iso-8859-1','xmlcharrefreplace')
colour = self.__model.get_value(i, 1)
# Tried to use a dictionary, and tried to save it as a tuple,
            # but couldn't get this to work right -- this is lame, but now
# the surnames and colours are saved as a plain text string
#
# Hmmm...putting whitespace between the fields causes
# problems when the surname has whitespace -- for example,
# with surnames like "Del Monte". So now we insert a non-
# whitespace character which is unlikely to appear in
# a surname. (See bug report #2162.)
surname_colours += surname + '\xb0' + colour + '\xb0'
i = self.__model.iter_next(i)
self.__option.set_value( surname_colours )
def __value_changed(self):
"""
Handle the change made programmatically
"""
value = self.__option.get_value()
if not isinstance(value, STRTYPE):
# Convert dictionary into a string
            # (convenience so that programmers can
# set value using a dictionary)
value_str = ""
for name in value:
value_str += "%s\xb0%s\xb0" % (name, value[name])
value = value_str
# Need to change __option value to be the string
self.__option.disable_signals()
self.__option.set_value(value)
self.__option.enable_signals()
# Remove all entries (the new values will REPLACE
# rather than APPEND)
self.__model.clear()
# populate the surname/colour treeview
#
# For versions prior to 3.0.2, the fields were delimited with
# whitespace. However, this causes problems when the surname
# also has a space within it. When populating the control,
# support both the new and old format -- look for the \xb0
# delimiter, and if it isn't there, assume this is the old-
# style space-delimited format. (Bug #2162.)
if (value.find('\xb0') >= 0):
tmp = value.split('\xb0')
else:
tmp = value.split(' ')
while len(tmp) > 1:
surname = tmp.pop(0)
colour = tmp.pop(0)
self.__model.append([surname, colour])
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option = None
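# A minimal sketch of the surname/colour encoding used by GuiSurnameColorOption
# above: each pair is stored as "surname\xb0colour\xb0" because a plain space
# delimiter breaks on multi-word surnames such as "Del Monte" (bug #2162).
# The helper names are illustrative, not part of the Gramps API.
def _encode_surname_colours(pairs):
    """Iterable of (surname, colour) tuples -> single delimited string."""
    return ''.join('%s\xb0%s\xb0' % (surname, colour) for surname, colour in pairs)
def _decode_surname_colours(value):
    """Delimited string (new or old space-delimited format) -> list of tuples."""
    parts = value.split('\xb0') if '\xb0' in value else value.split(' ')
    return [(parts[i], parts[i + 1]) for i in range(0, len(parts) - 1, 2)]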
#-------------------------------------------------------------------------
#
# GuiDestinationOption class
#
#-------------------------------------------------------------------------
class GuiDestinationOption(Gtk.HBox):
"""
This class displays an option that allows the user to select a
DestinationOption.
"""
def __init__(self, option, dbstate, uistate, track, override):
"""
@param option: The option to display.
@type option: gen.plug.menu.DestinationOption
@return: nothing
"""
GObject.GObject.__init__(self)
self.__option = option
self.__entry = Gtk.Entry()
self.__entry.set_text( self.__option.get_value() )
self.__button = Gtk.Button()
img = Gtk.Image()
img.set_from_stock(Gtk.STOCK_OPEN, Gtk.IconSize.BUTTON)
self.__button.add(img)
self.__button.connect('clicked', self.__select_file)
self.pack_start(self.__entry, True, True, 0)
self.pack_end(self.__button, False, False, 0)
# Set up signal handlers when the widget value is changed
# from user interaction or programmatically. When handling
# a specific signal, we need to temporarily block the signal
# that would call the other signal handler.
self.changekey = self.__entry.connect('changed', self.__text_changed)
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
self.conkey1 = self.__option.connect('options-changed', self.__option_changed)
self.conkey2 = self.__option.connect('avail-changed', self.__update_avail)
self.__update_avail()
self.set_tooltip_text(self.__option.get_help())
def __option_changed(self):
"""
Handle a change of the option.
"""
extension = self.__option.get_extension()
directory = self.__option.get_directory_entry()
value = self.__option.get_value()
if not directory and not value.endswith(extension):
value = value + extension
self.__option.set_value(value)
elif directory and value.endswith(extension):
value = value[:-len(extension)]
self.__option.set_value(value)
self.__entry.set_text( self.__option.get_value() )
def __select_file(self, obj):
"""
Handle the user's request to select a file (or directory).
"""
if self.__option.get_directory_entry():
my_action = Gtk.FileChooserAction.SELECT_FOLDER
else:
my_action = Gtk.FileChooserAction.SAVE
fcd = Gtk.FileChooserDialog(_("Save As"), action=my_action,
buttons=(Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN,
Gtk.ResponseType.OK))
name = os.path.abspath(self.__option.get_value())
if self.__option.get_directory_entry():
while not os.path.isdir(name):
# Keep looking up levels to find a valid drive.
name, tail = os.path.split(name)
if not name:
# Avoid infinite loops
name = os.getcwd()
fcd.set_current_folder(name)
else:
fcd.set_current_name(name)
status = fcd.run()
if status == Gtk.ResponseType.OK:
path = get_unicode_path_from_file_chooser(fcd.get_filename())
if path:
if not self.__option.get_directory_entry() and \
not path.endswith(self.__option.get_extension()):
path = path + self.__option.get_extension()
self.__entry.set_text(path)
self.__option.set_value(path)
fcd.destroy()
def __text_changed(self, obj): # IGNORE:W0613 - obj is unused
"""
Handle the change of the value made by the user.
"""
self.__option.disable_signals()
self.__option.set_value( self.__entry.get_text() )
self.__option.enable_signals()
def __update_avail(self):
"""
Update the availability (sensitivity) of this widget.
"""
avail = self.__option.get_available()
self.set_sensitive(avail)
def __value_changed(self):
"""
Handle the change made programmatically
"""
self.__entry.handler_block(self.changekey)
self.__entry.set_text(self.__option.get_value())
self.__entry.handler_unblock(self.changekey)
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option.disconnect(self.conkey1)
self.__option.disconnect(self.conkey2)
self.__option = None
#-------------------------------------------------------------------------
#
# GuiStyleOption class
#
#-------------------------------------------------------------------------
class GuiStyleOption(GuiEnumeratedListOption):
"""
This class displays a StyleOption.
"""
def __init__(self, option, dbstate, uistate, track, override):
"""
@param option: The option to display.
@type option: gen.plug.menu.StyleOption
@return: nothing
"""
        GuiEnumeratedListOption.__init__(self, option, dbstate,
                                         uistate, track, override)
self.__option = option
self.__button = Gtk.Button("%s..." % _("Style Editor"))
self.__button.connect('clicked', self.__on_style_edit_clicked)
        self.pack_end(self.__button, False, False, 0)
def __on_style_edit_clicked(self, *obj):
"""The user has clicked on the 'Edit Styles' button. Create a
style sheet editor object and let them play. When they are
done, update the displayed styles."""
from gramps.gen.plug.docgen import StyleSheetList
from .report._styleeditor import StyleListDisplay
style_list = StyleSheetList(self.__option.get_style_file(),
self.__option.get_default_style())
StyleListDisplay(style_list, None, None)
new_items = []
for style_name in style_list.get_style_names():
new_items.append( (style_name, style_name) )
self.__option.set_items(new_items)
#-------------------------------------------------------------------------
#
# GuiBooleanListOption class
#
#-------------------------------------------------------------------------
class GuiBooleanListOption(Gtk.HBox):
"""
This class displays an option that provides a list of check boxes.
Each possible value is assigned a value and a description.
"""
def __init__(self, option, dbstate, uistate, track, override):
GObject.GObject.__init__(self)
self.__option = option
self.__cbutton = []
COLUMNS = 2 # Number of checkbox columns
column = []
for i in range(COLUMNS):
vbox = Gtk.VBox()
self.pack_start(vbox, True, True, 0)
column.append(vbox)
vbox.show()
counter = 0
default = option.get_value().split(',')
for description in option.get_descriptions():
button = Gtk.CheckButton(description)
self.__cbutton.append(button)
if counter < len(default):
if default[counter] == 'True':
button.set_active(True)
button.connect("toggled", self.__list_changed)
column[counter % COLUMNS].pack_start(button, True, True, 0)
button.show()
counter += 1
self.valuekey = self.__option.connect('value-changed', self.__value_changed)
self.__option.connect('avail-changed', self.__update_avail)
self.__update_avail()
self.set_tooltip_text(self.__option.get_help())
def __list_changed(self, button):
"""
Handle the change of the value made by the user.
"""
value = ''
for button in self.__cbutton:
value = value + str(button.get_active()) + ','
value = value[:len(value)-1]
self.__option.disable_signals()
self.__option.set_value(value)
self.__option.enable_signals()
def __update_avail(self):
"""
Update the availability (sensitivity) of this widget.
"""
avail = self.__option.get_available()
self.set_sensitive(avail)
def __value_changed(self):
"""
Handle the change made programmatically
"""
value = self.__option.get_value()
self.__option.disable_signals()
for button in self.__cbutton:
for key in value:
if key == button.get_label():
bool_value = (value[key] == "True" or value[key] == True)
button.set_active(bool_value)
# Update __option value so that it's correct
self.__list_changed(None)
self.__option.enable_signals()
def clean_up(self):
"""
remove stuff that blocks garbage collection
"""
self.__option.disconnect(self.valuekey)
self.__option = None
#-----------------------------------------------------------------------------#
# #
# Table mapping menu types to gui widgets used in make_gui_option function #
# #
#-----------------------------------------------------------------------------#
from gramps.gen.plug import menu as menu
_OPTIONS = (
(menu.BooleanListOption, True, GuiBooleanListOption),
(menu.BooleanOption, False, GuiBooleanOption),
(menu.ColorOption, True, GuiColorOption),
(menu.DestinationOption, True, GuiDestinationOption),
(menu.EnumeratedListOption, True, GuiEnumeratedListOption),
(menu.FamilyOption, True, GuiFamilyOption),
(menu.MediaOption, True, GuiMediaOption),
(menu.NoteOption, True, GuiNoteOption),
(menu.NumberOption, True, GuiNumberOption),
(menu.PersonListOption, True, GuiPersonListOption),
(menu.PersonOption, True, GuiPersonOption),
(menu.PlaceListOption, True, GuiPlaceListOption),
(menu.StringOption, True, GuiStringOption),
(menu.StyleOption, True, GuiStyleOption),
(menu.SurnameColorOption, True, GuiSurnameColorOption),
(menu.TextOption, True, GuiTextOption),
# This entry must be last!
(menu.Option, None, None),
)
del menu
def make_gui_option(option, dbstate, uistate, track, override=False):
"""
Stand-alone function so that Options can be used in other
ways, too. Takes an Option and returns a GuiOption.
override: if True will override the GuiOption's normal behavior
    (in a GuiOption-dependent fashion, for instance in a GuiPersonOption
it will force the use of the options's value to set the GuiOption)
"""
label, widget = True, None
pmgr = GuiPluginManager.get_instance()
external_options = pmgr.get_external_opt_dict()
if option.__class__ in external_options:
widget = external_options[option.__class__]
else:
for type_, label, widget in _OPTIONS:
if isinstance(option, type_):
break
else:
raise AttributeError(
"can't make GuiOption: unknown option type: '%s'" % option)
if widget:
widget = widget(option, dbstate, uistate, track, override)
return widget, label
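def _make_gui_option_example(dbstate, uistate, track, grid):
    """
    A hedged usage sketch, not part of the Gramps API: it shows how
    make_gui_option() can be used outside of add_gui_options().  The
    StringOption label and default value are purely illustrative, and
    dbstate, uistate and track are assumed to come from the running GUI.
    """
    from gramps.gen.plug import menu
    opt = menu.StringOption(_("Title"), "My report title")
    widget, wants_label = make_gui_option(opt, dbstate, uistate, track)
    if wants_label:
        grid.attach(Gtk.Label(label=opt.get_label()), 0, 0, 1, 1)
        grid.attach(widget, 1, 0, 1, 1)
    else:
        grid.attach(widget, 0, 0, 2, 1)
    return widget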
def add_gui_options(dialog):
"""
Stand-alone function to add user options to the GUI.
"""
if not hasattr(dialog.options, "menu"):
return
menu = dialog.options.menu
options_dict = dialog.options.options_dict
for category in menu.get_categories():
for name in menu.get_option_names(category):
option = menu.get_option(category, name)
# override option default with xml-saved value:
if name in options_dict:
option.set_value(options_dict[name])
widget, label = make_gui_option(option, dialog.dbstate,
dialog.uistate, dialog.track)
if widget is not None:
if label:
dialog.add_frame_option(category,
option.get_label(),
widget)
else:
dialog.add_frame_option(category, "", widget)
|
Forage/Gramps
|
gramps/gui/plug/_guioptions.py
|
Python
|
gpl-2.0
| 73,366
|
[
"Brian"
] |
88ea72a8c875482f3d51bd5616ed55eb010ea2971d6b1e506cff39ddd06d2d51
|
'''
The settings for OSMC are handled by the OSMC Settings Addon (OSA).
	In order to more easily accommodate future changes and enhancements, each OSMC settings bundle (module) is a separate addon.
	The module can take the form of an xbmc service, an xbmc script, or an xbmc module, but it must be installed into the user's
/usr/share/kodi/addons folder.
The OSA collects the modules it can find, loads their icons, and launches them individually when the user clicks on an icon.
The modules can either have their own GUI, or they can leverage the settings interface provided by XBMC. If the module uses the XBMC
settings interface, then all of its settings must be stored in the addon's settings.xml. This is true even if the source of record
is a separate config file.
An example of this type is the Pi settings module; the actual settings are read from the config.txt, then written to the
settings.xml for display in kodi, then finally all changes are written back to the config.txt. The Pi module detects user
changes to the settings by identifying the differences between a newly read settings.xml and the values from a previously
read settings.xml.
The values of the settings displayed by this module are only ever populated by the items in the settings.xml. [Note: meaning that
if the settings data is retrieved from a different source, it will need to be populated in the module before it is displayed
to the user.]
Each module must have in its folder, a sub-folder called 'resources/osmc'. Within that folder must reside this script (OSMCSetting.py),
and the icons to be used in the OSG to represent the module (FX_Icon.png and FO_Icon.png for unfocused and focused images
respectively).
When the OSA creates the OSMC Settings GUI (OSG), these modules are identified and the OSMCSetting.py script in each of them
is imported. This script provides the mechanism for the OSG to apply the changes required from a change in a setting.
The OSMCSetting.py file must have a class called OSMCSettingClass as shown below.
The key variables in this class are:
addonid : The id for the addon. This must be the id declared in the addons addon.xml.
description : The description for the module, shown in the OSA
reboot_required : A boolean to declare if the OS needs to be rebooted. If a change in a specific setting
requires an OS reboot to take effect, this is the flag that will let the OSG know.
setting_data_method : This dictionary contains:
- the name of all settings in the module
- the current value of those settings
- [optional] apply - a method to call for each setting when the value changes
- [optional] translate - a method to call to translate the data before adding it to the
setting_data_method dict. The translate method must have a 'reverse' argument which
when set to True, reverses the transformation.
The key methods of this class are:
open_settings_window : This is called by the OSG when the icon is clicked. This will open the settings window.
Usually this would be __addon__.OpenSettings(), but it could be any other script.
This allows the creation of action buttons in the GUI, as well as allowing developers
to script and skin their own user interfaces.
[optional] first_method : called before any individual settings changes are applied.
[optional] final_method : called after all the individual settings changes are done.
[optional] boot_method : called when the OSA is first started.
apply_settings : This is called by the OSG to apply the changes to any settings that have changed.
It calls the first setting method, if it exists.
Then it calls the method listed in setting_data_method for each setting. Then it
calls the final method, again, if it exists.
populate_setting_data_method : This method is used to populate the setting_data_method with the current settings data.
Usually this will be from the addons setting data stored in settings.xml and retrieved
using the settings_retriever_xml method.
Sometimes the user is able to edit external setting files (such as the Pi's config.txt).
If the developer wants to use this source in place of the data stored in the
settings.xml, then they should edit this method to include a mechanism to retrieve and
parse that external data. As the window shown in the OSG populates only with data from
the settings.xml, the developer should ensure that the external data is loaded into that
xml before the settings window is opened.
settings_retriever_xml : This method is used to retrieve all the data for the settings listed in the
setting_data_method from the addons settings.xml.
The developer is free to create any methods they see fit, but the ones listed above are specifically used by the OSA.
Specifically, the apply_settings method is called when the OSA closes.
Settings changes are applied when the OSG is called to close. But this behaviour can be changed to occur when the addon
settings window closes by editing the open_settings_window. The method apply_settings will still be called by OSA, so
keep that in mind.
'''
# XBMC Modules
import xbmc
import xbmcaddon
import xbmcgui
# STANDARD Modules
import subprocess
import sys
import os
import threading
import traceback
addonid = "script.module.osmcsetting.pi"
__addon__ = xbmcaddon.Addon(addonid)
DIALOG = xbmcgui.Dialog()
# Custom modules
sys.path.append(xbmc.translatePath(os.path.join(xbmcaddon.Addon(addonid).getAddonInfo('path'), 'resources','lib')))
# OSMC SETTING Modules
import OSMC_REparser as parser
def lang(id):
san = __addon__.getLocalizedString(id).encode( 'utf-8', 'ignore' )
return san
def log(message):
try:
message = str(message)
except UnicodeEncodeError:
message = message.encode('utf-8', 'ignore' )
xbmc.log('OSMC PI ' + str(message), level=xbmc.LOGDEBUG)
class OSMCSettingClass(threading.Thread):
'''
    An OSMCSettingClass is a way to instantiate the settings of an OSMC settings module, and make them available to the
OSMC Settings Addon (OSA).
'''
def __init__(self):
'''
The MASTER_SETTINGS contains all the settings in the settings group, as well as the methods to call when a
setting_value has changed and the existing setting_value.
'''
super(OSMCSettingClass, self).__init__()
self.addonid = addonid
self.me = xbmcaddon.Addon(self.addonid)
# this is what is displayed in the main settings gui
self.shortname = 'Pi Config'
self.description = """This is the text that is shown on the OSG. [CR][CR]It should describe:[CR] - what the settings module is for,[CR] - the settings it controls,[CR] - and anything else you want, I suppose."""
self.description = """The Raspberry Pi doesn't have a conventional BIOS. System configuration parameters are stored in a "config.txt" file. For more detail, visit http://elinux.org/RPiconfig[CR]
This settings module allows you to edit your config.txt from within OSMC using a graphical interface.
The module includes:
- display rotation
- hdmi_safe & hdmi_boost
- hdmi_group & hdmi_mode
- function to save edid to file
- sdtv_mode & sdtv_aspect
- GPU memory split
- MPG2 & WVC1 licences (including status)
- your Pi's serial number
Finally, there is a Config Editor that will allow you to quickly add, edit, or delete lines in your config.txt.
Overclock settings are set using the Pi Overclock module."""
# the location of the config file FOR TESTING ONLY
try:
self.config_location = '/boot/config.txt'
self.populate_misc_info()
except:
# if anything fails above, assume we are testing and look for the config
# in the testing location
self.config_location = '/home/plaskev/Documents/config.txt'
try:
self.clean_user_config()
except Exception:
log('Error cleaning users config')
log(traceback.format_exc())
def run(self):
'''
The method determines what happens when the item is clicked in the settings GUI.
Usually this would be __addon__.OpenSettings(), but it could be any other script.
This allows the creation of action buttons in the GUI, as well as allowing developers to script and skin their
own user interfaces.
'''
        # read the config.txt file every time the settings are opened. This is unavoidable because it is possible for
# the user to have made manual changes to the config.txt while OSG is active.
config = parser.read_config_file(self.config_location)
extracted_settings = parser.config_to_kodi(parser.MASTER_SETTINGS, config)
# load the settings into kodi
log('Settings extracted from the config.txt')
for k, v in extracted_settings.iteritems():
log("%s : %s" % (k, v))
self.me.setSetting(k, str(v))
# open the settings GUI and let the user monkey about with the controls
self.me.openSettings()
# retrieve the new settings from kodi
new_settings = self.settings_retriever_xml()
log('New settings applied to the config.txt')
for k, v in new_settings.iteritems():
log("%s : %s" % (k, v))
# read the config into a list of lines again
config = parser.read_config_file(self.config_location)
# construct the new set of config lines using the protocols and the new settings
new_settings = parser.kodi_to_config(parser.MASTER_SETTINGS, config, new_settings)
# write the new lines to the temporary config file
parser.write_config_file('/var/tmp/config.txt', new_settings)
# copy over the temp config.txt to /boot/ as superuser
subprocess.call(["sudo", "mv", '/var/tmp/config.txt', self.config_location])
ok = DIALOG.notification(lang(32095), lang(32096))
def apply_settings(self):
pass
def settings_retriever_xml(self):
'''
Reads the stored settings (in settings.xml) and returns a dictionary with the setting_name: setting_value. This
method cannot be overwritten.
'''
latest_settings = {}
addon = xbmcaddon.Addon(self.addonid)
for key in parser.MASTER_SETTINGS.keys():
latest_settings[key] = addon.getSetting(key)
return latest_settings
def populate_misc_info(self):
# grab the Pi serial number and check to see whether the codec licences are enabled
mpg = subprocess.check_output(["/opt/vc/bin/vcgencmd", "codec_enabled", "MPG2"])
wvc = subprocess.check_output(["/opt/vc/bin/vcgencmd", "codec_enabled", "WVC1"])
serial_raw = subprocess.check_output(["cat", "/proc/cpuinfo"])
# grab just the serial number
serial = serial_raw[serial_raw.index('Serial') + len('Serial'):].replace('\n','').replace(':','').replace(' ','').replace('\t','')
# load the values into the settings gui
__addon__.setSetting('codec_check', mpg.replace('\n','') + ', ' + wvc.replace('\n',''))
__addon__.setSetting('serial', serial)
def clean_user_config(self):
''' Comment out problematic lines in the users config.txt '''
patterns = [
r".*=.*\[remove\].*",
r".*=remove",
]
config = parser.read_config_file(self.config_location)
new_config = parser.clean_config(config, patterns)
# write the new lines to the temporary config file
parser.write_config_file('/var/tmp/config.txt', new_config)
# copy over the temp config.txt to /boot/ as superuser
subprocess.call(["sudo", "mv", '/var/tmp/config.txt', self.config_location])
if __name__ == "__main__":
pass
|
srmo/osmc
|
package/mediacenter-addon-osmc/src/script.module.osmcsetting.pi/resources/osmc/OSMCSetting.py
|
Python
|
gpl-2.0
| 11,555
|
[
"VisIt"
] |
05551c83ac609dedf315fd5300606385cf1b476362a2f4b45f1fb904449f1102
|
# Harmonic Oscillator
The harmonic oscillator is omnipresent in physics. Although you may think
of this as being related to springs, it, or an equivalent
mathematical representation, appears in just about any problem where a
mode is sitting near its potential energy minimum. At that point,
$\partial_x V(x)=0$, and the first non-zero term (aside from a
constant) in the potential energy is that of a harmonic oscillator. In
a solid, sound modes (phonons) are built on a picture of coupled
harmonic oscillators, and in relativistic field theory the fundamental
interactions are also built on coupled oscillators positioned
infinitesimally close to one another in space. The phenomena of a
resonance of an oscillator driven at a fixed frequency plays out
repeatedly in atomic, nuclear and high-energy physics, when quantum
mechanically the evolution of a state oscillates according to
$e^{-iEt}$ and exciting discrete quantum states has very similar
mathematics as exciting discrete states of an oscillator.
The potential energy for a single particle as a function of its position $x$ can be written as a Taylor expansion about some point $x_0$
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
V(x)=V(x_0)+(x-x_0)\left.\partial_xV(x)\right|_{x_0}+\frac{1}{2}(x-x_0)^2\left.\partial_x^2V(x)\right|_{x_0}
+\frac{1}{3!}(x-x_0)^3\left.\partial_x^3V(x)\right|_{x_0}+\cdots
\label{_auto1} \tag{1}
\end{equation}
$$
If the position $x_0$ is at the minimum of the potential, the first two non-zero terms of the potential are
$$
\begin{eqnarray}
V(x)&\approx& V(x_0)+\frac{1}{2}(x-x_0)^2\left.\partial_x^2V(x)\right|_{x_0},\\
\nonumber
&=&V(x_0)+\frac{1}{2}k(x-x_0)^2,~~~~k\equiv \left.\partial_x^2V(x)\right|_{x_0},\\
\nonumber
F&=&-\partial_xV(x)=-k(x-x_0).
\end{eqnarray}
$$
Put into Newton's 2nd law (assuming $x_0=0$),
$$
\begin{eqnarray}
m\ddot{x}&=&-kx,\\
x&=&A\cos(\omega_0 t-\phi),~~~\omega_0=\sqrt{k/m}.
\end{eqnarray}
$$
Here $A$ and $\phi$ are arbitrary. Equivalently, one could have
written this as $A\cos(\omega_0 t)+B\sin(\omega_0 t)$, or as the real
part of $Ae^{i\omega_0 t}$. In this last case $A$ could be an
arbitrary complex constant. Thus, there are two arbitrary constants
(either $A$ and $B$, or $A$ and $\phi$, or the real and imaginary parts
of one complex constant). This is the expectation for a second order
differential equation, and also agrees with the physical expectation
that if you know a particle's initial velocity and position you should
be able to define its future motion, and that those two arbitrary
conditions should translate to two arbitrary constants.
A key feature of harmonic motion is that the system repeats itself
after a time $T=1/f$, where $f$ is the frequency, and $\omega=2\pi f$
is the angular frequency. The period of the motion is independent of
the amplitude. However, this independence is only exact when one can
neglect higher terms of the potential, $x^3, x^4\cdots$. One can
neglect these terms for sufficiently small amplitudes, and for larger
amplitudes the motion is no longer purely sinusoidal, and even though
the motion repeats itself, the time for repeating the motion is no
longer independent of the amplitude.
One can also calculate the velocity and the kinetic energy as a function of time,
$$
\begin{eqnarray}
\dot{x}&=&-\omega_0A\sin(\omega_0 t-\phi),\\
\nonumber
K&=&\frac{1}{2}m\dot{x}^2=\frac{m\omega_0^2A^2}{2}\sin^2(\omega_0t-\phi),\\
\nonumber
&=&\frac{k}{2}A^2\sin^2(\omega_0t-\phi).
\end{eqnarray}
$$
The total energy is then
<!-- Equation labels as ordinary links -->
<div id="_auto2"></div>
$$
\begin{equation}
E=K+V=\frac{1}{2}m\dot{x}^2+\frac{1}{2}kx^2=\frac{1}{2}kA^2.
\label{_auto2} \tag{2}
\end{equation}
$$
The total energy then goes as the square of the amplitude.
A pendulum is an example of a harmonic oscillator. By expanding the
kinetic and potential energies for small angles find the frequency for
a pendulum of length $L$ with all the mass $m$ centered at the end by
writing the eq.s of motion in the form of a harmonic oscillator.
The potential energy and kinetic energies are (for $x$ being the displacement)
$$
\begin{eqnarray*}
V&=&mgL(1-\cos\theta)\approx mgL\frac{x^2}{2L^2},\\
K&=&\frac{1}{2}mL^2\dot{\theta}^2\approx \frac{m}{2}\dot{x}^2.
\end{eqnarray*}
$$
For small $x$ Newton's 2nd law becomes
$$
m\ddot{x}=-\frac{mg}{L}x,
$$
and the spring constant would appear to be $k=mg/L$, which makes the
frequency equal to $\omega_0=\sqrt{g/L}$. Note that the frequency is
independent of the mass.
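As a quick numerical check (a small sketch, not part of the derivation above, with an assumed length $L=1$ m), we can evaluate this small-angle frequency and the corresponding period.
from math import sqrt, pi
# Small-angle pendulum frequency and period for an assumed length L = 1 m
g = 9.81   # m/s^2
L = 1.0    # m
omega0 = sqrt(g/L)
T = 2*pi/omega0
print("omega_0 = %.3f rad/s, period T = %.3f s" % (omega0, T))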
## Damped Oscillators
We consider only the case where the damping force is proportional to
the velocity. This is counter to dragging friction, where the force is
proportional in strength to the normal force and independent of
velocity, and is also inconsistent with wind resistance, where the
magnitude of the drag force is proportional the square of the
velocity. Rolling resistance does seem to be mainly proportional to
the velocity. However, the main motivation for considering damping
forces proportional to the velocity is that the math is more
friendly. This is because the differential equation is linear,
i.e. each term is of order $x$, $\dot{x}$, $\ddot{x}\cdots$, or even
terms with no mention of $x$, and there are no terms such as $x^2$ or
$x\ddot{x}$. The equations of motion for a spring with damping force
$-b\dot{x}$ are
<!-- Equation labels as ordinary links -->
<div id="_auto3"></div>
$$
\begin{equation}
m\ddot{x}+b\dot{x}+kx=0.
\label{_auto3} \tag{3}
\end{equation}
$$
Just to make the solution a bit less messy, we rewrite this equation as
<!-- Equation labels as ordinary links -->
<div id="eq:dampeddiffyq"></div>
$$
\begin{equation}
\label{eq:dampeddiffyq} \tag{4}
\ddot{x}+2\beta\dot{x}+\omega_0^2x=0,~~~~\beta\equiv b/2m,~\omega_0\equiv\sqrt{k/m}.
\end{equation}
$$
Both $\beta$ and $\omega_0$ have dimensions of inverse time. To find solutions (see appendix C in the text) you must make an educated guess at the form of the solution. To do this, first realize that the solution will need an arbitrary normalization $A$ because the equation is linear. Secondly, realize that if the form is
<!-- Equation labels as ordinary links -->
<div id="_auto4"></div>
$$
\begin{equation}
x=Ae^{rt}
\label{_auto4} \tag{5}
\end{equation}
$$
then each derivative simply brings out an extra power of $r$. This
means that the $Ae^{rt}$ factors out and one can simply solve an
equation for $r$. Plugging this form into Eq. ([4](#eq:dampeddiffyq)),
<!-- Equation labels as ordinary links -->
<div id="_auto5"></div>
$$
\begin{equation}
r^2+2\beta r+\omega_0^2=0.
\label{_auto5} \tag{6}
\end{equation}
$$
Because this is a quadratic equation there will be two solutions,
<!-- Equation labels as ordinary links -->
<div id="_auto6"></div>
$$
\begin{equation}
r=-\beta\pm\sqrt{\beta^2-\omega_0^2}.
\label{_auto6} \tag{7}
\end{equation}
$$
We refer to the two solutions as $r_1$ and $r_2$ corresponding to the
$+$ and $-$ roots. As expected, there should be two arbitrary
constants involved in the solution,
<!-- Equation labels as ordinary links -->
<div id="_auto7"></div>
$$
\begin{equation}
x=A_1e^{r_1t}+A_2e^{r_2t},
\label{_auto7} \tag{8}
\end{equation}
$$
where the coefficients $A_1$ and $A_2$ are determined by initial
conditions.
The square root in the roots listed above, $\sqrt{\beta^2-\omega_0^2}$, will be
imaginary if the damping is small and $\beta<\omega_0$. In that case,
$r$ is complex and the factor $e^{rt}$ will have some oscillatory
behavior. If the roots are real, there will only be exponentially
decaying solutions. There are three cases:
### Underdamped: $\beta<\omega_0$
$$
\begin{eqnarray}
x&=&A_1e^{-\beta t}e^{i\omega't}+A_2e^{-\beta t}e^{-i\omega't},~~\omega'\equiv\sqrt{\omega_0^2-\beta^2}\\
\nonumber
&=&(A_1+A_2)e^{-\beta t}\cos\omega't+i(A_1-A_2)e^{-\beta t}\sin\omega't.
\end{eqnarray}
$$
Here we have made use of the identity
$e^{i\omega't}=\cos\omega't+i\sin\omega't$. Because the constants are
arbitrary, and because the real and imaginary parts are both solutions
individually, we can simply consider the real part of the solution
alone:
<!-- Equation labels as ordinary links -->
<div id="eq:homogsolution"></div>
$$
\begin{eqnarray}
\label{eq:homogsolution} \tag{9}
x&=&B_1e^{-\beta t}\cos\omega't+B_2e^{-\beta t}\sin\omega't,\\
\nonumber
\omega'&\equiv&\sqrt{\omega_0^2-\beta^2}.
\end{eqnarray}
$$
### Critical damping: $\beta=\omega_0$
In this case the two terms involving $r_1$ and $r_2$ are identical
because $\omega'=0$. Because we need two arbitrary constants, there
needs to be another solution. This is found by simply guessing, or by
taking the limit of $\omega'\rightarrow 0$ from the underdamped
solution. The solution is then
<!-- Equation labels as ordinary links -->
<div id="eq:criticallydamped"></div>
$$
\begin{equation}
\label{eq:criticallydamped} \tag{10}
x=Ae^{-\beta t}+Bte^{-\beta t}.
\end{equation}
$$
The critically damped solution is interesting because the solution
approaches zero quickly, but does not oscillate. For a problem with
zero initial velocity, the solution never crosses zero. This is a good
choice for designing shock absorbers or swinging doors.
### Overdamped: $\beta>\omega_0$
$$
\begin{eqnarray}
x&=&A_1e^{-\left(\beta+\sqrt{\beta^2-\omega_0^2}\right)t}+A_2e^{-\left(\beta-\sqrt{\beta^2-\omega_0^2}\right)t}
\end{eqnarray}
$$
This solution will also never pass the origin more than once, and then
only if the initial velocity is large and directed toward the origin.
Given $b$, $m$ and $\omega_0$, find $x(t)$ for a particle whose
initial position is $x=0$ and has initial velocity $v_0$ (assuming an
underdamped solution).
The solution is of the form,
$$
\begin{eqnarray*}
x&=&e^{-\beta t}\left[A_1\cos(\omega' t)+A_2\sin\omega't\right],\\
\dot{x}&=&-\beta x+\omega'e^{-\beta t}\left[-A_1\sin\omega't+A_2\cos\omega't\right].\\
\omega'&\equiv&\sqrt{\omega_0^2-\beta^2},~~~\beta\equiv b/2m.
\end{eqnarray*}
$$
From the initial conditions, $A_1=0$ because $x(0)=0$ and $\omega'A_2=v_0$. So
$$
x=\frac{v_0}{\omega'}e^{-\beta t}\sin\omega't.
$$
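Before turning to the numerics, it can be instructive to plot the three damping regimes side by side. The short sketch below is only an illustration using the analytic solutions above, with the assumed values $\omega_0=1$, $x(0)=1$ and $v(0)=0$: the underdamped solution oscillates, while the critically damped and overdamped solutions decay without crossing zero.
import numpy as np
import matplotlib.pyplot as plt
# Analytic solutions for the three damping regimes, omega_0 = 1, x(0)=1, v(0)=0
omega0 = 1.0
t = np.linspace(0.0, 20.0, 1000)
# Underdamped, beta < omega_0: x = e^{-beta t}(B1 cos(omega' t) + B2 sin(omega' t))
beta = 0.2
omega_prime = np.sqrt(omega0**2 - beta**2)
x_under = np.exp(-beta*t)*(np.cos(omega_prime*t) + (beta/omega_prime)*np.sin(omega_prime*t))
# Critically damped, beta = omega_0: x = (A + B t) e^{-beta t}
beta = omega0
x_crit = (1.0 + beta*t)*np.exp(-beta*t)
# Overdamped, beta > omega_0: x = A1 e^{r1 t} + A2 e^{r2 t}
beta = 2.0
r1 = -beta + np.sqrt(beta**2 - omega0**2)
r2 = -beta - np.sqrt(beta**2 - omega0**2)
A1 = -r2/(r1 - r2)   # from x(0)=1, v(0)=0
A2 = 1.0 - A1
x_over = A1*np.exp(r1*t) + A2*np.exp(r2*t)
plt.plot(t, x_under, label=r'underdamped, $\beta=0.2\omega_0$')
plt.plot(t, x_crit, label=r'critically damped, $\beta=\omega_0$')
plt.plot(t, x_over, label=r'overdamped, $\beta=2\omega_0$')
plt.xlabel('t')
plt.ylabel('x(t)')
plt.legend()
plt.show()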
## Our Sliding Block Code
Here we study first the case without additional friction term and scale our equation
in terms of a dimensionless time $\tau$.
Let us remind ourselves about the differential equation we want to solve (the general case with damping due to friction)
$$
m\frac{d^2x}{dt^2} + b\frac{dx}{dt}+kx(t) =0.
$$
We divide by $m$ and introduce $\omega_0^2=k/m$ and obtain
$$
\frac{d^2x}{dt^2} + \frac{b}{m}\frac{dx}{dt}+\omega_0^2x(t) =0.
$$
Thereafter we introduce a dimensionless time $\tau = t\omega_0$ (check
that the dimensionality is correct) and rewrite our equation as
$$
\frac{d^2x}{d\tau^2} + \frac{b}{m\omega_0}\frac{dx}{d\tau}+x(\tau) =0.
$$
We then define $\gamma = b/(2m\omega_0)$ and rewrite our equations as
$$
\frac{d^2x}{d\tau^2} + 2\gamma\frac{dx}{d\tau}+x(\tau) =0.
$$
This is the equation we will code below. The first version employs the Euler-Cromer method.
%matplotlib inline
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
from pylab import plt, mpl
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
DeltaT = 0.001
#set up arrays
tfinal = 20 # final dimensionless time
n = ceil(tfinal/DeltaT)
# set up arrays for t, v, and x
t = np.zeros(n)
v = np.zeros(n)
x = np.zeros(n)
# Initial conditions as simple one-dimensional arrays of time
x0 = 1.0
v0 = 0.0
x[0] = x0
v[0] = v0
gamma = 0.0
# Start integrating using Euler-Cromer's method
for i in range(n-1):
# Set up the acceleration
# Here you could have defined your own function for this
a = -2*gamma*v[i]-x[i]
# update velocity, time and position
v[i+1] = v[i] + DeltaT*a
x[i+1] = x[i] + DeltaT*v[i+1]
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
#ax.set_xlim(0, tfinal)
ax.set_ylabel('x[m]')
ax.set_xlabel('t[s]')
ax.plot(t, x)
fig.tight_layout()
save_fig("BlockEulerCromer")
plt.show()
When setting up the value of $\gamma$ we see that for $\gamma=0$ we get the simple oscillatory motion with no damping.
Choosing $\gamma < 1$ leads to the classical underdamped case with oscillatory motion, but where the motion comes to an end.
Choosing $\gamma =1$ leads to what normally is called critical damping, and $\gamma> 1$ leads to overdamping.
Try it out and try also to change the initial position and velocity. Setting $\gamma=1$
yields a situation, as discussed above, where the solution approaches zero quickly and does not oscillate. With zero initial velocity it will never cross zero.
## Sinusoidally Driven Oscillators
Here, we consider the force
<!-- Equation labels as ordinary links -->
<div id="_auto8"></div>
$$
\begin{equation}
F=-kx-b\dot{x}+F_0\cos\omega t,
\label{_auto8} \tag{11}
\end{equation}
$$
which leads to the differential equation
<!-- Equation labels as ordinary links -->
<div id="eq:drivenosc"></div>
$$
\begin{equation}
\label{eq:drivenosc} \tag{12}
\ddot{x}+2\beta\dot{x}+\omega_0^2x=(F_0/m)\cos\omega t.
\end{equation}
$$
Consider a single solution with no arbitrary constants, which we will
call a {\it particular solution}, $x_p(t)$. It should be emphasized
that this is {\bf A} particular solution; there exists an
infinite number of such solutions because the general solution must
have two arbitrary constants. Now consider solutions to the same
equation without the driving term, which include two arbitrary
constants. These are called either {\it homogenous solutions} or {\it
complementary solutions}, and were given in the previous section,
e.g. Eq. ([9](#eq:homogsolution)) for the underdamped case. The
homogenous solution already incorporates the two arbitrary constants,
so any sum of a homogenous solution and a particular solution will
represent the {\it general solution} of the equation. The general
solution incorporates the two arbitrary constants $A$ and $B$ to
accommodate the two initial conditions. One could have picked a
different particular solution, i.e. the original particular solution
plus any homogenous solution with the arbitrary constants $A_p$ and
$B_p$ chosen at will. When one adds in the homogenous solution, which
has adjustable constants with arbitrary constants $A'$ and $B'$, to
the new particular solution, one can get the same general solution by
simply adjusting the new constants such that $A'+A_p=A$ and
$B'+B_p=B$. Thus, the choice of $A_p$ and $B_p$ are irrelevant, and
when choosing the particular solution it is best to make the simplest
choice possible.
To find a particular solution, one first guesses at the form,
<!-- Equation labels as ordinary links -->
<div id="eq:partform"></div>
$$
\begin{equation}
\label{eq:partform} \tag{13}
x_p(t)=D\cos(\omega t-\delta),
\end{equation}
$$
and rewrite the differential equation as
<!-- Equation labels as ordinary links -->
<div id="_auto9"></div>
$$
\begin{equation}
D\left\{-\omega^2\cos(\omega t-\delta)-2\beta\omega\sin(\omega t-\delta)+\omega_0^2\cos(\omega t-\delta)\right\}=\frac{F_0}{m}\cos(\omega t).
\label{_auto9} \tag{14}
\end{equation}
$$
One can now use angle addition formulas to get
$$
\begin{eqnarray}
D\left\{(-\omega^2\cos\delta+2\beta\omega\sin\delta+\omega_0^2\cos\delta)\cos(\omega t)\right.&&\\
\nonumber
\left.+(-\omega^2\sin\delta-2\beta\omega\cos\delta+\omega_0^2\sin\delta)\sin(\omega t)\right\}
&=&\frac{F_0}{m}\cos(\omega t).
\end{eqnarray}
$$
The coefficients of the $\cos$ and $\sin$ terms must separately match if the expression is to hold at all times. Thus, this becomes two equations
$$
\begin{eqnarray}
D\left\{-\omega^2\cos\delta+2\beta\omega\sin\delta+\omega_0^2\cos\delta\right\}&=&\frac{F_0}{m}\\
\nonumber
-\omega^2\sin\delta-2\beta\omega\cos\delta+\omega_0^2\sin\delta&=&0.
\end{eqnarray}
$$
After dividing by $\cos\delta$, the lower expression leads to
<!-- Equation labels as ordinary links -->
<div id="_auto10"></div>
$$
\begin{equation}
\tan\delta=\frac{2\beta\omega}{\omega_0^2-\omega^2}.
\label{_auto10} \tag{15}
\end{equation}
$$
Using the identities $\tan^2\delta+1=\sec^2\delta$ and $\sin^2\delta+\cos^2\delta=1$, one can also express $\sin\delta$ and $\cos\delta$,
$$
\begin{eqnarray}
\sin\delta&=&\frac{2\beta\omega}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}},\\
\nonumber
\cos\delta&=&\frac{(\omega_0^2-\omega^2)}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}}
\end{eqnarray}
$$
Inserting the expressions for $\cos\delta$ and $\sin\delta$ into the expression for $D$,
<!-- Equation labels as ordinary links -->
<div id="eq:Ddrive"></div>
$$
\begin{equation}
\label{eq:Ddrive} \tag{16}
D=\frac{F_0/m}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}}.
\end{equation}
$$
For a given initial condition, e.g. initial displacement and velocity,
one must add the homogenous solution then solve for the two arbitrary
constants. However, because the homogenous solutions decay with time
as $e^{-\beta t}$, the particular solution is all that remains at
large times, and is therefore the steady state solution. Because the
arbitrary constants are all in the homogenous solution, all memory of
the initial conditions are lost at large times, $t>>1/\beta$.
The amplitude of the motion, $D$, is linearly proportional to the
driving force ($F_0/m$), but also depends on the driving frequency
$\omega$. For small $\beta$ the maximum will occur at
$\omega=\omega_0$. This is referred to as a resonance. In the limit
$\beta\rightarrow 0$ the amplitude at resonance approaches infinity.
## Alternative Derivation for Driven Oscillators
Here, we derive the same expressions as in Equations ([13](#eq:partform)) and ([16](#eq:Ddrive)) but express the driving forces as
$$
\begin{eqnarray}
F(t)&=&F_0e^{i\omega t},
\end{eqnarray}
$$
rather than as $F_0\cos\omega t$. The real part of $F$ is the same as before. For the differential equation,
<!-- Equation labels as ordinary links -->
<div id="eq:compdrive"></div>
$$
\begin{eqnarray}
\label{eq:compdrive} \tag{17}
\ddot{x}+2\beta\dot{x}+\omega_0^2x&=&\frac{F_0}{m}e^{i\omega t},
\end{eqnarray}
$$
one can treat $x(t)$ as a complex function. Because the operations
$d^2/dt^2$ and $d/dt$ are real and thus do not mix the real and
imaginary parts of $x(t)$, Eq. ([17](#eq:compdrive)) is effectively 2
equations. Because $e^{i\omega t}=\cos\omega t+i\sin\omega t$, the real
part of the solution for $x(t)$ gives the solution for a driving force
$F_0\cos\omega t$, and the imaginary part of $x$ corresponds to the
case where the driving force is $F_0\sin\omega t$. It is rather easy
to solve for the complex $x$ in this case, and by taking the real part
of the solution, one finds the answer for the $\cos\omega t$ driving
force.
We assume a simple form for the particular solution
<!-- Equation labels as ordinary links -->
<div id="_auto11"></div>
$$
\begin{equation}
x_p=De^{i\omega t},
\label{_auto11} \tag{18}
\end{equation}
$$
where $D$ is a complex constant.
From Eq. ([17](#eq:compdrive)) one inserts the form for $x_p$ above to get
$$
\begin{eqnarray}
D\left\{-\omega^2+2i\beta\omega+\omega_0^2\right\}e^{i\omega t}=(F_0/m)e^{i\omega t},\\
\nonumber
D=\frac{F_0/m}{(\omega_0^2-\omega^2)+2i\beta\omega}.
\end{eqnarray}
$$
The norm and phase for $D=|D|e^{-i\delta}$ can be read by inspection,
<!-- Equation labels as ordinary links -->
<div id="_auto12"></div>
$$
\begin{equation}
|D|=\frac{F_0/m}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}},~~~~\tan\delta=\frac{2\beta\omega}{\omega_0^2-\omega^2}.
\label{_auto12} \tag{19}
\end{equation}
$$
This is the same expression for $\delta$ as before. One then finds $x_p(t)$,
<!-- Equation labels as ordinary links -->
<div id="eq:fastdriven1"></div>
$$
\begin{eqnarray}
\label{eq:fastdriven1} \tag{20}
x_p(t)&=&\Re\frac{(F_0/m)e^{i\omega t-i\delta}}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}\\
\nonumber
&=&\frac{(F_0/m)\cos(\omega t-\delta)}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}.
\end{eqnarray}
$$
This is the same answer as before.
If one wished to solve for the case where $F(t)= F_0\sin\omega t$, the imaginary part of the solution would work
<!-- Equation labels as ordinary links -->
<div id="eq:fastdriven2"></div>
$$
\begin{eqnarray}
\label{eq:fastdriven2} \tag{21}
x_p(t)&=&\Im\frac{(F_0/m)e^{i\omega t-i\delta}}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}\\
\nonumber
&=&\frac{(F_0/m)\sin(\omega t-\delta)}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}.
\end{eqnarray}
$$
Consider the damped and driven harmonic oscillator worked out above. Given $F_0, m,\beta$ and $\omega_0$, solve for the complete solution $x(t)$ for the case where $F=F_0\sin\omega t$ with initial conditions $x(t=0)=0$ and $v(t=0)=0$. Assume the underdamped case.
The general solution including the arbitrary constants includes both the homogenous and particular solutions,
$$
\begin{eqnarray*}
x(t)&=&\frac{F_0}{m}\frac{\sin(\omega t-\delta)}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}
+A\cos\omega't e^{-\beta t}+B\sin\omega't e^{-\beta t}.
\end{eqnarray*}
$$
The quantities $\delta$ and $\omega'$ are given earlier in the
section, $\omega'=\sqrt{\omega_0^2-\beta^2},
\delta=\tan^{-1}(2\beta\omega/(\omega_0^2-\omega^2))$. Here, solving
the problem means finding the arbitrary constants $A$ and
$B$. Satisfying the initial conditions for the initial position and
velocity:
$$
\begin{eqnarray*}
x(t=0)=0&=&-\eta\sin\delta+A,\\
v(t=0)=0&=&\omega\eta\cos\delta-\beta A+\omega'B,\\
\eta&\equiv&\frac{F_0}{m}\frac{1}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}.
\end{eqnarray*}
$$
The problem is now reduced to 2 equations and 2 unknowns, $A$ and $B$. The solution is
$$
\begin{eqnarray}
A&=& \eta\sin\delta ,~~~B=\frac{-\omega\eta\cos\delta+\beta\eta\sin\delta}{\omega'}.
\end{eqnarray}
$$
## Resonance Widths; the $Q$ factor
From the previous two sections, the particular solution for a driving force, $F=F_0\cos\omega t$, is
$$
\begin{eqnarray}
x_p(t)&=&\frac{F_0/m}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}}\cos(\omega t-\delta),\\
\nonumber
\delta&=&\tan^{-1}\left(\frac{2\beta\omega}{\omega_0^2-\omega^2}\right).
\end{eqnarray}
$$
If one fixes the driving frequency $\omega$ and adjusts the
fundamental frequency $\omega_0=\sqrt{k/m}$, the maximum amplitude
occurs when $\omega_0=\omega$ because that is when the term from the
denominator $(\omega_0^2-\omega^2)^2+4\omega^2\beta^2$ is at a
minimum. This is akin to dialing into a radio station. However, if one
fixes $\omega_0$ and adjusts the driving frequency, one minimizes with
respect to $\omega$, i.e. sets
<!-- Equation labels as ordinary links -->
<div id="_auto13"></div>
$$
\begin{equation}
\frac{d}{d\omega}\left[(\omega_0^2-\omega^2)^2+4\omega^2\beta^2\right]=0,
\label{_auto13} \tag{22}
\end{equation}
$$
and one finds that the maximum amplitude occurs when
$\omega=\sqrt{\omega_0^2-2\beta^2}$. If $\beta$ is small relative to
$\omega_0$, one can simply state that the maximum amplitude is
<!-- Equation labels as ordinary links -->
<div id="_auto14"></div>
$$
\begin{equation}
x_{\rm max}\approx\frac{F_0}{2m\beta \omega_0}.
\label{_auto14} \tag{23}
\end{equation}
$$
The width of the resonance is usually characterized by the full width at half maximum (FWHM) of $|D|^2$, i.e. the separation of the two frequencies where the response falls to half its maximum value,
$$
\begin{eqnarray}
\frac{4\omega^2\beta^2}{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}=\frac{1}{2}.
\end{eqnarray}
$$
For small damping this occurs when $\omega=\omega_0\pm \beta$, so the $FWHM\approx 2\beta$. For the purposes of tuning to a specific frequency, one wants the width to be as small as possible. The ratio of $\omega_0$ to $FWHM$ is known as the {\it quality} factor, or $Q$ factor,
<!-- Equation labels as ordinary links -->
<div id="_auto15"></div>
$$
\begin{equation}
Q\equiv \frac{\omega_0}{2\beta}.
\label{_auto15} \tag{24}
\end{equation}
$$
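To visualize how the width of the resonance shrinks with the damping, the following short sketch (illustrative only, with $F_0/m=1$ and $\omega_0=1$ as assumed values) plots the steady-state amplitude $D$ from Eq. ([16](#eq:Ddrive)) for a few values of $\beta$, labelling each curve with the corresponding $Q=\omega_0/2\beta$.
import numpy as np
import matplotlib.pyplot as plt
# Steady-state amplitude D(omega) for assumed values F_0/m = 1 and omega_0 = 1
omega0 = 1.0
omega = np.linspace(0.01, 2.0, 500)
for beta in [0.05, 0.1, 0.3]:
    D = 1.0/np.sqrt((omega0**2 - omega**2)**2 + 4*omega**2*beta**2)
    plt.plot(omega, D, label=r'$\beta=%.2f$, $Q=%.1f$' % (beta, omega0/(2*beta)))
plt.xlabel(r'$\omega$')
plt.ylabel(r'$D$')
plt.legend()
plt.show()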
## Numerical Studies of Driven Oscillations
Solving the problem of driven oscillations numerically gives us much
more flexibility to study different types of driving forces. We can
reuse our earlier code by simply adding a driving force. If we stay in
the $x$-direction only this can be easily done by adding a term
$F_{\mathrm{ext}}(x,t)$. Note that we have kept it rather general
here, allowing for both a spatial and a temporal dependence.
Before we dive into the code, we need to briefly remind ourselves
about the equations we started with for the case with damping, namely
$$
m\frac{d^2x}{dt^2} + b\frac{dx}{dt}+kx(t) =0,
$$
with no external force applied to the system.
Let us now for simplicty assume that our external force is given by
$$
F_{\mathrm{ext}}(t) = F_0\cos{(\omega t)},
$$
where $F_0$ is a constant (what is its dimension?) and $\omega$ is the frequency of the applied external driving force.
**Small question:** would you expect energy to be conserved now?
Introducing the external force into our lovely differential equation
and dividing by $m$ and introducing $\omega_0^2=k/m$ we have
$$
\frac{d^2x}{dt^2} + \frac{b}{m}\frac{dx}{dt}+\omega_0^2x(t) =\frac{F_0}{m}\cos{(\omega t)},
$$
Thereafter we introduce a dimensionless time $\tau = t\omega_0$
and a dimensionless frequency $\tilde{\omega}=\omega/\omega_0$. We have then
$$
\frac{d^2x}{d\tau^2} + \frac{b}{m\omega_0}\frac{dx}{d\tau}+x(\tau) =\frac{F_0}{m\omega_0^2}\cos{(\tilde{\omega}\tau)},
$$
Introducing a new amplitude $\tilde{F} =F_0/(m\omega_0^2)$ (check dimensionality again) we have
$$
\frac{d^2x}{d\tau^2} + \frac{b}{m\omega_0}\frac{dx}{d\tau}+x(\tau) =\tilde{F}\cos{(\tilde{\omega}\tau)}.
$$
Our final step, as we did in the case of various types of damping, is
to define $\gamma = b/(2m\omega_0)$ and rewrite our equations as
$$
\frac{d^2x}{d\tau^2} + 2\gamma\frac{dx}{d\tau}+x(\tau) =\tilde{F}\cos{(\tilde{\omega}\tau)}.
$$
This is the equation we will code below using the Euler-Cromer method.
DeltaT = 0.001
#set up arrays
tfinal = 20 # final dimensionless time
n = ceil(tfinal/DeltaT)
# set up arrays for t, v, and x
t = np.zeros(n)
v = np.zeros(n)
x = np.zeros(n)
# Initial conditions as one-dimensional arrays of time
x0 = 1.0
v0 = 0.0
x[0] = x0
v[0] = v0
gamma = 0.2
Omegatilde = 0.5
Ftilde = 1.0
# Start integrating using Euler-Cromer's method
for i in range(n-1):
# Set up the acceleration
# Here you could have defined your own function for this
a = -2*gamma*v[i]-x[i]+Ftilde*cos(t[i]*Omegatilde)
# update velocity, time and position
v[i+1] = v[i] + DeltaT*a
x[i+1] = x[i] + DeltaT*v[i+1]
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_ylabel('x[m]')
ax.set_xlabel('t[s]')
ax.plot(t, x)
fig.tight_layout()
save_fig("ForcedBlockEulerCromer")
plt.show()
In the above example we have focused on the Euler-Cromer method. This
method has a local truncation error which is proportional to $\Delta t^2$
and thereby a global error which is proportional to $\Delta t$.
We can improve this by using the Runge-Kutta family of
methods. The widely popular Runge-Kutta to fourth order, or just **RK4**,
has indeed a much better truncation error. The RK4 method has a global
error which is proportional to $\Delta t^4$.
Let us revisit this method and see how we can implement it for the above example.
## Differential Equations, Runge-Kutta methods
Runge-Kutta (RK) methods are based on Taylor expansion formulae, but yield
in general better algorithms for solutions of an ordinary differential equation.
The basic philosophy is that it provides an intermediate step in the computation of $y_{i+1}$.
To see this, consider first the following definitions
<!-- Equation labels as ordinary links -->
<div id="_auto16"></div>
$$
\begin{equation}
\frac{dy}{dt}=f(t,y),
\label{_auto16} \tag{25}
\end{equation}
$$
and
<!-- Equation labels as ordinary links -->
<div id="_auto17"></div>
$$
\begin{equation}
y(t)=\int f(t,y) dt,
\label{_auto17} \tag{26}
\end{equation}
$$
and
<!-- Equation labels as ordinary links -->
<div id="_auto18"></div>
$$
\begin{equation}
y_{i+1}=y_i+ \int_{t_i}^{t_{i+1}} f(t,y) dt.
\label{_auto18} \tag{27}
\end{equation}
$$
To demonstrate the philosophy behind RK methods, let us consider
the second-order RK method, RK2.
The first approximation consists in Taylor expanding $f(t,y)$
around the center of the integration interval $t_i$ to $t_{i+1}$,
that is, at $t_i+h/2$, $h$ being the step.
Using the midpoint formula for an integral,
defining $y(t_i+h/2) = y_{i+1/2}$ and
$t_i+h/2 = t_{i+1/2}$, we obtain
<!-- Equation labels as ordinary links -->
<div id="_auto19"></div>
$$
\begin{equation}
\int_{t_i}^{t_{i+1}} f(t,y) dt \approx hf(t_{i+1/2},y_{i+1/2}) +O(h^3).
\label{_auto19} \tag{28}
\end{equation}
$$
This means in turn that we have
<!-- Equation labels as ordinary links -->
<div id="_auto20"></div>
$$
\begin{equation}
y_{i+1}=y_i + hf(t_{i+1/2},y_{i+1/2}) +O(h^3).
\label{_auto20} \tag{29}
\end{equation}
$$
However, we do not know the value of $y_{i+1/2}$. The next approximation is thus to use Euler's
method to approximate $y_{i+1/2}$. We have then
<!-- Equation labels as ordinary links -->
<div id="_auto21"></div>
$$
\begin{equation}
y_{(i+1/2)}=y_i + \frac{h}{2}\frac{dy}{dt}=y(t_i) + \frac{h}{2}f(t_i,y_i).
\label{_auto21} \tag{30}
\end{equation}
$$
This means that we can define the following algorithm for
the second-order Runge-Kutta method, RK2.
<!-- Equation labels as ordinary links -->
<div id="_auto22"></div>
$$
\begin{equation}
k_1=hf(t_i,y_i),
\label{_auto22} \tag{31}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto23"></div>
$$
\begin{equation}
k_2=hf(t_{i+1/2},y_i+k_1/2),
\label{_auto23} \tag{32}
\end{equation}
$$
with the final value
<!-- Equation labels as ordinary links -->
<div id="_auto24"></div>
$$
\begin{equation}
y_{i+1}\approx y_i + k_2 +O(h^3).
\label{_auto24} \tag{33}
\end{equation}
$$
The difference from the previous one-step methods
is that we now need an intermediate step in our evaluation,
namely $t_i+h/2 = t_{(i+1/2)}$ where we evaluate the derivative $f$.
This involves more operations, but the gain is a better stability
in the solution.
The fourth-order Runge-Kutta, RK4, has the following algorithm
$$
k_1=hf(t_i,y_i)\hspace{0.5cm} k_2=hf(t_i+h/2,y_i+k_1/2)
$$
$$
k_3=hf(t_i+h/2,y_i+k_2/2)\hspace{0.5cm} k_4=hf(t_i+h,y_i+k_3)
$$
with the final result
$$
y_{i+1}=y_i +\frac{1}{6}\left( k_1 +2k_2+2k_3+k_4\right).
$$
Thus, the algorithm consists in first calculating $k_1$
with $t_i$, $y_i$ and $f$ as inputs. Thereafter, we advance the time
by $h/2$ and calculate $k_2$, then $k_3$ and finally $k_4$. The global error goes as $O(h^4)$.
However, at this stage, if we keep adding different methods in our
main program, the code will quickly become messy and ugly. Before we
proceed, we will now introduce functions that embody the various
methods for solving differential equations. This means that we can
separate out these methods into their own functions and files (and later as classes and more
generic functions) and simply call them when needed. Similarly, we
could easily encapsulate various forces or other quantities of
interest in terms of functions. To see this, let us bring up the code
we developed above for the simple sliding block, but now only with the simple forward Euler method. We introduce
two functions, one for the simple Euler method and one for the
force.
Note that here the forward Euler method does not know the specific force function to be called.
It simply receives the force function as an argument. We can easily change the force by passing in a different function.
def ForwardEuler(v,x,t,n,Force):
for i in range(n-1):
v[i+1] = v[i] + DeltaT*Force(v[i],x[i],t[i])
x[i+1] = x[i] + DeltaT*v[i]
t[i+1] = t[i] + DeltaT
def SpringForce(v,x,t):
# note here that we have divided by mass and we return the acceleration
return -2*gamma*v-x+Ftilde*cos(t*Omegatilde)
It is easy to add a new method like the Euler-Cromer
def ForwardEulerCromer(v,x,t,n,Force):
for i in range(n-1):
a = Force(v[i],x[i],t[i])
v[i+1] = v[i] + DeltaT*a
x[i+1] = x[i] + DeltaT*v[i+1]
t[i+1] = t[i] + DeltaT
and the Velocity Verlet method (be careful with time-dependence here, it is not an ideal method for non-conservative forces)
def VelocityVerlet(v,x,t,n,Force):
    for i in range(n-1):
        a = Force(v[i],x[i],t[i])
        # position update needs the factor 0.5*DeltaT**2 on the acceleration
        x[i+1] = x[i] + DeltaT*v[i]+0.5*DeltaT*DeltaT*a
        # advance the time before evaluating the force at the new step
        t[i+1] = t[i] + DeltaT
        anew = Force(v[i],x[i+1],t[i+1])
        v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
Finally, we can now add the Runge-Kutta2 method via a new function
def RK2(v,x,t,n,Force):
for i in range(n-1):
# Setting up k1
k1x = DeltaT*v[i]
k1v = DeltaT*Force(v[i],x[i],t[i])
# Setting up k2
vv = v[i]+k1v*0.5
xx = x[i]+k1x*0.5
k2x = DeltaT*vv
k2v = DeltaT*Force(vv,xx,t[i]+DeltaT*0.5)
# Final result
x[i+1] = x[i]+k2x
v[i+1] = v[i]+k2v
t[i+1] = t[i]+DeltaT
Similarly, we can add the fourth-order Runge-Kutta method, RK4, via a new function
def RK4(v,x,t,n,Force):
for i in range(n-1):
# Setting up k1
k1x = DeltaT*v[i]
k1v = DeltaT*Force(v[i],x[i],t[i])
# Setting up k2
vv = v[i]+k1v*0.5
xx = x[i]+k1x*0.5
k2x = DeltaT*vv
k2v = DeltaT*Force(vv,xx,t[i]+DeltaT*0.5)
# Setting up k3
vv = v[i]+k2v*0.5
xx = x[i]+k2x*0.5
k3x = DeltaT*vv
k3v = DeltaT*Force(vv,xx,t[i]+DeltaT*0.5)
# Setting up k4
vv = v[i]+k3v
xx = x[i]+k3x
k4x = DeltaT*vv
k4v = DeltaT*Force(vv,xx,t[i]+DeltaT)
# Final result
x[i+1] = x[i]+(k1x+2*k2x+2*k3x+k4x)/6.
v[i+1] = v[i]+(k1v+2*k2v+2*k3v+k4v)/6.
t[i+1] = t[i] + DeltaT
The Runge-Kutta family of methods are particularly useful when we have a time-dependent acceleration.
If we have forces which depend only on the spatial degrees of freedom (no velocity and/or time-dependence), then energy conserving methods like the Velocity Verlet or the Euler-Cromer method are preferred. As soon as we introduce an explicit time-dependence and/or add dissipative forces like friction or air resistance, methods from the Runge-Kutta family are well suited.
The code below uses the Runge-Kutta4 methods.
DeltaT = 0.001
#set up arrays
tfinal = 20 # final dimensionless time
n = ceil(tfinal/DeltaT)
# set up arrays for t, v, and x
t = np.zeros(n)
v = np.zeros(n)
x = np.zeros(n)
# Initial conditions (can change to more than one dim)
x0 = 1.0
v0 = 0.0
x[0] = x0
v[0] = v0
gamma = 0.2
Omegatilde = 0.5
Ftilde = 1.0
# Start integrating using the RK4 method
# Note that we define the force function as a SpringForce
RK4(v,x,t,n,SpringForce)
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_ylabel('x[m]')
ax.set_xlabel('t[s]')
ax.plot(t, x)
fig.tight_layout()
save_fig("ForcedBlockRK4")
plt.show()
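To see the difference in global error between the two integrators in practice, the following minimal convergence check (a sketch reusing the ForwardEulerCromer, RK4 and SpringForce functions defined above) switches off damping and driving, so that the exact dimensionless solution is $x(\tau)=\cos{\tau}$ for $x(0)=1$ and $v(0)=0$, and prints the maximum deviation for a few step sizes. The error should shrink roughly linearly with $\Delta t$ for Euler-Cromer and much faster, as $\Delta t^4$, for RK4.
import numpy as np
from math import ceil
# Convergence sketch: with gamma = Ftilde = 0 the exact solution is x(tau) = cos(tau)
gamma = 0.0
Ftilde = 0.0
Omegatilde = 0.5   # irrelevant when Ftilde = 0
tfinal = 10.0
for DeltaT in [0.1, 0.05, 0.025]:
    n = int(ceil(tfinal/DeltaT))
    # Euler-Cromer run
    t = np.zeros(n); v = np.zeros(n); x = np.zeros(n); x[0] = 1.0
    ForwardEulerCromer(v, x, t, n, SpringForce)
    err_ec = np.max(np.abs(x - np.cos(t)))
    # RK4 run
    t = np.zeros(n); v = np.zeros(n); x = np.zeros(n); x[0] = 1.0
    RK4(v, x, t, n, SpringForce)
    err_rk4 = np.max(np.abs(x - np.cos(t)))
    print("DeltaT = %.3f  Euler-Cromer error = %.2e  RK4 error = %.2e" % (DeltaT, err_ec, err_rk4))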
## Principle of Superposition and Periodic Forces (Fourier Transforms)
If one has several driving forces, $F(t)=\sum_n F_n(t)$, one can find
the particular solution to each $F_n$, $x_{pn}(t)$, and the particular
solution for the entire driving force is
<!-- Equation labels as ordinary links -->
<div id="_auto25"></div>
$$
\begin{equation}
x_p(t)=\sum_nx_{pn}(t).
\label{_auto25} \tag{34}
\end{equation}
$$
This is known as the principle of superposition. It only applies when
the homogenous equation is linear. If there were an anharmonic term
such as $x^3$ in the homogenous equation, then when one summed various
solutions, the term $x^3=(\sum_n x_n)^3$ would produce cross
terms. Superposition is especially useful when $F(t)$ can be written
as a sum of sinusoidal terms, because the solution for each
sinusoidal (sine or cosine) term is analytic, as we saw above.
Driving forces are often periodic, even when they are not
sinusoidal. Periodicity implies that for some time $\tau$
$$
\begin{eqnarray}
F(t+\tau)=F(t).
\end{eqnarray}
$$
One example of a non-sinusoidal periodic force is a square wave. Many
components in electric circuits are non-linear, e.g. diodes, which
makes many wave forms non-sinusoidal even when the circuits are being
driven by purely sinusoidal sources.
The code here shows a typical example of such a square wave generated using the functionality included in the **scipy** Python package. We have used a period of $\tau=0.2$.
import numpy as np
import math
from scipy import signal
import matplotlib.pyplot as plt
# number of points
n = 500
# start and final times
t0 = 0.0
tn = 1.0
# Period
t = np.linspace(t0, tn, n, endpoint=False)
SqrSignal = np.zeros(n)
SqrSignal = 1.0+signal.square(2*np.pi*5*t)
plt.plot(t, SqrSignal)
plt.ylim(-0.5, 2.5)
plt.show()
For the sinusoidal example studied in the previous subsections the
period is $\tau=2\pi/\omega$. However, higher harmonics can also
satisfy the periodicity requirement. In general, any force that
satisfies the periodicity requirement can be expressed as a sum over
harmonics,
<!-- Equation labels as ordinary links -->
<div id="_auto26"></div>
$$
\begin{equation}
F(t)=\frac{f_0}{2}+\sum_{n>0} f_n\cos(2n\pi t/\tau)+g_n\sin(2n\pi t/\tau).
\label{_auto26} \tag{35}
\end{equation}
$$
From the previous subsection, one can write down the answer for
$x_{pn}(t)$, by substituting $f_n/m$ or $g_n/m$ for $F_0/m$ into Eq.s
([20](#eq:fastdriven1)) or ([21](#eq:fastdriven2)) respectively. By
writing each factor $2n\pi t/\tau$ as $n\omega t$, with $\omega\equiv
2\pi/\tau$,
<!-- Equation labels as ordinary links -->
<div id="eq:fourierdef1"></div>
$$
\begin{equation}
\label{eq:fourierdef1} \tag{36}
F(t)=\frac{f_0}{2}+\sum_{n>0}f_n\cos(n\omega t)+g_n\sin(n\omega t).
\end{equation}
$$
The solutions for $x(t)$ then come from replacing $\omega$ with
$n\omega$ for each term in the particular solution in Equations
([13](#eq:partform)) and ([16](#eq:Ddrive)),
$$
\begin{eqnarray}
x_p(t)&=&\frac{f_0}{2k}+\sum_{n>0} \alpha_n\cos(n\omega t-\delta_n)+\beta_n\sin(n\omega t-\delta_n),\\
\nonumber
\alpha_n&=&\frac{f_n/m}{\sqrt{((n\omega)^2-\omega_0^2)^2+4\beta^2n^2\omega^2}},\\
\nonumber
\beta_n&=&\frac{g_n/m}{\sqrt{((n\omega)^2-\omega_0^2)^2+4\beta^2n^2\omega^2}},\\
\nonumber
\delta_n&=&\tan^{-1}\left(\frac{2\beta n\omega}{\omega_0^2-n^2\omega^2}\right).
\end{eqnarray}
$$
Because the forces have been applied for a long time, any non-zero
damping eliminates the homogenous parts of the solution, so one need
only consider the particular solution for each $n$.
The problem will be considered solved if one can find expressions for the
coefficients $f_n$ and $g_n$, even though the solutions are expressed
as an infinite sum. The coefficients can be extracted from the
function $F(t)$ by
<!-- Equation labels as ordinary links -->
<div id="eq:fourierdef2"></div>
$$
\begin{eqnarray}
\label{eq:fourierdef2} \tag{37}
f_n&=&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~F(t)\cos(2n\pi t/\tau),\\
\nonumber
g_n&=&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~F(t)\sin(2n\pi t/\tau).
\end{eqnarray}
$$
To check the consistency of these expressions and to verify
Eq. ([37](#eq:fourierdef2)), one can insert the expansion of $F(t)$ in
Eq. ([36](#eq:fourierdef1)) into the expression for the coefficients in
Eq. ([37](#eq:fourierdef2)) and see whether
$$
\begin{eqnarray}
f_n&=?&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~\left\{
\frac{f_0}{2}+\sum_{m>0}f_m\cos(m\omega t)+g_m\sin(m\omega t)
\right\}\cos(n\omega t).
\end{eqnarray}
$$
Immediately, one can throw away all the terms with $g_m$ because they
convolute an even and an odd function. The term with $f_0/2$
disappears because $\cos(n\omega t)$ is equally positive and negative
over the interval and will integrate to zero. For all the terms
$f_m\cos(m\omega t)$ appearing in the sum, one can use angle addition
formulas to see that $\cos(m\omega t)\cos(n\omega
t)=(1/2)(\cos[(m+n)\omega t]+\cos[(m-n)\omega t])$. This will integrate
to zero unless $m=n$. In that case the $m=n$ term gives
<!-- Equation labels as ordinary links -->
<div id="_auto27"></div>
$$
\begin{equation}
\int_{-\tau/2}^{\tau/2}dt~\cos^2(m\omega t)=\frac{\tau}{2},
\label{_auto27} \tag{38}
\end{equation}
$$
and
$$
\begin{eqnarray}
f_n&=?&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~f_n/2\\
\nonumber
&=&f_n~\checkmark.
\end{eqnarray}
$$
The same method can be used to check for the consistency of $g_n$.
Consider the driving force:
<!-- Equation labels as ordinary links -->
<div id="_auto28"></div>
$$
\begin{equation}
F(t)=At/\tau,~~-\tau/2<t<\tau/2,~~~F(t+\tau)=F(t).
\label{_auto28} \tag{39}
\end{equation}
$$
Find the Fourier coefficients $f_n$ and $g_n$ for all $n$ using Eq. ([37](#eq:fourierdef2)).
Because $F(t)$ is odd, only the sine coefficients enter by symmetry, i.e. $f_n=0$. One can find $g_n$ by integrating by parts,
<!-- Equation labels as ordinary links -->
<div id="eq:fouriersolution"></div>
$$
\begin{eqnarray}
\label{eq:fouriersolution} \tag{40}
g_n&=&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2}dt~\sin(n\omega t) \frac{At}{\tau}\\
\nonumber
u&=&t,~dv=\sin(n\omega t)dt,~v=-\cos(n\omega t)/(n\omega),\\
\nonumber
g_n&=&\frac{-2A}{n\omega \tau^2}\int_{-\tau/2}^{\tau/2}dt~\cos(n\omega t)
+\left.2A\frac{-t\cos(n\omega t)}{n\omega\tau^2}\right|_{-\tau/2}^{\tau/2}.
\end{eqnarray}
$$
The first term is zero because $\cos(n\omega t)$ will be equally
positive and negative over the interval. Using the fact that
$\omega\tau=2\pi$,
$$
\begin{eqnarray}
g_n&=&-\frac{2A}{2n\pi}\cos(n\omega\tau/2)\\
\nonumber
&=&-\frac{A}{n\pi}\cos(n\pi)\\
\nonumber
&=&\frac{A}{n\pi}(-1)^{n+1}.
\end{eqnarray}
$$
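As a quick numerical sanity check (a small sketch with the assumed values $A=1$ and $\tau=1$), we can evaluate the integrals in Eq. ([37](#eq:fourierdef2)) directly for this sawtooth force and compare with the closed-form result $g_n=A(-1)^{n+1}/(n\pi)$; the cosine coefficients $f_n$ should come out numerically zero.
import numpy as np
# Numerical check of the Fourier coefficients for F(t) = A t/tau, assuming A = tau = 1
A = 1.0
tau = 1.0
omega = 2*np.pi/tau
t = np.linspace(-tau/2, tau/2, 20001)
F = A*t/tau
for n in range(1, 5):
    f_n = (2.0/tau)*np.trapz(F*np.cos(n*omega*t), t)
    g_n = (2.0/tau)*np.trapz(F*np.sin(n*omega*t), t)
    print(n, f_n, g_n, A*(-1)**(n+1)/(n*np.pi))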
## Fourier Series
More text will come here; chapters 5.7-5.8 of Taylor are discussed
during the lectures. The code here uses the Fourier series discussed
in chapter 5.7 for a square wave signal. The equations for the
coefficients are discussed in Taylor section 5.7, see Example
5.4. The code here visualizes the various approximations given by
Fourier series compared with a square wave with period $T=0.2$, width
$0.1$ and max value $F=2$. We see that when we increase the number of
components in the Fourier series, the Fourier series approximation gets closer and closer to the square wave signal.
import numpy as np
import math
from scipy import signal
import matplotlib.pyplot as plt
# number of points
n = 500
# start and final times
t0 = 0.0
tn = 1.0
# Period
T =0.2
# Max value of square signal
Fmax= 2.0
# Width of signal
Width = 0.1
t = np.linspace(t0, tn, n, endpoint=False)
SqrSignal = np.zeros(n)
FourierSeriesSignal = np.zeros(n)
SqrSignal = 1.0+signal.square(2*np.pi*5*t+np.pi*Width/T)
a0 = Fmax*Width/T
FourierSeriesSignal = a0
Factor = 2.0*Fmax/np.pi
for i in range(1,500):
FourierSeriesSignal += Factor/(i)*np.sin(np.pi*i*Width/T)*np.cos(i*t*2*np.pi/T)
plt.plot(t, SqrSignal)
plt.plot(t, FourierSeriesSignal)
plt.ylim(-0.5, 2.5)
plt.show()
## Solving differential equations with Fourier series
The material here was discussed during the lecture of February 19 and 21.
It is also covered by Taylor in section 5.8.
## Response to Transient Force
Consider a particle at rest in the bottom of an underdamped harmonic
oscillator, that then feels a sudden impulse, or change in momentum,
$I=F\Delta t$ at $t=0$. This increases the velocity immediately by an
amount $v_0=I/m$ while not changing the position. One can then solve
the trajectory by solving Eq. ([9](#eq:homogsolution)) with initial
conditions $v_0=I/m$ and $x_0=0$. This gives
<!-- Equation labels as ordinary links -->
<div id="_auto29"></div>
$$
\begin{equation}
x(t)=\frac{I}{m\omega'}e^{-\beta t}\sin\omega't, ~~t>0.
\label{_auto29} \tag{41}
\end{equation}
$$
Here, $\omega'=\sqrt{\omega_0^2-\beta^2}$. For an impulse $I_i$ that
occurs at time $t_i$ the trajectory would be
<!-- Equation labels as ordinary links -->
<div id="_auto30"></div>
$$
\begin{equation}
x(t)=\frac{I_i}{m\omega'}e^{-\beta (t-t_i)}\sin[\omega'(t-t_i)] \Theta(t-t_i),
\label{_auto30} \tag{42}
\end{equation}
$$
where $\Theta(t-t_i)$ is a step function, i.e. $\Theta(x)$ is zero for
$x<0$ and unity for $x>0$. If there were several impulses, linear
superposition tells us that we can sum over each contribution,
<!-- Equation labels as ordinary links -->
<div id="_auto31"></div>
$$
\begin{equation}
x(t)=\sum_i\frac{I_i}{m\omega'}e^{-\beta(t-t_i)}\sin[\omega'(t-t_i)]\Theta(t-t_i)
\label{_auto31} \tag{43}
\end{equation}
$$
Now one can consider a series of impulses at times separated by
$\Delta t$, where each impulse is given by $F_i\Delta t$. The sum
above now becomes an integral,
<!-- Equation labels as ordinary links -->
<div id="eq:Greeny"></div>
$$
\begin{eqnarray}\label{eq:Greeny} \tag{44}
x(t)&=&\int_{-\infty}^\infty dt'~F(t')\frac{e^{-\beta(t-t')}\sin[\omega'(t-t')]}{m\omega'}\Theta(t-t')\\
\nonumber
&=&\int_{-\infty}^\infty dt'~F(t')G(t-t'),\\
\nonumber
G(\Delta t)&=&\frac{e^{-\beta\Delta t}\sin[\omega' \Delta t]}{m\omega'}\Theta(\Delta t)
\end{eqnarray}
$$
The quantity
$e^{-\beta(t-t')}\sin[\omega'(t-t')]\Theta(t-t')/(m\omega')$ is called a
Green's function, $G(t-t')$. It describes the response at $t$ due to a
force applied at a time $t'$, and is a function of $t-t'$. The step
function ensures that the response does not occur before the force is
applied. One should remember that the form for $G$ would change if the
oscillator were either critically- or over-damped.
When performing the integral in Eq. ([44](#eq:Greeny)) one can use
angle addition formulas to factor out the part with the $t'$
dependence in the integrand,
<!-- Equation labels as ordinary links -->
<div id="eq:Greeny2"></div>
$$
\begin{eqnarray}
\label{eq:Greeny2} \tag{45}
x(t)&=&\frac{1}{m\omega'}e^{-\beta t}\left[I_c(t)\sin(\omega't)-I_s(t)\cos(\omega't)\right],\\
\nonumber
I_c(t)&\equiv&\int_{-\infty}^t dt'~F(t')e^{\beta t'}\cos(\omega't'),\\
\nonumber
I_s(t)&\equiv&\int_{-\infty}^t dt'~F(t')e^{\beta t'}\sin(\omega't').
\end{eqnarray}
$$
If the time $t$ is beyond any time at which the force acts,
$F(t'>t)=0$, the coefficients $I_c$ and $I_s$ become independent of
$t$.
Consider an undamped oscillator ($\beta\rightarrow 0$), with
characteristic frequency $\omega_0$ and mass $m$, that is at rest
until it feels a force described by a Gaussian form,
$$
\begin{eqnarray*}
F(t)&=&F_0 \exp\left\{\frac{-t^2}{2\tau^2}\right\}.
\end{eqnarray*}
$$
For large times ($t>>\tau$), where the force has died off, find
$x(t)$. Solve for the coefficients $I_c$ and $I_s$ in
Eq. ([45](#eq:Greeny2)). Because the Gaussian is an even function,
$I_s=0$, and one need only solve for $I_c$,
$$
\begin{eqnarray*}
I_c&=&F_0\int_{-\infty}^\infty dt'~e^{-t^{\prime 2}/(2\tau^2)}\cos(\omega_0 t')\\
&=&\Re F_0 \int_{-\infty}^\infty dt'~e^{-t^{\prime 2}/(2\tau^2)}e^{i\omega_0 t'}\\
&=&\Re F_0 \int_{-\infty}^\infty dt'~e^{-(t'-i\omega_0\tau^2)^2/(2\tau^2)}e^{-\omega_0^2\tau^2/2}\\
&=&F_0\tau \sqrt{2\pi} e^{-\omega_0^2\tau^2/2}.
\end{eqnarray*}
$$
The third step involved completing the square, and the final step used the fact that the integral
$$
\begin{eqnarray*}
\int_{-\infty}^\infty dx~e^{-x^2/2}&=&\sqrt{2\pi}.
\end{eqnarray*}
$$
To verify this integral, consider its square and change to polar coordinates,
$$
\begin{eqnarray*}
I&=&\int_{-\infty}^\infty dx~e^{-x^2/2}\\
I^2&=&\int_{-\infty}^\infty\int_{-\infty}^\infty dx\,dy~e^{-(x^2+y^2)/2}\\
&=&2\pi\int_0^\infty rdr~e^{-r^2/2}\\
&=&2\pi.
\end{eqnarray*}
$$
Finally, the expression for $x$ from Eq. ([45](#eq:Greeny2)) is
$$
\begin{eqnarray*}
x(t\gg\tau)&=&\frac{F_0\tau}{m\omega_0} \sqrt{2\pi} e^{-\omega_0^2\tau^2/2}\sin(\omega_0 t).
\end{eqnarray*}
$$
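As a quick, hedged numerical check of this result (not part of the original notes; the values of $F_0$, $\tau$, $m$ and $\omega_0$ below are arbitrary assumptions), one can evaluate $I_c$ by quadrature and compare it with the closed form $F_0\tau\sqrt{2\pi}\,e^{-\omega_0^2\tau^2/2}$.
```python
import numpy as np
from scipy.integrate import quad

# assumed illustrative parameters
m, omega0, F0, tau = 1.0, 1.0, 1.0, 0.5

# numerical I_c from its defining integral
Ic, _ = quad(lambda tp: F0*np.exp(-tp**2/(2*tau**2))*np.cos(omega0*tp),
             -np.inf, np.inf)
# closed-form result derived above
Ic_exact = F0*tau*np.sqrt(2.0*np.pi)*np.exp(-omega0**2*tau**2/2.0)
print(Ic, Ic_exact)          # the two values should agree closely

# late-time motion: x(t) = Ic/(m*omega0) * sin(omega0*t)
```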
## The classical pendulum and scaling the equations
Let us end our discussion of oscillations with another classical case, the pendulum.
The angular equation of motion of the pendulum follows from
Newton's second law; with no external force it reads
<!-- Equation labels as ordinary links -->
<div id="_auto32"></div>
$$
\begin{equation}
ml\frac{d^2\theta}{dt^2}+mg\sin(\theta)=0,
\label{_auto32} \tag{46}
\end{equation}
$$
with an angular velocity and acceleration given by
<!-- Equation labels as ordinary links -->
<div id="_auto33"></div>
$$
\begin{equation}
v=l\frac{d\theta}{dt},
\label{_auto33} \tag{47}
\end{equation}
$$
and
<!-- Equation labels as ordinary links -->
<div id="_auto34"></div>
$$
\begin{equation}
a=l\frac{d^2\theta}{dt^2}.
\label{_auto34} \tag{48}
\end{equation}
$$
We do, however, expect that the motion will gradually come to an end due to a viscous drag torque acting on the pendulum.
In the presence of the drag, the above equation becomes
<!-- Equation labels as ordinary links -->
<div id="eq:pend1"></div>
$$
\begin{equation}
ml\frac{d^2\theta}{dt^2}+\nu\frac{d\theta}{dt} +mg\sin(\theta)=0, \label{eq:pend1} \tag{49}
\end{equation}
$$
where $\nu$ is now a positive constant parameterizing the viscosity
of the medium in question. In order to maintain the motion against
viscosity, it is necessary to add some external driving force.
We choose here a periodic driving force. The last equation then becomes
<!-- Equation labels as ordinary links -->
<div id="eq:pend2"></div>
$$
\begin{equation}
ml\frac{d^2\theta}{dt^2}+\nu\frac{d\theta}{dt} +mg\sin(\theta)=A\cos(\omega t), \label{eq:pend2} \tag{50}
\end{equation}
$$
with $A$ and $\omega$ two constants representing the amplitude and
the angular frequency, respectively. The latter is called the driving frequency.
We define the so-called natural frequency
$$
\omega_0=\sqrt{g/l},
$$
the dimensionless time
$$
\hat{t}=\omega_0 t,
$$
the dimensionless driving frequency
$$
\hat{\omega}=\frac{\omega}{\omega_0},
$$
the quantity $Q$, called the *quality factor*,
$$
Q=\frac{mg}{\omega_0\nu},
$$
and the dimensionless amplitude
$$
\hat{A}=\frac{A}{mg}.
$$
## More on the Pendulum
Dividing Eq. ([50](#eq:pend2)) by $mg$ and inserting these definitions, we have
$$
\frac{d^2\theta}{d\hat{t}^2}+\frac{1}{Q}\frac{d\theta}{d\hat{t}}
+\sin(\theta)=\hat{A}\cos(\hat{\omega}\hat{t}).
$$
This equation can in turn be recast in terms of two coupled first-order differential equations as follows
$$
\frac{d\theta}{d\hat{t}}=\hat{v},
$$
and
$$
\frac{d\hat{v}}{d\hat{t}}=-\frac{\hat{v}}{Q}-\sin(\theta)+\hat{A}\cos(\hat{\omega}\hat{t}).
$$
These are the equations to be solved. The factor $Q$ represents the
number of oscillations of the undriven system that must occur before
its energy is significantly reduced due to the viscous drag. The
amplitude $\hat{A}$ is measured in units of the maximum possible
gravitational torque while $\hat{\omega}$ is the angular frequency of
the external torque measured in units of the pendulum's natural
frequency.
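A minimal sketch of how these dimensionless equations could be integrated numerically is shown below (not part of the original notes; the values of $Q$, $\hat{A}$, $\hat{\omega}$ and the initial conditions are arbitrary assumptions).
```python
import numpy as np
from scipy.integrate import solve_ivp

# assumed illustrative parameters: quality factor, scaled drive amplitude and frequency
Q, Ahat, omegahat = 2.0, 1.2, 2.0/3.0

def rhs(that, y):
    """Right-hand side of the two coupled first-order equations above."""
    theta, vhat = y
    return [vhat, -vhat/Q - np.sin(theta) + Ahat*np.cos(omegahat*that)]

# integrate in dimensionless time with assumed initial conditions
sol = solve_ivp(rhs, (0.0, 100.0), [0.2, 0.0], max_step=0.01)
theta, vhat = sol.y
```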
|
CompPhysics/MachineLearning
|
doc/src/LectureNotes/testbook/_build/jupyter_execute/chapter5.py
|
Python
|
cc0-1.0
| 51,527
|
[
"Gaussian",
"exciting"
] |
833c9195739457c1429a3d224008d4c96cc16cc529b24b95dfda0846feab4e1b
|
# Hidden Markov Models
#
# Author: Ron Weiss <ronweiss@gmail.com>
# and Shiqiao Du <lucidfrontier.45@gmail.com>
# API changes: Jaques Grobler <jaquesgrobler@gmail.com>
"""
The :mod:`sklearn.hmm` module implements hidden Markov models.
**Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known
numerical stability issues. If nobody volunteers to write documentation and
make it more stable, this module will be removed in version 0.11.
"""
import string
import warnings
import numpy as np
from .utils import check_random_state
from .utils.extmath import logsumexp
from .base import BaseEstimator
from .mixture import (
GMM, log_multivariate_normal_density, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from . import cluster
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
def normalize(A, axis=None):
""" Normalize the input array so that it sums to 1.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data
axis: int
dimension along which normalization is performed
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
    WARNING: Modifies the array in place
"""
A += EPS
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
return A / Asum
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Attributes
----------
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_components`, `n_components`)
Matrix of prior transition probabilities between states.
startprob_prior : array, shape ('n_components`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
decoder algorithm
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars, etc.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars, etc. Defaults to all parameters.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publically.
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
self.n_components = n_components
self.n_iter = n_iter
self.thresh = thresh
self.params = params
self.init_params = init_params
self.startprob_ = startprob
self.startprob_prior = startprob_prior
self.transmat_ = transmat
self.transmat_prior = transmat_prior
self._algorithm = algorithm
self.random_state = random_state
def eval(self, obs):
"""Compute the log probability under the model and compute posteriors
Implements rank and beam pruning in the forward-backward
algorithm to speed up inference in large models.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
logprob : float
Log likelihood of the sequence `obs`
posteriors: array_like, shape (n, n_components)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
        decode : Find most likely state sequence corresponding to `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
posteriors += np.finfo(np.float32).eps
posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
return logprob, posteriors
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Log likelihood of the `obs`
See Also
--------
eval : Compute the log probability under the model and posteriors
        decode : Find most likely state sequence corresponding to `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, _ = self._do_forward_pass(framelogprob)
return logprob
def _decode_viterbi(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the Viterbi algorithm.
Parameters
----------
obs : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
viterbi_logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
eval : Compute the log probability under the model and posteriors
score : Compute the log probability under the model
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
return viterbi_logprob, state_sequence
def _decode_map(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the maximum a posteriori estimation.
Parameters
----------
obs : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
map_logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
eval : Compute the log probability under the model and posteriors
score : Compute the log probability under the model
"""
_, posteriors = self.eval(obs)
state_sequence = np.argmax(posteriors, axis=1)
map_logprob = np.max(posteriors, axis=1).sum()
return map_logprob, state_sequence
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Uses the selected algorithm for decoding.
Parameters
----------
obs : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
eval : Compute the log probability under the model and posteriors
score : Compute the log probability under the model
"""
if self._algorithm in decoder_algorithms:
algorithm = self._algorithm
elif algorithm in decoder_algorithms:
algorithm = algorithm
decoder = {"viterbi": self._decode_viterbi,
"map": self._decode_map}
logprob, state_sequence = decoder[algorithm](obs)
return logprob, state_sequence
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequence = self.decode(obs, algorithm)
return state_sequence
def predict_proba(self, obs):
"""Compute the posterior probability for each state in the model
Parameters
----------
obs : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
T : array-like, shape (n, n_components)
Returns the probability of the sample for each state in the model.
"""
_, posteriors = self.eval(obs)
return posteriors
def sample(self, n=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n : int
Number of samples to generate.
random_state: RandomState or an int seed (0 by default)
A random number generator instance. If None is given, the
object's random_state is used
Returns
-------
(obs, hidden_states)
obs : array_like, length `n` List of samples
hidden_states : array_like, length `n` List of hidden states
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_pdf = self.startprob_
startprob_cdf = np.cumsum(startprob_pdf)
transmat_pdf = self.transmat_
transmat_cdf = np.cumsum(transmat_pdf, 1)
# Initial state.
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
hidden_states = [currstate]
obs = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for _ in xrange(n - 1):
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
hidden_states.append(currstate)
obs.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.array(obs), np.array(hidden_states, dtype=int)
def fit(self, obs, **kwargs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string ''. Likewise, if you
would like just to do an initialization, call this method with
n_iter=0.
Parameters
----------
obs : list
List of array-like observation sequences (shape (n_i, n_features)).
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. a covariance parameter getting too
small). You can fix this by getting more training data, or
decreasing `covars_prior`.
**Please note that setting parameters in the `fit` method is
deprecated and will be removed in the next release.
Set it on initialization instead.**
"""
if kwargs:
warnings.warn("Setting parameters in the 'fit' method is"
"deprecated and will be removed in 0.14. Set it on "
"initialization instead.", DeprecationWarning,
stacklevel=2)
        # initialisation in case the user still passes parameters to fit,
        # so things don't break
for name in ('n_iter', 'thresh', 'params', 'init_params'):
if name in kwargs:
setattr(self, name, kwargs[name])
if self.algorithm not in decoder_algorithms:
self._algorithm = "viterbi"
self._init(obs, self.init_params)
logprob = []
for i in xrange(self.n_iter):
# Expectation step
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for seq in obs:
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
curr_logprob += lpr
self._accumulate_sufficient_statistics(
stats, seq, framelogprob, posteriors, fwdlattice,
bwdlattice, self.params)
logprob.append(curr_logprob)
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
break
# Maximization step
self._do_mstep(stats, self.params)
return self
def _get_algorithm(self):
"decoder algorithm"
return self._algorithm
def _set_algorithm(self, algorithm):
if algorithm not in decoder_algorithms:
raise ValueError("algorithm must be one of the decoder_algorithms")
self._algorithm = algorithm
algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_components, self.n_components)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
normalize(startprob)
if len(startprob) != self.n_components:
raise ValueError('startprob must have length n_components')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_components,
(self.n_components, self.n_components))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_components, self.n_components)):
raise ValueError('transmat must have shape ' +
'(n_components, n_components)')
if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
def _do_viterbi_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_components))
_hmmc._forward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_components))
_hmmc._backward(n_observations, n_components,
self._log_startprob, self._log_transmat,
framelogprob, bwdlattice)
bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
return bwdlattice
def _compute_log_likelihood(self, obs):
pass
def _generate_sample_from_state(self, state, random_state=None):
pass
def _init(self, obs, params):
if 's' in params:
self.startprob_.fill(1.0 / self.n_components)
if 't' in params:
self.transmat_.fill(1.0 / self.n_components)
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
stats['nobs'] += 1
if 's' in params:
stats['start'] += posteriors[0]
if 't' in params:
n_observations, n_components = framelogprob.shape
lneta = np.zeros((n_observations - 1,
n_components, n_components))
lnP = logsumexp(fwdlattice[-1])
_hmmc._compute_lneta(n_observations, n_components,
fwdlattice, self._log_transmat, bwdlattice,
framelogprob, lnP, lneta)
stats["trans"] += np.exp(logsumexp(lneta, 0))
def _do_mstep(self, stats, params):
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
if self.startprob_prior is None:
self.startprob_prior = 1.0
if self.transmat_prior is None:
self.transmat_prior = 1.0
if 's' in params:
self.startprob_ = normalize(
np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
if 't' in params:
transmat_ = normalize(
np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
axis=1)
self.transmat_ = transmat_
class GaussianHMM(_BaseHMM):
"""Hidden Markov Model with Gaussian emissions
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
Parameters
----------
n_components : int
Number of states.
``_covariance_type`` : string
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
Attributes
----------
``_covariance_type`` : string
String describing the type of covariance parameters used by
the model. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussian emissions.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
means : array, shape (`n_components`, `n_features`)
Mean parameters for each state.
covars : array
Covariance parameters for each state. The shape depends on
``_covariance_type``::
(`n_components`,) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars, etc.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars, etc. Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import GaussianHMM
>>> GaussianHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
GaussianHMM(algorithm='viterbi',...
See Also
--------
GMM : Gaussian mixture model
"""
def __init__(self, n_components=1, covariance_type='diag', startprob=None,
transmat=None, startprob_prior=None, transmat_prior=None,
algorithm="viterbi", means_prior=None, means_weight=0,
covars_prior=1e-2, covars_weight=1,
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
self._covariance_type = covariance_type
if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('bad covariance_type')
self.means_prior = means_prior
self.means_weight = means_weight
self.covars_prior = covars_prior
self.covars_weight = covars_weight
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _get_means(self):
"""Mean parameters for each state."""
return self._means_
def _set_means(self, means):
means = np.asarray(means)
if hasattr(self, 'n_features') and \
means.shape != (self.n_components, self.n_features):
raise ValueError('means must have shape' +
'(n_components, n_features)')
self._means_ = means.copy()
self.n_features = self._means_.shape[1]
means_ = property(_get_means, _set_means)
def _get_covars(self):
"""Return covars as a full matrix."""
if self._covariance_type == 'full':
return self._covars_
elif self._covariance_type == 'diag':
return [np.diag(cov) for cov in self._covars_]
elif self._covariance_type == 'tied':
return [self._covars_] * self.n_components
elif self._covariance_type == 'spherical':
return [np.eye(self.n_features) * f for f in self._covars_]
def _set_covars(self, covars):
covars = np.asarray(covars)
_validate_covars(covars, self._covariance_type, self.n_components)
self._covars_ = covars.copy()
covars_ = property(_get_covars, _set_covars)
def _compute_log_likelihood(self, obs):
return log_multivariate_normal_density(
obs, self._means_, self._covars_, self._covariance_type)
def _generate_sample_from_state(self, state, random_state=None):
if self._covariance_type == 'tied':
cv = self._covars_
else:
cv = self._covars_[state]
return sample_gaussian(self._means_[state], cv, self._covariance_type,
random_state=random_state)
def _init(self, obs, params='stmc'):
super(GaussianHMM, self)._init(obs, params=params)
if (hasattr(self, 'n_features')
and self.n_features != obs[0].shape[1]):
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (obs[0].shape[1],
self.n_features))
self.n_features = obs[0].shape[1]
if 'm' in params:
self._means_ = cluster.KMeans(
n_clusters=self.n_components).fit(obs[0]).cluster_centers_
if 'c' in params:
cv = np.cov(obs[0].T)
if not cv.shape:
cv.shape = (1, 1)
self._covars_ = distribute_covar_matrix_to_match_covariance_type(
cv, self._covariance_type, self.n_components)
def _initialize_sufficient_statistics(self):
stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_components)
stats['obs'] = np.zeros((self.n_components, self.n_features))
stats['obs**2'] = np.zeros((self.n_components, self.n_features))
stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features,
self.n_features))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GaussianHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'm' in params or 'c' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
if 'c' in params:
if self._covariance_type in ('spherical', 'diag'):
stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
elif self._covariance_type in ('tied', 'full'):
for t, o in enumerate(obs):
obsobsT = np.outer(o, o)
for c in xrange(self.n_components):
stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT
def _do_mstep(self, stats, params):
super(GaussianHMM, self)._do_mstep(stats, params)
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
denom = stats['post'][:, np.newaxis]
if 'm' in params:
prior = self.means_prior
weight = self.means_weight
if prior is None:
weight = 0
prior = 0
self._means_ = (weight * prior + stats['obs']) / (weight + denom)
if 'c' in params:
covars_prior = self.covars_prior
covars_weight = self.covars_weight
if covars_prior is None:
covars_weight = 0
covars_prior = 0
means_prior = self.means_prior
means_weight = self.means_weight
if means_prior is None:
means_weight = 0
means_prior = 0
meandiff = self._means_ - means_prior
if self._covariance_type in ('spherical', 'diag'):
cv_num = (means_weight * (meandiff) ** 2
+ stats['obs**2']
- 2 * self._means_ * stats['obs']
+ self._means_ ** 2 * denom)
cv_den = max(covars_weight - 1, 0) + denom
self._covars_ = (covars_prior + cv_num) / cv_den
if self._covariance_type == 'spherical':
self._covars_ = np.tile(self._covars_.mean(1)
[:, np.newaxis], (1, self._covars_.shape[1]))
elif self._covariance_type in ('tied', 'full'):
cvnum = np.empty((self.n_components, self.n_features,
self.n_features))
for c in xrange(self.n_components):
obsmean = np.outer(stats['obs'][c], self._means_[c])
cvnum[c] = (means_weight * np.outer(meandiff[c],
meandiff[c])
+ stats['obs*obs.T'][c]
- obsmean - obsmean.T
+ np.outer(self._means_[c], self._means_[c])
* stats['post'][c])
cvweight = max(covars_weight - self.n_features, 0)
if self._covariance_type == 'tied':
self._covars_ = ((covars_prior + cvnum.sum(axis=0))
/ (cvweight + stats['post'].sum()))
elif self._covariance_type == 'full':
self._covars_ = ((covars_prior + cvnum)
/ (cvweight + stats['post'][:, None, None]))
class MultinomialHMM(_BaseHMM):
"""Hidden Markov Model with multinomial (discrete) emissions
Attributes
----------
n_components : int
Number of states in the model.
n_symbols : int
Number of possible symbols emitted by the model (in the observations).
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
emissionprob : array, shape ('n_components`, 'n_symbols`)
Probability of emitting a given symbol when in each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars, etc.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars, etc. Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import MultinomialHMM
>>> MultinomialHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
MultinomialHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
def _get_emissionprob(self):
"""Emission probability distribution for each state."""
return np.exp(self._log_emissionprob)
def _set_emissionprob(self, emissionprob):
emissionprob = np.asarray(emissionprob)
if hasattr(self, 'n_symbols') and \
emissionprob.shape != (self.n_components, self.n_symbols):
raise ValueError('emissionprob must have shape '
'(n_components, n_symbols)')
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(emissionprob):
normalize(emissionprob)
self._log_emissionprob = np.log(emissionprob)
underflow_idx = np.isnan(self._log_emissionprob)
self._log_emissionprob[underflow_idx] = NEGINF
self.n_symbols = self._log_emissionprob.shape[1]
emissionprob_ = property(_get_emissionprob, _set_emissionprob)
def _compute_log_likelihood(self, obs):
return self._log_emissionprob[:, obs].T
def _generate_sample_from_state(self, state, random_state=None):
cdf = np.cumsum(self.emissionprob_[state, :])
random_state = check_random_state(random_state)
rand = random_state.rand()
symbol = (cdf > rand).argmax()
return symbol
def _init(self, obs, params='ste'):
super(MultinomialHMM, self)._init(obs, params=params)
self.random_state = check_random_state(self.random_state)
if 'e' in params:
if not hasattr(self, 'n_symbols'):
symbols = set()
for o in obs:
symbols = symbols.union(set(o))
self.n_symbols = len(symbols)
emissionprob = normalize(self.random_state.rand(self.n_components,
self.n_symbols), 1)
self.emissionprob_ = emissionprob
def _initialize_sufficient_statistics(self):
stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
stats['obs'] = np.zeros((self.n_components, self.n_symbols))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(MultinomialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'e' in params:
for t, symbol in enumerate(obs):
stats['obs'][:, symbol] += posteriors[t]
def _do_mstep(self, stats, params):
super(MultinomialHMM, self)._do_mstep(stats, params)
if 'e' in params:
self.emissionprob_ = (stats['obs']
/ stats['obs'].sum(1)[:, np.newaxis])
def _check_input_symbols(self, obs):
"""check if input can be used for Multinomial.fit input must be both
positive integer array and every element must be continuous.
e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
"""
symbols = np.asanyarray(obs).flatten()
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
            # input contains a negative integer
return False
symbols.sort()
if np.any(np.diff(symbols) > 1):
            # input is discontinuous
return False
return True
def fit(self, obs, **kwargs):
err_msg = ("Input must be both positive integer array and "
"every element must be continuous, but %s was given.")
if not self._check_input_symbols(obs):
raise ValueError(err_msg % obs)
return _BaseHMM.fit(self, obs, **kwargs)
class GMMHMM(_BaseHMM):
"""Hidden Markov Model with Gaussin mixture emissions
Attributes
----------
init_params : string, optional
Controls which parameters are initialized prior to training. Can \
contain any combination of 's' for startprob, 't' for transmat, 'm' \
for means, and 'c' for covars, etc. Defaults to all parameters.
params : string, optional
Controls which parameters are updated in the training process. Can
contain any combination of 's' for startprob, 't' for transmat,'m' for
means, and 'c' for covars, etc. Defaults to all parameters.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
gmms : array of GMM objects, length `n_components`
GMM emission distributions for each state.
random_state : RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
Examples
--------
>>> from sklearn.hmm import GMMHMM
>>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
GMMHMM(algorithm='viterbi', covariance_type='diag',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, n_mix=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", gmms=None, covariance_type='diag',
covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with GMM emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
        # XXX: Hotfix for n_mix that is incompatible with the scikit's
        # BaseEstimator API
self.n_mix = n_mix
self._covariance_type = covariance_type
self.covars_prior = covars_prior
self.gmms = gmms
if gmms is None:
gmms = []
for x in xrange(self.n_components):
if covariance_type is None:
g = GMM(n_mix)
else:
g = GMM(n_mix, covariance_type=covariance_type)
gmms.append(g)
self.gmms_ = gmms
# Read-only properties.
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _compute_log_likelihood(self, obs):
return np.array([g.score(obs) for g in self.gmms_]).T
def _generate_sample_from_state(self, state, random_state=None):
return self.gmms_[state].sample(1, random_state=random_state).flatten()
def _init(self, obs, params='stwmc'):
super(GMMHMM, self)._init(obs, params=params)
allobs = np.concatenate(obs, 0)
for g in self.gmms_:
g.set_params(init_params=params, n_iter=0)
g.fit(allobs)
def _initialize_sufficient_statistics(self):
stats = super(GMMHMM, self)._initialize_sufficient_statistics()
stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GMMHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
for state, g in enumerate(self.gmms_):
_, lgmm_posteriors = g.eval(obs)
lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis]
+ np.finfo(np.float).eps)
gmm_posteriors = np.exp(lgmm_posteriors)
tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
n_features = g.means_.shape[1]
tmp_gmm._set_covars(
distribute_covar_matrix_to_match_covariance_type(
np.eye(n_features), g.covariance_type,
g.n_components))
norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)
if np.any(np.isnan(tmp_gmm.covars_)):
raise ValueError
stats['norm'][state] += norm
if 'm' in params:
stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
if 'c' in params:
if tmp_gmm.covariance_type == 'tied':
stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
else:
cvnorm = np.copy(norm)
shape = np.ones(tmp_gmm.covars_.ndim)
shape[0] = np.shape(tmp_gmm.covars_)[0]
cvnorm.shape = shape
stats['covars'][state] += tmp_gmm.covars_ * cvnorm
def _do_mstep(self, stats, params):
super(GMMHMM, self)._do_mstep(stats, params)
# All that is left to do is to apply covars_prior to the
# parameters updated in _accumulate_sufficient_statistics.
for state, g in enumerate(self.gmms_):
n_features = g.means_.shape[1]
norm = stats['norm'][state]
if 'w' in params:
g.weights_ = normalize(norm)
if 'm' in params:
g.means_ = stats['means'][state] / norm[:, np.newaxis]
if 'c' in params:
if g.covariance_type == 'tied':
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * np.eye(n_features))
/ norm.sum())
else:
cvnorm = np.copy(norm)
shape = np.ones(g.covars_.ndim)
shape[0] = np.shape(g.covars_)[0]
cvnorm.shape = shape
if (g.covariance_type in ['spherical', 'diag']):
g.covars_ = (stats['covars'][state]
+ self.covars_prior) / cvnorm
elif g.covariance_type == 'full':
eye = np.eye(n_features)
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * eye[np.newaxis])
/ cvnorm)
|
seckcoder/lang-learn
|
python/sklearn/sklearn/hmm.py
|
Python
|
unlicense
| 46,219
|
[
"Gaussian"
] |
29551e0cb3f92c76da65fe11d5563a443eca4cd797d51886ca3ec757ef6ffd9e
|
from parse import Rosetta, parse
def _pytables():
import tables as pytables
tr = Rosetta()
tr.namespace = pytables
expr = open('rosetta/pytables.table').read()
stone = tr.visit(parse(expr))
return dict(a.astuple() for a in stone)
try:
pytables = _pytables()
except IOError:
pytables = None
|
davidcoallier/blaze
|
blaze/rosetta/__init__.py
|
Python
|
bsd-2-clause
| 327
|
[
"VisIt"
] |
e9f898d1ac68fbcdc1c54a938822461f39ca3687944e8231fc224d10b0ebf2ad
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
#
# This is derived from a cadquery script for generating PDIP models in X3D format
#
# from https://bitbucket.org/hyOzd/freecad-macros
# author hyOzd
# Dimensions are from Microchip's Packaging Specification document:
# DS00000049BY. Body drawing is the same as the QFP generator.
## requirements
## cadquery FreeCAD plugin
## https://github.com/jmwright/cadquery-freecad-module
## to run the script just do: freecad main_generator.py modelName
## e.g. c:\freecad\bin\freecad main_generator.py DIP8
## the script will generate STEP and VRML parametric models
## to be used with kicad StepUp script
#* These are a FreeCAD & cadquery tools *
#* to export generated models in STEP & VRML format. *
#* *
#* cadquery script for generating QFP/SOIC/SSOP/TSSOP models in STEP AP214 *
#* Copyright (c) 2015 *
#* Maurice https://launchpad.net/~easyw *
#* All trademarks within this guide belong to their legitimate owners. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *
#* *
#****************************************************************************
import cq_parameters # modules parameters
from cq_parameters import *
import collections
import math
from math import tan, cos, sin, radians, sqrt, atan
import cq_base_model # modules parameters
from cq_base_model import *
class cq_parameters_Resonator_C38_LF():
def __init__(self):
x = 0
def get_dest_3D_dir(self, modelName):
for n in self.all_params:
if n == modelName:
return self.all_params[modelName].dest_dir_prefix
def get_dest_file_name(self, modelName):
for n in self.all_params:
if n == modelName:
return self.all_params[modelName].filename
def model_exist(self, modelName):
for n in self.all_params:
if n == modelName:
return True
return False
def get_list_all(self):
list = []
for n in self.all_params:
list.append(n)
return list
def make_3D_model(self, modelName):
case_top = self.make_top(self.all_params[modelName])
case = self.make_case(self.all_params[modelName])
pins = self.make_pins(case, self.all_params[modelName])
show(case_top)
show(case)
show(pins)
doc = FreeCAD.ActiveDocument
objs=GetListOfObjects(FreeCAD, doc)
body_top_color_key = self.all_params[modelName].body_top_color_key
body_color_key = self.all_params[modelName].body_color_key
pin_color_key = self.all_params[modelName].pin_color_key
body_top_color = shaderColors.named_colors[body_top_color_key].getDiffuseFloat()
body_color = shaderColors.named_colors[body_color_key].getDiffuseFloat()
pin_color = shaderColors.named_colors[pin_color_key].getDiffuseFloat()
Color_Objects(Gui,objs[0],body_top_color)
Color_Objects(Gui,objs[1],body_color)
Color_Objects(Gui,objs[2],pin_color)
col_body_top=Gui.ActiveDocument.getObject(objs[0].Name).DiffuseColor[0]
col_body=Gui.ActiveDocument.getObject(objs[1].Name).DiffuseColor[0]
col_pin=Gui.ActiveDocument.getObject(objs[2].Name).DiffuseColor[0]
material_substitutions={
col_body_top[:-1]:body_top_color_key,
col_body[:-1]:body_color_key,
col_pin[:-1]:pin_color_key,
}
expVRML.say(material_substitutions)
while len(objs) > 1:
FuseObjs_wColors(FreeCAD, FreeCADGui, doc.Name, objs[0].Name, objs[1].Name)
del objs
objs = GetListOfObjects(FreeCAD, doc)
return material_substitutions
def make_top(self, params):
type = params.type # body type
L = params.L # top length
        W = params.W                    # top diameter
A1 = params.A1 # Body distance to PCB
PBD = params.PBD # Distance from pin hole to body
ph = params.p_hole # Distance between pin hole
ps = params.p_split # distance between legs
pl = params.p_length # pin length
pd = params.p_diam # pin diameter
rotation = params.rotation # Rotation if required
FreeCAD.Console.PrintMessage('make_top ...\r\n')
tt = 0.1
tr = 0.15
lb = L * tr
top = cq.Workplane("XY").workplane(offset = tt).moveTo(0.0, 0.0).circle(W / 2.0, False).extrude(L * (1.0 - tt))
top = top.faces(">Z").fillet(pd / 1.2)
if (type == 1):
top = top.rotate((0,0,0), (0,1,0), 90)
top = top.rotate((0,0,0), (0,0,1), 270)
top = top.translate((ph / 2.0, 0.0 - (PBD + tt), A1 + (W / 2.0)))
if (type == 2):
top = top.translate((ph / 2.0, 0.0, A1 + tt))
if (rotation != 0):
top = top.rotate((0,0,0), (0,0,1), rotation)
return (top)
def make_case(self, params):
type = params.type # body type
L = params.L # top length
        W = params.W                    # top diameter
A1 = params.A1 # Body distance to PCB
PBD = params.PBD # Distance from pin hole to body
ph = params.p_hole # Distance between pin hole
ps = params.p_split # distance between legs
pl = params.p_length # pin length
pd = params.p_diam # pin diameter
rotation = params.rotation # Rotation if required
FreeCAD.Console.PrintMessage('make_case ...\r\n')
tt = 0.1
case = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).circle(W / 2.0, False).extrude(3.0 * tt)
#
if (type == 1):
case = case.rotate((0,0,0), (0,1,0), 90)
case = case.rotate((0,0,0), (0,0,1), 270)
case = case.translate((ph / 2.0, 0.0 - PBD, A1 + (W / 2.0)))
if (type == 2):
case = case.translate((ph / 2.0, 0.0, A1))
if (rotation != 0):
case = case.rotate((0,0,0), (0,0,1), rotation)
return (case)
def make_pins(self, case, params):
type = params.type # body type
L = params.L # top length
W = params.W # top length
A1 = params.A1 # Body distance to PCB
PBD = params.PBD # Distance from pin hole to body
ph = params.p_hole # Distance between pin hole
ps = params.p_split # distance between legs
pl = params.p_length # pin length
pd = params.p_diam # pin diameter
rotation = params.rotation # Rotation if required
FreeCAD.Console.PrintMessage('make_pins ...\r\n')
pins = None
if (type == 1):
tt = 0.1
tr = 0.25
lb = L * tr
zbelow = -3.0 # negative value, length of pins below board level
#
r = 0.5
h = A1 + (W / 1.8)
arco = (1.0-sqrt(2.0)/2.0)*r # helper factor to create midpoints of profile radii
aa = math.degrees(math.atan((PBD / 2.0) / (ps / 4.0)))
path = (
cq.Workplane("XZ")
.lineTo(0, h - r - zbelow)
.threePointArc((arco, h - arco - zbelow),(r , h - zbelow))
.lineTo(3.4, h - zbelow)
)
pins = cq.Workplane("XY").circle(pd / 2.0).sweep(path).translate((0, 0, zbelow))
pins = pins.rotate((0,0,0), (0,0,1), 0.0 - aa)
pins = pins.faces("<Z").edges().fillet(pd / 4.0)
path = (
cq.Workplane("XZ")
.lineTo(0, h - r - zbelow)
.threePointArc((arco, h - arco - zbelow),(r , h - zbelow))
.lineTo(3.4, h - zbelow)
)
pin = cq.Workplane("XY").circle(pd / 2.0).sweep(path).translate((0, 0, zbelow))
pin = pin.rotate((0,0,0), (0,0,1), (0.0 - 180.0) + aa)
pin = pin.translate((ph, 0.0, 0.0))
pin = pin.faces("<Z").edges().fillet(pd / 4.0)
pins = pins.union(pin)
if (type == 2):
path = (
cq.Workplane("XZ")
.lineTo(0.0, 3.0)
.lineTo((ph - ps) / 2.0, 3.0 + 1.0)
.lineTo((ph - ps) / 2.0, 3.0 + 1.0 + A1 + 0.1)
)
pins = cq.Workplane("XY").circle(pd / 2.0).sweep(path).translate((0.0, 0.0, 0.0 - 3.0))
pins = pins.faces("<Z").edges().fillet(pd / 4.0)
# pins = cq.Workplane("XY").workplane(offset=A1 + 0.1).moveTo(0.0, 0.0).circle(pd / 2.2, False).extrude(0.0 - (3.0 + A1 + 0.1))
# pins = pins.faces("<Z").edges().fillet(pd / 4.0)
path = (
cq.Workplane("XZ")
.lineTo(0.0, 3.0)
.lineTo(0.0 - ((ph - ps) / 2.0), 3.0 + 1.0)
.lineTo(0.0 - ((ph - ps) / 2.0), 3.0 + 1.0 + A1 + 0.1)
)
pin = cq.Workplane("XY").circle(pd / 2.0).sweep(path).translate((ph, 0.0, 0.0 - 3.0))
pin = pin.faces("<Z").edges().fillet(pd / 4.0)
pins = pins.union(pin)
if (rotation != 0):
pins = pins.rotate((0,0,0), (0,0,1), rotation)
return (pins)
##enabling optional/default values to None
def namedtuple_with_defaults(typename, field_names, default_values=()):
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None,) * len(T._fields)
if isinstance(default_values, collections.Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T
Params = namedtuple_with_defaults("Params", [
'type', # model type
'filename', # file name
'L', # Body length
'W', # Body diameter
'A1', # Body-board separation
'PBD', # Distance from pin hole to body
'p_hole', # Distance between pin hole
'p_split', # Distance between pins
'p_length', # Pin length
'p_diam', # Pin width
'body_top_color_key', # Top color
'body_color_key', # Body colour
'pin_color_key', # Pin color
'rotation', # Rotation if required
'dest_dir_prefix' # Destination directory
])
all_params = {
'C38-LF_Horizontal': Params(
#
#
#
type = 1,
filename = 'Crystal_C38-LF_D3.0mm_L8.0mm_Horizontal', # modelName
L = 8.0, # Top length
W = 3.0, # Top diameter
A1 = 0.0, # Body-board separation
PBD = 2.45, # Distance from pin hole to body
p_hole = 1.9, # Distance between pin hole
p_split = 0.7, # Distance between pins
p_length = 10.0, # Pin length
p_diam = 0.3, # Pin diameter
body_top_color_key = 'metal aluminum', # Top color
body_color_key = 'brown body', # Body color
pin_color_key = 'metal silver', # Pin color
rotation = 0, # Rotation if required
dest_dir_prefix = 'Crystal.3dshapes', # destination directory
),
'C38-LF_Horizontal_1EP_1': Params(
#
#
#
type = 1,
filename = 'Crystal_C38-LF_D3.0mm_L8.0mm_Horizontal_1EP_style1', # modelName
L = 8.0, # Top length
W = 3.0, # Top diameter
A1 = 0.0, # Body-board separation
PBD = 2.45, # Distance from pin hole to body
p_hole = 1.9, # Distance between pin hole
p_split = 0.7, # Distance between pins
p_length = 10.0, # Pin length
p_diam = 0.3, # Pin diameter
body_top_color_key = 'metal aluminum', # Top color
body_color_key = 'brown body', # Body color
pin_color_key = 'metal silver', # Pin color
rotation = 0, # Rotation if required
dest_dir_prefix = 'Crystal.3dshapes', # destination directory
),
'C38-LF_Horizontal_1EP_2': Params(
#
#
#
type = 1,
filename = 'Crystal_C38-LF_D3.0mm_L8.0mm_Horizontal_1EP_style2', # modelName
L = 8.0, # Top length
W = 3.0, # Top diameter
A1 = 0.0, # Body-board separation
PBD = 2.45, # Distance from pin hole to body
p_hole = 1.9, # Distance between pin hole
p_split = 0.7, # Distance between pins
p_length = 10.0, # Pin length
p_diam = 0.3, # Pin diameter
body_top_color_key = 'metal aluminum', # Top color
body_color_key = 'brown body', # Body color
pin_color_key = 'metal silver', # Pin color
rotation = 0, # Rotation if required
dest_dir_prefix = 'Crystal.3dshapes', # destination directory
),
'C38-LF_Vertical': Params(
#
#
#
type = 2,
filename = 'Crystal_C38-LF_D3.0mm_L8.0mm_Vertical', # modelName
L = 8.0, # Top length
W = 3.0, # Top diameter
A1 = 2.0, # Body-board separation
PBD = 2.0, # Distance from pin hole to body
p_hole = 1.9, # Distance between pin hole
p_split = 0.7, # Distance between pins
p_length = 10.0, # Pin length
p_diam = 0.3, # Pin diameter
body_top_color_key = 'metal aluminum', # Top color
body_color_key = 'brown body', # Body color
pin_color_key = 'metal silver', # Pin color
rotation = 0, # Rotation if required
dest_dir_prefix = 'Crystal.3dshapes', # destination directory
),
}
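# --- Illustrative usage sketch (not part of the original script) ---
# A hedged example of how the parameter class above can be queried; it assumes
# the script runs inside the FreeCAD/cadquery environment it was written for.
#
#   series = cq_parameters_Resonator_C38_LF()
#   for name in series.get_list_all():
#       print(name, series.get_dest_file_name(name), series.get_dest_3D_dir(name))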
|
easyw/kicad-3d-models-in-freecad
|
cadquery/FCAD_script_generator/Crystal/cq_parameters_Resonator_C38_LF.py
|
Python
|
gpl-2.0
| 16,741
|
[
"CRYSTAL"
] |
11e2141c0f9dcb230a4325a2a8c4cdfeb858df43854dc7269f7b202e4bce17d2
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from typing import Union, List
try:
from dataclasses import dataclass
except ImportError:
from pydantic.dataclasses import dataclass
import numpy as np
from psi4 import core
from psi4.driver import constants
from psi4.driver.p4util import solvers
from psi4.driver.p4util.exceptions import *
from psi4.driver.procrouting.response.scf_products import (TDRSCFEngine, TDUSCFEngine)
dipole = {
'name': 'Dipole polarizabilities',
'printout_labels': ['X', 'Y', 'Z'],
'mints_function': core.MintsHelper.ao_dipole,
'vector names': ['AO Mux', 'AO Muy', 'AO Muz']
}
quadrupole = {
'name': 'Quadrupole polarizabilities',
'printout_labels': ['XX', 'XY', 'XZ', 'YY', 'YZ', 'ZZ'],
'mints_function': core.MintsHelper.ao_quadrupole,
}
quadrupole['vector names'] = ["AO Quadrupole " + x for x in quadrupole["printout_labels"]]
traceless_quadrupole = {
'name': 'Traceless quadrupole polarizabilities',
'printout_labels': ['XX', 'XY', 'XZ', 'YY', 'YZ', 'ZZ'],
'mints_function': core.MintsHelper.ao_traceless_quadrupole,
}
traceless_quadrupole['vector names'] = [
"AO Traceless Quadrupole " + x for x in traceless_quadrupole["printout_labels"]
]
property_dicts = {
'DIPOLE_POLARIZABILITIES': dipole,
'QUADRUPOLE_POLARIZABILITIES': quadrupole,
'TRACELESS_QUADRUPOLE_POLARIZABILITIES': traceless_quadrupole
}
def cpscf_linear_response(wfn, *args, **kwargs):
"""
Compute the static properties from a reference wavefunction. The currently implemented properties are
- dipole polarizability
- quadrupole polarizability
Parameters
----------
wfn : psi4 wavefunction
The reference wavefunction.
args : list
        The list of arguments. For each string argument, such as ``dipole polarizability``, the corresponding
        response is returned. The user may also choose to pass a list or tuple of custom vectors.
kwargs : dict
Options that control how the response is computed. The following options are supported (with default values):
- ``conv_tol``: 1e-5
- ``max_iter``: 10
- ``print_lvl``: 2
Returns
-------
responses : list
The list of responses.
"""
mints = core.MintsHelper(wfn.basisset())
# list of dictionaries to control response calculations, count how many user-supplied vectors we have
complete_dict = []
n_user = 0
for arg in args:
# for each string keyword, append the appropriate dictionary (vide supra) to our list
if isinstance(arg, str):
ret = property_dicts.get(arg)
if ret:
complete_dict.append(ret)
else:
raise ValidationError('Do not understand {}. Abort.'.format(arg))
# the user passed a list of vectors. absorb them into a dictionary
elif isinstance(arg, tuple) or isinstance(arg, list):
complete_dict.append({
'name': 'User Vectors',
'length': len(arg),
'vectors': arg,
'vector names': ['User Vector {}_{}'.format(n_user, i) for i in range(len(arg))]
})
n_user += len(arg)
# single vector passed. stored in a dictionary as a list of length 1 (can be handled as the case above that way)
# note: the length is set to '0' to designate that it was not really passed as a list
else:
complete_dict.append({
'name': 'User Vector',
'length': 0,
'vectors': [arg],
'vector names': ['User Vector {}'.format(n_user)]
})
n_user += 1
# vectors will be passed to the cphf solver, vector_names stores the corresponding names
vectors = []
vector_names = []
# construct the list of vectors. for the keywords, fetch the appropriate tensors from MintsHelper
for prop in complete_dict:
if 'User' in prop['name']:
for name, vec in zip(prop['vector names'], prop['vectors']):
vectors.append(vec)
vector_names.append(name)
else:
tmp_vectors = prop['mints_function'](mints)
for tmp in tmp_vectors:
tmp.scale(-2.0) # RHF only
vectors.append(tmp)
vector_names.append(tmp.name)
# do we have any vectors to work with?
if len(vectors) == 0:
raise ValidationError('I have no vectors to work with. Aborting.')
# print information on module, vectors that will be used
_print_header(complete_dict, n_user)
# fetch wavefunction information
nmo = wfn.nmo()
ndocc = wfn.nalpha()
nvirt = nmo - ndocc
c_occ = wfn.Ca_subset("AO", "OCC")
c_vir = wfn.Ca_subset("AO", "VIR")
nbf = c_occ.shape[0]
# the vectors need to be in the MO basis. if they have the shape nbf x nbf, transform.
for i in range(len(vectors)):
shape = vectors[i].shape
if shape == (nbf, nbf):
vectors[i] = core.triplet(c_occ, vectors[i], c_vir, True, False, False)
# verify that this vector already has the correct shape
elif shape != (ndocc, nvirt):
raise ValidationError('ERROR: "{}" has an unrecognized shape ({}, {}). Must be either ({}, {}) or ({}, {})'.format(
vector_names[i], shape[0], shape[1], nbf, nbf, ndocc, nvirt))
# compute response vectors for each input vector
params = [kwargs.pop("conv_tol", 1.e-5), kwargs.pop("max_iter", 10), kwargs.pop("print_lvl", 2)]
responses = wfn.cphf_solve(vectors, *params)
# zip vectors, responses for easy access
vectors = {k: v for k, v in zip(vector_names, vectors)}
responses = {k: v for k, v in zip(vector_names, responses)}
# compute response values, format output
output = []
for prop in complete_dict:
# try to replicate the data structure of the input
if 'User' in prop['name']:
if prop['length'] == 0:
output.append(responses[prop['vector names'][0]])
else:
buf = []
for name in prop['vector names']:
buf.append(responses[name])
output.append(buf)
else:
names = prop['vector names']
dim = len(names)
buf = np.zeros((dim, dim))
for i, i_name in enumerate(names):
for j, j_name in enumerate(names):
buf[i, j] = -1.0 * vectors[i_name].vector_dot(responses[j_name])
output.append(buf)
_print_output(complete_dict, output)
return output
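# Usage sketch (hedged; variable names are hypothetical, assumes `wfn` is a converged
# SCF wavefunction, e.g. from psi4.energy('scf', return_wfn=True)):
#   resp = cpscf_linear_response(wfn, 'DIPOLE_POLARIZABILITIES',
#                                conv_tol=1.e-6, max_iter=20, print_lvl=1)
#   alpha = resp[0]  # dipole polarizability tensor as a square numpy array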
def _print_header(complete_dict, n_user):
core.print_out('\n\n ---------------------------------------------------------\n'
' {:^57}\n'.format('CPSCF Linear Response Solver') +
' {:^57}\n'.format('by Marvin Lechner and Daniel G. A. Smith') +
' ---------------------------------------------------------\n')
core.print_out('\n ==> Requested Responses <==\n\n')
for prop in complete_dict:
if 'User' not in prop['name']:
core.print_out(' {}\n'.format(prop['name']))
if n_user != 0:
core.print_out(' {} user-supplied vector(s)\n'.format(n_user))
def _print_matrix(descriptors, content, title):
length = len(descriptors)
matrix_header = ' ' + ' {:^10}' * length + '\n'
core.print_out(matrix_header.format(*descriptors))
core.print_out(' -----' + ' ----------' * length + '\n')
for i, desc in enumerate(descriptors):
core.print_out(' {:^5}'.format(desc))
for j in range(length):
core.print_out(' {:>10.5f}'.format(content[i, j]))
# Set the name
var_name = title + " " + descriptors[i] + descriptors[j]
core.set_variable(var_name, content[i, j])
core.print_out('\n')
def _print_output(complete_dict, output):
core.print_out('\n ==> Response Properties <==\n')
for i, prop in enumerate(complete_dict):
if not 'User' in prop['name']:
core.print_out('\n => {} <=\n\n'.format(prop['name']))
directions = prop['printout_labels']
var_name = prop['name'].upper().replace("IES", "Y")
_print_matrix(directions, output[i], var_name)
def _print_tdscf_header(*, r_convergence: float, guess_type: str, restricted: bool, ptype: str):
core.print_out("\n\n ---------------------------------------------------------\n"
f" {'TDSCF excitation energies':^57}\n" +
f" {'by Andrew M. James and Daniel G. A. Smith':^57}\n" +
" ---------------------------------------------------------\n")
core.print_out("\n ==> Options <==\n\n")
core.print_out(f" {'Residual threshold':<20s}: {r_convergence:.4e}\n")
core.print_out(f" {'Initial guess':20s}: {guess_type.lower()}\n")
reference = 'RHF' if restricted else 'UHF'
core.print_out(f" {'Reference':20s}: {reference}\n")
solver_type = 'Hamiltonian' if ptype == "RPA" else "Davidson"
core.print_out(f" {'Solver type':20s}: {ptype} ({solver_type})\n")
core.print_out("\n")
@dataclass
class _TDSCFResults:
E_ex_au: float
irrep_GS: str
irrep_ES: str
irrep_trans: str
edtm_length: np.ndarray
f_length: float
edtm_velocity: np.ndarray
f_velocity: float
mdtm: np.ndarray
R_length: float
R_velocity: float
spin_mult: str
R_eigvec: Union[core.Matrix, List[core.Matrix]]
L_eigvec: Union[core.Matrix, List[core.Matrix]]
def _solve_loop(wfn,
ptype,
solve_function,
states_per_irrep: List[int],
maxiter: int,
restricted: bool = True,
spin_mult: str = "singlet") -> List[_TDSCFResults]:
"""
References
----------
For the expression of the transition moments in length and velocity gauges:
- T. B. Pedersen, A. E. Hansen, "Ab Initio Calculation and Display of the
Rotary Strength Tensor in the Random Phase Approximation. Method and Model
Studies." Chem. Phys. Lett., 246, 1 (1995)
- P. J. Lestrange, F. Egidi, X. Li, "The Consequences of Improperly
Describing Oscillator Strengths beyond the Electric Dipole Approximation."
J. Chem. Phys., 143, 234103 (2015)
"""
core.print_out("\n ==> Requested Excitations <==\n\n")
for nstate, state_sym in zip(states_per_irrep, wfn.molecule().irrep_labels()):
core.print_out(f" {nstate} {spin_mult} states with {state_sym} symmetry\n")
# construct the engine
if restricted:
if spin_mult == "triplet":
engine = TDRSCFEngine(wfn, ptype=ptype.lower(), triplet=True)
else:
engine = TDRSCFEngine(wfn, ptype=ptype.lower(), triplet=False)
else:
engine = TDUSCFEngine(wfn, ptype=ptype.lower())
# collect results and compute some spectroscopic observables
mints = core.MintsHelper(wfn.basisset())
results = []
irrep_GS = wfn.molecule().irrep_labels()[engine.G_gs]
for state_sym, nstates in enumerate(states_per_irrep):
if nstates == 0:
continue
irrep_ES = wfn.molecule().irrep_labels()[state_sym]
core.print_out(f"\n\n ==> Seeking the lowest {nstates} {spin_mult} states with {irrep_ES} symmetry")
engine.reset_for_state_symm(state_sym)
guess_ = engine.generate_guess(nstates * 4)
# ret = {"eigvals": ee, "eigvecs": (rvecs, rvecs), "stats": stats} (TDA)
# ret = {"eigvals": ee, "eigvecs": (rvecs, lvecs), "stats": stats} (RPA)
ret = solve_function(engine, nstates, guess_, maxiter)
# check whether all roots converged
if not ret["stats"][-1]["done"]:
# raise error
raise TDSCFConvergenceError(maxiter, wfn, f"{spin_mult} excitations in irrep {irrep_ES}", ret["stats"][-1])
# flatten dictionary: helps with sorting by energy
# also append state symmetry to return value
for e, (R, L) in zip(ret["eigvals"], ret["eigvecs"]):
irrep_trans = wfn.molecule().irrep_labels()[engine.G_gs ^ state_sym]
# length-gauge electric dipole transition moment
edtm_length = engine.residue(R, mints.so_dipole())
# length-gauge oscillator strength
f_length = ((2 * e) / 3) * np.sum(edtm_length**2)
# velocity-gauge electric dipole transition moment
edtm_velocity = engine.residue(L, mints.so_nabla())
# velocity-gauge oscillator strength
f_velocity = (2 / (3 * e)) * np.sum(edtm_velocity**2)
# length gauge magnetic dipole transition moment
# 1/2 is the Bohr magneton in atomic units
mdtm = 0.5 * engine.residue(L, mints.so_angular_momentum())
# NOTE The signs for rotatory strengths are opposite WRT the cited paper.
# This is because Psi4 defines the length-gauge dipole integral to include the electron charge (-1.0)
# length gauge rotatory strength
R_length = np.einsum("i,i", edtm_length, mdtm)
# velocity gauge rotatory strength
R_velocity = -np.einsum("i,i", edtm_velocity, mdtm) / e
results.append(
_TDSCFResults(e, irrep_GS, irrep_ES, irrep_trans, edtm_length, f_length, edtm_velocity, f_velocity,
mdtm, R_length, R_velocity, spin_mult, R, L))
return results
def _states_per_irrep(states, nirrep):
"""Distributes states into nirrep"""
spi = [states // nirrep] * nirrep
for i in range(states % nirrep):
spi[i] += 1
return spi
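# For example (illustrative): _states_per_irrep(10, 8) returns [2, 2, 1, 1, 1, 1, 1, 1],
# the D2h distribution quoted in the tdscf_excitations docstring below.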
def _validate_tdscf(*, wfn, states, triplets, guess) -> None:
# validate states
if not isinstance(states, (int, list)):
raise ValidationError("TDSCF: Number of states must be either an integer or a list of integers")
# list of states per irrep given, validate it
if isinstance(states, list):
if len(states) != wfn.nirrep():
raise ValidationError(f"TDSCF: States requested ({states}) do not match number of irreps ({wfn.nirrep()})")
# do triplets?
if triplets not in ["NONE", "ALSO", "ONLY"]:
raise ValidationError(
f"TDSCF: Triplet option ({triplets}) unrecognized. Must be one of 'NONE', 'ALSO' or 'ONLY'")
restricted = wfn.same_a_b_orbs()
do_triplets = False if triplets == "NONE" else True
if (not restricted) and do_triplets:
raise ValidationError("TDSCF: Cannot compute triplets with an unrestricted reference")
# validate calculation
if restricted and wfn.functional().needs_xc() and do_triplets:
raise ValidationError("TDSCF: Restricted Vx kernel only spin-adapted for singlets")
not_lda = wfn.functional().is_gga() or wfn.functional().is_meta()
if (not restricted) and not_lda:
raise ValidationError("TDSCF: Unrestricted Kohn-Sham Vx kernel currently limited to SVWN functional")
if guess != "DENOMINATORS":
raise ValidationError(f"TDSCF: Guess type {guess} is not valid")
def tdscf_excitations(wfn,
*,
states: Union[int, List[int]],
triplets: str = "NONE",
tda: bool = False,
r_convergence: float = 1.0e-4,
maxiter: int = 60,
guess: str = "DENOMINATORS",
verbose: int = 1):
"""Compute excitations from a SCF(HF/KS) wavefunction
Parameters
-----------
wfn : :py:class:`psi4.core.Wavefunction`
The reference wavefunction
states : Union[int, List[int]]
How many roots (excited states) should the solver seek to converge?
This function accepts either an integer or a list of integers:
- The list has :math:`n_{\mathrm{irrep}}` elements and is only
acceptable if the system has symmetry. It tells the solver how many
states per irrep to calculate.
- If an integer is given _and_ the system has symmetry, the states
will be distributed among irreps.
For example, ``states = 10`` for a D2h system will compute 10 states
distributed as ``[2, 2, 1, 1, 1, 1, 1, 1]`` among irreps.
triplets : {"NONE", "ONLY", "ALSO"}
Should the solver seek to converge states of triplet symmetry?
Default is `NONE`: do not seek to converge triplets.
Valid options are:
- `NONE`. Do not seek to converge triplets.
- `ONLY`. Only seek to converge triplets.
- `ALSO`. Seek to converge both triplets and singlets. This choice is
only valid for restricted reference wavefunction.
The number of states given will be apportioned roughly 50-50 between
singlet and triplet states, preferring the former. For example:
given ``states = 5, triplets = "ALSO"``, the solver will seek to
converge 3 states of singlet spin symmetry and 2 of triplet spin
symmetry. When asking for ``states = [3, 3, 3, 3], triplets =
"ALSO"`` states (C2v symmetry), ``[2, 2, 2, 2]`` will be of singlet
spin symmetry and ``[1, 1, 1, 1]`` will be of triplet spin
symmetry.
tda : bool, optional.
Should the solver use the Tamm-Dancoff approximation (TDA) or the
random-phase approximation (RPA)?
Default is ``False``: use RPA.
Note that TDA is equivalent to CIS for HF references.
r_convergence : float, optional.
The convergence threshold for the norm of the residual vector.
Default: 1.0e-4
Using a tighter convergence threshold here requires tighter SCF ground
state convergence threshold. As a rule of thumb, with the SCF ground
state density converged to :math:`10^{-N}` (``D_CONVERGENCE = 1.0e-N``),
you can afford converging a corresponding TDSCF calculation to
:math:`10^{-(N-2)}`.
The default value is consistent with the default value for
``D_CONVERGENCE``.
maxiter : int, optional
Maximum number of iterations.
Default: 60
guess : str, optional.
How should the starting trial vectors be generated?
Default: `DENOMINATORS`, i.e. use orbital energy differences to generate
guess vectors.
verbose : int, optional.
How verbose should the solver be?
Default: 1
Notes
-----
The algorithm employed to solve the non-Hermitian eigenvalue problem (``tda = False``)
will fail when the SCF wavefunction has a triplet instability.
This function can be used for:
- restricted singlets: RPA or TDA, any functional
- restricted triplets: RPA or TDA, Hartree-Fock only
- unrestricted: RPA or TDA, Hartree-Fock and LDA only
Tighter convergence thresholds will require a larger iterative subspace.
The maximum size of the iterative subspace is calculated based on `r_convergence`:
max_vecs_per_root = -np.log10(r_convergence) * 50
for the default convergence threshold this gives 200 trial vectors per root and a maximum subspace size
of:
max_ss_size = max_vecs_per_root * n
where `n` is the number of roots to seek in the given irrep.
For each irrep, the algorithm will store up to `max_ss_size` trial vectors
before collapsing (restarting) the iterations from the `n` best
approximations.
"""
# validate input parameters
triplets = triplets.upper()
guess = guess.upper()
_validate_tdscf(wfn=wfn, states=states, triplets=triplets, guess=guess)
restricted = wfn.same_a_b_orbs()
# determine how many states per irrep to seek and apportion them between singlets/triplets and irreps.
singlets_per_irrep = []
triplets_per_irrep = []
if isinstance(states, list):
if triplets == "ONLY":
triplets_per_irrep = states
elif triplets == "ALSO":
singlets_per_irrep = [(s // 2) + (s % 2) for s in states]
triplets_per_irrep = [(s // 2) for s in states]
else:
singlets_per_irrep = states
else:
# total number of states given
# first distribute them among singlets and triplets, preferring the
# former then distribute them among irreps
if triplets == "ONLY":
triplets_per_irrep = _states_per_irrep(states, wfn.nirrep())
elif triplets == "ALSO":
spi = (states // 2) + (states % 2)
singlets_per_irrep = _states_per_irrep(spi, wfn.nirrep())
tpi = states - spi
triplets_per_irrep = _states_per_irrep(tpi, wfn.nirrep())
else:
singlets_per_irrep = _states_per_irrep(states, wfn.nirrep())
# tie maximum number of vectors per root to requested residual tolerance
# This gives 200 vectors per root with default tolerance
max_vecs_per_root = int(-np.log10(r_convergence) * 50)
def rpa_solver(e, n, g, m):
return solvers.hamiltonian_solver(engine=e,
nroot=n,
guess=g,
r_convergence=r_convergence,
max_ss_size=max_vecs_per_root * n,
verbose=verbose)
def tda_solver(e, n, g, m):
return solvers.davidson_solver(engine=e,
nroot=n,
guess=g,
r_convergence=r_convergence,
max_ss_size=max_vecs_per_root * n,
verbose=verbose)
# determine which solver function to use: Davidson for TDA or Hamiltonian for RPA?
if tda:
ptype = "TDA"
solve_function = tda_solver
else:
ptype = "RPA"
solve_function = rpa_solver
_print_tdscf_header(r_convergence=r_convergence, guess_type=guess, restricted=restricted, ptype=ptype)
# collect solver results into a list
_results = []
# singlets solve loop
if triplets == "NONE" or triplets == "ALSO":
res_1 = _solve_loop(wfn, ptype, solve_function, singlets_per_irrep, maxiter, restricted, "singlet")
_results.extend(res_1)
# triplets solve loop
if triplets == "ALSO" or triplets == "ONLY":
res_3 = _solve_loop(wfn, ptype, solve_function, triplets_per_irrep, maxiter, restricted, "triplet")
_results.extend(res_3)
# sort by energy
_results = sorted(_results, key=lambda x: x.E_ex_au)
core.print_out("\n{}\n".format("*"*90) +
"{}{:^70}{}\n".format("*"*10, "WARNING", "*"*10) +
"{}{:^70}{}\n".format("*"*10, "Length-gauge rotatory strengths are **NOT** gauge-origin invariant", "*"*10) +
"{}\n\n".format("*"*90)) #yapf: disable
# print results
core.print_out(" " + (" " * 20) + " " + "Excitation Energy".center(31) + f" {'Total Energy':^15}" +
"Oscillator Strength".center(31) + "Rotatory Strength".center(31) + "\n")
core.print_out(
f" {'#':^4} {'Sym: GS->ES (Trans)':^20} {'au':^15} {'eV':^15} {'au':^15} {'au (length)':^15} {'au (velocity)':^15} {'au (length)':^15} {'au (velocity)':^15}\n"
)
core.print_out(
f" {'-':->4} {'-':->20} {'-':->15} {'-':->15} {'-':->15} {'-':->15} {'-':->15} {'-':->15} {'-':->15}\n")
# collect results
solver_results = []
for i, x in enumerate(_results):
sym_descr = f"{x.irrep_GS}->{x.irrep_ES} ({1 if x.spin_mult == 'singlet' else 3} {x.irrep_trans})"
E_ex_ev = constants.conversion_factor('hartree', 'eV') * x.E_ex_au
E_tot_au = wfn.energy() + x.E_ex_au
# prepare return dictionary for this root
solver_results.append({
"EXCITATION ENERGY": x.E_ex_au,
"ELECTRIC DIPOLE TRANSITION MOMENT (LEN)": x.edtm_length,
"OSCILLATOR STRENGTH (LEN)": x.f_length,
"ELECTRIC DIPOLE TRANSITION MOMENT (VEL)": x.edtm_velocity,
"OSCILLATOR STRENGTH (VEL)": x.f_velocity,
"MAGNETIC DIPOLE TRANSITION MOMENT": x.mdtm,
"ROTATORY STRENGTH (LEN)": x.R_length,
"ROTATORY STRENGTH (VEL)": x.R_velocity,
"SYMMETRY": x.irrep_trans,
"SPIN": x.spin_mult,
"RIGHT EIGENVECTOR ALPHA": x.R_eigvec if restricted else x.R_eigvec[0],
"LEFT EIGENVECTOR ALPHA": x.L_eigvec if restricted else x.L_eigvec[0],
"RIGHT EIGENVECTOR BETA": x.R_eigvec if restricted else x.R_eigvec[1],
"LEFT EIGENVECTOR BETA": x.L_eigvec if restricted else x.L_eigvec[1],
})
# stash in psivars/wfnvars
ssuper_name = wfn.functional().name()
# wfn.set_variable("TD-fctl ROOT n TOTAL ENERGY - h SYMMETRY") # P::e SCF
# wfn.set_variable("TD-fctl ROOT 0 -> ROOT m EXCITATION ENERGY - h SYMMETRY") # P::e SCF
# wfn.set_variable("TD-fctl ROOT 0 -> ROOT m OSCILLATOR STRENGTH (LEN) - h SYMMETRY") # P::e SCF
# wfn.set_variable("TD-fctl ROOT 0 -> ROOT m OSCILLATOR STRENGTH (VEL) - h SYMMETRY") # P::e SCF
# wfn.set_variable("TD-fctl ROOT 0 -> ROOT m ROTATORY STRENGTH (LEN) - h SYMMETRY") # P::e SCF
# wfn.set_variable("TD-fctl ROOT 0 -> ROOT m ROTATORY STRENGTH (VEL) - h SYMMETRY") # P::e SCF
wfn.set_variable(f"TD-{ssuper_name} ROOT {i+1} TOTAL ENERGY - {x.irrep_ES} SYMMETRY", E_tot_au)
wfn.set_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} EXCITATION ENERGY - {x.irrep_ES} SYMMETRY", x.E_ex_au)
wfn.set_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} OSCILLATOR STRENGTH (LEN) - {x.irrep_ES} SYMMETRY",
x.f_length)
wfn.set_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} OSCILLATOR STRENGTH (VEL) - {x.irrep_ES} SYMMETRY",
x.f_velocity)
wfn.set_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} ROTATORY STRENGTH (LEN) - {x.irrep_ES} SYMMETRY",
x.R_length)
wfn.set_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} ROTATORY STRENGTH (VEL) - {x.irrep_ES} SYMMETRY",
x.R_velocity)
wfn.set_array_variable(
f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} ELECTRIC TRANSITION DIPOLE MOMENT (LEN) - {x.irrep_ES} SYMMETRY",
core.Matrix.from_array(x.edtm_length.reshape((1, 3))))
wfn.set_array_variable(
f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} ELECTRIC TRANSITION DIPOLE MOMENT (VEL) - {x.irrep_ES} SYMMETRY",
core.Matrix.from_array(x.edtm_velocity.reshape((1, 3))))
wfn.set_array_variable(
f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} MAGNETIC TRANSITION DIPOLE MOMENT - {x.irrep_ES} SYMMETRY",
core.Matrix.from_array(x.mdtm.reshape((1, 3))))
wfn.set_array_variable(
f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} RIGHT EIGENVECTOR ALPHA - {x.irrep_ES} SYMMETRY",
x.R_eigvec if restricted else x.R_eigvec[0])
wfn.set_array_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} LEFT EIGENVECTOR ALPHA - {x.irrep_ES} SYMMETRY",
x.L_eigvec if restricted else x.L_eigvec[0])
wfn.set_array_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} RIGHT EIGENVECTOR BETA - {x.irrep_ES} SYMMETRY",
x.R_eigvec if restricted else x.R_eigvec[1])
wfn.set_array_variable(f"TD-{ssuper_name} ROOT 0 -> ROOT {i+1} LEFT EIGENVECTOR BETA - {x.irrep_ES} SYMMETRY",
x.L_eigvec if restricted else x.L_eigvec[1])
core.print_out(
f" {i+1:^4} {sym_descr:^20} {x.E_ex_au:< 15.5f} {E_ex_ev:< 15.5f} {E_tot_au:< 15.5f} {x.f_length:< 15.4f} {x.f_velocity:< 15.4f} {x.R_length:< 15.4f} {x.R_velocity:< 15.4f}\n"
)
core.print_out("\n")
return solver_results
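# Usage sketch (hedged; assumes `wfn` comes from a converged RHF/RKS calculation):
#   res = tdscf_excitations(wfn, states=4, tda=True, r_convergence=1.0e-5)
#   e_first = res[0]["EXCITATION ENERGY"]  # lowest excitation energy, in hartree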
|
lothian/psi4
|
psi4/driver/procrouting/response/scf_response.py
|
Python
|
lgpl-3.0
| 29,112
|
[
"Psi4"
] |
ce68e3145f39787ad054b1c1ea51e319011158596054f5703f9a9d1d5beb53ab
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Unit tests for specific parser behaviors, such as overriden methods."""
import unittest
class NormalisesymTest(unittest.TestCase):
# Not needed: DALTON, MOPAC, NWChem, ORCA, QChem
def test_normalisesym_adf(self):
from cclib.parser.adfparser import ADF
sym = ADF("dummyfile").normalisesym
labels = ['A', 's', 'A1', 'A1.g', 'Sigma', 'Pi', 'Delta', 'Phi', 'Sigma.g', 'A.g', 'AA', 'AAA', 'EE1', 'EEE1']
ref = ['A', 's', 'A1', 'A1g', 'sigma', 'pi', 'delta', 'phi', 'sigma.g', 'Ag', "A'", 'A"', "E1'", 'E1"']
self.assertEqual(list(map(sym, labels)), ref)
def test_normalisesym_gamess(self):
from cclib.parser.gamessparser import GAMESS
sym = GAMESS("dummyfile").normalisesym
labels = ['A', 'A1', 'A1G', "A'", "A''", "AG"]
ref = ['A', 'A1', 'A1g', "A'", 'A"', 'Ag']
self.assertEqual(list(map(sym, labels)), ref)
def test_normalisesym_gamessuk(self):
from cclib.parser.gamessukparser import GAMESSUK
sym = GAMESSUK("dummyfile.txt").normalisesym
labels = ['a', 'a1', 'ag', "a'", 'a"', "a''", "a1''", 'a1"', "e1+", "e1-"]
ref = ['A', 'A1', 'Ag', "A'", 'A"', 'A"', 'A1"', 'A1"', 'E1', 'E1']
self.assertEqual(list(map(sym, labels)), ref)
def test_normalisesym_gaussian(self):
from cclib.parser.gaussianparser import Gaussian
sym = Gaussian("dummyfile").normalisesym
labels = ['A1', 'AG', 'A1G', "SG", "PI", "PHI", "DLTA", 'DLTU', 'SGG']
ref = ['A1', 'Ag', 'A1g', 'sigma', 'pi', 'phi', 'delta', 'delta.u', 'sigma.g']
self.assertEqual(list(map(sym, labels)), ref)
def test_normalisesym_jaguar(self):
from cclib.parser.jaguarparser import Jaguar
sym = Jaguar("dummyfile").normalisesym
labels = ['A', 'A1', 'Ag', 'Ap', 'App', "A1p", "A1pp", "E1pp/Ap"]
ref = ['A', 'A1', 'Ag', "A'", 'A"', "A1'", 'A1"', 'E1"']
self.assertEqual(list(map(sym, labels)), ref)
def test_normalisesym_molcas(self):
from cclib.parser.molcasparser import Molcas
sym = Molcas("dummyfile").normalisesym
labels = ["a", "a1", "ag"]
ref = ["A", "A1", "Ag"]
self.assertEqual(list(map(sym, labels)), ref)
def test_normalisesym_molpro(self):
from cclib.parser.molproparser import Molpro
sym = Molpro("dummyfile").normalisesym
labels = ["A`", "A``"]
ref = ["A'", "A''"]
self.assertEqual(list(map(sym, labels)), ref)
def test_normalisesym_psi4(self):
from cclib.parser.psi4parser import Psi4
sym = Psi4("dummyfile").normalisesym
labels = ["Ap", "App"]
ref = ["A'", 'A"']
self.assertEqual(list(map(sym, labels)), ref)
def test_normalisesym_turbomole(self):
from cclib.parser.turbomoleparser import Turbomole
sym = Turbomole("dummyfile").normalisesym
labels = ["a", "a1", "ag"]
ref = ["A", "A1", "Ag"]
self.assertEqual(list(map(sym, labels)), ref)
if __name__ == "__main__":
unittest.main()
|
langner/cclib
|
test/parser/testspecificparsers.py
|
Python
|
bsd-3-clause
| 3,256
|
[
"ADF",
"Dalton",
"GAMESS",
"Gaussian",
"Jaguar",
"MOLCAS",
"MOPAC",
"Molpro",
"NWChem",
"ORCA",
"Psi4",
"TURBOMOLE",
"cclib"
] |
5caec82fd244af482156a879363ffcbbbf3c26dc6a926f6caf9f49cc75d06c06
|
import glob, os, sys
sys.path.append(os.getcwd() + '/lib/')
sys.path.append(os.getcwd() + '/cloudtracker/')
# Multiprocessing modules
import multiprocessing as mp
from multiprocessing import Pool
PROC = 16
import model_param as mc
from conversion import convert
import cloudtracker.main
# Default working directory for ent_analysis package
cwd = os.getcwd()
# Output profile names
profiles = {'condensed', 'condensed_env', 'condensed_edge', \
'condensed_shell' , 'core', 'core_env', 'core_edge', 'core_shell', \
'plume', 'condensed_entrain', 'core_entrain', 'surface'}
def wrapper(module_name, script_name, function_name, filelist):
pkg = __import__ (module_name, globals(), locals(), ['*'])
md = getattr(pkg, script_name)
fn = getattr(md, function_name)
pool = mp.Pool(PROC)
pool.map(fn, filelist)
def run_conversion():
pkg = 'conversion'
os.chdir(mc.input_directory)
# Ensure the data folders exist at the target location
if not os.path.exists(mc.data_directory):
os.makedirs(mc.data_directory)
if not os.path.exists('%s/variables/' % (mc.data_directory)):
os.makedirs('%s/variables/' % (mc.data_directory))
if not os.path.exists('%s/tracking/' % (mc.data_directory)):
os.makedirs('%s/tracking/' % (mc.data_directory))
if not os.path.exists('%s/core_entrain/' % (mc.data_directory)):
os.makedirs('%s/core_entrain/' % (mc.data_directory))
if not os.path.exists('%s/condensed_entrain/' % (mc.data_directory)):
os.makedirs('%s/condensed_entrain/' % (mc.data_directory))
# Generate cloud field statistic
convert.convert_stat()
# bin3d2nc conversion
filelist = glob.glob('./*.bin3D')
wrapper(pkg, 'convert', 'convert', filelist)
# Move the netCDF files to relevant locations
filelist = glob.glob('./*.nc')
wrapper(pkg, 'nc_transfer', 'transfer', filelist)
# generate_tracking
filelist = glob.glob('%s/variables/*.nc' % (mc.data_directory))
wrapper(pkg, 'generate_tracking', 'main', filelist)
def run_cloudtracker():
# Change the working directory for cloudtracker
os.chdir('%s/cloudtracker/' % (cwd))
model_config = mc.model_config
# Update nt
model_config['nt'] = mc.nt
# Swap input directory for cloudtracker
model_config['input_directory'] = mc.data_directory + '/tracking/'
cloudtracker.main.main(model_config)
def run_profiler():
### Time Profiles
pkg = 'time_profiles'
os.chdir('%s/time_profiles' % (cwd))
# Ensure output folder exists
if not os.path.exists('%s/time_profiles/cdf' % (cwd)):
os.makedirs('%s/time_profiles/cdf' % (cwd))
# Main thermodynamic profiles
filelist = glob.glob('%s/variables/*.nc' % (mc.data_directory))
wrapper(pkg, 'make_profiles', 'main', filelist)
if(mc.do_entrainment):
filelist = glob.glob('%s/core_entrain/*.nc' % (mc.data_directory))
wrapper(pkg, 'core_entrain_profiles', 'main', filelist)
filelist = glob.glob('%s/condensed_entrain/*.nc' % (mc.data_directory))
wrapper(pkg, 'condensed_entrain_profiles', 'main', filelist)
# Chi Profiles
filelist = glob.glob('cdf/core_env*.nc')
wrapper(pkg, 'chi_core', 'makechi', filelist)
filelist = glob.glob('cdf/condensed_env*.nc')
wrapper(pkg, 'chi_condensed', 'makechi', filelist)
# Surface Profiles (based on cloud tracking algorithm)
wrapper(pkg, 'surface_profiles', 'main', range(mc.nt))
def run_id_profiles():
### ID Profiles
pkg = 'id_profiles'
os.chdir('%s/id_profiles' % (cwd))
# Ensure output folder exists
if not os.path.exists('%s/id_profiles/cdf' % (cwd)):
os.makedirs('%s/id_profiles/cdf' % (cwd))
wrapper(pkg, 'all_profiles', 'main', profiles)
if __name__ == '__main__':
run_conversion()
run_cloudtracker()
run_profiler()
#run_id_profiles()
print 'Entrainment analysis completed'
|
lorenghoh/ent_analysis
|
run_analysis.py
|
Python
|
mit
| 3,709
|
[
"NetCDF"
] |
83d45baed1bd76755f843457cd89b5131a2c11bbe07e78904c38b54e96eff910
|
__author__ = 'algol'
import sqlite3
from datetime import datetime
from configparser import ConfigParser
class Singleton(object):
instance = None
inited = False
def __new__(cls, *a, **kwa):
if cls.instance is None:
cls.instance = object.__new__(cls)
return cls.instance
def __init__(self):
if not self.__class__.inited:
self.__class__.inited = True
self.init()
def init(self):
pass
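# Sketch of the pattern: constructing any Singleton subclass always hands back the
# same object, and init() runs only on the first construction, e.g. (hypothetical):
#   class Cfg(Singleton):
#       def init(self):
#           self.loaded = True
#   assert Cfg() is Cfg()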
class Database(Singleton):
@staticmethod
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def init(self):
c = ConfigParser()
c.read('config.ini')
self.uri = c.get('database', 'uri').strip("'")
def cursor(self, connection=None) -> sqlite3.Cursor:
if connection is None:
connection = self.connection()
return connection.cursor()
## TODO figure out how to close the connection
def connection(self) -> sqlite3.Connection:
conn = sqlite3.connect(self.uri)
conn.row_factory = self.dict_factory
return conn
class Memin:
table_name = None
pk_name = None
pk_value = None
db = Database()
def __init__(self, pk_value):
self.pk_value = pk_value
@classmethod
def fetchrow(cls, pk_value):
"""
Loads a record from the table cls.table_name by the primary key cls.pk_name
:param pk_value: int primary key value
:return: dict
:rtype: dict
"""
if cls.table_name is None or cls.pk_name is None:
raise Exception("table_name and pk_name is empty")
conn = Database().connection()
cur = Database().cursor(conn)
cur.execute('SELECT * FROM %s WHERE %s = ?' % (cls.table_name, cls.pk_name), (pk_value,))
res = cur.fetchone()
cur.close()
conn.close()
return res
@classmethod
def get_all(cls, filter_data=None):
raise Exception('Not implemented yet')
@classmethod
def load(cls, pk_value):
raise Exception('Not implemented yet')
def save(self):
raise Exception('Not implemented yet')
def save_data(self, data):
if self.pk_value is None:
self.pk_value = self.insert(data)
else:
self.update(data)
def insert(self, data) -> int:
"""
Inserts a new record into the database
:param data: dict whose keys are the table field names
:return: int primary key of the inserted record
:rtype: int
"""
query_tmpl = "INSERT INTO {0} ({1}) values ({2})"
fields = ''
vl = ''
values = list()
for i in list(data):
fields += i + ', '
vl += '?, '
values.append(data[i])
fields = fields[:-2]
vl = vl[:-2]
query = query_tmpl.format(self.table_name, fields, vl)
conn = self.db.connection()
cur = self.db.cursor(conn)
cur.execute(query, values)
conn.commit()
res = cur.lastrowid
cur.close()
conn.close()
return res
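# For example (illustrative): with table_name 'Person' and data {'Fname': 'Ivan', 'Phone': '123'},
# the generated query is "INSERT INTO Person (Fname, Phone) values (?, ?)" and it is
# executed with the parameter list ['Ivan', '123'].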
def update(self, data):
"""
Updates the record in table self.table_name identified by the primary key self.pk_name, self.pk_value
:param data: dict whose keys are the table field names
:return:
"""
if self.pk_value is None:
raise Exception("Cant update primary key is empty")
query_tmpl = "UPDATE %s SET {0} WHERE %s = %s" % (self.table_name, self.pk_name, self.pk_value)
fields = ''
values = list()
for i in list(data):
fields += '%s = ?, ' % i
values.append(data[i])
fields = fields[:-2]
query = query_tmpl.format(fields)
conn = self.db.connection()
cur = self.db.cursor(conn)
cur.execute(query, values)
conn.commit()
cur.close()
conn.close()
@staticmethod
def create_filter(data=None) -> str:
"""
Builds an SQL WHERE clause with exact-match conditions from a dict.
:param data: dict whose keys are field names
:return: str
:rtype: str
"""
if data is None or len(data) == 0:
return ''
res = ' WHERE'
for f in list(data):
res += ' ' + f + " = '" + str(data[f]) + "' AND"
return res[:-4]
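# For example (illustrative): create_filter({'Fname': 'Ivan', 'Phone': '123'}) returns
# " WHERE Fname = 'Ivan' AND Phone = '123'". Values are interpolated directly into the
# string, so this helper is only safe for trusted input.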
class Person(Memin):
table_name = 'Person'
pk_name = "PersonID"
def __init__(self, fname, lname='', phone='', email='', person_id=None, insert_date=''):
super().__init__(person_id)
self.fname = fname
self.lname = lname
self.phone = phone
self.email = email
self.insert_date = insert_date if insert_date != '' else datetime.now().strftime('%d.%m.%Y')
def __str__(self):
return self.fname + ' ' + self.lname + ', Phone: ' + self.phone
@classmethod
def get_all(cls, filter_data=None):
"""
:param filter_data:
:return:
:rtype: list[Person]
"""
cur = cls.db.cursor()
res = list()
for row in cur.execute("SELECT * FROM Person" + cls.create_filter(filter_data)):
res.append(Person(row['Fname'],
row['Lname'],
row['Phone'],
row['Email'],
row['PersonID'],
row['InsertDate']))
cur.close()
return res
@classmethod
def load(cls, pk_value):
"""
:param pk_value:
:return:
:rtype: Person
"""
row = cls.fetchrow(pk_value)
res = None
if row:
res = Person(row['Fname'], row['Lname'], row['Phone'], row['Email'], row['PersonID'], row['InsertDate'])
return res
def save(self):
"""
Saves the Person to the database
:return: int primary key
:rtype: int
"""
self.save_data({'Fname': self.fname,
'Lname': self.lname,
'Phone': self.phone,
'Email': self.email,
'InsertDate': self.insert_date})
return self.pk_value
class Classroom(Memin):
table_name = 'Classroom'
pk_name = 'ClassroomID'
def __init__(self, name, address, comment='', active=1, classroom_id=None):
super().__init__(classroom_id)
self.address = address
self.name = name
self.active = active
self.comment = comment
def __str__(self):
return self.name + ' ' + self.address
@classmethod
def load(cls, pk_value):
"""
:param pk_value: ClassroomID
:return: loaded Classroom object
:rtype: Classroom
"""
row = cls.fetchrow(pk_value)
res = None
if row:
res = Classroom(row['Name'], row['Address'], row['Comment'], row['Active'], row['ClassroomID'])
return res
@classmethod
def get_all(cls, filter_data=None):
"""
:param filter_data:
:return: list of Classroom
:rtype: list[Classroom]
"""
cur = cls.db.cursor()
res = list()
for row in cur.execute("SELECT * FROM Classroom" + cls.create_filter(filter_data) ):
res.append(Classroom(row['Name'],
row['Address'],
row['Comment'],
row['Active'],
row['ClassroomID']))
cur.close()
return res
def save(self):
"""
Saves the Classroom to the database
:return: int primary key
:rtype: int
"""
self.save_data({'Name': self.name,
'Address': self.address,
'Comment': self.comment,
'Active': self.active})
return self.pk_value
class Lesson(Memin):
table_name = 'Lesson'
pk_name = 'LessonID'
def __init__(self, name, duration=60, comment='', lesson_id=None):
super().__init__(lesson_id)
self.name = name
self.duration = duration
self.comment = comment
def save(self):
"""
Saves the Lesson to the database
:return: int primary key value
:rtype: int
"""
self.save_data({'Name': self.name,
'Duration': self.duration,
'Comment': self.comment})
return self.pk_value
@classmethod
def load(cls, pk_value):
"""
Loads a Lesson from the database by primary key
:param pk_value:
:return: Lesson
:rtype: Lesson
"""
row = cls.fetchrow(pk_value)
res = None
if row:
res = Lesson(row['Name'],
row['Duration'],
row['Comment'],
row['LessonID'])
return res
@classmethod
def get_all(cls, filter_data=None):
"""
Loads a list of Lesson objects from the database
:param filter_data:
:return: list of Lesson
:rtype: list[Lesson]
"""
cur = cls.db.cursor()
res = list()
for row in cur.execute('SELECT * FROM Lesson' + cls.create_filter(filter_data)):
res.append(Lesson(row['Name'],
row['Duration'],
row['Comment'],
row['LessonID']))
cur.close()
return res
class Payment(Memin):
table_name = 'Payment'
pk_name = 'PaymentID'
def __init__(self, person_id, amount, payment_type_id, date=None, payment_id=None):
super().__init__(payment_id)
self.person_id = person_id
self.amount = amount
self.payment_type_id = payment_type_id
self.date = datetime.now().strftime('%d.%m.%Y') if date is None else date
@classmethod
def load(cls, pk_value):
"""
Loads a Payment from the database by primary key
:param pk_value:
:return: Payment
:rtype: Payment
"""
row = cls.fetchrow(pk_value)
res = None
if row:
res = Payment(row['PersonID'],
row['Amount'],
row['PaymentTypeID'],
row['InsertDate'],
row['PaymentID'])
return res
@classmethod
def get_all(cls, filter_data=None):
"""
Loads a list of Payment objects from the database
:param filter_data:
:return: list of Payment
:rtype: list[Payment]
"""
cur = cls.db.cursor()
res = list()
for row in cur.execute("SELECT * FROM Payment" + cls.create_filter(filter_data)):
res.append(Payment(row['PersonID'],
row['Amount'],
row['PaymentTypeID'],
row['InsertDate'],
row['PaymentID']))
cur.close()
return res
def save(self):
"""
Saves the Payment to the database
:return: primary key
:rtype: int
"""
self.save_data({'PersonID': self.person_id,
'Amount': self.amount,
'PaymentTypeID': self.payment_type_id,
'InsertDate': self.date})
return self.pk_value
class PaymentType(Memin):
table_name = 'PaymentType'
pk_name = 'PaymentTypeID'
def __init__(self, name, comment='', payment_type_id=None):
super().__init__(payment_type_id)
self.name = name
self.comment = comment
def __str__(self):
return self.name
@classmethod
def load(cls, pk_value):
"""
Loads a PaymentType from the database by primary key
:param pk_value: int primary key
:return: PaymentType
:rtype: PaymentType
"""
row = cls.fetchrow(pk_value)
res = None
if row:
res = PaymentType(row['Name'], row['Comment'], row['PaymentTypeID'])
return res
def save(self):
"""
Saves the PaymentType to the database
:return: int primary key
:rtype: int
"""
self.save_data({'Name': self.name, 'Comment': self.comment})
return self.pk_value
@classmethod
def get_all(cls, filter_data=None):
"""
Loads a list of PaymentType objects from the database
:param filter_data:
:return: list of PaymentType
:rtype: list[PaymentType]
"""
cur = cls.db.cursor()
res = list()
for row in cur.execute("SELECT * FROM PaymentType" + cls.create_filter(filter_data)):
res.append(PaymentType(row['Name'], row['Comment'], row['PaymentTypeID']))
cur.close()
return res
class Visit(Memin):
table_name = 'Visit'
pk_name = 'VisitID'
def __init__(self, person_id, classroom_id, lesson_id, date, visit_id=None):
super().__init__(visit_id)
self.person_id = person_id
self.classroom_id = classroom_id
self.lesson_id = lesson_id
self.date = date
@classmethod
def load(cls, pk_value):
"""
Loads a Visit from the database by primary key
:param pk_value: int primary key
:return: Visit
:rtype: Visit
"""
row = cls.fetchrow(pk_value)
res = None
if row:
res = Visit(row['PersonID'],
row['ClassroomID'],
row['LessonID'],
row['InsertDate'],
row['VisitID'])
return res
@classmethod
def get_all(cls, filter_data=None):
"""
Loads a list of Visit objects from the database
:param filter_data:
:return: list of Visit
:rtype: list[Visit]
"""
cur = cls.db.cursor()
res = list()
for row in cur.execute("SELECT * FROM Visit" + cls.create_filter(filter_data)):
res.append(Visit(row['PersonID'],
row['ClassroomID'],
row['LessonID'],
row['InsertDate'],
row['VisitID']))
cur.close()
return res
def save(self):
"""
Saves the Visit to the database
:return: int primary key
:rtype: int
"""
self.save_data({'PersonID': self.person_id,
'ClassroomID': self.classroom_id,
'LessonID': self.lesson_id,
'InsertDate': self.date})
return self.pk_value
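# Overall usage sketch (hedged; assumes config.ini points 'uri' at a SQLite file that
# already contains the tables used above):
#   p = Person('Ivan', 'Petrov', phone='123')
#   person_id = p.save()           # INSERTs the row and returns the new primary key
#   same = Person.load(person_id)  # round-trips the record back as a Person object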
|
migihajami/memin
|
memin/core.py
|
Python
|
bsd-3-clause
| 15,539
|
[
"VisIt"
] |
d94ccca2dae0f23c1dcebcce2ac3fb08395d2ba4ce50eb19abba624f9bed1781
|
########################################################################
# $HeadURL$
# File : ComputingElementFactory.py
# Author : Stuart Paterson
########################################################################
""" The Computing Element Factory has one method that instantiates a given Computing Element
from the CEUnique ID specified in the JobAgent configuration section.
"""
from DIRAC.Resources.Computing.ComputingElement import getCEConfigDict
from DIRAC import S_OK, S_ERROR, gLogger
__RCSID__ = "$Id$"
class ComputingElementFactory( object ):
#############################################################################
def __init__(self, ceType=''):
""" Standard constructor
"""
self.ceType = ceType
self.log = gLogger.getSubLogger( self.ceType )
#############################################################################
def getCE(self, ceType='', ceName='', ceParametersDict={}):
"""This method returns the CE instance corresponding to the supplied
CEUniqueID. If no corresponding CE is available, this is indicated.
"""
self.log.verbose('Creating CE of %s type with the name %s' % (ceType, ceName) )
ceTypeLocal = ceType
if not ceTypeLocal:
ceTypeLocal = self.ceType
ceNameLocal = ceName
if not ceNameLocal:
ceNameLocal = self.ceType
ceConfigDict = getCEConfigDict( ceNameLocal )
self.log.verbose('CEConfigDict', ceConfigDict)
if 'CEType' in ceConfigDict:
ceTypeLocal = ceConfigDict['CEType']
if not ceTypeLocal:
error = 'Can not determine CE Type'
self.log.error( error )
return S_ERROR( error )
subClassName = "%sComputingElement" % (ceTypeLocal)
try:
ceSubClass = __import__('DIRAC.Resources.Computing.%s' % subClassName, globals(), locals(), [subClassName])
except Exception, x:
msg = 'ComputingElementFactory could not import DIRAC.Resources.Computing.%s' % ( subClassName )
self.log.exception()
self.log.warn( msg )
return S_ERROR( msg )
try:
ceStr = 'ceSubClass.%s( "%s" )' % ( subClassName, ceNameLocal )
computingElement = eval( ceStr )
if ceParametersDict:
computingElement.setParameters(ceParametersDict)
except Exception, x:
msg = 'ComputingElementFactory could not instantiate %s()' % (subClassName)
self.log.exception()
self.log.warn( msg )
return S_ERROR( msg )
computingElement._reset()
return S_OK( computingElement )
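# Usage sketch (hedged; assumes an 'InProcess' computing element is defined in the
# local DIRAC configuration):
#   result = ComputingElementFactory().getCE(ceType='InProcess', ceName='MyCE')
#   if result['OK']:
#     computingElement = result['Value']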
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
avedaee/DIRAC
|
Resources/Computing/ComputingElementFactory.py
|
Python
|
gpl-3.0
| 2,631
|
[
"DIRAC"
] |
7de1e3feeadc53eee5456cb929e6bdf72ad16410c5c349360091a56a2d6ce744
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***********************************************
espressopp.interaction.LennardJonesEnergyCapped
***********************************************
.. math::
V(r) = 4 \varepsilon \left[ \left( \frac{\sigma}{r} \right)^{12} -
\left( \frac{\sigma}{r} \right)^{6} \right]
where `r` is either the distance or the capped distance, depending on which is
greater.
.. function:: espressopp.interaction.LennardJonesEnergyCapped(epsilon, sigma, cutoff, caprad, shift)
:param epsilon: (default: 1.0)
:param sigma: (default: 1.0)
:param cutoff: (default: infinity)
:param caprad: (default: 0.0)
:param shift: (default: "auto")
:type epsilon: real
:type sigma: real
:type cutoff:
:type caprad: real
:type shift:
.. function:: espressopp.interaction.VerletListLennardJonesEnergyCapped(vl)
:param vl:
:type vl:
.. function:: espressopp.interaction.VerletListLennardJonesEnergyCapped.getPotential(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.VerletListLennardJonesEnergyCapped.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListAdressLennardJonesEnergyCapped(vl, fixedtupleList)
:param vl:
:param fixedtupleList:
:type vl:
:type fixedtupleList:
.. function:: espressopp.interaction.VerletListAdressLennardJonesEnergyCapped.getPotentialAT(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.VerletListAdressLennardJonesEnergyCapped.getPotentialCG(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.VerletListAdressLennardJonesEnergyCapped.setPotentialAT(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListAdressLennardJonesEnergyCapped.setPotentialCG(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListHadressLennardJonesEnergyCapped(vl, fixedtupleList)
:param vl:
:param fixedtupleList:
:type vl:
:type fixedtupleList:
.. function:: espressopp.interaction.VerletListHadressLennardJonesEnergyCapped.getPotentialAT(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.VerletListHadressLennardJonesEnergyCapped.getPotentialCG(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.VerletListHadressLennardJonesEnergyCapped.setPotentialAT(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListHadressLennardJonesEnergyCapped.setPotentialCG(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.CellListLennardJonesEnergyCapped(stor)
:param stor:
:type stor:
.. function:: espressopp.interaction.CellListLennardJonesEnergyCapped.getPotential(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.CellListLennardJonesEnergyCapped.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.FixedPairListLennardJonesEnergyCapped(system, vl, potential)
:param system:
:param vl:
:param potential:
:type system:
:type vl:
:type potential:
.. function:: espressopp.interaction.FixedPairListLennardJonesEnergyCapped.getPotential()
:rtype:
.. function:: espressopp.interaction.FixedPairListLennardJonesEnergyCapped.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_LennardJonesEnergyCapped, \
interaction_VerletListLennardJonesEnergyCapped, \
interaction_VerletListAdressLennardJonesEnergyCapped, \
interaction_VerletListHadressLennardJonesEnergyCapped, \
interaction_CellListLennardJonesEnergyCapped, \
interaction_FixedPairListLennardJonesEnergyCapped
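# Minimal usage sketch (hedged; assumes an existing espressopp `system` and a Verlet
# list `vl` built for it):
#   pot = espressopp.interaction.LennardJonesEnergyCapped(epsilon=1.0, sigma=1.0,
#                                                         cutoff=2.5, caprad=0.6)
#   interLJ = espressopp.interaction.VerletListLennardJonesEnergyCapped(vl)
#   interLJ.setPotential(type1=0, type2=0, potential=pot)
#   system.addInteraction(interLJ)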
class LennardJonesEnergyCappedLocal(PotentialLocal, interaction_LennardJonesEnergyCapped):
def __init__(self, epsilon=1.0, sigma=1.0,
cutoff=infinity, caprad=0.0 ,shift="auto"):
"""Initialize the local Lennard Jones object."""
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if shift =="auto":
cxxinit(self, interaction_LennardJonesEnergyCapped,
epsilon, sigma, cutoff, caprad)
else:
cxxinit(self, interaction_LennardJonesEnergyCapped,
epsilon, sigma, cutoff, caprad, shift)
class VerletListLennardJonesEnergyCappedLocal(InteractionLocal, interaction_VerletListLennardJonesEnergyCapped):
def __init__(self, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListLennardJonesEnergyCapped, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
class VerletListAdressLennardJonesEnergyCappedLocal(InteractionLocal, interaction_VerletListAdressLennardJonesEnergyCapped):
def __init__(self, vl, fixedtupleList):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListAdressLennardJonesEnergyCapped, vl, fixedtupleList)
def setPotentialAT(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setPotentialCG(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialCG(self, type1, type2, potential)
def getPotentialAT(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotentialAT(self, type1, type2)
def getPotentialCG(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotentialCG(self, type1, type2)
class VerletListHadressLennardJonesEnergyCappedLocal(InteractionLocal, interaction_VerletListHadressLennardJonesEnergyCapped):
def __init__(self, vl, fixedtupleList):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListHadressLennardJonesEnergyCapped, vl, fixedtupleList)
def setPotentialAT(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setPotentialCG(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialCG(self, type1, type2, potential)
def getPotentialAT(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotentialAT(self, type1, type2)
def getPotentialCG(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotentialCG(self, type1, type2)
class CellListLennardJonesEnergyCappedLocal(InteractionLocal, interaction_CellListLennardJonesEnergyCapped):
def __init__(self, stor):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_CellListLennardJonesEnergyCapped, stor)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
class FixedPairListLennardJonesEnergyCappedLocal(InteractionLocal, interaction_FixedPairListLennardJonesEnergyCapped):
def __init__(self, system, vl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedPairListLennardJonesEnergyCapped, system, vl, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
def getPotential(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self)
if pmi.isController:
class LennardJonesEnergyCapped(Potential):
'The energy-capped Lennard-Jones potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.LennardJonesEnergyCappedLocal',
pmiproperty = ['epsilon', 'sigma', 'cutoff', 'caprad']
)
class VerletListLennardJonesEnergyCapped(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListLennardJonesEnergyCappedLocal',
pmicall = ['setPotential', 'getPotential']
)
class VerletListAdressLennardJonesEnergyCapped(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListAdressLennardJonesEnergyCappedLocal',
pmicall = ['setPotentialAT', 'setPotentialCG', 'getPotentialAT', 'getPotentialCG']
)
class VerletListHadressLennardJonesEnergyCapped(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListHadressLennardJonesEnergyCappedLocal',
pmicall = ['setPotentialAT', 'setPotentialCG', 'getPotentialAT', 'getPotentialCG']
)
class CellListLennardJonesEnergyCapped(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.CellListLennardJonesEnergyCappedLocal',
pmicall = ['setPotential', 'getPotential']
)
class FixedPairListLennardJonesEnergyCapped(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedPairListLennardJonesEnergyCappedLocal',
pmicall = ['setPotential']
)
|
espressopp/espressopp
|
src/interaction/LennardJonesEnergyCapped.py
|
Python
|
gpl-3.0
| 14,335
|
[
"ESPResSo"
] |
302ce51dda8867c843279f8aea5c1e7b13ae5555c59aebcf0c54fe77ace10c53
|
"""bluesky.dispersers.hysplit.hysplit
The code in this module was copied from BlueSky Framework, and modified
significantly. It was originally written by Sonoma Technology, Inc.
v0.2.0 introduced a number of changes migrated from BSF's hysplit v8.
Here are the notes copied from BSF:
'''
Version 8 - modifications Dec 2015, rcs
1) greatly expanded user access to variables in the hysplit
CONTROL and SETUP.CFG files
2) heavily modified the way particle initialization files are
created/read, including support for MPI (read and write)
runs but not for TODO tranched runs
READ_INIT_FILE is no longer supported, instead use NINIT
to control if and how to read in PARINIT file
HYSPLIT_SETUP_CFG is no longer supported; instead include
the SETUP.CFG variable one wishes to set in the .ini
list of supported vars:
NCYCL, NDUMP, KHMAX, NINIT, INITD, PARINIT, PARDUMP,
QCYCLE, TRATIO, DELT, NUMPAR, MAXPAR, MGMIN and
ICHEM
PARINIT & PARDUMP can both handle strftime strings in
their names.
NOTE: full path name length must be <= 80 chars.
new variables now accessible in the CONTROL file are
three sampling interv opts (type, hour and minutes),
three particle opts (diameter, density, shape),
five dry dep opts (vel, mol weight, reactivity,
diffusivity, henry const),
three wet dep opts (henry, in-cloud scavenging ratio,
below-cloud scav coef),
radioactive half-life and
resuspension constant
'''
"""
__author__ = "Joel Dubowy and Sonoma Technology, Inc."
__version__ = "0.2.0"
import copy
import logging
import math
import os
import shutil
# import tarfile
import threading
import datetime
from afdatetime.parsing import parse_datetime
from bluesky import io
from bluesky.config import Config
from bluesky.models.fires import Fire
from .. import (
DispersionBase, GRAMS_PER_TON, SQUARE_METERS_PER_ACRE, PHASES
)
from . import hysplit_utils
__all__ = [
'HYSPLITDispersion'
]
DEFAULT_BINARIES = {
'HYSPLIT': {
"old_config_key": "HYSPLIT_BINARY",
"default":"hycs_std"
},
'HYSPLIT_MPI': {
"old_config_key": "HYSPLIT_MPI_BINARY",
"default":"hycm_std"
},
'NCEA': {
"old_config_key": "NCEA_EXECUTABLE",
"default":"ncea"
},
'NCKS': {
"old_config_key": "NCKS_EXECUTABLE",
"default":"ncks"
},
'MPI': {
"old_config_key": "MPIEXEC",
"default":"mpiexec"
},
'HYSPLIT2NETCDF': {
"old_config_key": "HYSPLIT2NETCDF_BINARY",
"default":"hysplit2netcdf"
}
}
def _get_binaries(config_getter):
"""The various executables can be specified either
using the old BSF config keys or the new keys nested
under 'binaries'. e.g.
Old:
{
...,
"config": {
"hysplit": {
"HYSPLIT_BINARY": "hycs_std",
"HYSPLIT_MPI_BINARY": "hycm_std",
"NCEA_EXECUTABLE": "ncea",
"NCKS_EXECUTABLE": "ncks",
"MPIEXEC": "mpiexec",
"HYSPLIT2NETCDF_BINARY": "hysplit2netcdf"
}
}
...
}
New:
{
...,
"config": {
"hysplit": {
"binaries" : {
'hysplit': "hycs_std",
'hysplit_mpi': "hycm_std",
'ncea': "ncea",
'ncks': "ncks",
'mpi': "mpiexec",
'hysplit2netcdf': "hysplit2netcdf"
}
}
}
...
}
The new way takes precedence over the old.
"""
binaries = {}
for k, d in DEFAULT_BINARIES.items():
# config_getter will try upper and lower case
# versions of k and d['old_config_key']
binaries[k] = (config_getter('binaries', k, allow_missing=True)
or config_getter(d['old_config_key'], allow_missing=True)
or d['default'])
return binaries
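# For example (illustrative): with neither the new 'binaries' keys nor the old BSF keys
# configured, the defaults win, e.g. binaries['HYSPLIT'] == "hycs_std" and
# binaries['MPI'] == "mpiexec".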
class HYSPLITDispersion(DispersionBase):
""" HYSPLIT Dispersion model
HYSPLIT Concentration model version 4.9
TODO: determine which config options we'll support
"""
def __init__(self, met_info, **config):
super(HYSPLITDispersion, self).__init__(met_info, **config)
self.BINARIES = _get_binaries(self.config)
self._set_met_info(copy.deepcopy(met_info))
self._output_file_name = self.config('output_file_name')
self._has_parinit = []
def _required_activity_fields(self):
return ('timeprofile', 'plumerise', 'emissions')
def _run(self, wdir):
"""Runs hysplit
args:
- wdir -- working directory
"""
dispersion_offset = int(self.config("DISPERSION_OFFSET") or 0)
self._model_start += datetime.timedelta(hours=dispersion_offset)
self._num_hours -= dispersion_offset
self._adjust_dispersion_window_for_available_met()
self._set_grid_params()
self._set_reduction_factor()
self._compute_tranches()
if 1 < self._num_processes:
# hysplit_utils.create_fire_tranches will log number of processes
# and number of fires each
self._run_parallel(wdir)
else:
self._run_process(self._fires, wdir)
# Note: DispersionBase.run will add directory, start_time,
# and num_hours to the response dict
self._met_info.pop('hours')
self._met_info['files'] = list(self._met_info['files'])
return {
"output": {
"grid_filetype": "NETCDF",
"grid_filename": self._output_file_name,
"parameters": {"pm25": "PM25"},
"grid_parameters": self._grid_params
},
"num_processes": self._num_processes,
"met_info": self._met_info,
"carryover": {
"any": bool(self._has_parinit) and any(self._has_parinit),
"all": bool(self._has_parinit) and all(self._has_parinit)
}
}
##
## Setting met info
##
def _get_met_file(self, met_file_info):
if not met_file_info.get('file'):
raise ValueError("ARL file not defined for specified date range")
if not os.path.exists(met_file_info['file']):
raise ValueError("ARL file does not exist: {}".format(
met_file_info['file']))
return met_file_info['file']
def _get_met_hours(self, met_file_info):
first_hour = parse_datetime(met_file_info['first_hour'], 'first_hour')
last_hour = parse_datetime(met_file_info['last_hour'], 'last_hour')
hours = [first_hour + datetime.timedelta(hours=n)
for n in range(int((last_hour-first_hour).total_seconds() / 3600) + 1)]
return hours
def _set_met_info(self, met_info):
# TODO: move validation code into common module met.arl.validation ?
self._met_info = {}
if met_info.get('grid'):
self._met_info['grid'] = met_info['grid']
# The grid fields, 'domain', 'boundary', and 'grid_spacing_km' can be
# defined either in the met object or in the hysplit config. Exceptions
# will be raised downstream if not defined in either place
# hysplit just needs the name, but we need to know the hours with
# met data for adjusting the dispersion time window
self._met_info['files'] = set()
self._met_info['hours'] = set()
if not met_info.get('files'):
raise ValueError("Met info lacking arl file information")
for met_file_info in met_info.pop('files'):
self._met_info['files'].add(self._get_met_file(met_file_info))
self._met_info['hours'].update(self._get_met_hours(met_file_info))
def _adjust_dispersion_window_for_available_met(self):
n = 0
while n < self._num_hours:
hr = self._model_start + datetime.timedelta(hours=n)
if hr not in self._met_info['hours']:
break
n += 1
if n == 0:
raise ValueError(
"No ARL met data for first hour of dispersion window")
elif n < self._num_hours:
self._record_warning("Incomplete met. Running dispersion for"
" {} hours instead of {}".format(n, self._num_hours))
self._num_hours = n
# Number of quantiles in vertical emissions allocation scheme
NQUANTILES = 20
def _set_reduction_factor(self):
"""Retrieve factor for reducing the number of vertical emission levels"""
# Ensure the factor divides evenly into the number of quantiles.
# For the 20 quantile vertical accounting scheme, the following values are appropriate:
# reductionFactor = 1 .... 20 emission levels (no change from the original scheme)
# reductionFactor = 2......10 emission levels
# reductionFactor = 4......5 emission levels
# reductionFactor = 5......4 emission levels
# reductionFactor = 10.....2 emission levels
# reductionFactor = 20.....1 emission level
# Pull reduction factor from user input
self._reduction_factor = self.config("VERTICAL_EMISLEVELS_REDUCTION_FACTOR")
self._reduction_factor = int(self._reduction_factor)
# Ensure a valid reduction factor
if self._reduction_factor > self.NQUANTILES:
self._reduction_factor = self.NQUANTILES
logging.debug("VERTICAL_EMISLEVELS_REDUCTION_FACTOR reset to %s" % str(self.NQUANTILES))
elif self._reduction_factor <= 0:
self._reduction_factor = 1
logging.debug("VERTICAL_EMISLEVELS_REDUCTION_FACTOR reset to 1")
while (self.NQUANTILES % self._reduction_factor) != 0: # make sure factor evenly divides into the number of quantiles
self._reduction_factor -= 1
logging.debug("VERTICAL_EMISLEVELS_REDUCTION_FACTOR reset to %s" % str(self._reduction_factor))
self.num_output_quantiles = self.NQUANTILES // self._reduction_factor
if self._reduction_factor != 1:
logging.info("Number of vertical emission levels reduced by factor of %s" % str(self._reduction_factor))
logging.info("Number of vertical emission quantiles will be %s" % str(self.num_output_quantiles))
def _compute_tranches(self):
tranching_config = {
'num_processes': self.config("NPROCESSES"),
'num_fires_per_process': self.config("NFIRES_PER_PROCESS"),
'num_processes_max': self.config("NPROCESSES_MAX"),
# The 'or 0' handles None value
'parinit_or_pardump': int(self.config("NINIT") or 0) > 0
or self.config("MAKE_INIT_FILE")
}
# Note: organizing the fire sets is wasted computation if we end up
# running only one process, but doing so before looking at the
# NPROCESSES, NFIRES_PER_PROCESS, NPROCESSES_MAX config values allows
# for more code to be encapsulated in hysplit_utils, which then allows
# for greater testability. (hysplit_utils.create_fire_sets could be
# skipped if either NPROCESSES > 1 or NFIRES_PER_PROCESS > 1)
self._fire_sets = hysplit_utils.create_fire_sets(self._fires)
self._num_processes = hysplit_utils.compute_num_processes(
len(self._fire_sets), **tranching_config)
def _run_parallel(self, working_dir):
runner = self
class T(threading.Thread):
def __init__(self, fires, config, working_dir, tranche_num):
super(T, self).__init__()
self.fires = fires
self.config = config
self.working_dir = working_dir
if not os.path.exists(working_dir):
os.makedirs(working_dir)
self.tranche_num = tranche_num
self.exc = None
def run(self):
# We need to set config to what was loaded in the main thread.
# Otherwise, we'll just be using defaults
Config().set(self.config)
try:
runner._run_process(self.fires, self.working_dir,
self.tranche_num)
except Exception as e:
self.exc = e
fire_tranches = hysplit_utils.create_fire_tranches(self._fire_sets,
self._num_processes, self._model_start, self._num_hours,
self._grid_params)
threads = []
main_thread_config = Config().get()
for nproc in range(len(fire_tranches)):
fires = fire_tranches[nproc]
# Note: no need to set _context.basedir; it will be set to workdir
logging.info("Starting thread to run HYSPLIT on %d fires." % (len(fires)))
t = T(fires, main_thread_config,
os.path.join(working_dir, str(nproc)), nproc)
t.start()
threads.append(t)
# If there were any exceptions, raise one of them after joining all threads
exc = None
for t in threads:
t.join()
if t.exc:
exc = t.exc # TODO: just raise exception here, possibly before all threads have been joined?
if exc:
raise exc
# 'ttl' is sum of values; see http://nco.sourceforge.net/nco.html#Operation-Types
# sum together all the PM2.5 fields then append the TFLAG field from
# one of the individual runs (they're all the same)
# using run 0 as it should always be present regardless of how many
# processes were used....
# prevents ncea from adding all the TFLAGs together and mucking up the
# date
output_file = os.path.join(working_dir, self._output_file_name)
#ncea_args = ["-y", "ttl", "-O"]
ncea_args = ["-O","-v","PM25","-y","ttl"]
ncea_args.extend(["%d/%s" % (i, self._output_file_name) for i in range(self._num_processes)])
ncea_args.append(output_file)
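# Illustrative resulting commands for a two-process run (assuming the output
# file is named hysplit_conc.nc; the actual name comes from 'output_file_name'):
#   ncea -O -v PM25 -y ttl 0/hysplit_conc.nc 1/hysplit_conc.nc hysplit_conc.nc
#   ncks -A -v TFLAG 0/hysplit_conc.nc hysplit_conc.nc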
io.SubprocessExecutor().execute(self.BINARIES['NCEA'], *ncea_args, cwd=working_dir)
ncks_args = ["-A","-v","TFLAG"]
ncks_args.append("0/%s" % (self._output_file_name))
ncks_args.append(output_file)
io.SubprocessExecutor().execute(self.BINARIES['NCKS'], *ncks_args, cwd=working_dir)
self._archive_file(output_file)
def _run_process(self, fires, working_dir, tranche_num=None):
hysplit_utils.ensure_tranch_has_dummy_fire(fires, self._model_start,
self._num_hours, self._grid_params)
logging.info("Running one HYSPLIT49 Dispersion model process")
# TODO: set all but fires, working_dir, and tranche_num as instance
# properties in self.run so that they don't have to be passed into
# each call to _run_process.
# The only things that change from call to call are working_dir,
# fires, and tranche_num
self._create_sym_links_for_process(working_dir)
emissions_file = os.path.join(working_dir, "EMISS.CFG")
control_file = os.path.join(working_dir, "CONTROL")
setup_file = os.path.join(working_dir, "SETUP.CFG")
message_files = [os.path.join(working_dir, "MESSAGE")]
output_conc_file = os.path.join(working_dir, "hysplit.con")
output_file = os.path.join(working_dir, self._output_file_name)
# NINIT: sets how particle init file is to be used
# 0 = no particle initialization file read (default)
# 1 = read parinit file only once at initialization time
# 2 = check each hour, if there is a match then read those values in
# 3 = like '2' but replace emissions instead of adding to existing
# particles
ninit_val = int(self.config("NINIT") or 0)
# need an input file if ninit_val > 0
if ninit_val > 0:
# name of pardump input file, parinit (check for strftime strings)
parinit = self.config("PARINIT")
if "%" in parinit:
parinit = self._model_start.strftime(parinit)
if tranche_num is not None:
parinit = parinit + "-" + str(tranche_num).zfill(2)
parinitFiles = [ parinit ]
# if an MPI run need to create the full list of expected files
# based on the number of CPUs
if self.config("MPI"):
NCPUS = self.config("NCPUS")
parinitFiles = ["%s.%3.3i" % ( parinit, (i+1)) for i in range(NCPUS)]
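# e.g. (illustrative): parinit "PARINIT-00" with NCPUS = 4 expands to
# ["PARINIT-00.001", "PARINIT-00.002", "PARINIT-00.003", "PARINIT-00.004"]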
# loop over parinitFiles check if exists.
# for MPI runs check that all files exist...if any in the list
# don't exist raise exception if STOP_IF_NO_PARINIT is True
# if STOP_IF_NO_PARINIT is False and all/some files don't exist,
# set ninit_val to 0 and issue warning.
for f in parinitFiles:
if not os.path.exists(f):
if self.config("STOP_IF_NO_PARINIT"):
msg = "Matching particle init file, %s, not found. Stop." % f
raise Exception(msg)
msg = "No matching particle initialization file, %s, found; Using no particle initialization" % f
logging.warning(msg)
logging.debug(msg)
ninit_val = 0
self._has_parinit.append(False)
else:
logging.info("Using particle initialization file %s" % f)
self._has_parinit.append(True)
# Prepare for run ... get pardump name just in case needed
pardump = self.config("PARDUMP")
if "%" in pardump:
pardump = self._model_start.strftime(pardump)
if tranche_num is not None:
pardump = pardump + '-' + str(tranche_num).zfill(2)
pardumpFiles = [ pardump ]
# If MPI run
if self.config("MPI"):
NCPUS = self.config("NCPUS")
logging.info("Running MPI HYSPLIT with %s processors." % NCPUS)
if NCPUS < 1:
logging.warning("Invalid NCPUS specified...resetting NCPUS to 1 for this run.")
NCPUS = 1
message_files = ["MESSAGE.%3.3i" % (i+1) for i in range(NCPUS)]
# name of the pardump files (one for each CPU)
if self.config("MAKE_INIT_FILE"):
pardumpFiles = ["%s.%3.3i" % ( pardump, (i+1)) for i in range(NCPUS)]
# what command do we use to issue an mpi version of hysplit
# TODO: either update the following checks for self.BINARIES['MPI'] and
# self.BINARIES['HYSPLIT_MPI'] to try running with -v or -h option or
# something similar, or remove them
# if not os.path.isfile(self.BINARIES['MPI']):
# msg = "Failed to find %s. Check self.BINARIES['MPI'] setting and/or your MPICH2 installation." % mpiexec
# raise AssertionError(msg)
# if not os.path.isfile(self.BINARIES['HYSPLIT_MPI']):
# msg = "HYSPLIT MPI executable %s not found." % self.BINARIES['HYSPLIT_MPI']
# raise AssertionError(msg)
# Else single cpu run
else:
NCPUS = 1
self._write_emissions(fires, emissions_file)
self._write_control_file(fires, control_file, output_conc_file)
self._write_setup_file(fires, emissions_file, setup_file, ninit_val, NCPUS, tranche_num)
try:
# Run HYSPLIT
if self.config("MPI"):
args = [self.BINARIES['MPI']]
args.extend(["-n", str(NCPUS), self.BINARIES['HYSPLIT_MPI']])
io.SubprocessExecutor().execute(*args, cwd=working_dir)
else: # standard serial run
io.SubprocessExecutor().execute(self.BINARIES['HYSPLIT'], cwd=working_dir)
if not os.path.exists(output_conc_file):
msg = "HYSPLIT failed, check MESSAGE file for details"
raise AssertionError(msg)
self._archive_file(output_conc_file, tranche_num=tranche_num)
if self.config('CONVERT_HYSPLIT2NETCDF'):
logging.info("Converting HYSPLIT output to NetCDF format: %s -> %s" % (output_conc_file, output_file))
io.SubprocessExecutor().execute(self.BINARIES['HYSPLIT2NETCDF'],
"-I" + output_conc_file,
"-O" + os.path.basename(output_file),
"-X1000000.0", # Scale factor to convert from grams to micrograms
"-D1", # Debug flag
"-L-1", # Lx is x layers. x=-1 for all layers...breaks KML output for multiple layers
cwd=working_dir
)
if not os.path.exists(output_file):
msg = "Unable to convert HYSPLIT concentration file to NetCDF format"
raise AssertionError(msg)
self._archive_file(output_file, tranche_num=tranche_num)
finally:
# Archive input files
self._archive_file(emissions_file, tranche_num=tranche_num)
self._archive_file(control_file, tranche_num=tranche_num)
self._archive_file(setup_file, tranche_num=tranche_num)
# Archive data files
for f in message_files:
self._archive_file(f, tranche_num=tranche_num)
if self.config("MAKE_INIT_FILE") and self.config('archive_pardump_files'):
for f in pardumpFiles:
self._archive_file(f, tranche_num=tranche_num)
#shutil.copy2(os.path.join(working_dir, f), self._run_output_dir)
def _archive_file(self, filename, tranche_num=None):
if tranche_num is None:
super()._archive_file(filename)
# Only archive tranched files if configured to do so
elif self.config('archive_tranche_files'):
super()._archive_file(filename, suffix=tranche_num)
def _create_sym_links_for_process(self, working_dir):
for f in self._met_info['files']:
# bluesky.modules.dispersion.run will have weeded out met
# files that aren't relevant to this dispersion run
io.create_sym_link(f,
os.path.join(working_dir, os.path.basename(f)))
# Create sym links to ancillary data files (note: HYSPLIT49 balks
# if it can't find ASCDATA.CFG).
io.create_sym_link(self.config("ASCDATA_FILE"),
os.path.join(working_dir, 'ASCDATA.CFG'))
io.create_sym_link(self.config("LANDUSE_FILE"),
os.path.join(working_dir, 'LANDUSE.ASC'))
io.create_sym_link(self.config("ROUGLEN_FILE"),
os.path.join(working_dir, 'ROUGLEN.ASC'))
def _get_hour_data(self, dt, fire):
if fire.plumerise and fire.timeprofiled_emissions and fire.timeprofiled_area:
local_dt = dt + datetime.timedelta(hours=fire.utc_offset)
# TODO: will fire.plumerise and fire.timeprofile always
# have string value keys
local_dt = local_dt.strftime('%Y-%m-%dT%H:%M:%S')
plumerise_hour = fire.plumerise.get(local_dt)
timeprofiled_emissions_hour = fire.timeprofiled_emissions.get(local_dt)
hourly_area = fire.timeprofiled_area.get(local_dt)
if plumerise_hour and timeprofiled_emissions_hour and hourly_area:
return False, plumerise_hour, timeprofiled_emissions_hour, hourly_area
return (True, hysplit_utils.DUMMY_PLUMERISE_HOUR, dict(), 0.0)
def _write_emissions(self, fires, emissions_file):
# A value slightly above ground level at which to inject smoldering
# emissions into the model.
smolder_height = self.config("SMOLDER_HEIGHT")
# sub-hour emissions?
SERI = self.config("SUBHOUR_EMISSIONS_REDUCTION_INTERVAL")
# must be 1 to 12 and result in an integer when 60 is divided by it
if ( SERI < 1 or SERI > 13 ):
SERI = 1
temp = 60%SERI
if temp > 0:
SERI = 1
minutes_per_interval = int(60/SERI)
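# e.g. SERI = 4 yields four 15-minute emission records per fire per hour,
# while SERI = 1 (the fallback) yields a single hourly record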
with open(emissions_file, "w") as emis:
# HYSPLIT skips past the first two records, so these are for comment purposes only
emis.write("emissions group header: YYYY MM DD HH QINC NUMBER\n")
emis.write("each emission's source: YYYY MM DD HH MM DUR_HHMM LAT LON HT RATE AREA HEAT\n")
# Loop through the timesteps
for hour in range(self._num_hours):
dt = self._model_start + datetime.timedelta(hours=hour)
dt_str = dt.strftime("%y %m %d %H")
num_fires = len(fires)
#num_heights = 21 # 20 quantile gaps, plus ground level
num_heights = self.num_output_quantiles + 1
num_sources = num_fires * num_heights * SERI
# TODO: What is this and what does it do?
# A reasonable guess would be that it means a time increment of 1 hour
qinc = 1
# Write the header line for this timestep
emis.write("%s %02d %04d\n" % (dt_str, qinc, num_sources))
fires_wo_emissions = 0
# Loop through the fire locations
for fire in fires:
# loop over sub-hour interval (default hourly)
icount = 0
for interval in range(SERI):
min_dur_str = "{:0>2}".format(icount*minutes_per_interval) + " 00"+"{:0>2}".format(minutes_per_interval)
if (SERI == 1):
min_dur_str = "00 0100"
icount += 1
# Get some properties from the fire location
lat = fire.latitude
lon = fire.longitude
# If we don't have real data for the given timestep, we apparently need
# to stick in dummy records anyway (so we have the correct number of sources).
(dummy, plumerise_hour, timeprofiled_emissions_hour,
hourly_area) = self._get_hour_data(dt, fire)
if dummy:
logging.debug("Fire %s has no emissions for hour %s", fire.id, hour)
fires_wo_emissions += 1
area_meters = 0.0
smoldering_fraction = 0.0
pm25_injected = 0.0
if not dummy:
# Extract the fraction of area burned in this timestep, and
# convert it from acres to square meters.
area_meters = hourly_area * SQUARE_METERS_PER_ACRE
smoldering_fraction = plumerise_hour['smolder_fraction']
# Compute the total PM2.5 emitted at this timestep (grams) by
# multiplying the phase-specific total emissions by the
# phase-specific hourly fractions for this hour to get the
# hourly emissions by phase for this hour, and then summing
# the three values to get the total emissions for this hour
# TODO: use fire.timeprofiled_emissions[local_dt]['PM2.5']
pm25_emitted = timeprofiled_emissions_hour.get('PM2.5', 0.0)
pm25_emitted *= GRAMS_PER_TON
# Total PM2.5 smoldering (not lofted in the plume)
pm25_injected = pm25_emitted * smoldering_fraction
entrainment_fraction = 1.0 - smoldering_fraction
# We don't assign any heat, so the PM2.5 mass isn't lofted
# any higher. This is because we are assigning explicit
# heights from the plume rise.
heat = 0.0
# Inject the smoldering fraction of the emissions at ground level
# (SMOLDER_HEIGHT represents a value slightly above ground level)
height_meters = smolder_height
# Write the smoldering record to the file
record_fmt = "%s %s %8.4f %9.4f %6.0f %7.2f %7.2f %15.2f\n"
emis.write(record_fmt % (dt_str, min_dur_str, lat, lon, height_meters, pm25_injected, area_meters, heat))
for level in range(0, len(plumerise_hour['heights']) - 1, self._reduction_factor):
height_meters = 0.0
pm25_injected = 0.0
if not dummy:
# Loop through the heights (20 quantiles of smoke density)
# For the unreduced case, we loop through 20 quantiles, but we have
# 21 quantile-edge measurements. So for each
# quantile gap, we need to find a point halfway
# between the two edges and inject that quantile's fraction of total emissions
# KJC optimization...
# Reduce the number of vertical emission levels by a reduction factor
# and place the appropriate fraction of emissions at each level.
# ReductionFactor MUST evenly divide into the number of quantiles
lower_height = plumerise_hour['heights'][level]
upper_height_index = min(level + self._reduction_factor, len(plumerise_hour['heights']) - 1)
upper_height = plumerise_hour['heights'][upper_height_index]
if self._reduction_factor == 1:
height_meters = (lower_height + upper_height) / 2.0 # original approach
else:
height_meters = upper_height # top-edge approach
# Total PM2.5 entrained (lofted in the plume)
pm25_entrained = pm25_emitted * entrainment_fraction
# Inject the proper fraction of the entrained PM2.5 in each quantile gap.
fraction = sum(plumerise_hour['emission_fractions'][level:level+self._reduction_factor])
pm25_injected = pm25_entrained * fraction
# Write the record to the file
emis.write(record_fmt % (dt_str, min_dur_str, lat, lon, height_meters, pm25_injected, area_meters, heat))
if fires_wo_emissions > 0:
logging.debug("%d of %d fires had no emissions for hour %d", fires_wo_emissions, num_fires, hour)
VERTICAL_CHOICES = {
"DATA": 0,
"ISOB": 1,
"ISEN": 2,
"DENS": 3,
"SIGMA": 4,
"DIVERG": 5,
"ETA": 6
}
def _get_vertical_method(self):
# Vertical motion choices:
VERTICAL_METHOD = self.config("VERTICAL_METHOD")
try:
verticalMethod = self.VERTICAL_CHOICES[VERTICAL_METHOD]
except KeyError:
verticalMethod = self.VERTICAL_CHOICES["DATA"]
return verticalMethod
def _set_grid_params(self):
self._grid_params = hysplit_utils.get_grid_params(
met_info=self._met_info, fires=self._fires)
def _write_control_file(self, fires, control_file, concFile):
# sub-hour emissions?
SERI = self.config("SUBHOUR_EMISSIONS_REDUCTION_INTERVAL")
# must be 1 to 12 and result in an integer when 60 is divided by it
if ( SERI < 1 or SERI > 13 ):
SERI = 1
temp = 60%SERI
if temp > 0:
SERI = 1
num_fires = len(fires)
num_heights = self.num_output_quantiles + 1 # number of quantiles used, plus ground level
num_sources = num_fires * num_heights * SERI
# An arbitrary height value. Used for the default source height
# in the CONTROL file. This can be anything we want, because
# the actual source heights are overridden in the EMISS.CFG file.
sourceHeight = 15.0
verticalMethod = self._get_vertical_method()
# Height of the top of the model domain
modelTop = self.config("TOP_OF_MODEL_DOMAIN")
#modelEnd = self._model_start + datetime.timedelta(hours=self._num_hours)
# Build the vertical Levels string
levels = self.config("VERTICAL_LEVELS")
numLevels = len(levels)
verticalLevels = " ".join(str(x) for x in levels)
# Warn about multiple sampling grid levels and KML/PNG image generation
if numLevels > 1:
logging.warning("KML and PNG images will be empty since more than 1 vertical level is selected")
# To minimize change in the following code, set aliases
centerLat = self._grid_params["center_latitude"]
centerLon = self._grid_params["center_longitude"]
widthLon = self._grid_params["width_longitude"]
heightLat = self._grid_params["height_latitude"]
spacingLon = self._grid_params["spacing_longitude"]
spacingLat = self._grid_params["spacing_latitude"]
# Decrease the grid resolution based on number of fires
if self.config("OPTIMIZE_GRID_RESOLUTION"):
logging.info("Grid resolution adjustment option invoked")
minSpacingLon = spacingLon
minSpacingLat = spacingLat
maxSpacingLon = self.config("MAX_SPACING_LONGITUDE")
maxSpacingLat = self.config("MAX_SPACING_LATITUDE")
intervals = sorted([int(x) for x in self.config("FIRE_INTERVALS")])
# Maximum grid spacing cannot be smaller than the minimum grid spacing
if maxSpacingLon < minSpacingLon:
maxSpacingLon = minSpacingLon
logging.debug("maxSpacingLon > minSpacingLon...longitude grid spacing will not be adjusted")
if maxSpacingLat < minSpacingLat:
maxSpacingLat = minSpacingLat
logging.debug("maxSpacingLat > minSpacingLat...latitude grid spacing will not be adjusted")
# Throw out negative intervals
intervals = [x for x in intervals if x >= 0]
if len(intervals) == 0:
intervals = [0,num_fires]
logging.debug("FIRE_INTERVALS had no values >= 0...grid spacing will not be adjusted")
# First bin should always start with zero
if intervals[0] != 0:
intervals.insert(0,0)
logging.debug("Zero added to the beginning of FIRE_INTERVALS list")
# must always have at least 2 intervals
if len(intervals) < 2:
intervals = [0,num_fires]
logging.debug("Need at least two FIRE_INTERVALS...grid spacing will not be adjusted")
# Increase the grid spacing depending on number of fires
i = 0
numBins = len(intervals)
rangeSpacingLat = (maxSpacingLat - minSpacingLat)/(numBins - 1)
rangeSpacingLon = (maxSpacingLon - minSpacingLon)/(numBins - 1)
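# Worked example (illustrative numbers): intervals [0, 100, 200, 1000] with
# min spacing 0.01 deg and max spacing 0.05 deg give numBins = 4 and a step of
# (0.05 - 0.01)/3; for 250 fires the loop below lands on 0.01 + 2*step ~= 0.037 deg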
for interval in intervals:
if num_fires > interval:
spacingLat = minSpacingLat + (i * rangeSpacingLat)
spacingLon = minSpacingLon + (i * rangeSpacingLon)
i += 1
logging.debug("Lon,Lat grid spacing for interval %d adjusted to %f,%f" % (interval,spacingLon,spacingLat))
logging.info("Lon/Lat grid spacing for %d fires will be %f,%f" % (num_fires,spacingLon,spacingLat))
# Note: Due to differences in projections, the dimensions of this
# output grid are conservatively large.
logging.info("HYSPLIT grid CENTER_LATITUDE = %s" % centerLat)
logging.info("HYSPLIT grid CENTER_LONGITUDE = %s" % centerLon)
logging.info("HYSPLIT grid HEIGHT_LATITUDE = %s" % heightLat)
logging.info("HYSPLIT grid WIDTH_LONGITUDE = %s" % widthLon)
logging.info("HYSPLIT grid SPACING_LATITUDE = %s" % spacingLat)
logging.info("HYSPLIT grid SPACING_LONGITUDE = %s" % spacingLon)
with open(control_file, "w") as f:
# Starting time (year, month, day hour)
f.write(self._model_start.strftime("%y %m %d %H") + "\n")
# Number of sources
f.write("%d\n" % num_sources)
# Source locations
for fire in fires:
for height in range(num_heights):
for intervals in range(SERI):
f.write("%9.3f %9.3f %9.3f\n" % (fire.latitude, fire.longitude, sourceHeight))
# Total run time (hours)
f.write("%04d\n" % self._num_hours)
# Method to calculate vertical motion
f.write("%d\n" % verticalMethod)
# Top of model domain
f.write("%9.1f\n" % modelTop)
# Number of input data grids (met files)
f.write("%d\n" % len(self._met_info['files']))
# Directory for input data grid and met file name
for filename in sorted(self._met_info['files']):
f.write("./\n")
f.write("%s\n" % os.path.basename(filename))
# Number of pollutants = 1 (only modeling PM2.5 for now)
f.write("1\n")
# Pollutant ID (4 characters)
f.write("PM25\n")
# Emissions rate (per hour) (Ken's code says "Emissions source strength (mass per second)" -- which is right?)
f.write("0.001\n")
# Duration of emissions (hours)
f.write(" %9.3f\n" % self._num_hours)
# Source release start time (year, month, day, hour, minute)
f.write("%s\n" % self._model_start.strftime("%y %m %d %H %M"))
# Number of simultaneous concentration grids
f.write("1\n")
# Sampling grid center location (latitude, longitude)
f.write("%9.3f %9.3f\n" % (centerLat, centerLon))
# Sampling grid spacing (degrees latitude and longitude)
f.write("%9.3f %9.3f\n" % (spacingLat, spacingLon))
# Sampling grid span (degrees latitude and longitude)
f.write("%9.3f %9.3f\n" % (heightLat, widthLon))
# Directory of concentration output file
f.write("./\n")
# Filename of concentration output file
f.write("%s\n" % os.path.basename(concFile))
# Number of vertical concentration levels in output sampling grid
f.write("%d\n" % numLevels)
# Height of each sampling level in meters AGL
f.write("%s\n" % verticalLevels)
# Sampling start time (year month day hour minute)
f.write("%s\n" % self._model_start.strftime("%y %m %d %H %M"))
# Sampling stop time (year month day hour minute)
# (equivalent to the model start time plus the run duration in hours)
model_end = self._model_start + datetime.timedelta(
hours=self._num_hours)
f.write("%s\n" % model_end.strftime("%y %m %d %H %M"))
# Sampling interval (type hour minute)
# A type of 0 gives an average over the interval.
sampling_interval_type = int(self.config("SAMPLING_INTERVAL_TYPE"))
sampling_interval_hour = int(self.config("SAMPLING_INTERVAL_HOUR"))
sampling_interval_min = int(self.config("SAMPLING_INTERVAL_MIN"))
#f.write("0 1 00\n")
f.write("%d %d %d\n" % (sampling_interval_type, sampling_interval_hour, sampling_interval_min))
# Number of pollutants undergoing deposition
f.write("1\n") # only modeling PM2.5 for now
# Particle diameter (um), density (g/cc), shape
particle_diameter = self.config("PARTICLE_DIAMETER")
particle_density = self.config("PARTICLE_DENSITY")
particle_shape = self.config("PARTICLE_SHAPE")
#f.write("1.0 1.0 1.0\n")
f.write("%g %g %g\n" % ( particle_diamater, particle_density, particle_shape))
# Dry deposition:
# deposition velocity (m/s),
# molecular weight (g/mol),
# surface reactivity ratio,
# diffusivity ratio,
# effective Henry's constant
dry_dep_velocity = self.config("DRY_DEP_VELOCITY")
dry_dep_mol_weight = self.config("DRY_DEP_MOL_WEIGHT")
dry_dep_reactivity = self.config("DRY_DEP_REACTIVITY")
dry_dep_diffusivity = self.config("DRY_DEP_DIFFUSIVITY")
dry_dep_eff_henry = self.config("DRY_DEP_EFF_HENRY")
#f.write("0.0 0.0 0.0 0.0 0.0\n")
f.write("%g %g %g %g %g\n" % ( dry_dep_velocity, dry_dep_mol_weight, dry_dep_reactivity, dry_dep_diffusivity, dry_dep_eff_henry))
# Wet deposition (gases):
# actual Henry's constant (M/atm),
# in-cloud scavenging ratio (L/L),
# below-cloud scavenging coefficient (1/s)
wet_dep_actual_henry = self.config("WET_DEP_ACTUAL_HENRY")
wet_dep_in_cloud_scav = self.config("WET_DEP_IN_CLOUD_SCAV")
wet_dep_below_cloud_scav = self.config("WET_DEP_BELOW_CLOUD_SCAV")
#f.write("0.0 0.0 0.0\n")
f.write("%g %g %g\n" % ( wet_dep_actual_henry, wet_dep_in_cloud_scav, wet_dep_below_cloud_scav ))
# Radioactive decay half-life (days)
radioactive_half_life = self.config("RADIOACTIVE_HALF_LIVE")
#f.write("0.0\n")
f.write("%g\n" % radioactive_half_life)
# Pollutant deposition resuspension constant (1/m)
# non-zero requires the definition of a deposition grid
f.write("0.0\n")
def _write_setup_file(self, fires, emissions_file, setup_file, ninit_val, ncpus, tranche_num):
# Advanced setup options
# adapted from Robert's HysplitGFS Perl script
khmax_val = int(self.config("KHMAX"))
# pardump vars
ndump_val = int(self.config("NDUMP"))
ncycl_val = int(self.config("NCYCL"))
dump_datetime = self._model_start + datetime.timedelta(hours=ndump_val)
# emission cycle time
qcycle_val = self.config("QCYCLE")
# type of dispersion to use
initd_val = int(self.config("INITD"))
# set time step stuff
tratio_val = self.config("TRATIO")
delt_val = self.config("DELT")
# set numpar (if 0 then set to num_fires * num_heights)
# else set to value given (hysplit default of 500)
num_fires = len(fires)
num_heights = self.num_output_quantiles + 1
numpar_val = int(self.config("NUMPAR"))
num_sources = numpar_val
if numpar_val == 0:
num_sources = num_fires * num_heights
# set maxpar. if 0 set to num_sources (ie, numpar) * 1000/ncpus
# else set to value given (hysplit default of 10000)
maxpar_val = int(self.config("MAXPAR"))
max_particles = maxpar_val
if maxpar_val == 0:
max_particles = (num_sources * 1000) / ncpus
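# Worked example (illustrative): NUMPAR = 0, 10 fires, 21 levels (reduction
# factor 1), 4 CPUs -> num_sources = 210 and max_particles = 210 * 1000 / 4 = 52500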
# name of the particle input file (check for strftime strings)
parinit = self.config("PARINIT")
if "%" in parinit:
parinit = self._model_start.strftime(parinit)
if tranche_num is not None:
parinit = parinit + '-' + str(tranche_num).zfill(2)
# name of the particle output file (check for strftime strings)
pardump = self.config("PARDUMP")
if "%" in pardump:
pardump = self._model_start.strftime(pardump)
if tranche_num is not None:
pardump = pardump + '-' + str(tranche_num).zfill(2)
# conversion module
ichem_val = int(self.config("ICHEM"))
# minimum size in grid units of the meteorological sub-grid
mgmin_val = int(self.config("MGMIN"))
with open(setup_file, "w") as f:
f.write("&SETUP\n")
# conversion module
f.write(" ICHEM = %d,\n" % ichem_val)
# qcycle: the number of hours between emission start cycles
f.write(" QCYCLE = %f,\n" % qcycle_val)
# mgmin: default is 10 (from the hysplit user manual). however,
# once a run complained and said I need to raise this
# variable to some value around what I have here
f.write(" MGMIN = %d,\n" % mgmin_val)
# maxpar: max number of particles that are allowed to be active at one time
f.write(" MAXPAR = %d,\n" % max_particles)
# numpar: number of particles (or puffs) permitted to be released
# during one time step
f.write(" NUMPAR = %d,\n" % num_sources)
# khmax: maximum particle duration in terms of hours after release
f.write(" KHMAX = %d,\n" % khmax_val)
# delt: used to set time step integration interval (used along
# with tratio)
f.write(" DELT = %g,\n" % delt_val)
f.write(" TRATIO = %g,\n" % tratio_val)
# initd: # 0 - Horizontal and Vertical Particle
# 1 - Horizontal Gaussian Puff, Vertical Top Hat Puff
# 2 - Horizontal and Vertical Top Hat Puff
# 3 - Horizontal Gaussian Puff, Vertical Particle
# 4 - Horizontal Top-Hat Puff, Vertical Particle (default)
f.write(" INITD = %d,\n" % initd_val)
# make the 'smoke initialization' files?
# pinpf: particle initialization file (see also ninit)
if ninit_val > 0:
f.write(" PINPF = \"%s\",\n" % parinit)
# ninit: (used alongside parinit) sets the type of initialization...
# 0 - no initialization (even if files are present)
# 1 = read parinit file only once at initialization time
# 2 = check each hour, if there is a match then read those
# values in
# 3 = like '2' but replace emissions instead of adding to
# existing particles
f.write(" NINIT = %d,\n" % ninit_val)
# pardump: particle output/dump file
if self.config("MAKE_INIT_FILE"):
pardump_dir = os.path.dirname(pardump)
if not os.path.isdir(pardump_dir):
# Even though we check if the dir exists before calling
# os.makedirs, set exist_ok=True in case of race
# condition in multi-process mode. (It's happened)
os.makedirs(pardump_dir, exist_ok=True)
f.write(" POUTF = \"%s\",\n" % pardump)
logging.info("Dumping particles to %s starting at %s every %s hours" % (pardump, dump_datetime, ncycl_val))
# ndump: when/how often to dump a pardump file; negative values
# indicate to create just one 'restart' file at
# abs(hours) after the model start
# NOTE: negative hours do not actually appear to be supported, rcs)
if self.config("MAKE_INIT_FILE"):
f.write(" NDUMP = %d,\n" % ndump_val)
# ncycl: set the interval at which time a pardump file is written
# after the 1st file (which is first created at
# T = ndump hours after the start of the model simulation)
if self.config("MAKE_INIT_FILE"):
f.write(" NCYCL = %d,\n" % ncycl_val)
# efile: the name of the emissions info file (used to vary emission rate, etc.,
# and can also be used to change emission timing)
f.write(" EFILE = \"%s\",\n" % os.path.basename(emissions_file))
f.write("&END\n")
|
pnwairfire/bluesky
|
bluesky/dispersers/hysplit/hysplit.py
|
Python
|
gpl-3.0
| 48,822
|
[
"Gaussian",
"NetCDF"
] |
8445c3d7e14b040edc92ae1b93adf6d6278f85068ee84b479a816a6dd58743d0
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Deposition data model classes.
Classes for wrapping BibWorkflowObject and friends to make it easier to
work with the data attributes.
"""
from uuid import uuid4
import json
import os
from datetime import datetime
from dateutil.tz import tzutc
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.datastructures import MultiDict
from werkzeug.utils import secure_filename
from flask import redirect, render_template, flash, url_for, request, \
session, current_app
from flask.ext.login import current_user
from flask.ext.restful import fields, marshal
from invenio.ext.restful import UTCISODateTime
from invenio.base.helpers import unicodifier
from invenio.ext.sqlalchemy import db
from invenio.modules.workflows.models import BibWorkflowObject, Workflow, \
ObjectVersion
from invenio.modules.workflows.engine import WorkflowStatus
from .form import CFG_FIELD_FLAGS, DataExporter
from .signals import file_uploaded
from .storage import Storage, DepositionStorage
#
# Exceptions
#
class DepositionError(Exception):
"""Base class for deposition errors."""
pass
class InvalidDepositionType(DepositionError):
"""Raise when a deposition type cannot be found."""
pass
class InvalidDepositionAction(DepositionError):
"""Raise when deposition is in an invalid state for action."""
pass
class DepositionDoesNotExists(DepositionError):
"""Raise when a deposition does not exists."""
pass
class DraftDoesNotExists(DepositionError):
"""Raise when a draft does not exists."""
pass
class FormDoesNotExists(DepositionError):
"""Raise when a draft does not exists."""
pass
class FileDoesNotExists(DepositionError):
"""Raise when a draft does not exists."""
pass
class DepositionNotDeletable(DepositionError):
"""Raise when a deposition cannot be deleted."""
pass
class FilenameAlreadyExists(DepositionError):
"""Raise when an identical filename is already present in a deposition."""
pass
class ForbiddenAction(DepositionError):
"""Raise when action on a deposition, draft or file is not authorized."""
pass
class InvalidApiAction(DepositionError):
"""Raise when an invalid API action is requested."""
pass
#
# Helpers
#
class FactoryMixin(object):
"""Mix-in class to help create objects from persisted object state."""
@classmethod
def factory(cls, state, *args, **kwargs):
obj = cls(*args, **kwargs)
obj.__setstate__(state)
return obj
#
# Primary classes
#
class DepositionType(object):
"""
A base class for the deposition types to ensure certain
properties are defined on each type.
A deposition type is just a BibWorkflow with a couple of extra methods.
To customize rendering behavior of the workflow for a given deposition type
you can override the render_error(), render_step() and render_completed()
methods.
"""
workflow = []
""" Workflow definition """
name = ""
""" Display name for this deposition type """
name_plural = ""
""" Plural version of display name for this deposition type """
enabled = False
""" Determines if type is enabled - TODO: REMOVE"""
default = False
"""
Determines if type is the default - warnings are issued if conflicts exist
TODO: remove
"""
deletable = False
"""
Determine if a deposition is deletable after submission.
"""
editable = False
"""
Determine if a deposition is editable after submission.
"""
stopable = False
"""
Determine if a deposition workflow can be stopped (i.e. discard changes).
"""
group = None
""" Name of group to include this type in. """
api = False
"""
Determines if API is enabled for this type (requires workflow to be
compatible with the API).
"""
draft_definitions = {'_default': None}
"""
Dictionary of all drafts for this deposition type
"""
marshal_file_fields = dict(
checksum=fields.String,
filename=fields.String(attribute='name'),
id=fields.String(attribute='uuid'),
filesize=fields.String(attribute='size'),
)
""" REST API structure of a file """
marshal_draft_fields = dict(
metadata=fields.Raw(attribute='values'),
completed=fields.Boolean,
id=fields.String,
)
""" REST API structure of a draft """
marshal_deposition_fields = dict(
id=fields.Integer,
title=fields.String,
created=UTCISODateTime,
modified=UTCISODateTime,
owner=fields.Integer(attribute='user_id'),
state=fields.String,
submitted=fields.Boolean,
files=fields.Nested(marshal_file_fields),
drafts=fields.Nested(marshal_draft_fields, attribute='drafts_list'),
)
""" REST API structure of a deposition """
@classmethod
def default_draft_id(cls, deposition):
return '_default'
@classmethod
def render_error(cls, dummy_deposition):
"""
Render a page when the deposition had a workflow error.
Method can be overwritten by subclasses to provide custom
user interface.
"""
flash('%(name)s deposition has returned error.' %
{'name': cls.name}, 'error')
return redirect(url_for('.index'))
@classmethod
def render_step(self, deposition):
"""
Render a page for a given deposition step.
Method can be overwritten by subclasses to provide custom
user interface.
"""
ctx = deposition.get_render_context()
if ctx:
return render_template(**ctx)
else:
return render_template('deposit/error.html', **dict(
depostion=deposition,
deposition_type=(
None if deposition.type.is_default()
else deposition.type.get_identifier()
),
uuid=deposition.id,
my_depositions=Deposition.get_depositions(
current_user, type=deposition.type
),
))
@classmethod
def render_completed(cls, dummy_deposition):
"""
Render page when deposition was successfully completed (i.e. workflow
just finished successfully).
Method can be overwritten by subclasses to provide custom
user interface.
"""
flash('%(name)s was successfully finished.' %
{'name': cls.name}, 'success')
return redirect(url_for('.index'))
@classmethod
def render_final(cls, deposition):
"""
Render page when deposition was *already* successfully completed (i.e.
a finished workflow is being executed a second time).
This allows you render e.g. a preview of the record. The distinction
between render_completed and render_final is primarily useful for the
REST API (see api_final and api_completed)
Method can be overwritten by subclasses to provide custom
user interface.
"""
return cls.render_completed(deposition)
@classmethod
def api_completed(cls, deposition):
"""
Workflow just finished processing, so return a 202 Accepted, since
usually further background processing may happen.
"""
return deposition.marshal(), 202
@classmethod
def api_final(cls, deposition):
"""
Workflow already finished, and the user tries to re-execute the
workflow, so send a 400 Bad Request back.
"""
return dict(
message="Deposition workflow already completed",
status=400,
), 400
@classmethod
def api_step(cls, deposition):
"""
Workflow was halted during processing. The workflow task that halted
processing is expected to provide a response to send back to the
client.
The default response code is 500 Internal Server Error. A workflow task
is expected to use Deposition.set_render_context() with a dictionary
which is returned to the client. Set the key 'status', to change the
status code, e.g.::
d.set_render_context(dict(status=400, message="Bad request"))
If no response is provided by the workflow task, it is regarded as
an internal server error.
"""
ctx = deposition.get_render_context()
if ctx:
return ctx.get('response', {}), ctx.get('status', 500)
return cls.api_error(deposition)
@classmethod
def api_error(cls, deposition):
return dict(message='Internal Server Error', status=500), 500
@classmethod
def api_action(cls, deposition, action_id):
if action_id == 'run':
return deposition.run_workflow(headless=True)
elif action_id == 'reinitialize':
deposition.reinitialize_workflow()
return deposition.run_workflow(headless=True)
elif action_id == 'stop':
deposition.stop_workflow()
return deposition.run_workflow(headless=True)
raise InvalidApiAction(action_id)
@classmethod
def api_metadata_schema(cls, draft_id):
"""
Get the input validation schema for this draft_id
Allows you to override API defaults.
"""
from wtforms.fields.core import FieldList, FormField
if draft_id in cls.draft_definitions:
schema = dict()
formclass = cls.draft_definitions[draft_id]
for fname, fclass in formclass()._fields.items():
if isinstance(fclass, FieldList):
schema[fname] = dict(type='list')
elif isinstance(fclass, FormField):
schema[fname] = dict(type='dict')
else:
schema[fname] = dict(type='any')
return dict(type='dict', schema=schema)
return None
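# Illustrative api_metadata_schema() result, assuming a draft form with a plain
# 'title' field and an 'authors' FieldList (hypothetical field names):
#   {'type': 'dict', 'schema': {'title': {'type': 'any'}, 'authors': {'type': 'list'}}}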
@classmethod
def marshal_deposition(cls, obj):
"""
Generate a JSON representation for REST API of a Deposition
"""
return marshal(obj, cls.marshal_deposition_fields)
@classmethod
def marshal_draft(cls, obj):
"""
Generate a JSON representation for REST API of a DepositionDraft
"""
return marshal(obj, cls.marshal_draft_fields)
@classmethod
def marshal_file(cls, obj):
"""
Generate a JSON representation for REST API of a DepositionFile
"""
return marshal(obj, cls.marshal_file_fields)
@classmethod
def authorize(cls, deposition, action):
if action == 'create':
return True # Any authenticated user
elif action == 'delete':
if deposition.has_sip():
return deposition.type.deletable
return True
elif action == 'reinitialize':
return deposition.type.editable
elif action == 'stop':
return deposition.type.stopable
elif action in ['add_file', 'remove_file', 'sort_files']:
# Don't allow adding/removing/sorting files after first submission
return not deposition.has_sip()
elif action in ['add_draft', ]:
# Allow adding drafts when in progress (independent of whether a
# SIP exists or not).
return deposition.state == 'inprogress'
else:
return not deposition.has_sip()
@classmethod
def authorize_draft(cls, deposition, draft, action):
if action == 'update':
# If deposition allows adding a draft, then allow editing the
# draft.
return cls.authorize(deposition, 'add_draft')
return cls.authorize(deposition, 'add_draft')
@classmethod
def authorize_file(cls, deposition, deposition_file, action):
return cls.authorize(deposition, 'add_file')
@classmethod
def get_identifier(cls):
""" Get type identifier (identical to workflow name) """
return cls.__name__
@classmethod
def is_enabled(cls):
""" Check if workflow is enabled """
# Wrapping in a method to eventually allow enabling/disabling
# via configuration.
return cls.enabled
@classmethod
def is_default(cls):
""" Check if workflow is the default """
# Wrapping in a method to eventually allow setting the default
# via configuration.
return cls.default
@classmethod
def run_workflow(cls, deposition):
"""
Run workflow for the given BibWorkflowObject.
Usually not invoked directly, but instead indirectly through
Deposition.run_workflow().
"""
if deposition.workflow_object.workflow is None or (
deposition.workflow_object.version == ObjectVersion.INITIAL
and
deposition.workflow_object.workflow.status ==
WorkflowStatus.NEW):
return deposition.workflow_object.start_workflow(
workflow_name=cls.get_identifier(),
id_user=deposition.workflow_object.id_user,
module_name="webdeposit"
)
else:
return deposition.workflow_object.continue_workflow(
start_point="restart_task",
)
@classmethod
def reinitialize_workflow(cls, deposition):
# Only reinitialize if really needed (i.e. you can only
# reinitialize a fully completed workflow).
wo = deposition.workflow_object
if wo.version == ObjectVersion.COMPLETED and \
wo.workflow.status == WorkflowStatus.COMPLETED:
wo.version = ObjectVersion.INITIAL
wo.workflow.status = WorkflowStatus.NEW
# Clear deposition drafts
deposition.drafts = {}
@classmethod
def stop_workflow(cls, deposition):
# Only stop workflow if really needed
wo = deposition.workflow_object
if wo.version != ObjectVersion.COMPLETED and \
wo.workflow.status != WorkflowStatus.COMPLETED:
# Only workflows which have been fully completed once before
# can be stopped
if deposition.has_sip():
wo.version = ObjectVersion.COMPLETED
wo.workflow.status = WorkflowStatus.COMPLETED
# Clear all drafts
deposition.drafts = {}
# Set title - FIXME: find better way to set title
sip = deposition.get_latest_sip(sealed=True)
title = sip.metadata.get('title', 'Untitled')
deposition.title = title
@classmethod
def all(cls):
""" Get a dictionary of deposition types """
from .registry import deposit_types
return deposit_types.mapping()
@classmethod
def get(cls, identifier):
try:
return cls.all()[identifier]
except KeyError:
raise InvalidDepositionType(identifier)
@classmethod
def keys(cls):
""" Get a list of deposition type names """
return cls.all().keys()
@classmethod
def values(cls):
""" Get a list of deposition type names """
return cls.all().values()
@classmethod
def get_default(cls):
""" Get a list of deposition type names """
from .registry import deposit_default_type
return deposit_default_type.get()
def __unicode__(self):
""" Return a name for this class """
return self.get_identifier()
class DepositionFile(FactoryMixin):
"""
Represents an uploaded file
Creating a normal deposition file::
uploaded_file = request.files['file']
filename = secure_filename(uploaded_file.filename)
backend = DepositionStorage(deposition_id)
d = DepositionFile(backend=backend)
d.save(uploaded_file, filename)
Creating a chunked deposition file::
uploaded_file = request.files['file']
filename = secure_filename(uploaded_file.filename)
chunk = request.files['chunk']
chunks = request.files['chunks']
backend = ChunkedDepositionStorage(deposition_id)
d = DepositionFile(id=file_id, backend=backend)
d.save(uploaded_file, filename, chunk, chunks)
if chunk == chunks:
d.save(finish=True, filename=filename)
Reading a file::
d = DepositionFile.from_json(data)
if d.is_local():
send_file(d.get_syspath())
else:
redirect(d.get_url())
d.delete()
Deleting a file::
d = DepositionFile.from_json(data)
d.delete()
"""
def __init__(self, uuid=None, backend=None):
self.uuid = uuid or str(uuid4())
self._backend = backend
self.name = ''
def __getstate__(self):
# TODO: Add content_type attributes
return dict(
id=self.uuid,
path=self.path,
name=self.name,
size=self.size,
checksum=self.checksum,
#bibdoc=self.bibdoc
)
def __setstate__(self, state):
self.uuid = state['id']
self._path = state['path']
self.name = state['name']
self.size = state['size']
self.checksum = state['checksum']
def __repr__(self):
data = self.__getstate__()
del data['path']
return json.dumps(data)
@property
def backend(self):
if not self._backend:
self._backend = Storage(None)
return self._backend
@property
def path(self):
if self._path is None:
raise Exception("No path set")
return self._path
def save(self, incoming_file, filename=None, *args, **kwargs):
self.name = secure_filename(filename or incoming_file.filename)
(self._path, self.size, self.checksum, result) = self.backend.save(
incoming_file, filename, *args, **kwargs
)
return result
def delete(self):
""" Delete the file on storage """
return self.backend.delete(self.path)
def is_local(self):
""" Determine if file is a local file """
return self.backend.is_local(self.path)
def get_url(self):
""" Get a URL for the file """
return self.backend.get_url(self.path)
def get_syspath(self):
""" Get a local system path to the file """
return self.backend.get_syspath(self.path)
class DepositionDraftCacheManager(object):
"""
Draft cache manager takes care of storing draft values in the cache prior
to a workflow being run. The data can be loaded by the prefill_draft()
workflow task.
"""
def __init__(self, user_id):
self.user_id = user_id
self.data = {}
@classmethod
def from_request(cls):
"""
Create a new draft cache from the current request.
"""
obj = cls(current_user.get_id())
# First check if we can get it via a json
data = request.get_json(silent=True)
if not data:
# If not, simply merge both query parameters and request body
# parameters.
data = request.values.to_dict()
obj.data = data
return obj
@classmethod
def get(cls):
obj = cls(current_user.get_id())
obj.load()
return obj
def save(self):
""" Save data to session """
if self.has_data():
session['deposit_prefill'] = self.data
session.modified = True
else:
self.delete()
def load(self):
""" Load data from session """
self.data = session.get('deposit_prefill', {})
def delete(self):
""" Delete data in session """
if 'deposit_prefill' in session:
del session['deposit_prefill']
session.modified = True
def has_data(self):
"""
Determine if the cache has data.
"""
return bool(self.data)
def fill_draft(self, deposition, draft_id, clear=True):
"""
Fill a draft with cached draft values
"""
draft = deposition.get_or_create_draft(draft_id)
draft.process(self.data)
if clear:
self.data = {}
self.delete()
return draft
class DepositionDraft(FactoryMixin):
"""
Represents the state of a form
"""
def __init__(self, draft_id, form_class=None, deposition_ref=None):
self.id = draft_id
self.completed = False
self.form_class = form_class
self.values = {}
self.flags = {}
self._form = None
# Back reference to the depositions
self._deposition_ref = deposition_ref
self.validate = False
def __getstate__(self):
return dict(
completed=self.completed,
values=self.values,
flags=self.flags,
validate=self.validate,
)
def __setstate__(self, state):
self.completed = state['completed']
self.form_class = None
if self._deposition_ref:
self.form_class = self._deposition_ref.type.draft_definitions.get(
self.id
)
self.values = state['values']
self.flags = state['flags']
self.validate = state.get('validate', True)
def is_completed(self):
return self.completed
def has_form(self):
return self.form_class is not None
def authorize(self, action):
if not self._deposition_ref:
return True # Not connected to deposition so authorize anything.
return self._deposition_ref.type.authorize_draft(
self._deposition_ref, self, action
)
def complete(self):
"""
Set state of draft to completed.
"""
self.completed = True
def update(self, form):
"""
Update draft values and flags with data from form.
"""
data = dict((key, value) for key, value in form.data.items()
if value is not None)
self.values = data
self.flags = form.get_flags()
def process(self, data, complete_form=False):
"""
Process, validate and store incoming form data and return response.
"""
if not self.authorize('update'):
raise ForbiddenAction('update', self)
if not self.has_form():
raise FormDoesNotExists(self.id)
# The form is initialized with form and draft data. The original
# draft_data is accessible in Field.object_data, Field.raw_data is the
# new form data and Field.data is the processed form data or the
# original draft data.
#
# Behind the scenes, Form.process() is called, which in turn calls
# Field.process_data(), Field.process_formdata() and any filters
# defined.
#
# Field.object_data contains the value of process_data(), while
# Field.data contains the value of process_formdata() and any filters
# applied.
form = self.get_form(formdata=data)
# Run form validation which will call Field.pre_validate(),
# Field.validators, Form.validate_<field>() and Field.post_validate().
# Afterwards Field.data has been validated and any errors will be
# present in Field.errors.
validated = form.validate()
# Call Form.run_processors() which in turn will call
# Field.run_processors() that allow fields to set flags (hide/show)
# and values of other fields after the entire formdata has been
# processed and validated.
validated_flags, validated_data, validated_msgs = (
form.get_flags(), form.data, form.messages
)
form.post_process(formfields=[] if complete_form else data.keys())
post_processed_flags, post_processed_data, post_processed_msgs = (
form.get_flags(), form.data, form.messages
)
# Save form values
self.update(form)
# Build result dictionary
process_field_names = None if complete_form else data.keys()
# Determine if some fields were changed during post-processing.
changed_values = dict(
(name, value) for name, value in post_processed_data.items()
if validated_data[name] != value
)
# Determine changed flags
changed_flags = dict(
(name, flags) for name, flags in post_processed_flags.items()
if validated_flags.get(name, []) != flags
)
# Determine changed messages
changed_msgs = dict(
(name, messages) for name, messages in post_processed_msgs.items()
if validated_msgs.get(name, []) != messages
or process_field_names is None or name in process_field_names
)
result = {}
if changed_msgs:
result['messages'] = changed_msgs
if changed_values:
result['values'] = changed_values
if changed_flags:
for flag in CFG_FIELD_FLAGS:
fields = [
(name, flag in field_flags)
for name, field_flags in changed_flags.items()
]
result[flag + '_on'] = map(
lambda x: x[0], filter(lambda x: x[1], fields)
)
result[flag + '_off'] = map(
lambda x: x[0], filter(lambda x: not x[1], fields)
)
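# Illustrative result, assuming 'hide' is one of CFG_FIELD_FLAGS and the
# (hypothetical) 'subtitle' field was hidden during post-processing:
#   {'values': {'subtitle': ''}, 'hide_on': ['subtitle'], 'hide_off': []}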
return form, validated, result
def get_form(self, formdata=None, load_draft=True,
validate_draft=False):
"""
Create form instance with draft data and form data if provided.
:param formdata: Incoming form data.
:param files: Files to ingest into form
:param load_draft: True to initialize form with draft data.
:param validate_draft: Set to true to validate draft data, when no form
data is provided.
"""
if not self.has_form():
raise FormDoesNotExists(self.id)
# If a field is not present in formdata, Form.process() will assume it
# is blank instead of using the draft_data value. Most of the time we
# are only submitting a single field in JSON via AJAX requests. We
# therefore reset non-submitted fields to the draft_data value with
# form.reset_field_data().
# WTForms deals with unicode - we deal with UTF8, so convert all
draft_data = unicodifier(self.values) if load_draft else {}
formdata = MultiDict(formdata or {})
form = self.form_class(
formdata=formdata, **draft_data
)
if formdata:
form.reset_field_data(exclude=formdata.keys())
# Set field flags
if load_draft and self.flags:
form.set_flags(self.flags)
# Ingest files in form
if self._deposition_ref:
form.files = self._deposition_ref.files
else:
form.files = []
if validate_draft and draft_data and formdata is None:
form.validate()
return form
@classmethod
def merge_data(cls, drafts):
"""
Merge data of multiple drafts
Duplicate keys will be overwritten without warning.
"""
data = {}
# Don't include *) disabled fields, and *) empty optional fields
func = lambda f: not f.flags.disabled and (f.flags.required or f.data)
for d in drafts:
if d.has_form():
visitor = DataExporter(
filter_func=func
)
visitor.visit(d.get_form())
data.update(visitor.data)
else:
data.update(d.values)
return data
class Deposition(object):
"""
Wraps a BibWorkflowObject
Basically an interface to work with BibWorkflowObject data attribute in an
easy manner.
"""
def __init__(self, workflow_object, type=None, user_id=None):
self.workflow_object = workflow_object
if not workflow_object:
self.files = []
self.drafts = {}
self.type = self.get_type(type)
self.title = ''
self.sips = []
self.workflow_object = BibWorkflowObject.create_object(
id_user=user_id,
)
# Ensure default data is set for all objects.
self.update()
else:
self.__setstate__(workflow_object.get_data())
self.engine = None
#
# Properties proxies to BibWorkflowObject
#
@property
def id(self):
return self.workflow_object.id
@property
def user_id(self):
return self.workflow_object.id_user
@user_id.setter
def user_id(self, value):
self.workflow_object.id_user = value
self.workflow_object.workflow.id_user = value
@property
def created(self):
return self.workflow_object.created
@property
def modified(self):
return self.workflow_object.modified
@property
def drafts_list(self):
# Needed for easy marshaling by API
return self.drafts.values()
#
# Proxy methods
#
def authorize(self, action):
"""
Determine if certain action is authorized
Delegated to deposition type to allow overwriting default behavior.
"""
return self.type.authorize(self, action)
#
# Serialization related methods
#
def marshal(self):
"""
API representation of an object.
Delegated to the DepositionType, to allow overwriting default
behaviour.
"""
return self.type.marshal_deposition(self)
def __getstate__(self):
"""
Serialize deposition state for storing in the BibWorkflowObject
"""
# The bibworkflow object id and owner are implicit, as the Deposition
# object only wraps the data attribute of a BibWorkflowObject.
# FIXME: Find better solution for setting the title.
for d in self.drafts.values():
if 'title' in d.values:
self.title = d.values['title']
break
return dict(
type=self.type.get_identifier(),
title=self.title,
files=[f.__getstate__() for f in self.files],
drafts=dict(
[(d_id, d.__getstate__()) for d_id, d in self.drafts.items()]
),
sips=[f.__getstate__() for f in self.sips],
)
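    # Illustrative sketch (not in the original source) of the serialized state
    # produced by __getstate__ above; the values shown are placeholders:
    #
    #   {
    #       "type": "upload",
    #       "title": "My dataset",
    #       "files": [...],
    #       "drafts": {"_default": {...}},
    #       "sips": [...],
    #   }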
def __setstate__(self, state):
"""
Deserialize deposition from state stored in BibWorkflowObject
"""
self.type = DepositionType.get(state['type'])
self.title = state['title']
self.files = [
DepositionFile.factory(
f_state,
uuid=f_state['id'],
backend=DepositionStorage(self.id),
)
for f_state in state['files']
]
self.drafts = dict(
[(d_id, DepositionDraft.factory(d_state, d_id,
deposition_ref=self))
for d_id, d_state in state['drafts'].items()]
)
self.sips = [
SubmissionInformationPackage.factory(s_state, uuid=s_state['id'])
for s_state in state.get('sips', [])
]
#
# Persistence related methods
#
def update(self):
"""
Update workflow object with latest data.
"""
data = self.__getstate__()
        # BibWorkflow calls get_data() before executing any workflow task and
        # calls set_data() after. Hence, unless we update the data attribute,
        # it will be overwritten.
try:
self.workflow_object.data = data
except AttributeError:
pass
self.workflow_object.set_data(data)
def reload(self):
"""
Get latest data from workflow object
"""
self.__setstate__(self.workflow_object.get_data())
def save(self):
"""
Save the state of the deposition.
Uses the __getstate__ method to make a JSON serializable
representation which, sets this as data on the workflow object
and saves it.
"""
self.update()
self.workflow_object.save()
def delete(self):
"""
Delete the current deposition
"""
if not self.authorize('delete'):
raise DepositionNotDeletable(self)
for f in self.files:
f.delete()
if self.workflow_object.id_workflow:
Workflow.delete(uuid=self.workflow_object.id_workflow)
BibWorkflowObject.query.filter_by(
id_workflow=self.workflow_object.id_workflow
).delete()
else:
db.session.delete(self.workflow_object)
db.session.commit()
#
# Workflow execution
#
def run_workflow(self, headless=False):
"""
Execute the underlying workflow
If you made modifications to the deposition you must save if before
running the workflow, using the save() method.
"""
if self.workflow_object.workflow is not None:
current_status = self.workflow_object.workflow.status
if current_status == WorkflowStatus.COMPLETED:
return self.type.api_final(self) if headless \
else self.type.render_final(self)
self.update()
self.engine = self.type.run_workflow(self)
self.reload()
status = self.engine.status
if status == WorkflowStatus.ERROR:
return self.type.api_error(self) if headless else \
self.type.render_error(self)
elif status != WorkflowStatus.COMPLETED:
return self.type.api_step(self) if headless else \
self.type.render_step(self)
elif status == WorkflowStatus.COMPLETED:
return self.type.api_completed(self) if headless else \
self.type.render_completed(self)
def reinitialize_workflow(self):
"""
Reinitialize a workflow object (i.e. prepare it for editing)
"""
if self.state != 'done':
raise InvalidDepositionAction("Action only allowed for "
"depositions in state 'done'.")
if not self.authorize('reinitialize'):
raise ForbiddenAction('reinitialize', self)
self.type.reinitialize_workflow(self)
def stop_workflow(self):
"""
Stop a running workflow object (e.g. discard changes while editing).
"""
if self.state != 'inprogress' or not self.submitted:
raise InvalidDepositionAction("Action only allowed for "
"depositions in state 'inprogress'.")
if not self.authorize('stop'):
raise ForbiddenAction('stop', self)
self.type.stop_workflow(self)
def set_render_context(self, ctx):
"""
Set rendering context - used in workflow tasks to set what is to be
rendered (either by API or UI)
"""
self.workflow_object.deposition_context = ctx
def get_render_context(self):
"""
Get rendering context - used by DepositionType.render_step/api_step
"""
return getattr(self.workflow_object, 'deposition_context', {})
@property
def state(self):
"""
Return simplified workflow state - inprogress, done or error
"""
try:
status = self.workflow_object.workflow.status
if status == WorkflowStatus.ERROR:
return "error"
elif status == WorkflowStatus.COMPLETED:
return "done"
except AttributeError:
pass
return "inprogress"
#
# Draft related methods
#
def get_draft(self, draft_id):
"""
Get draft
"""
if draft_id not in self.drafts:
raise DraftDoesNotExists(draft_id)
return self.drafts[draft_id]
def get_or_create_draft(self, draft_id):
"""
Get or create a draft for given draft_id
"""
if draft_id not in self.drafts:
if draft_id not in self.type.draft_definitions:
raise DraftDoesNotExists(draft_id)
if not self.authorize('add_draft'):
raise ForbiddenAction('add_draft', self)
self.drafts[draft_id] = DepositionDraft(
draft_id,
form_class=self.type.draft_definitions[draft_id],
deposition_ref=self,
)
return self.drafts[draft_id]
def get_default_draft_id(self):
"""
Get the default draft id for this deposition.
"""
return self.type.default_draft_id(self)
#
# Submission information package related methods
#
def get_latest_sip(self, sealed=None):
"""
Get the latest submission information package
:param sealed: Set to true to only returned latest sealed SIP. Set to
False to only return latest unsealed SIP.
"""
if len(self.sips) > 0:
for sip in reversed(self.sips):
if sealed is None:
return sip
elif sealed and sip.is_sealed():
return sip
elif not sealed and not sip.is_sealed():
return sip
return None
def create_sip(self):
"""
Create a new submission information package (SIP) with metadata from
the drafts.
"""
metadata = DepositionDraft.merge_data(self.drafts.values())
metadata['files'] = map(
lambda x: dict(path=x.path, name=os.path.splitext(x.name)[0]),
self.files
)
sip = SubmissionInformationPackage(metadata=metadata)
self.sips.append(sip)
return sip
def has_sip(self, sealed=True):
"""
Determine if deposition has a sealed submission information package.
"""
for sip in self.sips:
if (sip.is_sealed() and sealed) or \
(not sealed and not sip.is_sealed()):
return True
return False
@property
def submitted(self):
return self.has_sip()
#
# File related methods
#
def get_file(self, file_id):
for f in self.files:
if f.uuid == file_id:
return f
return None
def add_file(self, deposition_file):
if not self.authorize('add_file'):
raise ForbiddenAction('add_file', self)
for f in self.files:
if f.name == deposition_file.name:
raise FilenameAlreadyExists(deposition_file.name)
self.files.append(deposition_file)
file_uploaded.send(
self.type.get_identifier(),
deposition=self,
deposition_file=deposition_file,
)
def remove_file(self, file_id):
if not self.authorize('remove_file'):
raise ForbiddenAction('remove_file', self)
idx = None
for i, f in enumerate(self.files):
if f.uuid == file_id:
idx = i
if idx is not None:
return self.files.pop(idx)
return None
def sort_files(self, file_id_list):
"""
Order the files according the list of ids provided to this function.
"""
if not self.authorize('sort_files'):
raise ForbiddenAction('sort_files', self)
search_dict = dict(
[(f, i) for i, f in enumerate(file_id_list)]
)
def _sort_files_cmp(f_x, f_y):
i_x = search_dict.get(f_x.uuid, None)
i_y = search_dict.get(f_y.uuid, None)
if i_x == i_y:
return 0
elif i_x is None or i_x > i_y:
return 1
elif i_y is None or i_x < i_y:
return -1
self.files = sorted(self.files, _sort_files_cmp)
#
# Class methods
#
@classmethod
    def get_type(cls, type_or_id):
if type_or_id and isinstance(type_or_id, type) and \
issubclass(type_or_id, DepositionType):
return type_or_id
else:
return DepositionType.get(type_or_id) if type_or_id else \
DepositionType.get_default()
@classmethod
def create(cls, user, type=None):
"""
Create a new deposition object.
To persist the deposition, you must call save() on the created object.
If no type is defined, the default deposition type will be assigned.
@param user: The owner of the deposition
@param type: Deposition type identifier.
"""
t = cls.get_type(type)
if not t.authorize(None, 'create'):
raise ForbiddenAction('create')
# Note: it is correct to pass 'type' and not 't' below to constructor.
obj = cls(None, type=type, user_id=user.get_id())
return obj
@classmethod
def get(cls, object_id, user=None, type=None):
"""
Get the deposition with specified object id.
@param object_id: The BibWorkflowObject id.
@param user: Owner of the BibWorkflowObject
@param type: Deposition type identifier.
"""
if type:
type = DepositionType.get(type)
try:
workflow_object = BibWorkflowObject.query.filter(
BibWorkflowObject.id == object_id,
# id_user!=0 means current version, as opposed to some snapshot
# version.
BibWorkflowObject.id_user != 0,
).one()
except NoResultFound:
raise DepositionDoesNotExists(object_id)
if user and workflow_object.id_user != user.get_id():
raise DepositionDoesNotExists(object_id)
obj = cls(workflow_object)
if type and obj.type != type:
raise DepositionDoesNotExists(object_id, type)
return obj
@classmethod
def get_depositions(cls, user=None, type=None):
params = [
Workflow.module_name == 'webdeposit',
]
if user:
params.append(BibWorkflowObject.id_user == user.get_id())
else:
params.append(BibWorkflowObject.id_user != 0)
if type:
params.append(Workflow.name == type.get_identifier())
objects = BibWorkflowObject.query.join("workflow").options(
db.contains_eager('workflow')).filter(*params).order_by(
BibWorkflowObject.modified.desc()).all()
def _create_obj(o):
try:
obj = cls(o)
except InvalidDepositionType as err:
current_app.logger.exception(err)
return None
if type is None or obj.type == type:
return obj
return None
return filter(lambda x: x is not None, map(_create_obj, objects))
class SubmissionInformationPackage(FactoryMixin):
"""Submission information package (SIP).
:param uuid: Unique identifier for this SIP
:param metadata: Metadata in JSON for this submission information package
    :param package: Full generated metadata for this package (i.e. normally
        MARC for records, but could be anything).
:param timestamp: UTC timestamp in ISO8601 format of when package was
sealed.
:param agents: List of agents for this package (e.g. creator, ...)
:param task_ids: List of task ids submitted to ingest this package (may be
appended to after SIP has been sealed).
"""
    def __init__(self, uuid=None, metadata=None):
        # Avoid a mutable default argument; default to an empty dict instead.
        self.uuid = uuid or str(uuid4())
        self.metadata = metadata if metadata is not None else {}
self.package = ""
self.timestamp = None
self.agents = []
self.task_ids = []
def __getstate__(self):
return dict(
id=self.uuid,
metadata=self.metadata,
package=self.package,
timestamp=self.timestamp,
task_ids=self.task_ids,
agents=[a.__getstate__() for a in self.agents],
)
def __setstate__(self, state):
self.uuid = state['id']
self._metadata = state.get('metadata', {})
self.package = state.get('package', None)
self.timestamp = state.get('timestamp', None)
self.agents = [Agent.factory(a_state)
for a_state in state.get('agents', [])]
self.task_ids = state.get('task_ids', [])
def seal(self):
self.timestamp = datetime.now(tzutc()).isoformat()
def is_sealed(self):
return self.timestamp is not None
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, value):
import datetime
import json
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (datetime.datetime, datetime.date)):
encoded_object = obj.isoformat()
else:
encoded_object = json.JSONEncoder.default(self, obj)
return encoded_object
data = json.dumps(value, cls=DateTimeEncoder)
self._metadata = json.loads(data)
class Agent(FactoryMixin):
"""Agent."""
def __init__(self, role=None, from_request_context=False):
self.role = role
self.user_id = None
self.ip_address = None
self.email_address = None
if from_request_context:
self.from_request_context()
def __getstate__(self):
return dict(
role=self.role,
user_id=self.user_id,
ip_address=self.ip_address,
email_address=self.email_address,
)
def __setstate__(self, state):
self.role = state['role']
self.user_id = state['user_id']
self.ip_address = state['ip_address']
self.email_address = state['email_address']
def from_request_context(self):
from flask import request
from invenio.ext.login import current_user
self.ip_address = request.remote_addr
self.user_id = current_user.get_id()
self.email_address = current_user.info.get('email', '')
|
kasioumis/invenio
|
invenio/modules/deposit/models.py
|
Python
|
gpl-2.0
| 47,359
|
[
"VisIt"
] |
abc61528e22424f9b61a01984ffe99debd9571547e72e5ba0a5267d1324c94e2
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see starthinker/scripts for possible source):
# - Command: "python starthinker_ui/manage.py airflow"
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
    2. Add an entry called "starthinker_user" and fill in the following fields. In the last step, paste the JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
    2. Add an entry called "starthinker_service" and fill in the following fields. In the last step, paste the JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
CM360 Report To Storage
Move existing CM report into a Storage bucket.
- Specify an account id.
- Specify either report name or report id to move a report.
- The most recent file will be moved to the bucket.
- Schema is pulled from the official CM specification.
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
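# Illustrative sketch only (not part of the generated recipe): the docstring
# above says user/service credentials may be placed directly in the recipe
# instead of Airflow connections. The keys follow that description and the
# JSON string is a placeholder, not real credentials:
#
#   RECIPE.setdefault('setup', {})['auth'] = {
#       'user': '{"client_id": "...", "refresh_token": "..."}',
#   }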
from starthinker.airflow.factory import DAG_Factory
INPUTS = {
'auth_read':'user', # Credentials used for reading data.
'auth_write':'service', # Credentials used for writing data.
'account':'',
'report_id':'',
'report_name':'',
'bucket':'',
'path':'CM_Report',
}
RECIPE = {
'tasks':[
{
'dcm':{
'auth':{'field':{'name':'auth_read','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}},
'report':{
'account':{'field':{'name':'account','kind':'integer','order':2,'default':''}},
'report_id':{'field':{'name':'report_id','kind':'integer','order':3,'default':''}},
'name':{'field':{'name':'report_name','kind':'string','order':4,'default':''}}
},
'out':{
'storage':{
'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'bucket':{'field':{'name':'bucket','kind':'string','order':5,'default':''}},
'path':{'field':{'name':'path','kind':'string','order':6,'default':'CM_Report'}}
}
}
}
}
]
}
dag_maker = DAG_Factory('dcm_to_storage', RECIPE, INPUTS)
dag = dag_maker.generate()
if __name__ == "__main__":
dag_maker.print_commandline()
|
google/starthinker
|
dags/dcm_to_storage_dag.py
|
Python
|
apache-2.0
| 4,669
|
[
"VisIt"
] |
64b936af397dbaaaf35f82425f7d21bc72f094901c57771797672535a1cc2fc9
|
#!/usr/bin/env python
""" update local cfg
"""
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgFile] ... DB ...' % Script.scriptName,
'Arguments:',
' setup: Name of the build setup (mandatory)'] ) )
Script.parseCommandLine()
args = Script.getPositionalArgs()
# Setup the DFC
#
# DataManagement
# {
# Production
# {
# Services
# {
# FileCatalog
# {
# DirectoryManager = DirectoryClosure
# FileManager = FileManagerPS
# SecurityManager = FullSecurityManager
# }
# }
# Databases
# {
# FileCatalogDB
# {
# DBName = FileCatalogDB
# }
# }
# }
# }
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
csAPI = CSAPI()
for sct in ['Systems/DataManagement/Production/Services',
'Systems/DataManagement/Production/Services/FileCatalog' ]:
res = csAPI.createSection( sct )
if not res['OK']:
print res['Message']
exit( 1 )
csAPI.setOption( 'Systems/DataManagement/Production/Services/FileCatalog/DirectoryManager', 'DirectoryClosure' )
csAPI.setOption( 'Systems/DataManagement/Production/Services/FileCatalog/FileManager', 'FileManagerPs' )
csAPI.setOption( 'Systems/DataManagement/Production/Services/FileCatalog/OldSecurityManager', 'DirectorySecurityManagerWithDelete' )
csAPI.setOption( 'Systems/DataManagement/Production/Services/FileCatalog/SecurityManager', 'PolicyBasedSecurityManager' )
csAPI.setOption( 'Systems/DataManagement/Production/Services/FileCatalog/SecurityPolicy', 'DIRAC/DataManagementSystem/DB/FileCatalogComponents/SecurityPolicies/VOMSPolicy' )
csAPI.setOption( 'Systems/DataManagement/Production/Services/FileCatalog/UniqueGUID', True )
csAPI.commit()
|
Andrew-McNab-UK/DIRAC
|
tests/Jenkins/dirac-cfg-update-services.py
|
Python
|
gpl-3.0
| 1,938
|
[
"DIRAC"
] |
1e661b6cac62ecbd030dbcfe34533d29f4b6edd959402a1dae8801a4c16aa507
|
# Copyright (c) 2021, Alliance for Open Media. All rights reserved
#
# This source code is subject to the terms of the BSD 2 Clause License and
# the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
# was not distributed with this source code in the LICENSE file, you can
# obtain it at www.aomedia.org/license/software. If the Alliance for Open
# Media Patent License 1.0 was not distributed with this source code in the
# PATENTS file, you can obtain it at www.aomedia.org/license/patent.
#
from __future__ import print_function
import sys
import os
import operator
import pickle  # used by save_object() / load_object() below
from pycparser import c_parser, c_ast, parse_file
from math import *
from inspect import currentframe, getframeinfo
from collections import deque
def debug_print(frameinfo):
print('******** ERROR:', frameinfo.filename, frameinfo.lineno, '********')
class StructItem():
def __init__(self,
typedef_name=None,
struct_name=None,
struct_node=None,
is_union=False):
self.typedef_name = typedef_name
self.struct_name = struct_name
self.struct_node = struct_node
self.is_union = is_union
self.child_decl_map = None
def __str__(self):
return str(self.typedef_name) + ' ' + str(self.struct_name) + ' ' + str(
self.is_union)
def compute_child_decl_map(self, struct_info):
self.child_decl_map = {}
if self.struct_node != None and self.struct_node.decls != None:
for decl_node in self.struct_node.decls:
if decl_node.name == None:
for sub_decl_node in decl_node.type.decls:
sub_decl_status = parse_decl_node(struct_info, sub_decl_node)
self.child_decl_map[sub_decl_node.name] = sub_decl_status
else:
decl_status = parse_decl_node(struct_info, decl_node)
self.child_decl_map[decl_status.name] = decl_status
def get_child_decl_status(self, decl_name):
if self.child_decl_map == None:
debug_print(getframeinfo(currentframe()))
print('child_decl_map is None')
return None
if decl_name not in self.child_decl_map:
debug_print(getframeinfo(currentframe()))
print(decl_name, 'does not exist ')
return None
return self.child_decl_map[decl_name]
class StructInfo():
def __init__(self):
self.struct_name_dic = {}
self.typedef_name_dic = {}
self.enum_value_dic = {} # enum value -> enum_node
self.enum_name_dic = {} # enum name -> enum_node
self.struct_item_list = []
def get_struct_by_typedef_name(self, typedef_name):
if typedef_name in self.typedef_name_dic:
return self.typedef_name_dic[typedef_name]
else:
return None
def get_struct_by_struct_name(self, struct_name):
if struct_name in self.struct_name_dic:
return self.struct_name_dic[struct_name]
else:
debug_print(getframeinfo(currentframe()))
      print("Can't find", struct_name)
return None
def update_struct_item_list(self):
# Collect all struct_items from struct_name_dic and typedef_name_dic
# Compute child_decl_map for each struct item.
for struct_name in self.struct_name_dic.keys():
struct_item = self.struct_name_dic[struct_name]
struct_item.compute_child_decl_map(self)
self.struct_item_list.append(struct_item)
for typedef_name in self.typedef_name_dic.keys():
struct_item = self.typedef_name_dic[typedef_name]
if struct_item.struct_name not in self.struct_name_dic:
struct_item.compute_child_decl_map(self)
self.struct_item_list.append(struct_item)
def update_enum(self, enum_node):
if enum_node.name != None:
self.enum_name_dic[enum_node.name] = enum_node
if enum_node.values != None:
enumerator_list = enum_node.values.enumerators
for enumerator in enumerator_list:
self.enum_value_dic[enumerator.name] = enum_node
def update(self,
typedef_name=None,
struct_name=None,
struct_node=None,
is_union=False):
"""T: typedef_name S: struct_name N: struct_node
T S N
case 1: o o o
typedef struct P {
int u;
} K;
T S N
case 2: o o x
typedef struct P K;
T S N
case 3: x o o
struct P {
int u;
};
T S N
case 4: o x o
typedef struct {
int u;
} K;
"""
struct_item = None
# Check whether struct_name or typedef_name is already in the dictionary
if struct_name in self.struct_name_dic:
struct_item = self.struct_name_dic[struct_name]
if typedef_name in self.typedef_name_dic:
struct_item = self.typedef_name_dic[typedef_name]
if struct_item == None:
struct_item = StructItem(typedef_name, struct_name, struct_node, is_union)
if struct_node.decls != None:
struct_item.struct_node = struct_node
if struct_name != None:
self.struct_name_dic[struct_name] = struct_item
if typedef_name != None:
self.typedef_name_dic[typedef_name] = struct_item
class StructDefVisitor(c_ast.NodeVisitor):
def __init__(self):
self.struct_info = StructInfo()
def visit_Struct(self, node):
if node.decls != None:
self.struct_info.update(None, node.name, node)
self.generic_visit(node)
def visit_Union(self, node):
if node.decls != None:
self.struct_info.update(None, node.name, node, True)
self.generic_visit(node)
def visit_Enum(self, node):
self.struct_info.update_enum(node)
self.generic_visit(node)
def visit_Typedef(self, node):
if node.type.__class__.__name__ == 'TypeDecl':
typedecl = node.type
if typedecl.type.__class__.__name__ == 'Struct':
struct_node = typedecl.type
typedef_name = node.name
struct_name = struct_node.name
self.struct_info.update(typedef_name, struct_name, struct_node)
elif typedecl.type.__class__.__name__ == 'Union':
union_node = typedecl.type
typedef_name = node.name
union_name = union_node.name
self.struct_info.update(typedef_name, union_name, union_node, True)
# TODO(angiebird): Do we need to deal with enum here?
self.generic_visit(node)
def build_struct_info(ast):
v = StructDefVisitor()
v.visit(ast)
struct_info = v.struct_info
struct_info.update_struct_item_list()
return v.struct_info
class DeclStatus():
def __init__(self, name, struct_item=None, is_ptr_decl=False):
self.name = name
self.struct_item = struct_item
self.is_ptr_decl = is_ptr_decl
def get_child_decl_status(self, decl_name):
if self.struct_item != None:
return self.struct_item.get_child_decl_status(decl_name)
else:
      #TODO(angiebird): 2. Investigate the situation when a struct's definition can't be found.
return None
def __str__(self):
return str(self.struct_item) + ' ' + str(self.name) + ' ' + str(
self.is_ptr_decl)
def peel_ptr_decl(decl_type_node):
""" Remove PtrDecl and ArrayDecl layer """
is_ptr_decl = False
peeled_decl_type_node = decl_type_node
while peeled_decl_type_node.__class__.__name__ == 'PtrDecl' or peeled_decl_type_node.__class__.__name__ == 'ArrayDecl':
is_ptr_decl = True
peeled_decl_type_node = peeled_decl_type_node.type
return is_ptr_decl, peeled_decl_type_node
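# Illustrative note (not in the original file): for a declaration such as
# `int *buf[4]`, the decl type chain is ArrayDecl -> PtrDecl -> TypeDecl, so
# peel_ptr_decl() returns (True, <TypeDecl of 'int'>).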
def parse_peeled_decl_type_node(struct_info, node):
struct_item = None
if node.__class__.__name__ == 'TypeDecl':
if node.type.__class__.__name__ == 'IdentifierType':
identifier_type_node = node.type
typedef_name = identifier_type_node.names[0]
struct_item = struct_info.get_struct_by_typedef_name(typedef_name)
elif node.type.__class__.__name__ == 'Struct':
struct_node = node.type
if struct_node.name != None:
struct_item = struct_info.get_struct_by_struct_name(struct_node.name)
else:
struct_item = StructItem(None, None, struct_node, False)
struct_item.compute_child_decl_map(struct_info)
elif node.type.__class__.__name__ == 'Union':
# TODO(angiebird): Special treatment for Union?
struct_node = node.type
if struct_node.name != None:
struct_item = struct_info.get_struct_by_struct_name(struct_node.name)
else:
struct_item = StructItem(None, None, struct_node, True)
struct_item.compute_child_decl_map(struct_info)
elif node.type.__class__.__name__ == 'Enum':
      # TODO(angiebird): Special treatment for Enum?
struct_node = node.type
struct_item = None
else:
print('Unrecognized peeled_decl_type_node.type',
node.type.__class__.__name__)
else:
# debug_print(getframeinfo(currentframe()))
# print(node.__class__.__name__)
#TODO(angiebird): Do we need to take care of this part?
pass
return struct_item
def parse_decl_node(struct_info, decl_node):
# struct_item is None if this decl_node is not a struct_item
decl_node_name = decl_node.name
decl_type_node = decl_node.type
is_ptr_decl, peeled_decl_type_node = peel_ptr_decl(decl_type_node)
struct_item = parse_peeled_decl_type_node(struct_info, peeled_decl_type_node)
return DeclStatus(decl_node_name, struct_item, is_ptr_decl)
def get_lvalue_lead(lvalue_node):
"""return '&' or '*' of lvalue if available"""
if lvalue_node.__class__.__name__ == 'UnaryOp' and lvalue_node.op == '&':
return '&'
elif lvalue_node.__class__.__name__ == 'UnaryOp' and lvalue_node.op == '*':
return '*'
return None
def parse_lvalue(lvalue_node):
"""get id_chain from lvalue"""
id_chain = parse_lvalue_recursive(lvalue_node, [])
return id_chain
def parse_lvalue_recursive(lvalue_node, id_chain):
"""cpi->rd->u -> (cpi->rd)->u"""
if lvalue_node.__class__.__name__ == 'ID':
id_chain.append(lvalue_node.name)
id_chain.reverse()
return id_chain
elif lvalue_node.__class__.__name__ == 'StructRef':
id_chain.append(lvalue_node.field.name)
return parse_lvalue_recursive(lvalue_node.name, id_chain)
elif lvalue_node.__class__.__name__ == 'ArrayRef':
return parse_lvalue_recursive(lvalue_node.name, id_chain)
elif lvalue_node.__class__.__name__ == 'UnaryOp' and lvalue_node.op == '&':
return parse_lvalue_recursive(lvalue_node.expr, id_chain)
elif lvalue_node.__class__.__name__ == 'UnaryOp' and lvalue_node.op == '*':
return parse_lvalue_recursive(lvalue_node.expr, id_chain)
else:
return None
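# Illustrative note (not in the original file): for the lvalue `cpi->rd->u`
# the AST is StructRef(field='u', name=StructRef(field='rd', name=ID('cpi'))),
# so parse_lvalue() returns the id_chain ['cpi', 'rd', 'u'].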
class FuncDefVisitor(c_ast.NodeVisitor):
func_dictionary = {}
def visit_FuncDef(self, node):
func_name = node.decl.name
self.func_dictionary[func_name] = node
def build_func_dictionary(ast):
v = FuncDefVisitor()
v.visit(ast)
return v.func_dictionary
def get_func_start_coord(func_node):
return func_node.coord
def find_end_node(node):
node_list = []
for c in node:
node_list.append(c)
if len(node_list) == 0:
return node
else:
return find_end_node(node_list[-1])
def get_func_end_coord(func_node):
return find_end_node(func_node).coord
def get_func_size(func_node):
start_coord = get_func_start_coord(func_node)
end_coord = get_func_end_coord(func_node)
if start_coord.file == end_coord.file:
return end_coord.line - start_coord.line + 1
else:
return None
def save_object(obj, filename):
with open(filename, 'wb') as obj_fp:
pickle.dump(obj, obj_fp, protocol=-1)
def load_object(filename):
obj = None
with open(filename, 'rb') as obj_fp:
obj = pickle.load(obj_fp)
return obj
def get_av1_ast(gen_ast=False):
# TODO(angiebird): Generalize this path
c_filename = './av1_pp.c'
print('generate ast')
ast = parse_file(c_filename)
#save_object(ast, ast_file)
print('finished generate ast')
return ast
def get_func_param_id_map(func_def_node):
param_id_map = {}
func_decl = func_def_node.decl.type
param_list = func_decl.args.params
for decl in param_list:
param_id_map[decl.name] = decl
return param_id_map
class IDTreeStack():
def __init__(self, global_id_tree):
self.stack = deque()
self.global_id_tree = global_id_tree
def add_link_node(self, node, link_id_chain):
link_node = self.add_id_node(link_id_chain)
node.link_node = link_node
node.link_id_chain = link_id_chain
def push_id_tree(self, id_tree=None):
if id_tree == None:
id_tree = IDStatusNode()
self.stack.append(id_tree)
return id_tree
def pop_id_tree(self):
return self.stack.pop()
def add_id_seed_node(self, id_seed, decl_status):
return self.stack[-1].add_child(id_seed, decl_status)
def get_id_seed_node(self, id_seed):
idx = len(self.stack) - 1
while idx >= 0:
id_node = self.stack[idx].get_child(id_seed)
if id_node != None:
return id_node
idx -= 1
id_node = self.global_id_tree.get_child(id_seed)
if id_node != None:
return id_node
return None
def add_id_node(self, id_chain):
id_seed = id_chain[0]
id_seed_node = self.get_id_seed_node(id_seed)
if id_seed_node == None:
return None
if len(id_chain) == 1:
return id_seed_node
return id_seed_node.add_descendant(id_chain[1:])
def get_id_node(self, id_chain):
id_seed = id_chain[0]
id_seed_node = self.get_id_seed_node(id_seed)
if id_seed_node == None:
return None
if len(id_chain) == 1:
return id_seed_node
return id_seed_node.get_descendant(id_chain[1:])
def top(self):
return self.stack[-1]
class IDStatusNode():
def __init__(self, name=None, root=None):
if root is None:
self.root = self
else:
self.root = root
self.name = name
self.parent = None
self.children = {}
self.assign = False
self.last_assign_coord = None
self.refer = False
self.last_refer_coord = None
self.decl_status = None
self.link_id_chain = None
self.link_node = None
self.visit = False
def set_link_id_chain(self, link_id_chain):
self.set_assign(False)
self.link_id_chain = link_id_chain
self.link_node = self.root.get_descendant(link_id_chain)
def set_link_node(self, link_node):
self.set_assign(False)
self.link_id_chain = ['*']
self.link_node = link_node
def get_link_id_chain(self):
return self.link_id_chain
def get_concrete_node(self):
if self.visit == True:
# return None when there is a loop
return None
self.visit = True
if self.link_node == None:
self.visit = False
return self
else:
concrete_node = self.link_node.get_concrete_node()
self.visit = False
if concrete_node == None:
return self
return concrete_node
def set_assign(self, assign, coord=None):
concrete_node = self.get_concrete_node()
concrete_node.assign = assign
concrete_node.last_assign_coord = coord
def get_assign(self):
concrete_node = self.get_concrete_node()
return concrete_node.assign
def set_refer(self, refer, coord=None):
concrete_node = self.get_concrete_node()
concrete_node.refer = refer
concrete_node.last_refer_coord = coord
def get_refer(self):
concrete_node = self.get_concrete_node()
return concrete_node.refer
def set_parent(self, parent):
concrete_node = self.get_concrete_node()
concrete_node.parent = parent
def add_child(self, name, decl_status=None):
concrete_node = self.get_concrete_node()
if name not in concrete_node.children:
child_id_node = IDStatusNode(name, concrete_node.root)
concrete_node.children[name] = child_id_node
if decl_status == None:
# Check if the child decl_status can be inferred from its parent's
# decl_status
if self.decl_status != None:
decl_status = self.decl_status.get_child_decl_status(name)
child_id_node.set_decl_status(decl_status)
return concrete_node.children[name]
def get_child(self, name):
concrete_node = self.get_concrete_node()
if name in concrete_node.children:
return concrete_node.children[name]
else:
return None
def add_descendant(self, id_chain):
current_node = self.get_concrete_node()
for name in id_chain:
current_node.add_child(name)
parent_node = current_node
current_node = current_node.get_child(name)
current_node.set_parent(parent_node)
return current_node
def get_descendant(self, id_chain):
current_node = self.get_concrete_node()
for name in id_chain:
current_node = current_node.get_child(name)
if current_node == None:
return None
return current_node
def get_children(self):
current_node = self.get_concrete_node()
return current_node.children
def set_decl_status(self, decl_status):
current_node = self.get_concrete_node()
current_node.decl_status = decl_status
def get_decl_status(self):
current_node = self.get_concrete_node()
return current_node.decl_status
def __str__(self):
if self.link_id_chain is None:
return str(self.name) + ' a: ' + str(int(self.assign)) + ' r: ' + str(
int(self.refer))
else:
return str(self.name) + ' -> ' + ' '.join(self.link_id_chain)
def collect_assign_refer_status(self,
id_chain=None,
assign_ls=None,
refer_ls=None):
if id_chain == None:
id_chain = []
if assign_ls == None:
assign_ls = []
if refer_ls == None:
refer_ls = []
id_chain.append(self.name)
if self.assign:
info_str = ' '.join([
' '.join(id_chain[1:]), 'a:',
str(int(self.assign)), 'r:',
str(int(self.refer)),
str(self.last_assign_coord)
])
assign_ls.append(info_str)
if self.refer:
info_str = ' '.join([
' '.join(id_chain[1:]), 'a:',
str(int(self.assign)), 'r:',
str(int(self.refer)),
str(self.last_refer_coord)
])
refer_ls.append(info_str)
for c in self.children:
self.children[c].collect_assign_refer_status(id_chain, assign_ls,
refer_ls)
id_chain.pop()
return assign_ls, refer_ls
def show(self):
assign_ls, refer_ls = self.collect_assign_refer_status()
print('---- assign ----')
for item in assign_ls:
print(item)
print('---- refer ----')
for item in refer_ls:
print(item)
class FuncInOutVisitor(c_ast.NodeVisitor):
def __init__(self,
func_def_node,
struct_info,
func_dictionary,
keep_body_id_tree=True,
call_param_map=None,
global_id_tree=None,
func_history=None,
unknown=None):
self.func_dictionary = func_dictionary
self.struct_info = struct_info
self.param_id_map = get_func_param_id_map(func_def_node)
self.parent_node = None
self.global_id_tree = global_id_tree
self.body_id_tree = None
self.keep_body_id_tree = keep_body_id_tree
if func_history == None:
self.func_history = {}
else:
self.func_history = func_history
if unknown == None:
self.unknown = []
else:
self.unknown = unknown
self.id_tree_stack = IDTreeStack(global_id_tree)
self.id_tree_stack.push_id_tree()
#TODO move this part into a function
for param in self.param_id_map:
decl_node = self.param_id_map[param]
decl_status = parse_decl_node(self.struct_info, decl_node)
descendant = self.id_tree_stack.add_id_seed_node(decl_status.name,
decl_status)
if call_param_map is not None and param in call_param_map:
# This is a function call.
# Map the input parameter to the caller's nodes
# TODO(angiebird): Can we use add_link_node here?
descendant.set_link_node(call_param_map[param])
def get_id_tree_stack(self):
return self.id_tree_stack
def generic_visit(self, node):
prev_parent = self.parent_node
self.parent_node = node
for c in node:
self.visit(c)
self.parent_node = prev_parent
# TODO rename
def add_new_id_tree(self, node):
self.id_tree_stack.push_id_tree()
self.generic_visit(node)
id_tree = self.id_tree_stack.pop_id_tree()
if self.parent_node == None and self.keep_body_id_tree == True:
# this is function body
self.body_id_tree = id_tree
def visit_For(self, node):
self.add_new_id_tree(node)
def visit_Compound(self, node):
self.add_new_id_tree(node)
def visit_Decl(self, node):
if node.type.__class__.__name__ != 'FuncDecl':
decl_status = parse_decl_node(self.struct_info, node)
descendant = self.id_tree_stack.add_id_seed_node(decl_status.name,
decl_status)
if node.init is not None:
init_id_chain = self.process_lvalue(node.init)
if init_id_chain != None:
if decl_status.struct_item is None:
init_descendant = self.id_tree_stack.add_id_node(init_id_chain)
if init_descendant != None:
init_descendant.set_refer(True, node.coord)
else:
self.unknown.append(node)
descendant.set_assign(True, node.coord)
else:
self.id_tree_stack.add_link_node(descendant, init_id_chain)
else:
self.unknown.append(node)
else:
descendant.set_assign(True, node.coord)
self.generic_visit(node)
def is_lvalue(self, node):
if self.parent_node is None:
      # TODO(angiebird): Does every lvalue have parent_node != None?
return False
if self.parent_node.__class__.__name__ == 'StructRef':
return False
if self.parent_node.__class__.__name__ == 'ArrayRef' and node == self.parent_node.name:
# if node == self.parent_node.subscript, the node could be lvalue
return False
if self.parent_node.__class__.__name__ == 'UnaryOp' and self.parent_node.op == '&':
return False
if self.parent_node.__class__.__name__ == 'UnaryOp' and self.parent_node.op == '*':
return False
return True
def process_lvalue(self, node):
id_chain = parse_lvalue(node)
if id_chain == None:
return id_chain
elif id_chain[0] in self.struct_info.enum_value_dic:
return None
else:
return id_chain
def process_possible_lvalue(self, node):
if self.is_lvalue(node):
id_chain = self.process_lvalue(node)
lead_char = get_lvalue_lead(node)
# make sure the id is not an enum value
if id_chain == None:
self.unknown.append(node)
return
descendant = self.id_tree_stack.add_id_node(id_chain)
if descendant == None:
self.unknown.append(node)
return
decl_status = descendant.get_decl_status()
if decl_status == None:
descendant.set_assign(True, node.coord)
descendant.set_refer(True, node.coord)
self.unknown.append(node)
return
if self.parent_node.__class__.__name__ == 'Assignment':
if node is self.parent_node.lvalue:
if decl_status.struct_item != None:
if len(id_chain) > 1:
descendant.set_assign(True, node.coord)
elif len(id_chain) == 1:
if lead_char == '*':
descendant.set_assign(True, node.coord)
else:
right_id_chain = self.process_lvalue(self.parent_node.rvalue)
if right_id_chain != None:
self.id_tree_stack.add_link_node(descendant, right_id_chain)
else:
#TODO(angiebird): 1.Find a better way to deal with this case.
descendant.set_assign(True, node.coord)
else:
debug_print(getframeinfo(currentframe()))
else:
descendant.set_assign(True, node.coord)
elif node is self.parent_node.rvalue:
if decl_status.struct_item is None:
descendant.set_refer(True, node.coord)
if lead_char == '&':
descendant.set_assign(True, node.coord)
else:
left_id_chain = self.process_lvalue(self.parent_node.lvalue)
left_lead_char = get_lvalue_lead(self.parent_node.lvalue)
if left_id_chain != None:
if len(left_id_chain) > 1:
descendant.set_refer(True, node.coord)
elif len(left_id_chain) == 1:
if left_lead_char == '*':
descendant.set_refer(True, node.coord)
else:
#TODO(angiebird): Check whether the other node is linked to this node.
pass
else:
self.unknown.append(self.parent_node.lvalue)
debug_print(getframeinfo(currentframe()))
else:
self.unknown.append(self.parent_node.lvalue)
debug_print(getframeinfo(currentframe()))
else:
debug_print(getframeinfo(currentframe()))
elif self.parent_node.__class__.__name__ == 'UnaryOp':
# TODO(angiebird): Consider +=, *=, -=, /= etc
if self.parent_node.op == '--' or self.parent_node.op == '++' or\
self.parent_node.op == 'p--' or self.parent_node.op == 'p++':
descendant.set_assign(True, node.coord)
descendant.set_refer(True, node.coord)
else:
descendant.set_refer(True, node.coord)
elif self.parent_node.__class__.__name__ == 'Decl':
#The logic is at visit_Decl
pass
elif self.parent_node.__class__.__name__ == 'ExprList':
#The logic is at visit_FuncCall
pass
else:
descendant.set_refer(True, node.coord)
def visit_ID(self, node):
# If the parent is a FuncCall, this ID is a function name.
if self.parent_node.__class__.__name__ != 'FuncCall':
self.process_possible_lvalue(node)
self.generic_visit(node)
def visit_StructRef(self, node):
self.process_possible_lvalue(node)
self.generic_visit(node)
def visit_ArrayRef(self, node):
self.process_possible_lvalue(node)
self.generic_visit(node)
def visit_UnaryOp(self, node):
if node.op == '&' or node.op == '*':
self.process_possible_lvalue(node)
self.generic_visit(node)
def visit_FuncCall(self, node):
if node.name.__class__.__name__ == 'ID':
if node.name.name in self.func_dictionary:
if node.name.name not in self.func_history:
self.func_history[node.name.name] = True
func_def_node = self.func_dictionary[node.name.name]
call_param_map = self.process_func_call(node, func_def_node)
visitor = FuncInOutVisitor(func_def_node, self.struct_info,
self.func_dictionary, False,
call_param_map, self.global_id_tree,
self.func_history, self.unknown)
visitor.visit(func_def_node.body)
else:
self.unknown.append(node)
self.generic_visit(node)
def process_func_call(self, func_call_node, func_def_node):
# set up a refer/assign for func parameters
# return call_param_map
call_param_ls = func_call_node.args.exprs
call_param_map = {}
func_decl = func_def_node.decl.type
decl_param_ls = func_decl.args.params
for param_node, decl_node in zip(call_param_ls, decl_param_ls):
id_chain = self.process_lvalue(param_node)
if id_chain != None:
descendant = self.id_tree_stack.add_id_node(id_chain)
if descendant == None:
self.unknown.append(param_node)
else:
decl_status = descendant.get_decl_status()
if decl_status != None:
if decl_status.struct_item == None:
if decl_status.is_ptr_decl == True:
descendant.set_assign(True, param_node.coord)
descendant.set_refer(True, param_node.coord)
else:
descendant.set_refer(True, param_node.coord)
else:
call_param_map[decl_node.name] = descendant
else:
self.unknown.append(param_node)
else:
self.unknown.append(param_node)
return call_param_map
def build_global_id_tree(ast, struct_info):
global_id_tree = IDStatusNode()
for node in ast.ext:
if node.__class__.__name__ == 'Decl':
# id tree is for tracking assign/refer status
# we don't care about function id because they can't be changed
if node.type.__class__.__name__ != 'FuncDecl':
decl_status = parse_decl_node(struct_info, node)
descendant = global_id_tree.add_child(decl_status.name, decl_status)
return global_id_tree
class FuncAnalyzer():
def __init__(self):
self.ast = get_av1_ast()
self.struct_info = build_struct_info(self.ast)
self.func_dictionary = build_func_dictionary(self.ast)
self.global_id_tree = build_global_id_tree(self.ast, self.struct_info)
def analyze(self, func_name):
if func_name in self.func_dictionary:
func_def_node = self.func_dictionary[func_name]
visitor = FuncInOutVisitor(func_def_node, self.struct_info,
self.func_dictionary, True, None,
self.global_id_tree)
visitor.visit(func_def_node.body)
root = visitor.get_id_tree_stack()
root.top().show()
else:
print(func_name, "doesn't exist")
if __name__ == '__main__':
fa = FuncAnalyzer()
fa.analyze('tpl_get_satd_cost')
pass
|
AlienCowEatCake/ImageViewer
|
src/ThirdParty/aom/aom-v3.2.0/tools/auto_refactor/auto_refactor.py
|
Python
|
gpl-3.0
| 29,568
|
[
"VisIt"
] |
fbc943e39d45e8bdc890c7608785b0dddb9d9819f5f0187e9f3f094b44276c6e
|
#
# QAPI types generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def generate_fwd_struct(name, members):
return mcgen('''
typedef struct %(name)s %(name)s;
typedef struct %(name)sList
{
%(name)s *value;
struct %(name)sList *next;
} %(name)sList;
''',
name=name)
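# Illustrative note (not in the original file): generate_fwd_struct('Foo', ...)
# expands the template above to roughly:
#
#   typedef struct Foo Foo;
#
#   typedef struct FooList
#   {
#       Foo *value;
#       struct FooList *next;
#   } FooList;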
def generate_struct(structname, fieldname, members):
ret = mcgen('''
struct %(name)s
{
''',
name=structname)
for argname, argentry, optional, structured in parse_args(members):
if optional:
ret += mcgen('''
bool has_%(c_name)s;
''',
c_name=c_var(argname))
if structured:
push_indent()
ret += generate_struct("", argname, argentry)
pop_indent()
else:
ret += mcgen('''
%(c_type)s %(c_name)s;
''',
c_type=c_type(argentry), c_name=c_var(argname))
if len(fieldname):
fieldname = " " + fieldname
ret += mcgen('''
}%(field)s;
''',
field=fieldname)
return ret
def generate_enum_lookup(name, values):
ret = mcgen('''
const char *%(name)s_lookup[] = {
''',
name=name)
i = 0
for value in values:
ret += mcgen('''
"%(value)s",
''',
value=value.lower())
ret += mcgen('''
NULL,
};
''')
return ret
def generate_enum(name, values):
lookup_decl = mcgen('''
extern const char *%(name)s_lookup[];
''',
name=name)
enum_decl = mcgen('''
typedef enum %(name)s
{
''',
name=name)
# append automatically generated _MAX value
enum_values = values + [ 'MAX' ]
i = 0
for value in enum_values:
enum_decl += mcgen('''
%(abbrev)s_%(value)s = %(i)d,
''',
abbrev=de_camel_case(name).upper(),
value=c_var(value).upper(),
i=i)
i += 1
enum_decl += mcgen('''
} %(name)s;
''',
name=name)
return lookup_decl + enum_decl
def generate_union(name, typeinfo):
ret = mcgen('''
struct %(name)s
{
%(name)sKind kind;
union {
''',
name=name)
for key in typeinfo:
ret += mcgen('''
%(c_type)s %(c_name)s;
''',
c_type=c_type(typeinfo[key]),
c_name=c_var(key))
ret += mcgen('''
};
};
''')
return ret
def generate_type_cleanup_decl(name):
ret = mcgen('''
void qapi_free_%(type)s(%(c_type)s obj);
''',
c_type=c_type(name),type=name)
return ret
def generate_type_cleanup(name):
ret = mcgen('''
void qapi_free_%(type)s(%(c_type)s obj)
{
QapiDeallocVisitor *md;
Visitor *v;
if (!obj) {
return;
}
md = qapi_dealloc_visitor_new();
v = qapi_dealloc_get_visitor(md);
visit_type_%(type)s(v, &obj, NULL, NULL);
qapi_dealloc_visitor_cleanup(md);
}
''',
c_type=c_type(name),type=name)
return ret
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "p:o:", ["prefix=", "output-dir="])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
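# Typical invocation (illustrative; the schema is read from stdin):
#   python qapi-types.py -o <output_dir> -p <prefix> < qapi-schema.json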
output_dir = ""
prefix = ""
c_file = 'qapi-types.c'
h_file = 'qapi-types.h'
for o, a in opts:
if o in ("-p", "--prefix"):
prefix = a
elif o in ("-o", "--output-dir"):
output_dir = a + "/"
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
try:
os.makedirs(output_dir)
except os.error, e:
if e.errno != errno.EEXIST:
raise
fdef = open(c_file, 'w')
fdecl = open(h_file, 'w')
fdef.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* deallocation functions for schema-defined QAPI types
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
* Michael Roth <mdroth@linux.vnet.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "qapi/qapi-dealloc-visitor.h"
#include "%(prefix)sqapi-types.h"
#include "%(prefix)sqapi-visit.h"
''', prefix=prefix))
fdecl.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI types
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#ifndef %(guard)s
#define %(guard)s
#include "qapi/qapi-types-core.h"
''',
guard=guardname(h_file)))
exprs = parse_schema(sys.stdin)
for expr in exprs:
ret = "\n"
if expr.has_key('type'):
ret += generate_fwd_struct(expr['type'], expr['data'])
elif expr.has_key('enum'):
ret += generate_enum(expr['enum'], expr['data'])
fdef.write(generate_enum_lookup(expr['enum'], expr['data']))
elif expr.has_key('union'):
ret += generate_fwd_struct(expr['union'], expr['data']) + "\n"
ret += generate_enum('%sKind' % expr['union'], expr['data'].keys())
else:
continue
fdecl.write(ret)
for expr in exprs:
ret = "\n"
if expr.has_key('type'):
ret += generate_struct(expr['type'], "", expr['data']) + "\n"
ret += generate_type_cleanup_decl(expr['type'] + "List")
fdef.write(generate_type_cleanup(expr['type'] + "List") + "\n")
ret += generate_type_cleanup_decl(expr['type'])
fdef.write(generate_type_cleanup(expr['type']) + "\n")
elif expr.has_key('union'):
ret += generate_union(expr['union'], expr['data'])
else:
continue
fdecl.write(ret)
fdecl.write('''
#endif
''')
fdecl.flush()
fdecl.close()
fdef.flush()
fdef.close()
|
KernelAnalysisPlatform/KlareDbg
|
tracers/qemu/decaf/scripts/qapi-types.py
|
Python
|
gpl-3.0
| 6,007
|
[
"VisIt"
] |
8d58db2dc784e9056750bf8e1ec7687d3c17d29c968b0b89d1c49458f09626a5
|
from __future__ import division, print_function, absolute_import
import sys
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
import numpy as np
config = Configuration('siesta',parent_package,top_path)
config.add_subpackage('io')
config.add_subpackage('fdf')
# Add sparse library
einfo = get_info('ALL')
config.add_extension('sparse',sources=['sparse.c'],
include_dirs=['.','..',np.get_include()],
extra_info = einfo)
return config
if __name__ == '__main__':
from distutils.core import setup
setup(**configuration(top_path='').todict())
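# Typical in-place build invocation (illustrative, standard numpy.distutils
# usage): python setup.py build_ext --inplace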
|
zerothi/siesta-es
|
sids/siesta/setup.py
|
Python
|
gpl-3.0
| 741
|
[
"SIESTA"
] |
ea5e87a51bbe655735f78d4c313a54b1a606854ca99039359567e3e1ffee822b
|
from math import e
from StringIO import StringIO
from urllib import urlopen
from subprocess import Popen
from tempfile import mkstemp
from urlparse import urlparse
from os.path import splitext
from os import write, close, unlink
try:
import PIL
except ImportError:
import Image
from ImageDraw import ImageDraw
from Image import ANTIALIAS, AFFINE, BICUBIC
from ImageOps import autocontrast
from ImageFilter import MinFilter, MaxFilter
else:
from PIL import Image
from PIL.ImageDraw import ImageDraw
from PIL.Image import ANTIALIAS, AFFINE, BICUBIC
from PIL.ImageOps import autocontrast
from PIL.ImageFilter import MinFilter, MaxFilter
from numpy import array, fromstring, ubyte, convolve
from BlobDetector import detect
from matrixmath import Point, triangle2triangle
from featuremath import Transform
class Blob:
"""
"""
def __init__(self, xmin, ymin, xmax, ymax, size):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.size = size
self.x = (xmin + xmax) / 2
self.y = (ymin + ymax) / 2
self.w = xmax - xmin
self.h = ymax - ymin
self.bbox = (xmin, ymin, xmax, ymax)
def open(url):
"""
"""
bytes = StringIO(urlopen(url).read())
image = Image.open(bytes)
try:
image.load()
except IOError:
pass
else:
return image
s, h, path, p, q, f = urlparse(url)
head, tail = splitext(path)
handle, input_filename = mkstemp(prefix='imagemath-', suffix=tail)
write(handle, bytes.getvalue())
close(handle)
handle, output_filename = mkstemp(prefix='imagemath-', suffix='.jpg')
close(handle)
try:
convert = Popen(('convert', input_filename, output_filename))
convert.wait()
if convert.returncode != 0:
raise IOError("Couldn't read %(url)s even with convert" % locals())
return Image.open(output_filename)
finally:
unlink(input_filename)
unlink(output_filename)
def imgblobs(img, highpass_filename=None, preblobs_filename=None, postblobs_filename=None):
""" Extract bboxes of blobs from an image.
Assumes blobs somewhere in the neighborhood of 0.25" or so
on a scan not much smaller than 8" on its smallest side.
Each blob is a bbox: (xmin, ymin, xmax, ymax)
"""
thumb = img.copy().convert('L')
thumb.thumbnail((1500, 1500), ANTIALIAS)
# needed to get back up to input image size later.
scale = float(img.size[0]) / float(thumb.size[0])
# largest likely blob size, from scan size, 0.25", and floor of 8" for print.
maxdim = min(*img.size) * 0.25 / 8.0
# smallest likely blob size, wild-ass-guessed.
mindim = 8
thumb = autocontrast(thumb)
thumb = lowpass(thumb, 1)
thumb = highpass(thumb, 16)
if highpass_filename:
thumb.save(highpass_filename)
thumb = thumb.point(lambda p: (p < 116) and 0xFF or 0x00)
thumb = thumb.filter(MinFilter(5)).filter(MaxFilter(5))
if preblobs_filename:
thumb.save(preblobs_filename)
ident = img.copy().convert('L').convert('RGB')
draw = ImageDraw(ident)
blobs = []
for (xmin, ymin, xmax, ymax, pixels) in detect(thumb):
coverage = pixels / float((1 + xmax - xmin) * (1 + ymax - ymin))
if coverage < 0.7:
# too spidery
continue
xmin *= scale
ymin *= scale
xmax *= scale
ymax *= scale
blob = Blob(xmin, ymin, xmax, ymax, pixels)
if blob.w < mindim or blob.h < mindim:
# too small
continue
if blob.w > maxdim or blob.h > maxdim:
# too large
continue
if max(blob.w, blob.h) / min(blob.w, blob.h) > 2:
# too stretched
continue
draw.rectangle(blob.bbox, outline=(0xFF, 0, 0))
draw.text(blob.bbox[2:4], str(len(blobs)), fill=(0x99, 0, 0))
blobs.append(blob)
if postblobs_filename:
ident.save(postblobs_filename)
return blobs
def gaussian(data, radius):
""" Perform a gaussian blur on a data array representing an image.
Manipulate the data array directly.
"""
#
# Build a convolution kernel based on
# http://en.wikipedia.org/wiki/Gaussian_function#Two-dimensional_Gaussian_function
#
kernel = range(-radius, radius + 1)
kernel = [(d ** 2) / (2 * (radius * .5) ** 2) for d in kernel]
kernel = [e ** -d for d in kernel]
kernel = array(kernel, dtype=float) / sum(kernel)
#
# Convolve in two dimensions.
#
for row in range(data.shape[0]):
data[row,:] = convolve(data[row,:], kernel, 'same')
for col in range(data.shape[1]):
data[:,col] = convolve(data[:,col], kernel, 'same')
def lowpass(img, radius):
""" Perform a low-pass with a given radius on the image, return a new image.
"""
#
# Convert image to array
#
blur = img2arr(img)
gaussian(blur, radius)
return arr2img(blur)
def highpass(img, radius):
""" Perform a high-pass with a given radius on the image, return a new image.
"""
#
# Convert image to arrays
#
orig = img2arr(img)
blur = orig.copy()
gaussian(blur, radius)
#
# Combine blurred with original, see http://www.gimp.org/tutorials/Sketch_Effect/
#
high = .5 * orig + .5 * (0xff - blur)
return arr2img(high)
def extract_image(scan2print, print_bbox, scan_img, dest_dim, step=50):
""" Extract a portion of a scan image by print coordinates.
scan2print - transformation from scan pixels to original print.
"""
dest_img = Image.new('RGB', dest_dim)
#
# Compute transformation from print image bbox to destination image.
#
print2dest = triangle2triangle(Point(print_bbox[0], print_bbox[1]), Point(0, 0),
Point(print_bbox[0], print_bbox[3]), Point(0, dest_dim[1]),
Point(print_bbox[2], print_bbox[1]), Point(dest_dim[0], 0))
#
# Compute transformation from source image to destination image.
#
scan2dest = scan2print.multiply(print2dest)
dest_w, dest_h = dest_dim
for y in range(0, dest_h, step):
for x in range(0, dest_w, step):
# dimensions of current destination cell
w = min(step, dest_w - x)
h = min(step, dest_h - y)
# transformation from scan pixels to destination cell
m = scan2dest
m = m.multiply(Transform(1, 0, -x, 0, 1, -y))
m = m.inverse()
a = m.affine(0, 0, w, h)
p = scan_img.transform((w, h), AFFINE, a, BICUBIC)
dest_img.paste(p, (x, y))
return dest_img
def arr2img(ar):
""" Convert Numeric array to PIL Image.
"""
return Image.fromstring('L', (ar.shape[1], ar.shape[0]), ar.astype(ubyte).tostring())
def img2arr(im):
""" Convert PIL Image to Numeric array.
"""
return fromstring(im.tostring(), ubyte).reshape((im.size[1], im.size[0]))
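# Example usage (illustrative sketch, not part of the original module; the URL
# is a placeholder):
#
#   img = open('http://example.com/scan.jpg')
#   for i, blob in enumerate(imgblobs(img)):
#       print i, blob.bbox, blob.size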
|
stamen/fieldpapers
|
decoder/imagemath.py
|
Python
|
gpl-2.0
| 7,355
|
[
"Gaussian"
] |
ef513e5475118a1f7c525d59fc0966b33beaf61a50085f5c56b8569217a6b8c2
|
#!/usr/bin/python
# -------------------------------------------------------------------
# Import statements
# -------------------------------------------------------------------
import math
import os
import re
import sys
from decimal import *
from operator import *
import marvin.db.models.SampleModelClasses as sampledb
import numpy as np
from astropy.io import fits
from flask_login import UserMixin
from marvin.core.caching_query import RelationshipCache
from marvin.db.ArrayUtils import ARRAY_D
from marvin.db.database import db
from sqlalchemy import and_, func, select # for aggregate, other functions
from sqlalchemy.dialects.postgresql import *
from sqlalchemy.engine import reflection
from sqlalchemy.ext.hybrid import hybrid_method, hybrid_property
from sqlalchemy.orm import configure_mappers, deferred, relationship
from sqlalchemy.orm.session import Session
from sqlalchemy.schema import Column
from sqlalchemy.sql import column
from sqlalchemy.types import JSON, Float, Integer, String
from sqlalchemy_utils import Timestamp
from werkzeug.security import check_password_hash, generate_password_hash
try:
from sdss_access.path import Path
except ImportError as e:
Path = None
# ========================
# Define database classes
# ========================
Base = db.Base
class ArrayOps(object):
''' this class adds array functionality '''
__tablename__ = 'arrayops'
__table_args__ = {'extend_existing': True}
@property
def cols(self):
return list(self.__table__.columns._data.keys())
@property
def collist(self):
return ['wavelength', 'flux', 'ivar', 'mask', 'xpos', 'ypos', 'specres']
def getTableName(self):
return self.__table__.name
def matchIndex(self, name=None):
# Get index of correct column
incols = [x for x in self.cols if x in self.collist]
if not any(incols):
return None
elif len(incols) == 1:
idx = self.cols.index(incols[0])
else:
if not name:
print('Multiple columns found. Column name must be specified!')
return None
elif name in self.collist:
idx = self.cols.index(name)
else:
return None
return idx
def filter(self, start, end, name=None):
# Check input types or map string operators
startnum = type(start) == int or type(start) == float
endnum = type(end) == int or type(end) == float
opdict = {'=': eq, '<': lt, '<=': le, '>': gt, '>=': ge, '!=': ne}
if start in opdict.keys() or end in opdict.keys():
opind = list(opdict.keys()).index(start) if start in opdict.keys() else list(opdict.keys()).index(end)
if start in opdict.keys():
start = opdict[list(opdict.keys())[opind]]
if end in opdict.keys():
end = opdict[list(opdict.keys())[opind]]
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
if startnum and endnum:
arr = [x for x in data if x >= start and x <= end]
elif not startnum and endnum:
arr = [x for x in data if start(x, end)]
elif startnum and not endnum:
arr = [x for x in data if end(x, start)]
elif startnum == eq or endnum == eq:
arr = [x for x in data if start(x, end)] if start == eq else [x for x in data if end(x, start)]
return arr
else:
return None
def equal(self, num, name=None):
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
arr = [x for x in data if x == num]
return arr
else:
return None
def less(self, num, name=None):
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
arr = [x for x in data if x <= num]
return arr
else:
return None
def greater(self, num, name=None):
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
arr = [x for x in data if x >= num]
return arr
else:
return None
def getIndices(self, arr):
if self.idx:
indices = [self.__getattribute__(self.cols[self.idx]).index(a) for a in arr]
else:
return None
return indices
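# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative only): on a mapped row that mixes in
# ArrayOps (for example a Spaxel loaded through a marvin DB session), the
# helpers above select values from an array column and locate them:
#
#   vals = spaxel.filter(1.0, 5.0, name='flux')   # values between 1 and 5
#   big = spaxel.filter('>', 3.0, name='flux')    # string operators also work
#   idx = spaxel.getIndices(vals)                 # positions of the matches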
class Cube(Base, ArrayOps):
__tablename__ = 'cube'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb', 'extend_existing': True}
specres = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
specresd = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
prespecres = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
prespecresd = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
def __repr__(self):
return '<Cube (pk={0}, plate={1}, ifudesign={2}, tag={3})>'.format(self.pk, self.plate, self.ifu.name, self.pipelineInfo.version.version)
@property
def header(self):
'''Returns an astropy header'''
session = Session.object_session(self)
data = session.query(FitsHeaderKeyword.label, FitsHeaderValue.value,
FitsHeaderValue.comment).join(FitsHeaderValue).filter(
FitsHeaderValue.cube == self).all()
hdr = fits.Header(data)
return hdr
@property
def name(self):
return 'manga-{0}-{1}-LOGCUBE.fits.gz'.format(self.plate, self.ifu.name)
@property
def default_mapsname(self):
return 'mangadap-{0}-{1}-default.fits.gz'.format(self.plate, self.ifu.name)
def getPath(self):
sasurl = os.getenv('SAS_URL')
if sasurl:
sasredux = os.path.join(sasurl, 'sas/mangawork/manga/spectro/redux')
path = sasredux
else:
redux = os.getenv('MANGA_SPECTRO_REDUX')
path = redux
version = self.pipelineInfo.version.version
cubepath = os.path.join(path, version, str(self.plate), 'stack')
return cubepath
@property
def location(self):
name = self.name
path = self.getPath()
loc = os.path.join(path, name)
return loc
@property
def image(self):
ifu = '{0}.png'.format(self.ifu.name)
path = self.getPath()
imageloc = os.path.join(path, 'images', ifu)
return imageloc
def header_to_dict(self):
'''Returns a simple python dictionary header'''
values = self.headervals
hdrdict = {str(val.keyword.label): val.value for val in values}
return hdrdict
@property
def plateclass(self):
'''Returns a plate class'''
plate = Plate(self)
return plate
def testhead(self, key):
''' Test existence of header keyword'''
try:
if self.header_to_dict()[key]:
return True
except:
return False
def getFlags(self, bits, name):
session = Session.object_session(self)
# if bits not a digit, return None
if not str(bits).isdigit():
return 'NULL'
else:
bits = int(bits)
# Convert the integer value to list of bits
bitlist = [int(i) for i in '{0:08b}'.format(bits)]
bitlist.reverse()
indices = [i for i, bit in enumerate(bitlist) if bit]
labels = []
for i in indices:
maskbit = session.query(MaskBit).filter_by(flag=name, bit=i).one()
labels.append(maskbit.label)
return labels
def getQualBits(self, stage='3d'):
        ''' get quality bits '''
col = 'DRP2QUAL' if stage == '2d' else 'DRP3QUAL'
hdr = self.header_to_dict()
bits = hdr.get(col, None)
return bits
def getQualFlags(self, stage='3d'):
''' get quality flags '''
name = 'MANGA_DRP2QUAL' if stage == '2d' else 'MANGA_DRP3QUAL'
bits = self.getQualBits(stage=stage)
if bits:
return self.getFlags(bits, name)
else:
return None
def getTargFlags(self, targtype=1):
''' get target flags '''
name = 'MANGA_TARGET1' if targtype == 1 else 'MANGA_TARGET2' if targtype == 2 else 'MANGA_TARGET3'
bits = self.getTargBits(targtype=targtype)
if bits:
return self.getFlags(bits, name)
else:
return None
def getTargBits(self, targtype=1):
''' get target bits '''
        assert targtype in [1, 2, 3], 'target type can only be 1, 2 or 3'
hdr = self.header_to_dict()
newcol = 'MNGTARG{0}'.format(targtype)
oldcol = 'MNGTRG{0}'.format(targtype)
bits = hdr.get(newcol, hdr.get(oldcol, None))
return bits
def get3DCube(self, extension='flux'):
"""Returns a 3D array of ``extension`` from the cube spaxels.
For example, ``cube.get3DCube('flux')`` will return the original
flux cube with the same ordering as the FITS data cube.
Note that this method seems to be really slow retrieving arrays (this
is especially serious for large IFUs).
"""
session = Session.object_session(self)
spaxels = session.query(getattr(Spaxel, extension)).filter(
Spaxel.cube_pk == self.pk).order_by(Spaxel.x, Spaxel.y).all()
# Assumes cubes are always square (!)
nx = ny = int(np.sqrt(len(spaxels)))
nwave = len(spaxels[0][0])
spArray = np.array(spaxels)
return spArray.transpose().reshape((nwave, ny, nx)).transpose(0, 2, 1)
@hybrid_property
def plateifu(self):
'''Returns parameter plate-ifu'''
return '{0}-{1}'.format(self.plate, self.ifu.name)
@plateifu.expression
def plateifu(cls):
return func.concat(Cube.plate, '-', IFUDesign.name)
@hybrid_property
def restwave(self):
if self.target:
redshift = self.target.NSA_objects[0].z
wave = np.array(self.wavelength.wavelength)
restwave = wave / (1 + redshift)
return restwave
else:
return None
@restwave.expression
def restwave(cls):
restw = (func.rest_wavelength(sampledb.NSA.z))
return restw
def has_modelspaxels(self, name=None):
if not name:
name = '(SPX|HYB)'
has_ms = False
model_cubes = [f.modelcube for f in self.dapfiles if re.search('LOGCUBE-{0}'.format(name), f.filename)]
if model_cubes:
mc = sum(model_cubes, [])
if mc:
from marvin.db.models.DapModelClasses import ModelSpaxel
session = Session.object_session(mc[0])
ms = session.query(ModelSpaxel).filter_by(modelcube_pk=mc[0].pk).first()
has_ms = True if ms else False
return has_ms
def has_spaxels(self):
if len(self.spaxels) > 0:
return True
else:
return False
def has_fibers(self):
if len(self.fibers) > 0:
return True
else:
return False
def set_quality(stage):
''' produces cube quality flag '''
col = 'DRP2QUAL' if stage == '2d' else 'DRP3QUAL'
label = 'cubequal{0}'.format(stage)
kwarg = 'DRP{0}QUAL'.format(stage[0])
@hybrid_property
def quality(self):
bits = self.getQualBits(stage=stage)
return int(bits)
@quality.expression
def quality(cls):
return select([FitsHeaderValue.value.cast(Integer)]).\
where(and_(FitsHeaderKeyword.pk==FitsHeaderValue.fits_header_keyword_pk,
FitsHeaderKeyword.label.ilike(kwarg),
FitsHeaderValue.cube_pk==cls.pk)).\
label(label)
return quality
def set_manga_target(targtype):
''' produces manga_target flags '''
label = 'mngtrg{0}'.format(targtype)
kwarg = 'MNGT%RG{0}'.format(targtype)
@hybrid_property
def target(self):
bits = self.getTargBits(targtype=targtype)
return int(bits)
@target.expression
def target(cls):
return select([FitsHeaderValue.value.cast(Integer)]).\
where(and_(FitsHeaderKeyword.pk==FitsHeaderValue.fits_header_keyword_pk,
FitsHeaderKeyword.label.ilike(kwarg),
FitsHeaderValue.cube_pk==cls.pk)).\
label(label)
return target
setattr(Cube, 'manga_target1', set_manga_target(1))
setattr(Cube, 'manga_target2', set_manga_target(2))
setattr(Cube, 'manga_target3', set_manga_target(3))
setattr(Cube, 'quality', set_quality('3d'))
class Wavelength(Base, ArrayOps):
__tablename__ = 'wavelength'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb', 'extend_existing': True}
wavelength = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
def __repr__(self):
return '<Wavelength (pk={0})>'.format(self.pk)
class Spaxel(Base, ArrayOps):
__tablename__ = 'spaxel'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb', 'extend_existing': True}
flux = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
ivar = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
mask = deferred(Column(ARRAY_D(Integer, zero_indexes=True)))
disp = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
predisp = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
def __repr__(self):
return '<Spaxel (pk={0}, x={1}, y={2})'.format(self.pk, self.x, self.y)
@hybrid_method
def sum(self, name=None):
total = sum(self.flux)
return total
@sum.expression
def sum(cls):
# return select(func.sum(func.unnest(cls.flux))).select_from(func.unnest(cls.flux)).label('totalflux')
return select([func.sum(column('totalflux'))]).select_from(func.unnest(cls.flux).alias('totalflux'))
class RssFiber(Base, ArrayOps):
__tablename__ = 'rssfiber'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb', 'extend_existing': True}
flux = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
ivar = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
mask = deferred(Column(ARRAY_D(Integer, zero_indexes=True)))
xpos = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
ypos = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
disp = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
predisp = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
def __repr__(self):
return '<RssFiber (pk={0}, expnum={1}, mjd={2}, fiber={3})>'.format(self.pk, self.exposure_no, self.mjd, self.fiber.fiberid)
class PipelineInfo(Base):
__tablename__ = 'pipeline_info'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return ('<Pipeline_Info (pk={0}, name={3}, ver={1}, release={2})>'.format(self.pk,
self.version.version, self.version.label, self.name.label))
class PipelineVersion(Base):
__tablename__ = 'pipeline_version'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Pipeline_Version (pk={0}, version={1}, release={2})>'.format(self.pk, self.version, self.label)
class PipelineStage(Base):
__tablename__ = 'pipeline_stage'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Pipeline_Stage (pk={0}, label={1})>'.format(self.pk, self.label)
class PipelineCompletionStatus(Base):
__tablename__ = 'pipeline_completion_status'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Pipeline_Completion_Status (pk={0}, label={1})>'.format(self.pk, self.label)
class PipelineName(Base):
__tablename__ = 'pipeline_name'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Pipeline_Name (pk={0}, label={1})>'.format(self.pk, self.label)
class FitsHeaderValue(Base):
__tablename__ = 'fits_header_value'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Fits_Header_Value (pk={0})'.format(self.pk)
class FitsHeaderKeyword(Base):
__tablename__ = 'fits_header_keyword'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Fits_Header_Keyword (pk={0}, label={1})'.format(self.pk, self.label)
class IFUDesign(Base):
__tablename__ = 'ifudesign'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<IFU_Design (pk={0}, name={1})'.format(self.pk, self.name)
@property
def ifuname(self):
return self.name
@property
def designid(self):
return self.name
@property
def ifutype(self):
return self.name[:-2]
class IFUToBlock(Base):
__tablename__ = 'ifu_to_block'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<IFU_to_Block (pk={0})'.format(self.pk)
class SlitBlock(Base):
__tablename__ = 'slitblock'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<SlitBlock (pk={0})'.format(self.pk)
class Cart(Base):
__tablename__ = 'cart'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Cart (pk={0}, id={1})'.format(self.pk, self.id)
class Fibers(Base):
__tablename__ = 'fibers'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Fibers (pk={0}, fiberid={1}, fnum={2})'.format(self.pk, self.fiberid, self.fnum)
class FiberType(Base):
__tablename__ = 'fiber_type'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Fiber_Type (pk={0},label={1})'.format(self.pk, self.label)
class TargetType(Base):
__tablename__ = 'target_type'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Target_Type (pk={0},label={1})'.format(self.pk, self.label)
class Sample(Base, ArrayOps):
__tablename__ = 'sample'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Sample (pk={0},cube={1})'.format(self.pk, self.cube)
@hybrid_property
def nsa_logmstar(self):
try:
return math.log10(self.nsa_mstar)
except ValueError:
return -9999.0
except TypeError:
return None
@nsa_logmstar.expression
def nsa_logmstar(cls):
return func.log(cls.nsa_mstar)
@hybrid_property
def nsa_logmstar_el(self):
try:
return math.log10(self.nsa_mstar_el)
except ValueError as e:
return -9999.0
except TypeError as e:
return None
@nsa_logmstar_el.expression
def nsa_logmstar_el(cls):
return func.log(cls.nsa_mstar_el)
class CartToCube(Base):
__tablename__ = 'cart_to_cube'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<CartToCube (pk={0},cube={1}, cart={2})'.format(self.pk, self.cube, self.cart)
class Wcs(Base, ArrayOps):
__tablename__ = 'wcs'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<WCS (pk={0},cube={1})'.format(self.pk, self.cube)
def makeHeader(self):
wcscols = self.cols[2:]
newhdr = fits.Header()
for c in wcscols:
newhdr[c] = float(self.__getattribute__(c)) if type(self.__getattribute__(c)) == Decimal else self.__getattribute__(c)
return newhdr
class ObsInfo(Base):
__tablename__ = 'obsinfo'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
_expnum = Column('expnum', String)
@hybrid_property
def expnum(self):
return func.trim(self._expnum)
def __repr__(self):
return '<ObsInfo (pk={0},cube={1})'.format(self.pk, self.cube)
class CubeShape(Base):
__tablename__ = 'cube_shape'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<CubeShape (pk={0},cubes={1},size={2},totalrows={3})'.format(self.pk, len(self.cubes), self.size, self.total)
@property
def shape(self):
return (self.size, self.size)
def makeIndiceArray(self):
''' Return the indices array as a numpy array '''
return np.array(self.indices)
def getXY(self, index=None):
''' Get the x,y elements from a single digit index '''
if index is not None:
if index > self.total:
return None, None
else:
i = int(index / self.size)
j = int(index - i * self.size)
else:
arrind = self.makeIndiceArray()
i = np.array(arrind / self.size, dtype=int)
j = np.array(self.makeIndiceArray() - i * self.size, dtype=int)
return i, j
@hybrid_property
def x(self):
        '''Returns the x indices for this cube shape'''
x = self.getXY()[0]
return x
@x.expression
def x(cls):
#arrind = func.unnest(cls.indices).label('arrind')
#x = func.array_agg(arrind / cls.size).label('x')
s = db.Session()
arrind = (func.unnest(cls.indices) / cls.size).label('xarrind')
#x = s.query(arrind).select_from(cls).subquery('xarr')
#xagg = s.query(func.array_agg(x.c.xarrind))
return arrind
@hybrid_property
def y(self):
        '''Returns the y indices for this cube shape'''
y = self.getXY()[1]
return y
@y.expression
def y(cls):
#arrind = func.unnest(cls.indices).label('arrind')
#x = arrind / cls.size
#y = func.array_agg(arrind - x*cls.size).label('y')
#return y
s = db.Session()
arrunnest = func.unnest(cls.indices)
xarr = (func.unnest(cls.indices) / cls.size).label('xarrind')
arrind = (arrunnest - xarr*cls.size).label('yarrind')
#n.arrind-(n.arrind/n.size)*n.size
y = s.query(arrind).select_from(cls).subquery('yarr')
yagg = s.query(func.array_agg(y.c.yarrind))
return yagg.as_scalar()
class Plate(object):
''' new plate class '''
__tablename__ = 'myplate'
def __init__(self, cube=None, id=None):
self.id = cube.plate if cube else id if id else None
self.cube = cube if cube else None
self.drpver = None
if self.cube:
self._hdr = self.cube.header_to_dict()
self.type = self.getPlateType()
self.platetype = self._hdr.get('PLATETYP', None)
self.surveymode = self._hdr.get('SRVYMODE', None)
self.dateobs = self._hdr.get('DATE-OBS', None)
self.ra = self._hdr.get('CENRA', None)
self.dec = self._hdr.get('CENDEC', None)
self.designid = self._hdr.get('DESIGNID', None)
self.cartid = self._hdr.get('CARTID', None)
self.drpver = self.cube.pipelineInfo.version.version
self.isbright = 'APOGEE' in self.surveymode
self.dir3d = 'mastar' if self.isbright else 'stack'
# cast a few
self.ra = float(self.ra) if self.ra else None
self.dec = float(self.dec) if self.dec else None
self.id = int(self.id) if self.id else None
self.designid = int(self.designid) if self.designid else None
def __repr__(self):
return self.__str__()
def __str__(self):
return ('Plate (id={0}, ra={1}, dec={2}, '
' designid={3})'.format(self.id, self.ra, self.dec, self.designid))
def getPlateType(self):
''' Get the type of MaNGA plate '''
hdr = self.cube.header
# try galaxy
mngtrg = self._hdr.get('MNGTRG1', None)
pltype = 'Galaxy' if mngtrg else None
# try stellar
if not pltype:
mngtrg = self._hdr.get('MNGTRG2', None)
pltype = 'Stellar' if mngtrg else None
# try ancillary
if not pltype:
mngtrg = self._hdr.get('MNGTRG3', None)
pltype = 'Ancillary' if mngtrg else None
return pltype
@property
def cubes(self):
''' Get all cubes on this plate '''
session = db.Session()
if self.drpver:
cubes = session.query(Cube).join(PipelineInfo, PipelineVersion).\
filter(Cube.plate == self.id, PipelineVersion.version == self.drpver).all()
else:
cubes = session.query(Cube).filter(Cube.plate == self.id).all()
return cubes
# ================
# manga Aux DB classes
# ================
class CubeHeader(Base):
__tablename__ = 'cube_header'
__table_args__ = {'autoload': True, 'schema': 'mangaauxdb'}
header = Column(JSON)
def __repr__(self):
return '<CubeHeader (pk={0},cube={1})'.format(self.pk, self.cube)
class MaskLabels(Base):
__tablename__ = 'maskbit_labels'
__table_args__ = {'autoload': True, 'schema': 'mangaauxdb'}
def __repr__(self):
return '<MaskLabels (pk={0},bit={1})'.format(self.pk, self.maskbit)
class MaskBit(Base):
__tablename__ = 'maskbit'
__table_args__ = {'autoload': True, 'schema': 'mangaauxdb'}
def __repr__(self):
return '<MaskBit (pk={0},flag={1}, bit={2}, label={3})'.format(self.pk, self.flag, self.bit, self.label)
# ================
# Query Meta classes
# ================
class QueryMeta(Base, Timestamp):
__tablename__ = 'query'
__table_args__ = {'autoload': True, 'schema': 'history'}
def __repr__(self):
return '<QueryMeta (pk={0}, filter={1}), count={2}>'.format(self.pk, self.searchfilter, self.count)
class User(Base, UserMixin, Timestamp):
__tablename__ = 'user'
__table_args__ = {'autoload': True, 'schema': 'history'}
def __repr__(self):
return '<User (pk={0}, username={1})'.format(self.pk, self.username)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def get_id(self):
return (self.pk)
def update_stats(self, request=None):
remote_addr = request.remote_addr or None
self.login_count += 1
old_current_ip, new_current_ip = self.current_ip, remote_addr
self.last_ip = old_current_ip
self.current_ip = new_current_ip
# Define relationships
# ========================
Cube.pipelineInfo = relationship(PipelineInfo, backref="cubes")
Cube.wavelength = relationship(Wavelength, backref="cube")
Cube.ifu = relationship(IFUDesign, backref="cubes")
Cube.carts = relationship(Cart, secondary=CartToCube.__table__, backref="cubes")
Cube.wcs = relationship(Wcs, backref='cube', uselist=False)
Cube.shape = relationship(CubeShape, backref='cubes', uselist=False)
Cube.obsinfo = relationship(ObsInfo, backref='cube', uselist=False)
# from SampleDB
Cube.target = relationship(sampledb.MangaTarget, backref='cubes')
Sample.cube = relationship(Cube, backref="sample", uselist=False)
FitsHeaderValue.cube = relationship(Cube, backref="headervals")
FitsHeaderValue.keyword = relationship(FitsHeaderKeyword, backref="value")
IFUDesign.blocks = relationship(SlitBlock, secondary=IFUToBlock.__table__, backref='ifus')
Fibers.ifu = relationship(IFUDesign, backref="fibers")
Fibers.fibertype = relationship(FiberType, backref="fibers")
Fibers.targettype = relationship(TargetType, backref="fibers")
insp = reflection.Inspector.from_engine(db.engine)
fks = insp.get_foreign_keys(Spaxel.__table__.name, schema='mangadatadb')
if fks:
Spaxel.cube = relationship(Cube, backref='spaxels')
fks = insp.get_foreign_keys(RssFiber.__table__.name, schema='mangadatadb')
if fks:
RssFiber.cube = relationship(Cube, backref='rssfibers')
RssFiber.fiber = relationship(Fibers, backref='rssfibers')
PipelineInfo.name = relationship(PipelineName, backref="pipeinfo")
PipelineInfo.stage = relationship(PipelineStage, backref="pipeinfo")
PipelineInfo.version = relationship(PipelineVersion, backref="pipeinfo")
PipelineInfo.completionStatus = relationship(PipelineCompletionStatus, backref="pipeinfo")
# from AuxDB
CubeHeader.cube = relationship(Cube, backref='hdr')
# ---------------------------------------------------------
# Test that all relationships/mappings are self-consistent.
# ---------------------------------------------------------
try:
configure_mappers()
except RuntimeError as error:
print("""
mangadb.DataModelClasses:
An error occurred when verifying the relationships between the database tables.
Most likely this is an error in the definition of the SQLAlchemy relationships -
see the error message below for details.
""")
print("Error type: %s" % sys.exc_info()[0])
print("Error value: %s" % sys.exc_info()[1])
print("Error trace: %s" % sys.exc_info()[2])
sys.exit(1)
data_cache = RelationshipCache(Cube.target).\
and_(RelationshipCache(Cube.pipelineInfo)).\
and_(RelationshipCache(Cube.ifu)).\
and_(RelationshipCache(Cube.spaxels)).\
and_(RelationshipCache(Cube.wavelength)).\
and_(RelationshipCache(Cube.wcs)).\
and_(RelationshipCache(Cube.shape)).\
and_(RelationshipCache(Cube.obsinfo)).\
and_(RelationshipCache(IFUDesign.fibers)).\
and_(RelationshipCache(PipelineInfo.version)).\
and_(RelationshipCache(Cube.rssfibers))
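# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative only): with a configured marvin database
# session, the classes above are queried like ordinary SQLAlchemy models, and
# the hybrid attributes work both on instances and inside filters.  The
# plate-ifu value below is a placeholder.
#
#   session = db.Session()
#   cube = session.query(Cube).filter(Cube.plateifu == '8485-1901').first()
#   cube.plateifu          # instance side of the hybrid property
#   cube.manga_target1     # header-derived target bitmask (int)
#   cube.getQualFlags()    # list of MANGA_DRP3QUAL flag labels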
|
sdss/marvin
|
python/marvin/db/models/DataModelClasses.py
|
Python
|
bsd-3-clause
| 30,632
|
[
"Galaxy"
] |
54ffaa2ec35245176f45131e007abf3d2b7e53768081dfec1848651f8102b561
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for Ensemble Kalman Filtering."""
import collections
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import distributions
from tensorflow_probability.python.internal import dtype_util
__all__ = [
'EnsembleKalmanFilterState',
'ensemble_kalman_filter_predict',
'ensemble_kalman_filter_update',
'ensemble_kalman_filter_log_marginal_likelihood',
'inflate_by_scaled_identity_fn',
]
# Sample covariance. Handles differing shapes.
def _covariance(x, y=None):
"""Sample covariance, assuming samples are the leftmost axis."""
x = tf.convert_to_tensor(x, name='x')
# Covariance *only* uses the centered versions of x (and y).
x = x - tf.reduce_mean(x, axis=0)
if y is None:
y = x
else:
y = tf.convert_to_tensor(y, name='y', dtype=x.dtype)
y = y - tf.reduce_mean(y, axis=0)
return tf.reduce_mean(tf.linalg.matmul(
x[..., tf.newaxis],
y[..., tf.newaxis], adjoint_b=True), axis=0)
class EnsembleKalmanFilterState(collections.namedtuple(
'EnsembleKalmanFilterState', ['step', 'particles', 'extra'])):
"""State for Ensemble Kalman Filter algorithms.
Contents:
step: Scalar `Integer` tensor. The timestep associated with this state.
particles: Structure of Floating-point `Tensor`s of shape
[N, B1, ... Bn, Ek] where `N` is the number of particles in the ensemble,
`Bi` are the batch dimensions and `Ek` is the size of each state.
extra: Structure containing any additional information. Can be used
for keeping track of diagnostics or propagating side information to
the Ensemble Kalman Filter.
"""
pass
def inflate_by_scaled_identity_fn(scaling_factor):
"""Return function that scales up covariance matrix by `scaling_factor**2`."""
def _inflate_fn(particles):
particle_means = tf.nest.map_structure(
lambda x: tf.math.reduce_mean(x, axis=0), particles)
return tf.nest.map_structure(
lambda x, m: scaling_factor * (x - m) + m,
particles,
particle_means)
return _inflate_fn
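# Hedged numeric check (illustrative only): inflation preserves the ensemble
# mean and scales deviations about it, e.g. with scaling_factor=2. the
# particles [0., 2., 4.] (mean 2.) become [-2., 2., 6.].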
def ensemble_kalman_filter_predict(
state,
transition_fn,
seed=None,
inflate_fn=None,
name=None):
"""Ensemble Kalman Filter Prediction.
The [Ensemble Kalman Filter](
https://en.wikipedia.org/wiki/Ensemble_Kalman_filter) is a Monte Carlo
version of the traditional Kalman Filter.
This method is the 'prediction' equation associated with the Ensemble
Kalman Filter. This takes in an optional `inflate_fn` to perform covariance
inflation on the ensemble [2].
Args:
state: Instance of `EnsembleKalmanFilterState`.
transition_fn: callable returning a (joint) distribution over the next
latent state, and any information in the `extra` state.
Each component should be an instance of
`MultivariateNormalLinearOperator`.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
inflate_fn: Function that takes in the `particles` and returns a
new set of `particles`. Used for inflating the covariance of points.
Note this function should try to preserve the sample mean of the
particles, and scale up the sample covariance.
name: Python `str` name for ops created by this method.
Default value: `None` (i.e., `'ensemble_kalman_filter_predict'`).
Returns:
next_state: `EnsembleKalmanFilterState` representing particles after
applying `transition_fn`.
#### References
[1] Geir Evensen. Sequential data assimilation with a nonlinear
quasi-geostrophic model using Monte Carlo methods to forecast error
statistics. Journal of Geophysical Research, 1994.
[2] Jeffrey L. Anderson and Stephen L. Anderson. A Monte Carlo Implementation
of the Nonlinear Filtering Problem to Produce Ensemble Assimilations and
Forecasts. Monthly Weather Review, 1999.
"""
with tf.name_scope(name or 'ensemble_kalman_filter_predict'):
if inflate_fn is not None:
state = EnsembleKalmanFilterState(
step=state.step,
particles=inflate_fn(state.particles),
extra=state.extra)
new_particles_dist, extra = transition_fn(
state.step, state.particles, state.extra)
return EnsembleKalmanFilterState(
step=state.step, particles=new_particles_dist.sample(
seed=seed), extra=extra)
def ensemble_kalman_filter_update(
state,
observation,
observation_fn,
damping=1.,
seed=None,
name=None):
"""Ensemble Kalman Filter Update.
The [Ensemble Kalman Filter](
https://en.wikipedia.org/wiki/Ensemble_Kalman_filter) is a Monte Carlo
version of the traditional Kalman Filter.
This method is the 'update' equation associated with the Ensemble
Kalman Filter. In expectation, the ensemble covariance will match that
of the true posterior (under a Linear Gaussian State Space Model).
Args:
state: Instance of `EnsembleKalmanFilterState`.
observation: `Tensor` representing the observation for this timestep.
observation_fn: callable returning an instance of
`tfd.MultivariateNormalLinearOperator` along with an extra information
to be returned in the `EnsembleKalmanFilterState`.
damping: Floating-point `Tensor` representing how much to damp the
update by. Used to mitigate filter divergence. Default value: 1.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: Python `str` name for ops created by this method.
Default value: `None` (i.e., `'ensemble_kalman_filter_update'`).
Returns:
next_state: `EnsembleKalmanFilterState` representing particles at next
timestep, after applying Kalman update equations.
"""
with tf.name_scope(name or 'ensemble_kalman_filter_update'):
observation_particles_dist, extra = observation_fn(
state.step, state.particles, state.extra)
common_dtype = dtype_util.common_dtype(
[observation_particles_dist, observation], dtype_hint=tf.float32)
observation = tf.convert_to_tensor(observation, dtype=common_dtype)
observation_size_is_static_and_scalar = (observation.shape[-1] == 1)
if not isinstance(observation_particles_dist,
distributions.MultivariateNormalLinearOperator):
raise ValueError('Expected `observation_fn` to return an instance of '
'`MultivariateNormalLinearOperator`')
observation_particles = observation_particles_dist.sample(seed=seed)
observation_particles_covariance = _covariance(observation_particles)
covariance_between_state_and_predicted_observations = tf.nest.map_structure(
lambda x: _covariance(x, observation_particles), state.particles)
observation_particles_diff = observation - observation_particles
observation_particles_covariance = (
observation_particles_covariance +
observation_particles_dist.covariance())
# We specialize the univariate case.
# TODO(srvasude): Refactor linear_gaussian_ssm, normal_conjugate_posteriors
# and this code so we have a central place for normal conjugacy code.
if observation_size_is_static_and_scalar:
# In the univariate observation case, the Kalman gain is given by:
# K = cov(X, Y) / (var(Y) + var_noise). That is we just divide
# by the particle covariance plus the observation noise.
kalman_gain = tf.nest.map_structure(
lambda x: x / observation_particles_covariance,
covariance_between_state_and_predicted_observations)
new_particles = tf.nest.map_structure(
lambda x, g: x + damping * tf.linalg.matvec( # pylint:disable=g-long-lambda
g, observation_particles_diff), state.particles, kalman_gain)
else:
# TODO(b/153489530): Handle the case where the dimensionality of the
# observations is large. We can use the Sherman-Woodbury-Morrison
# identity in this case.
observation_particles_cholesky = tf.linalg.cholesky(
observation_particles_covariance)
added_term = tf.squeeze(tf.linalg.cholesky_solve(
observation_particles_cholesky,
observation_particles_diff[..., tf.newaxis]), axis=-1)
added_term = tf.nest.map_structure(
lambda x: tf.linalg.matvec(x, added_term),
covariance_between_state_and_predicted_observations)
new_particles = tf.nest.map_structure(
lambda x, a: x + damping * a, state.particles, added_term)
return EnsembleKalmanFilterState(
step=state.step + 1, particles=new_particles, extra=extra)
def ensemble_kalman_filter_log_marginal_likelihood(
state,
observation,
observation_fn,
seed=None,
name=None):
"""Ensemble Kalman Filter Log Marginal Likelihood.
The [Ensemble Kalman Filter](
https://en.wikipedia.org/wiki/Ensemble_Kalman_filter) is a Monte Carlo
version of the traditional Kalman Filter.
This method estimates (logarithm of) the marginal likelihood of the
observation at step `k`, `Y_k`, given previous observations from steps
`1` to `k-1`, `Y_{1:k}`. In other words, `Log[p(Y_k | Y_{1:k})]`.
This function's approximation to `p(Y_k | Y_{1:k})` is correct under a
Linear Gaussian state space model assumption, as ensemble size --> infinity.
Args:
state: Instance of `EnsembleKalmanFilterState` at step `k`,
conditioned on previous observations `Y_{1:k}`. Typically this is the
output of `ensemble_kalman_filter_predict`.
observation: `Tensor` representing the observation at step `k`.
observation_fn: callable returning an instance of
`tfd.MultivariateNormalLinearOperator` along with an extra information
to be returned in the `EnsembleKalmanFilterState`.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: Python `str` name for ops created by this method.
Default value: `None`
(i.e., `'ensemble_kalman_filter_log_marginal_likelihood'`).
Returns:
log_marginal_likelihood: `Tensor` with same dtype as `state`.
"""
with tf.name_scope(name or 'ensemble_kalman_filter_log_marginal_likelihood'):
observation_particles_dist, unused_extra = observation_fn(
state.step, state.particles, state.extra)
common_dtype = dtype_util.common_dtype(
[observation_particles_dist, observation], dtype_hint=tf.float32)
observation = tf.convert_to_tensor(observation, dtype=common_dtype)
if not isinstance(observation_particles_dist,
distributions.MultivariateNormalLinearOperator):
raise ValueError('Expected `observation_fn` to return an instance of '
'`MultivariateNormalLinearOperator`')
observation_particles = observation_particles_dist.sample(seed=seed)
observation_dist = distributions.MultivariateNormalTriL(
loc=tf.reduce_mean(observation_particles, axis=0),
scale_tril=tf.linalg.cholesky(_covariance(observation_particles)))
return observation_dist.log_prob(observation)
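# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module): one
# predict/update cycle for a one-dimensional random walk.  The ensemble size,
# noise scales and observed value below are arbitrary demonstration choices.
if __name__ == '__main__':
  def _demo_transition_fn(step, particles, extra):
    # Random walk: x_{k+1} ~ N(x_k, 0.1**2).
    return distributions.MultivariateNormalDiag(
        loc=particles, scale_diag=[0.1]), extra

  def _demo_observation_fn(step, particles, extra):
    # Observe the state directly with noise scale 0.5.
    return distributions.MultivariateNormalDiag(
        loc=particles, scale_diag=[0.5]), extra

  demo_state = EnsembleKalmanFilterState(
      step=0, particles=tf.random.normal([100, 1]), extra={})
  demo_state = ensemble_kalman_filter_predict(
      demo_state, _demo_transition_fn, seed=1)
  demo_state = ensemble_kalman_filter_update(
      demo_state, tf.constant([0.3]), _demo_observation_fn, seed=2)
  print('posterior ensemble mean:',
        float(tf.reduce_mean(demo_state.particles)))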
|
tensorflow/probability
|
tensorflow_probability/python/experimental/sequential/ensemble_kalman_filter.py
|
Python
|
apache-2.0
| 11,621
|
[
"Gaussian"
] |
c23275853fa765897651eba62ef213a11b14a9bcd69b454e4f34fc7b043743e5
|
''' Some standard routines for PySCF IO and queue interaction.'''
import os, json, shutil, ccq_sub_py
from pyscf.gto import Mole
from pyscf.pbc.gto import Cell
from pyscf.pbc.lib.chkfile import save_cell
from pyscf.lib.chkfile import load_mol, save_mol
from h5py import File
from pyscf.pbc.tools.pyscf_ase import ase_atoms_to_pyscf
# Filename convention. f"{SCFNAME}.py" will be run.
SCFNAME = "scfcalc"
def make_cell(atomase,**cellargs):
''' Make a cell from an ASE Atoms object.'''
cell = Cell()
cell.atom = ase_atoms_to_pyscf(atomase)
cell.a = atomase.cell.tolist()
cell.build(**cellargs)
return cell
def runcalc(loc,cell,
mfargs={},
qsubargs={'time':'6:00:00','queue':'gen'},
guess=None,
dfints=None,
meta={},
overwrite_meta=False,
run_anyways=False
):
''' Deposit run input into a location and run.
Args:
loc (str): directory for pyscf.
cell (Cell): PySCF Mole or Cell.
mfargs (dict): non-default args for mean field object.
Returns:
bool: if the directory was newly prepped.
'''
print(f"\n --- Checking job {loc}. ---")
if loc[-1] != '/': loc+='/'
cwd = os.getcwd()
if (os.path.exists(f"{loc}{SCFNAME}.py") or os.path.exists(f"{loc}{SCFNAME}.json")) and not run_anyways:
print("Already started.")
if overwrite_meta:
json.dump(meta,open(f"{loc}meta.json",'w'))
return False
if not os.path.exists(loc): os.mkdir(loc)
print(f"Preparing calculation in {loc}{SCFNAME}...")
cell.build()
if type(cell) == Cell:
save_cell(cell,f"{loc}{SCFNAME}.chk")
elif type(cell) == Mole:
save_mol(cell,f"{loc}{SCFNAME}.chk")
else:
raise AssertionError("Struture type not recognized.")
json.dump(mfargs,open(f"{loc}{SCFNAME}.json",'w'),indent=' ')
json.dump(meta,open(f"{loc}meta.json",'w'),indent=' ')
if dfints is not None:
shutil.copyfile(dfints,f"{loc}{SCFNAME}_gdf.h5")
if guess is not None:
shutil.copyfile(guess,f"{loc}guess.chk")
shutil.copyfile(f"{SCFNAME}.py",f"{loc}{SCFNAME}.py")
os.chdir(loc)
ccq_sub_py.qsub(SCFNAME+'.py',**qsubargs)
os.chdir(cwd)
print(f"Done running {loc}.")
return True
def readcalc(loc):
if loc[-1] != '/': loc+='/'
print(f"\nReading results from {loc}{SCFNAME}.")
results = {}
results['loc'] = loc
root = loc+SCFNAME
scfjson = root+'.json'
scfchk = root+'.chk'
meta = loc+'meta.json'
stdout = root+'.py.out'
if os.path.exists(scfjson):
results.update(json.load(open(scfjson,'r')))
if os.path.exists(meta):
results['meta'] = json.load(open(meta,'r'))
if os.path.exists(scfjson):
results.update(json.load(open(scfjson,'r')))
if os.path.exists(scfchk):
struct = load_mol(scfchk)
results['basis'] = struct.basis
results['ecp'] = struct.ecp
results['spin'] = struct.spin
if 'exp_to_discard' in struct.__dict__: results['exp_to_discard'] = struct.exp_to_discard
else: results['exp_to_discard'] = None
if 'dimension' in struct.__dict__: results['dimension'] = int(struct.dimension)
else: results['dimension'] = 0
if 'a' in struct.__dict__: results['lattice'] = struct.a
else: results['lattice'] = None
results['atoms'] = [struct.atom_symbol(i) for i in range(struct.natm)]
results['nelec'] = struct.tot_electrons()
results['verbose'] = struct.verbose
#output = pyscf.pbc.lib.chkfile.load(scfchk,'scf')
output = File(scfchk)
if 'scf' in output.keys():
results['e_tot'] = output['scf']['e_tot'][()]
print("Found energy:",results['e_tot'])
else: print("No results found.")
results['converged'] = False
if os.path.exists(stdout) and 'converged SCF energy' in open(stdout).read():
results['converged'] = True
return results
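# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative only): build a small cell from an ASE
# Atoms object, submit it, and read the results back.  The structure, basis,
# mean-field settings and run directory below are placeholder choices.
if __name__ == '__main__':
    from ase.build import bulk
    atoms = bulk('Si', 'diamond', a=5.43)
    cell = make_cell(atoms, basis='gth-szv', pseudo='gth-pade', verbose=4)
    started = runcalc('si_example_run/', cell,
                      mfargs={'max_cycle': 50},
                      meta={'note': 'example submission'})
    if not started:
        results = readcalc('si_example_run/')
        print(results.get('e_tot', 'calculation not finished yet'))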
|
bbusemeyer/mython
|
busempyer/runpyutils.py
|
Python
|
gpl-2.0
| 3,822
|
[
"ASE",
"PySCF"
] |
98144cbe566ea34c9cb0658945a6022a03870abef65c4184a16567cb539fe451
|
# encoding: utf-8
"""
System command aliases.
Authors:
* Fernando Perez
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import re
import sys
from IPython.config.configurable import Configurable
from IPython.core.error import UsageError
from IPython.utils.py3compat import string_types
from IPython.utils.traitlets import List, Instance
from IPython.utils.warn import error
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
# This is used as the pattern for calls to split_user_input.
shell_line_split = re.compile(r'^(\s*)()(\S+)(.*$)')
def default_aliases():
"""Return list of shell aliases to auto-define.
"""
# Note: the aliases defined here should be safe to use on a kernel
# regardless of what frontend it is attached to. Frontends that use a
# kernel in-process can define additional aliases that will only work in
# their case. For example, things like 'less' or 'clear' that manipulate
# the terminal should NOT be declared here, as they will only work if the
# kernel is running inside a true terminal, and not over the network.
if os.name == 'posix':
default_aliases = [('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
('mv', 'mv'), ('rm', 'rm'), ('cp', 'cp'),
('cat', 'cat'),
]
# Useful set of ls aliases. The GNU and BSD options are a little
# different, so we make aliases that provide as similar as possible
# behavior in ipython, by passing the right flags for each platform
if sys.platform.startswith('linux'):
ls_aliases = [('ls', 'ls -F --color'),
# long ls
('ll', 'ls -F -o --color'),
# ls normal files only
('lf', 'ls -F -o --color %l | grep ^-'),
# ls symbolic links
('lk', 'ls -F -o --color %l | grep ^l'),
# directories or links to directories,
('ldir', 'ls -F -o --color %l | grep /$'),
# things which are executable
('lx', 'ls -F -o --color %l | grep ^-..x'),
]
else:
# BSD, OSX, etc.
ls_aliases = [('ls', 'ls -F -G'),
# long ls
('ll', 'ls -F -l -G'),
# ls normal files only
('lf', 'ls -F -l -G %l | grep ^-'),
# ls symbolic links
('lk', 'ls -F -l -G %l | grep ^l'),
# directories or links to directories,
('ldir', 'ls -F -G -l %l | grep /$'),
# things which are executable
('lx', 'ls -F -l -G %l | grep ^-..x'),
]
default_aliases = default_aliases + ls_aliases
elif os.name in ['nt', 'dos']:
default_aliases = [('ls', 'dir /on'),
('ddir', 'dir /ad /on'), ('ldir', 'dir /ad /on'),
('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
('echo', 'echo'), ('ren', 'ren'), ('copy', 'copy'),
]
else:
default_aliases = []
return default_aliases
class AliasError(Exception):
pass
class InvalidAliasError(AliasError):
pass
class Alias(object):
"""Callable object storing the details of one alias.
Instances are registered as magic functions to allow use of aliases.
"""
# Prepare blacklist
blacklist = {'cd','popd','pushd','dhist','alias','unalias'}
def __init__(self, shell, name, cmd):
self.shell = shell
self.name = name
self.cmd = cmd
self.nargs = self.validate()
def validate(self):
"""Validate the alias, and return the number of arguments."""
if self.name in self.blacklist:
raise InvalidAliasError("The name %s can't be aliased "
"because it is a keyword or builtin." % self.name)
try:
caller = self.shell.magics_manager.magics['line'][self.name]
except KeyError:
pass
else:
if not isinstance(caller, Alias):
raise InvalidAliasError("The name %s can't be aliased "
"because it is another magic command." % self.name)
if not (isinstance(self.cmd, string_types)):
raise InvalidAliasError("An alias command must be a string, "
"got: %r" % self.cmd)
nargs = self.cmd.count('%s')
if (nargs > 0) and (self.cmd.find('%l') >= 0):
raise InvalidAliasError('The %s and %l specifiers are mutually '
'exclusive in alias definitions.')
return nargs
def __repr__(self):
return "<alias {} for {!r}>".format(self.name, self.cmd)
def __call__(self, rest=''):
cmd = self.cmd
nargs = self.nargs
# Expand the %l special to be the user's input line
if cmd.find('%l') >= 0:
cmd = cmd.replace('%l', rest)
rest = ''
if nargs==0:
# Simple, argument-less aliases
cmd = '%s %s' % (cmd, rest)
else:
# Handle aliases with positional arguments
args = rest.split(None, nargs)
if len(args) < nargs:
raise UsageError('Alias <%s> requires %s arguments, %s given.' %
(self.name, nargs, len(args)))
cmd = '%s %s' % (cmd % tuple(args[:nargs]),' '.join(args[nargs:]))
self.shell.system(cmd)
#-----------------------------------------------------------------------------
# Main AliasManager class
#-----------------------------------------------------------------------------
class AliasManager(Configurable):
default_aliases = List(default_aliases(), config=True)
user_aliases = List(default_value=[], config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
def __init__(self, shell=None, **kwargs):
super(AliasManager, self).__init__(shell=shell, **kwargs)
# For convenient access
self.linemagics = self.shell.magics_manager.magics['line']
self.init_aliases()
def init_aliases(self):
# Load default & user aliases
for name, cmd in self.default_aliases + self.user_aliases:
self.soft_define_alias(name, cmd)
@property
def aliases(self):
return [(n, func.cmd) for (n, func) in self.linemagics.items()
if isinstance(func, Alias)]
def soft_define_alias(self, name, cmd):
"""Define an alias, but don't raise on an AliasError."""
try:
self.define_alias(name, cmd)
except AliasError as e:
error("Invalid alias: %s" % e)
def define_alias(self, name, cmd):
"""Define a new alias after validating it.
This will raise an :exc:`AliasError` if there are validation
problems.
"""
caller = Alias(shell=self.shell, name=name, cmd=cmd)
self.shell.magics_manager.register_function(caller, magic_kind='line',
magic_name=name)
def get_alias(self, name):
"""Return an alias, or None if no alias by that name exists."""
aname = self.linemagics.get(name, None)
return aname if isinstance(aname, Alias) else None
def is_alias(self, name):
"""Return whether or not a given name has been defined as an alias"""
return self.get_alias(name) is not None
def undefine_alias(self, name):
if self.is_alias(name):
del self.linemagics[name]
else:
raise ValueError('%s is not an alias' % name)
def clear_aliases(self):
for name, cmd in self.aliases:
self.undefine_alias(name)
def retrieve_alias(self, name):
"""Retrieve the command to which an alias expands."""
caller = self.get_alias(name)
if caller:
return caller.cmd
else:
raise ValueError('%s is not an alias' % name)
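# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative only): inside an IPython session, aliases
# registered through this manager behave like line magics, e.g.
#
#   %alias parts echo first %s second %s
#   %parts A B        # runs "echo first A second B"
#   %alias show_py ls -l %l
#   %show_py *.py     # "%l" receives the whole rest of the line
#
# '%s' consumes one whitespace-separated argument per occurrence, '%l' takes
# the entire remaining input line, and the two specifiers cannot be combined
# in a single alias (see Alias.validate above).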
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/IPython/core/alias.py
|
Python
|
bsd-3-clause
| 8,950
|
[
"Brian"
] |
3bcca9636dee57c705ad81c347382561c93210f11d9fbb4c38a3e826eab8f4bc
|
#!/usr/bin/env python
# $Id: FJExample.py 545 2012-01-18 06:10:03Z cvermilion $
#----------------------------------------------------------------------
# Copyright (c) 2010-12, Pierre-Antoine Delsart, Kurtis Geerlings, Joey Huston,
# Brian Martin, and Christopher Vermilion
#
#----------------------------------------------------------------------
# This file is part of SpartyJet.
#
# SpartyJet is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# SpartyJet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SpartyJet; if not, write to the Free Software
# Foundation, Inc.:
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#----------------------------------------------------------------------
from spartyjet import *
#===============================================
# Create a jet builder---------------------------
builder = SJ.JetBuilder(SJ.INFO)
#builder.silent_mode() # turns off debugging information
# Configure input -------------------------------
input = getInputMaker('../data/J1_Clusters.dat')
builder.configure_input(input)
#**********************************************************************************
## Below are all the different ways one may use FastJet from within SpartyJet
## Run fastjet with SpartyJet handling jet definition
name = 'AntiKt4'
alg = fj.antikt_algorithm
R = 0.4
findJetAreas = False
anti4 = SJ.FastJet.FastJetFinder(name, alg, R, findJetAreas)
builder.add_default_analysis(anti4)
## Run fastjet by passing custom Jet Definition to SpartyJet
anti7_def = fj.JetDefinition(fj.antikt_algorithm, 0.7)
anti7 = SJ.FastJet.FastJetFinder(anti7_def, 'AntiKt7', findJetAreas)
builder.add_default_analysis(anti7)
## Run SISCone via included plugin algorithm (need to build SISCone dictionaries - see External/ExternalLinkDef.hpp)
#coneRadius = 0.7
#overlapThreshold = 0.75
#sisPlugin = fj.SISConePlugin(coneRadius, overlapThreshold)
#sisPlugin_jet_def = fj.JetDefinition(sisPlugin)
#sis7 = SJ.FastJet.FastJetFinder(sisPlugin_jet_def, 'SISCone7', findJetAreas)
#builder.add_default_analysis(sis7)
## User Plugin example
## If lib*.so is generated as in ../external/UserPlugins/ExamplePlugin/Makefile
## then lib*.so file must be included as follows:
from ROOT import gSystem
gSystem.Load('../UserPlugins/ExamplePlugin/libExamplePlugin.so')
## else one must place the Plugin's lib*.a file and header file
## in ../fastjet/UserPlugins/ lib/ and include/ respectively
## See UserPlugins/Makefile and the manual for more information
plugin = fj.ExamplePlugin(fj.JetDefinition(fj.antikt_algorithm, 1.0))
plugin_jet_def = fj.JetDefinition(plugin)
exFinder = SJ.FastJet.FastJetFinder(plugin_jet_def, 'ExamplePlugin', False)
builder.add_default_analysis(exFinder)
## Other plugins shipped with FastJet
## To use these plugins one must:
## - uncomment the necessary lines in spartyjet/FastJetCore/FastJetCoreLinkDef.hpp
## - recompile FastJet with ./configure --enable-allcxxplugins
## (no need to recompile if you use the FastJet shipped with SpartyJet)
## - recompile spartyjet/fastjet dir by doing: make fastjetC && make fastjet
## 3 examples, see FastJet docs for more
## CMS Iterative Cone Plugin
#coneRadius = 0.4
#seedThresh = 1.0
#cmsConePlugin = fj.CMSIterativeConePlugin(coneRadius, seedThresh)
#cmsCone_jet_def = fj.JetDefinition(cmsConePlugin)
#cmsCone = SJ.FastJet.FastJetFinder(cmsCone_jet_def, 'CMSCone', False)
#builder.add_default_analysis(cmsCone)
## Jade Plugin
#jPlugin = fj.JadePlugin()
#jPlugin_jet_def = fj.JetDefinition(jPlugin)
#jade = SJ.FastJet.FastJetFinder(jPlugin_jet_def, 'Jade', False)
#builder.add_default_analysis(jade)
## e-e Cambridge Plugin
#ycut = 0.4
#eecPlugin = fj.EECambridgePlugin(ycut)
#eecPlugin_jet_def = fj.JetDefinition(eecPlugin)
#eec = SJ.FastJet.FastJetFinder(eecPlugin_jet_def, 'EECambridge', False)
#builder.add_default_analysis(eec)
#**********************************************************************************
# Configure output--------------------------------
builder.add_text_output("../data/output/simple.dat")
outfile = "../data/output/simple.root"
builder.configure_output("SpartyJet_Tree", outfile)
# Run SpartyJet
builder.process_events(10)
# Save this script in the ROOT file (needs to go after process_events or it
# gets over-written!)
writeCurrentFile(outfile)
|
mickypaganini/SSI2016-jet-clustering
|
spartyjet-4.0.2_mac/examples_py/FJExample.py
|
Python
|
mit
| 4,761
|
[
"Brian"
] |
b5e576fabb5bdf9761fc5b2aefa8dfcdabf0943685f6fadef50e519e3f67df32
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Mozilla-specific Buildbot steps.
#
# The Initial Developer of the Original Code is
# Mozilla Foundation.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Brian Warner <warner@lothar.com>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
from twisted.internet import defer
from twisted.python import log
from buildbot.util import loop
from buildbot.util import collections
from buildbot.util.eventual import eventually
class SchedulerManager(loop.MultiServiceLoop):
def __init__(self, master, db, change_svc):
loop.MultiServiceLoop.__init__(self)
self.master = master
self.db = db
self.change_svc = change_svc
self.upstream_subscribers = collections.defaultdict(list)
def updateSchedulers(self, newschedulers):
"""Add and start any Scheduler that isn't already a child of ours.
Stop and remove any that are no longer in the list. Make sure each
one has a schedulerid in the database."""
# TODO: this won't tolerate reentrance very well
new_names = set()
added = set()
removed = set()
for s in newschedulers:
new_names.add(s.name)
try:
old = self.getServiceNamed(s.name)
except KeyError:
old = None
if old:
if old.compareToOther(s):
removed.add(old)
added.add(s)
else:
pass # unchanged
else:
added.add(s)
for old in list(self):
if old.name not in new_names:
removed.add(old)
#if removed or added:
# # notify Downstream schedulers to potentially pick up
# # new schedulers now that we have removed and added some
# def updateDownstreams(res):
# log.msg("notifying downstream schedulers of changes")
# for s in newschedulers:
# if interfaces.IDownstreamScheduler.providedBy(s):
# s.checkUpstreamScheduler()
# d.addCallback(updateDownstreams)
log.msg("removing %d old schedulers, adding %d new ones"
% (len(removed), len(added)))
dl = [defer.maybeDeferred(s.disownServiceParent) for s in removed]
d = defer.gatherResults(dl)
d.addCallback(lambda ign: self.db.addSchedulers(added))
def _attach(ign):
for s in added:
s.setServiceParent(self)
self.upstream_subscribers = collections.defaultdict(list)
for s in list(self):
if s.upstream_name:
self.upstream_subscribers[s.upstream_name].append(s)
eventually(self.trigger)
d.addCallback(_attach)
d.addErrback(log.err)
return d
def publish_buildset(self, upstream_name, bsid, t):
if upstream_name in self.upstream_subscribers:
for s in self.upstream_subscribers[upstream_name]:
s.buildSetSubmitted(bsid, t)
def trigger_add_change(self, category, changenumber):
self.trigger()
def trigger_modify_buildset(self, category, *bsids):
# TODO: this could just run the schedulers that have subscribed to
# scheduler_upstream_buildsets, or even just the ones that subscribed
# to hear about the specific buildsetid
self.trigger()
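# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative only): the build master typically owns a
# single manager and re-runs updateSchedulers whenever the configuration is
# reloaded; new schedulers are attached as child services and stale ones are
# disowned, as described in the docstring above.
#
#   manager = SchedulerManager(master, db, change_svc)
#   d = manager.updateSchedulers(new_scheduler_list)
#   d.addCallback(lambda _: log.msg("schedulers reconfigured"))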
|
centrumholdings/buildbot
|
buildbot/schedulers/manager.py
|
Python
|
gpl-2.0
| 4,863
|
[
"Brian"
] |
ef244466cd62347cdc15cf93d709e9173997b5c24aa57d14d629e43f49b639cf
|
""" Munch is a subclass of dict with attribute-style access.
>>> b = Munch()
>>> b.hello = 'world'
>>> b.hello
'world'
>>> b['hello'] += "!"
>>> b.hello
'world!'
>>> b.foo = Munch(lol=True)
>>> b.foo.lol
True
>>> b.foo is b['foo']
True
It is safe to import * from this module:
__all__ = ('Munch', 'munchify','unmunchify')
un/munchify provide dictionary conversion; Munches can also be
converted via Munch.to/fromDict().
"""
from .python3_compat import iterkeys, iteritems, Mapping, u
try:
# For python 3.8 and later
import importlib.metadata as importlib_metadata
except ImportError:
# For everyone else
import importlib_metadata
try:
__version__ = importlib_metadata.version(__name__)
except importlib_metadata.PackageNotFoundError:
# package is not installed
pass
VERSION = tuple(map(int, __version__.split('.')[:3]))
__all__ = ('Munch', 'munchify', 'DefaultMunch', 'DefaultFactoryMunch', 'RecursiveMunch', 'unmunchify')
class Munch(dict):
""" A dictionary that provides attribute-style access.
>>> b = Munch()
>>> b.hello = 'world'
>>> b.hello
'world'
>>> b['hello'] += "!"
>>> b.hello
'world!'
>>> b.foo = Munch(lol=True)
>>> b.foo.lol
True
>>> b.foo is b['foo']
True
A Munch is a subclass of dict; it supports all the methods a dict does...
>>> sorted(b.keys())
['foo', 'hello']
Including update()...
>>> b.update({ 'ponies': 'are pretty!' }, hello=42)
>>> print (repr(b))
Munch({'ponies': 'are pretty!', 'foo': Munch({'lol': True}), 'hello': 42})
As well as iteration...
>>> sorted([ (k,b[k]) for k in b ])
[('foo', Munch({'lol': True})), ('hello', 42), ('ponies', 'are pretty!')]
And "splats".
>>> "The {knights} who say {ni}!".format(**Munch(knights='lolcats', ni='can haz'))
'The lolcats who say can haz!'
See unmunchify/Munch.toDict, munchify/Munch.fromDict for notes about conversion.
"""
def __init__(self, *args, **kwargs): # pylint: disable=super-init-not-called
self.update(*args, **kwargs)
# only called if k not found in normal places
def __getattr__(self, k):
""" Gets key if it exists, otherwise throws AttributeError.
nb. __getattr__ is only called if key is not found in normal places.
>>> b = Munch(bar='baz', lol={})
>>> b.foo
Traceback (most recent call last):
...
AttributeError: foo
>>> b.bar
'baz'
>>> getattr(b, 'bar')
'baz'
>>> b['bar']
'baz'
>>> b.lol is b['lol']
True
>>> b.lol is getattr(b, 'lol')
True
"""
try:
# Throws exception if not in prototype chain
return object.__getattribute__(self, k)
except AttributeError:
try:
return self[k]
except KeyError:
raise AttributeError(k)
def __setattr__(self, k, v):
""" Sets attribute k if it exists, otherwise sets key k. A KeyError
raised by set-item (only likely if you subclass Munch) will
propagate as an AttributeError instead.
>>> b = Munch(foo='bar', this_is='useful when subclassing')
>>> hasattr(b.values, '__call__')
True
>>> b.values = 'uh oh'
>>> b.values
'uh oh'
>>> b['values']
Traceback (most recent call last):
...
KeyError: 'values'
"""
try:
# Throws exception if not in prototype chain
object.__getattribute__(self, k)
except AttributeError:
try:
self[k] = v
            except KeyError:
                raise AttributeError(k)
else:
object.__setattr__(self, k, v)
def __delattr__(self, k):
""" Deletes attribute k if it exists, otherwise deletes key k. A KeyError
raised by deleting the key--such as when the key is missing--will
propagate as an AttributeError instead.
>>> b = Munch(lol=42)
>>> del b.lol
>>> b.lol
Traceback (most recent call last):
...
AttributeError: lol
"""
try:
# Throws exception if not in prototype chain
object.__getattribute__(self, k)
except AttributeError:
try:
del self[k]
except KeyError:
raise AttributeError(k)
else:
object.__delattr__(self, k)
def toDict(self):
""" Recursively converts a munch back into a dictionary.
>>> b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
>>> sorted(b.toDict().items())
[('foo', {'lol': True}), ('hello', 42), ('ponies', 'are pretty!')]
See unmunchify for more info.
"""
return unmunchify(self)
@property
def __dict__(self):
return self.toDict()
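    # Illustrative aside (not in the original doctests), using only the names
    # defined above: because __dict__ is a property returning toDict(), vars()
    # on a Munch mirrors its mapping contents.
    #
    #   >>> b = Munch(answer=42)
    #   >>> vars(b)
    #   {'answer': 42}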
def __repr__(self):
""" Invertible* string-form of a Munch.
>>> b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
>>> print (repr(b))
Munch({'ponies': 'are pretty!', 'foo': Munch({'lol': True}), 'hello': 42})
>>> eval(repr(b))
Munch({'ponies': 'are pretty!', 'foo': Munch({'lol': True}), 'hello': 42})
>>> with_spaces = Munch({1: 2, 'a b': 9, 'c': Munch({'simple': 5})})
>>> print (repr(with_spaces))
Munch({'a b': 9, 1: 2, 'c': Munch({'simple': 5})})
>>> eval(repr(with_spaces))
Munch({'a b': 9, 1: 2, 'c': Munch({'simple': 5})})
(*) Invertible so long as collection contents are each repr-invertible.
"""
return '{0}({1})'.format(self.__class__.__name__, dict.__repr__(self))
def __dir__(self):
return list(iterkeys(self))
def __getstate__(self):
""" Implement a serializable interface used for pickling.
See https://docs.python.org/3.6/library/pickle.html.
"""
return {k: v for k, v in self.items()}
def __setstate__(self, state):
""" Implement a serializable interface used for pickling.
See https://docs.python.org/3.6/library/pickle.html.
"""
self.clear()
self.update(state)
__members__ = __dir__ # for python2.x compatibility
@classmethod
def fromDict(cls, d):
""" Recursively transforms a dictionary into a Munch via copy.
>>> b = Munch.fromDict({'urmom': {'sez': {'what': 'what'}}})
>>> b.urmom.sez.what
'what'
See munchify for more info.
"""
return munchify(d, cls)
def copy(self):
return type(self).fromDict(self)
def update(self, *args, **kwargs):
"""
Override built-in method to call custom __setitem__ method that may
be defined in subclasses.
"""
for k, v in iteritems(dict(*args, **kwargs)):
self[k] = v
def get(self, k, d=None):
"""
D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.
"""
if k not in self:
return d
return self[k]
def setdefault(self, k, d=None):
"""
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
"""
if k not in self:
self[k] = d
return self[k]
class AutoMunch(Munch):
def __setattr__(self, k, v):
""" Works the same as Munch.__setattr__ but if you supply
a dictionary as value it will convert it to another Munch.
"""
if isinstance(v, Mapping) and not isinstance(v, (AutoMunch, Munch)):
v = munchify(v, AutoMunch)
super(AutoMunch, self).__setattr__(k, v)
class DefaultMunch(Munch):
"""
A Munch that returns a user-specified value for missing keys.
"""
def __init__(self, *args, **kwargs):
""" Construct a new DefaultMunch. Like collections.defaultdict, the
first argument is the default value; subsequent arguments are the
same as those for dict.
"""
# Mimic collections.defaultdict constructor
if args:
default = args[0]
args = args[1:]
else:
default = None
super(DefaultMunch, self).__init__(*args, **kwargs)
self.__default__ = default
def __getattr__(self, k):
""" Gets key if it exists, otherwise returns the default value."""
try:
return super(DefaultMunch, self).__getattr__(k)
except AttributeError:
return self.__default__
def __setattr__(self, k, v):
if k == '__default__':
object.__setattr__(self, k, v)
else:
super(DefaultMunch, self).__setattr__(k, v)
def __getitem__(self, k):
""" Gets key if it exists, otherwise returns the default value."""
try:
return super(DefaultMunch, self).__getitem__(k)
except KeyError:
return self.__default__
def __getstate__(self):
""" Implement a serializable interface used for pickling.
See https://docs.python.org/3.6/library/pickle.html.
"""
return (self.__default__, {k: v for k, v in self.items()})
def __setstate__(self, state):
""" Implement a serializable interface used for pickling.
See https://docs.python.org/3.6/library/pickle.html.
"""
self.clear()
default, state_dict = state
self.update(state_dict)
self.__default__ = default
@classmethod
def fromDict(cls, d, default=None):
# pylint: disable=arguments-differ
return munchify(d, factory=lambda d_: cls(default, d_))
def copy(self):
return type(self).fromDict(self, default=self.__default__)
def __repr__(self):
return '{0}({1!r}, {2})'.format(
            type(self).__name__, self.__default__, dict.__repr__(self))
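# A brief illustrative sketch (not in the original docstring), using only the
# DefaultMunch class above: missing keys and attributes fall back to the
# default supplied at construction.
#
#   >>> d = DefaultMunch('n/a', {'hello': 'world'})
#   >>> d.hello
#   'world'
#   >>> d.missing
#   'n/a'
#   >>> d['also_missing']
#   'n/a'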
class DefaultFactoryMunch(Munch):
""" A Munch that calls a user-specified function to generate values for
missing keys like collections.defaultdict.
>>> b = DefaultFactoryMunch(list, {'hello': 'world!'})
>>> b.hello
'world!'
>>> b.foo
[]
>>> b.bar.append('hello')
>>> b.bar
['hello']
"""
def __init__(self, default_factory, *args, **kwargs):
super(DefaultFactoryMunch, self).__init__(*args, **kwargs)
self.default_factory = default_factory
@classmethod
def fromDict(cls, d, default_factory):
# pylint: disable=arguments-differ
return munchify(d, factory=lambda d_: cls(default_factory, d_))
def copy(self):
return type(self).fromDict(self, default_factory=self.default_factory)
def __repr__(self):
factory = self.default_factory.__name__
return '{0}({1}, {2})'.format(
type(self).__name__, factory, dict.__repr__(self))
def __setattr__(self, k, v):
if k == 'default_factory':
object.__setattr__(self, k, v)
else:
super(DefaultFactoryMunch, self).__setattr__(k, v)
def __missing__(self, k):
self[k] = self.default_factory()
return self[k]
class RecursiveMunch(DefaultFactoryMunch):
"""A Munch that calls an instance of itself to generate values for
missing keys.
>>> b = RecursiveMunch({'hello': 'world!'})
>>> b.hello
'world!'
>>> b.foo
RecursiveMunch(RecursiveMunch, {})
>>> b.bar.okay = 'hello'
>>> b.bar
RecursiveMunch(RecursiveMunch, {'okay': 'hello'})
>>> b
RecursiveMunch(RecursiveMunch, {'hello': 'world!', 'foo': RecursiveMunch(RecursiveMunch, {}),
'bar': RecursiveMunch(RecursiveMunch, {'okay': 'hello'})})
"""
def __init__(self, *args, **kwargs):
super(RecursiveMunch, self).__init__(RecursiveMunch, *args, **kwargs)
@classmethod
def fromDict(cls, d):
# pylint: disable=arguments-differ
return munchify(d, factory=cls)
def copy(self):
return type(self).fromDict(self)
# While we could convert abstract types like Mapping or Iterable, I think
# munchify is more likely to "do what you mean" if it is conservative about
# casting (ex: isinstance(str,Iterable) == True ).
#
# Should you disagree, it is not difficult to duplicate this function with
# more aggressive coercion to suit your own purposes.
def munchify(x, factory=Munch):
""" Recursively transforms a dictionary into a Munch via copy.
>>> b = munchify({'urmom': {'sez': {'what': 'what'}}})
>>> b.urmom.sez.what
'what'
munchify can handle intermediary dicts, lists and tuples (as well as
their subclasses), but ymmv on custom datatypes.
>>> b = munchify({ 'lol': ('cats', {'hah':'i win again'}),
... 'hello': [{'french':'salut', 'german':'hallo'}] })
>>> b.hello[0].french
'salut'
>>> b.lol[1].hah
'i win again'
nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
"""
# Munchify x, using `seen` to track object cycles
seen = dict()
def munchify_cycles(obj):
# If we've already begun munchifying obj, just return the already-created munchified obj
try:
return seen[id(obj)]
except KeyError:
pass
# Otherwise, first partly munchify obj (but without descending into any lists or dicts) and save that
seen[id(obj)] = partial = pre_munchify(obj)
# Then finish munchifying lists and dicts inside obj (reusing munchified obj if cycles are encountered)
return post_munchify(partial, obj)
def pre_munchify(obj):
# Here we return a skeleton of munchified obj, which is enough to save for later (in case
        # we need to break cycles) but it needs to be filled out in post_munchify
if isinstance(obj, Mapping):
return factory({})
elif isinstance(obj, list):
return type(obj)()
elif isinstance(obj, tuple):
type_factory = getattr(obj, "_make", type(obj))
return type_factory(munchify_cycles(item) for item in obj)
else:
return obj
def post_munchify(partial, obj):
# Here we finish munchifying the parts of obj that were deferred by pre_munchify because they
# might be involved in a cycle
if isinstance(obj, Mapping):
partial.update((k, munchify_cycles(obj[k])) for k in iterkeys(obj))
elif isinstance(obj, list):
partial.extend(munchify_cycles(item) for item in obj)
elif isinstance(obj, tuple):
for (item_partial, item) in zip(partial, obj):
post_munchify(item_partial, item)
return partial
return munchify_cycles(x)
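# Illustrative sketch (not part of the original doctests) of the cycle handling
# provided by the `seen` mapping above: the partially-built Munch is reused when
# the same object is met again, so self-references survive munchification.
#
#   >>> d = {'name': 'root'}
#   >>> d['self'] = d
#   >>> m = munchify(d)
#   >>> m.self is m
#   True
#   >>> m.name
#   'root'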
def unmunchify(x):
""" Recursively converts a Munch into a dictionary.
>>> b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
>>> sorted(unmunchify(b).items())
[('foo', {'lol': True}), ('hello', 42), ('ponies', 'are pretty!')]
unmunchify will handle intermediary dicts, lists and tuples (as well as
their subclasses), but ymmv on custom datatypes.
>>> b = Munch(foo=['bar', Munch(lol=True)], hello=42,
... ponies=('are pretty!', Munch(lies='are trouble!')))
>>> sorted(unmunchify(b).items()) #doctest: +NORMALIZE_WHITESPACE
[('foo', ['bar', {'lol': True}]), ('hello', 42), ('ponies', ('are pretty!', {'lies': 'are trouble!'}))]
nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
"""
# Munchify x, using `seen` to track object cycles
seen = dict()
def unmunchify_cycles(obj):
# If we've already begun unmunchifying obj, just return the already-created unmunchified obj
try:
return seen[id(obj)]
except KeyError:
pass
# Otherwise, first partly unmunchify obj (but without descending into any lists or dicts) and save that
seen[id(obj)] = partial = pre_unmunchify(obj)
# Then finish unmunchifying lists and dicts inside obj (reusing unmunchified obj if cycles are encountered)
return post_unmunchify(partial, obj)
def pre_unmunchify(obj):
# Here we return a skeleton of unmunchified obj, which is enough to save for later (in case
        # we need to break cycles) but it needs to be filled out in post_unmunchify
if isinstance(obj, Mapping):
return dict()
elif isinstance(obj, list):
return type(obj)()
elif isinstance(obj, tuple):
type_factory = getattr(obj, "_make", type(obj))
return type_factory(unmunchify_cycles(item) for item in obj)
else:
return obj
def post_unmunchify(partial, obj):
# Here we finish unmunchifying the parts of obj that were deferred by pre_unmunchify because they
# might be involved in a cycle
if isinstance(obj, Mapping):
partial.update((k, unmunchify_cycles(obj[k])) for k in iterkeys(obj))
elif isinstance(obj, list):
partial.extend(unmunchify_cycles(v) for v in obj)
elif isinstance(obj, tuple):
for (value_partial, value) in zip(partial, obj):
post_unmunchify(value_partial, value)
return partial
return unmunchify_cycles(x)
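# Illustrative sketch (not part of the original doctests): munchify and
# unmunchify are inverses for plain containers, so a round trip restores an
# equal dictionary.
#
#   >>> original = {'hello': ['world', {'lol': True}]}
#   >>> unmunchify(munchify(original)) == original
#   True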
# Serialization
try:
try:
import json
except ImportError:
import simplejson as json
def toJSON(self, **options):
""" Serializes this Munch to JSON. Accepts the same keyword options as `json.dumps()`.
>>> b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
>>> json.dumps(b) == b.toJSON()
True
"""
return json.dumps(self, **options)
def fromJSON(cls, stream, *args, **kwargs):
""" Deserializes JSON to Munch or any of its subclasses.
"""
factory = lambda d: cls(*(args + (d,)), **kwargs)
return munchify(json.loads(stream), factory=factory)
Munch.toJSON = toJSON
Munch.fromJSON = classmethod(fromJSON)
except ImportError:
pass
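# Illustrative sketch (not in the original doctests), assuming the json module
# imported above is available so that fromJSON was attached to Munch:
#
#   >>> b = Munch.fromJSON('{"ponies": "are pretty!"}')
#   >>> b.ponies
#   'are pretty!'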
try:
# Attempt to register ourself with PyYAML as a representer
import yaml
from yaml.representer import Representer, SafeRepresenter
def from_yaml(loader, node):
""" PyYAML support for Munches using the tag `!munch` and `!munch.Munch`.
>>> import yaml
>>> yaml.load('''
... Flow style: !munch.Munch { Clark: Evans, Brian: Ingerson, Oren: Ben-Kiki }
... Block style: !munch
... Clark : Evans
... Brian : Ingerson
... Oren : Ben-Kiki
... ''') #doctest: +NORMALIZE_WHITESPACE
{'Flow style': Munch(Brian='Ingerson', Clark='Evans', Oren='Ben-Kiki'),
'Block style': Munch(Brian='Ingerson', Clark='Evans', Oren='Ben-Kiki')}
This module registers itself automatically to cover both Munch and any
subclasses. Should you want to customize the representation of a subclass,
simply register it with PyYAML yourself.
"""
data = Munch()
yield data
value = loader.construct_mapping(node)
data.update(value)
def to_yaml_safe(dumper, data):
""" Converts Munch to a normal mapping node, making it appear as a
dict in the YAML output.
>>> b = Munch(foo=['bar', Munch(lol=True)], hello=42)
>>> import yaml
>>> yaml.safe_dump(b, default_flow_style=True)
'{foo: [bar, {lol: true}], hello: 42}\\n'
"""
return dumper.represent_dict(data)
def to_yaml(dumper, data):
""" Converts Munch to a representation node.
>>> b = Munch(foo=['bar', Munch(lol=True)], hello=42)
>>> import yaml
>>> yaml.dump(b, default_flow_style=True)
'!munch.Munch {foo: [bar, !munch.Munch {lol: true}], hello: 42}\\n'
"""
return dumper.represent_mapping(u('!munch.Munch'), data)
for loader_name in ("BaseLoader", "FullLoader", "SafeLoader", "Loader", "UnsafeLoader", "DangerLoader"):
LoaderCls = getattr(yaml, loader_name, None)
if LoaderCls is None:
# This code supports both PyYAML 4.x and 5.x versions
continue
yaml.add_constructor(u('!munch'), from_yaml, Loader=LoaderCls)
yaml.add_constructor(u('!munch.Munch'), from_yaml, Loader=LoaderCls)
SafeRepresenter.add_representer(Munch, to_yaml_safe)
SafeRepresenter.add_multi_representer(Munch, to_yaml_safe)
Representer.add_representer(Munch, to_yaml)
Representer.add_multi_representer(Munch, to_yaml)
# Instance methods for YAML conversion
def toYAML(self, **options):
""" Serializes this Munch to YAML, using `yaml.safe_dump()` if
no `Dumper` is provided. See the PyYAML documentation for more info.
>>> b = Munch(foo=['bar', Munch(lol=True)], hello=42)
>>> import yaml
>>> yaml.safe_dump(b, default_flow_style=True)
'{foo: [bar, {lol: true}], hello: 42}\\n'
>>> b.toYAML(default_flow_style=True)
'{foo: [bar, {lol: true}], hello: 42}\\n'
>>> yaml.dump(b, default_flow_style=True)
'!munch.Munch {foo: [bar, !munch.Munch {lol: true}], hello: 42}\\n'
>>> b.toYAML(Dumper=yaml.Dumper, default_flow_style=True)
'!munch.Munch {foo: [bar, !munch.Munch {lol: true}], hello: 42}\\n'
"""
opts = dict(indent=4, default_flow_style=False)
opts.update(options)
if 'Dumper' not in opts:
return yaml.safe_dump(self, **opts)
else:
return yaml.dump(self, **opts)
def fromYAML(cls, stream, *args, **kwargs):
factory = lambda d: cls(*(args + (d,)), **kwargs)
loader_class = kwargs.pop('Loader', yaml.FullLoader)
return munchify(yaml.load(stream, Loader=loader_class), factory=factory)
Munch.toYAML = toYAML
Munch.fromYAML = classmethod(fromYAML)
except ImportError:
pass
|
Infinidat/munch
|
munch/__init__.py
|
Python
|
mit
| 22,610
|
[
"Brian"
] |
77747daa9bb70d6c11accb0bf7f8acbe6a7bf960faeb188d7d500cf0ad94bf75
|
# (C) British Crown Copyright 2013 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
# Historically this was auto-generated from
# SciTools/iris-code-generators:tools/gen_rules.py
import cf_units
import numpy as np
import calendar
from iris.aux_factory import HybridHeightFactory, HybridPressureFactory
from iris.coords import AuxCoord, CellMethod, DimCoord
from iris.fileformats.rules import (ConversionMetadata, Factory, Reference,
ReferenceTarget)
from iris.fileformats.um_cf_map import (LBFC_TO_CF, STASH_TO_CF,
STASHCODE_IMPLIED_HEIGHTS)
import iris.fileformats.pp
###############################################################################
#
# Convert vectorisation routines.
#
def _dim_or_aux(*args, **kwargs):
try:
result = DimCoord(*args, **kwargs)
except ValueError:
attr = kwargs.get('attributes')
if attr is not None and 'positive' in attr:
del attr['positive']
result = AuxCoord(*args, **kwargs)
return result
def _convert_vertical_coords(lbcode, lbvc, blev, lblev, stash,
bhlev, bhrlev, brsvd1, brsvd2, brlev,
dim=None):
"""
Encode scalar or vector vertical level values from PP headers as CM data
components.
Args:
* lbcode:
Scalar field :class:`iris.fileformats.pp.SplittableInt` value.
* lbvc:
Scalar field value.
* blev:
Scalar field value or :class:`numpy.ndarray` vector of field values.
* lblev:
Scalar field value or :class:`numpy.ndarray` vector of field values.
* stash:
Scalar field :class:`iris.fileformats.pp.STASH` value.
* bhlev:
Scalar field value or :class:`numpy.ndarray` vector of field values.
* bhrlev:
Scalar field value or :class:`numpy.ndarray` vector of field values.
* brsvd1:
Scalar field value or :class:`numpy.ndarray` vector of field values.
* brsvd2:
Scalar field value or :class:`numpy.ndarray` vector of field values.
* brlev:
Scalar field value or :class:`numpy.ndarray` vector of field values.
Kwargs:
* dim:
Associated dimension of the vertical coordinate. Defaults to None.
Returns:
A tuple containing a list of coords_and_dims, and a list of factories.
"""
factories = []
coords_and_dims = []
# See Word no. 33 (LBLEV) in section 4 of UM Model Docs (F3).
BASE_RHO_LEVEL_LBLEV = 9999
model_level_number = np.atleast_1d(lblev)
model_level_number[model_level_number == BASE_RHO_LEVEL_LBLEV] = 0
# Ensure to vectorise these arguments as arrays, as they participate
# in the conditions of convert rules.
blev = np.atleast_1d(blev)
brsvd1 = np.atleast_1d(brsvd1)
brlev = np.atleast_1d(brlev)
# Height.
if (lbvc == 1) and \
str(stash) not in STASHCODE_IMPLIED_HEIGHTS and \
np.all(blev != -1):
coord = _dim_or_aux(blev, standard_name='height', units='m',
attributes={'positive': 'up'})
coords_and_dims.append((coord, dim))
if str(stash) in STASHCODE_IMPLIED_HEIGHTS:
height = STASHCODE_IMPLIED_HEIGHTS[str(stash)]
coord = DimCoord(height, standard_name='height', units='m',
attributes={'positive': 'up'})
coords_and_dims.append((coord, None))
# Model level number.
if (len(lbcode) != 5) and \
(lbvc == 2):
coord = _dim_or_aux(model_level_number, standard_name='model_level_number',
attributes={'positive': 'down'})
coords_and_dims.append((coord, dim))
# Depth - unbound.
if (len(lbcode) != 5) and \
(lbvc == 2) and \
np.all(brsvd1 == brlev):
coord = _dim_or_aux(blev, standard_name='depth', units='m',
attributes={'positive': 'down'})
coords_and_dims.append((coord, dim))
# Depth - bound.
if (len(lbcode) != 5) and \
(lbvc == 2) and \
np.all(brsvd1 != brlev):
coord = _dim_or_aux(blev, standard_name='depth', units='m',
bounds=np.vstack((brsvd1, brlev)).T,
attributes={'positive': 'down'})
coords_and_dims.append((coord, dim))
# Depth - unbound and bound (mixed).
if (len(lbcode) != 5) and \
(lbvc == 2) and \
(np.any(brsvd1 == brlev) and np.any(brsvd1 != brlev)):
lower = np.where(brsvd1 == brlev, blev, brsvd1)
upper = np.where(brsvd1 == brlev, blev, brlev)
coord = _dim_or_aux(blev, standard_name='depth', units='m',
bounds=np.vstack((lower, upper)).T,
attributes={'positive': 'down'})
coords_and_dims.append((coord, dim))
# Soil level/depth.
if len(lbcode) != 5 and lbvc == 6:
if np.all(brsvd1 == 0) and np.all(brlev == 0):
# UM populates lblev, brsvd1 and brlev metadata INCORRECTLY,
# so continue to treat as a soil level.
coord = _dim_or_aux(model_level_number,
long_name='soil_model_level_number',
attributes={'positive': 'down'})
coords_and_dims.append((coord, dim))
elif np.any(brsvd1 != brlev):
# UM populates metadata CORRECTLY,
# so treat it as the expected (bounded) soil depth.
coord = _dim_or_aux(blev, standard_name='depth', units='m',
bounds=np.vstack((brsvd1, brlev)).T,
attributes={'positive': 'down'})
coords_and_dims.append((coord, dim))
# Pressure.
if (lbvc == 8) and \
(len(lbcode) != 5 or (len(lbcode) == 5 and
1 not in [lbcode.ix, lbcode.iy])):
coord = _dim_or_aux(blev, long_name='pressure', units='hPa')
coords_and_dims.append((coord, dim))
# Air potential temperature.
if (len(lbcode) != 5) and \
(lbvc == 19):
coord = _dim_or_aux(blev, standard_name='air_potential_temperature', units='K',
attributes={'positive': 'up'})
coords_and_dims.append((coord, dim))
# Hybrid pressure levels.
if lbvc == 9:
model_level_number = _dim_or_aux(model_level_number,
standard_name='model_level_number',
attributes={'positive': 'up'})
level_pressure = _dim_or_aux(bhlev,
long_name='level_pressure',
units='Pa',
bounds=np.vstack((bhrlev, brsvd2)).T)
sigma = AuxCoord(blev,
long_name='sigma',
bounds=np.vstack((brlev, brsvd1)).T)
coords_and_dims.extend([(model_level_number, dim),
(level_pressure, dim),
(sigma, dim)])
factories.append(Factory(HybridPressureFactory,
[{'long_name': 'level_pressure'},
{'long_name': 'sigma'},
Reference('surface_air_pressure')]))
# Hybrid height levels.
if lbvc == 65:
model_level_number = _dim_or_aux(model_level_number,
standard_name='model_level_number',
attributes={'positive': 'up'})
level_height = _dim_or_aux(blev,
long_name='level_height',
units='m',
bounds=np.vstack((brlev, brsvd1)).T,
attributes={'positive': 'up'})
sigma = AuxCoord(bhlev,
long_name='sigma',
bounds=np.vstack((bhrlev, brsvd2)).T)
coords_and_dims.extend([(model_level_number, dim),
(level_height, dim),
(sigma, dim)])
factories.append(Factory(HybridHeightFactory,
[{'long_name': 'level_height'},
{'long_name': 'sigma'},
Reference('orography')]))
return coords_and_dims, factories
def _reshape_vector_args(values_and_dims):
"""
    Reshape a group of (array, dimensions-mapping) pairs onto common dimensions.
The resulting arrays are all mapped over the same dimensions; as many as
the maximum dimension number found in the inputs. Those dimensions not
mapped by a given input appear as length-1 dimensions in the output array.
The resulting arrays are thus all mutually compatible in arithmetic -- i.e.
can combine without broadcasting errors (provided that all inputs mapping
to a dimension define the same associated length).
Args:
* values_and_dims (iterable of (array-like, iterable of int)):
Input arrays with associated mapping dimension numbers.
The length of each 'dims' must match the ndims of the 'value'.
Returns:
* reshaped_arrays (iterable of arrays).
The inputs, transposed and reshaped onto common target dimensions.
"""
# Find maximum dimension index, which sets ndim of results.
max_dims = [max(dims) if dims else -1 for _, dims in values_and_dims]
max_dim = max(max_dims) if max_dims else -1
result = []
for value, dims in values_and_dims:
value = np.asarray(value)
if len(dims) != value.ndim:
raise ValueError('Lengths of dimension-mappings must match '
'input array dimensions.')
# Save dim sizes in original order.
original_shape = value.shape
if dims:
# Transpose values to put its dims in the target order.
dims_order = sorted(range(len(dims)),
key=lambda i_dim: dims[i_dim])
value = value.transpose(dims_order)
if max_dim != -1:
# Reshape to add any extra *1 dims.
shape = [1] * (max_dim + 1)
for i_dim, dim in enumerate(dims):
shape[dim] = original_shape[i_dim]
value = value.reshape(shape)
result.append(value)
return result
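# Hedged worked example (not part of the original source), using the numpy
# import above: inputs mapped to cube dimensions 0, 1 and "scalar" come back
# with mutually broadcastable shapes.
#
#   >>> a, b, c = _reshape_vector_args([(np.arange(3), (0,)),
#   ...                                 (np.arange(2), (1,)),
#   ...                                 (5.0, ())])
#   >>> a.shape, b.shape, c.shape
#   ((3, 1), (1, 2), (1, 1))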
def _collapse_degenerate_points_and_bounds(points, bounds=None, rtol=1.0e-7):
"""
Collapse points (and optionally bounds) in any dimensions over which all
values are the same.
All dimensions are tested, and if degenerate are reduced to length 1.
Value equivalence is controlled by a tolerance, to avoid problems with
numbers from netcdftime.date2num, which has limited precision because of
the way it calculates with floats of days.
Args:
* points (:class:`numpy.ndarray`)):
Array of points values.
Kwargs:
* bounds (:class:`numpy.ndarray`)
Array of bounds values. This array should have an additional vertex
dimension (typically of length 2) when compared to the points array
i.e. bounds.shape = points.shape + (nvertex,)
Returns:
A (points, bounds) tuple.
"""
array = points
if bounds is not None:
array = np.vstack((points, bounds.T)).T
for i_dim in range(points.ndim):
if array.shape[i_dim] > 1:
slice_inds = [slice(None)] * points.ndim
slice_inds[i_dim] = slice(0, 1)
slice_0 = array[slice_inds]
if np.allclose(array, slice_0, rtol):
array = slice_0
points = array
if bounds is not None:
points = array[..., 0]
bounds = array[..., 1:]
return points, bounds
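# Hedged worked example (not part of the original source): a dimension whose
# values repeat exactly is collapsed to length one, other dimensions are kept.
#
#   >>> pts = np.array([[1., 2., 3.],
#   ...                 [1., 2., 3.]])
#   >>> _collapse_degenerate_points_and_bounds(pts)[0].shape
#   (1, 3)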
def _reduce_points_and_bounds(points, lower_and_upper_bounds=None):
"""
Reduce the dimensionality of arrays of coordinate points (and optionally
bounds).
Dimensions over which all values are the same are reduced to size 1, using
:func:`_collapse_degenerate_points_and_bounds`.
All size-1 dimensions are then removed.
If the bounds arrays are also passed in, then all three arrays must have
the same shape or be capable of being broadcast to match.
Args:
* points (array-like):
Coordinate point values.
Kwargs:
* lower_and_upper_bounds (pair of array-like, or None):
Corresponding bounds values (lower, upper), if any.
Returns:
dims (iterable of ints), points(array), bounds(array)
* 'dims' is the mapping from the result array dimensions to the
        original dimensions. However, when the reduced array is scalar, 'dims'
        will be None (rather than an empty tuple).
* 'points' and 'bounds' are the reduced arrays.
If no bounds were passed, None is returned.
"""
orig_points_dtype = np.asarray(points).dtype
bounds = None
if lower_and_upper_bounds is not None:
lower_bounds, upper_bounds = np.broadcast_arrays(
*lower_and_upper_bounds)
orig_bounds_dtype = lower_bounds.dtype
bounds = np.vstack((lower_bounds, upper_bounds)).T
# Attempt to broadcast points to match bounds to handle scalars.
if bounds is not None and points.shape != bounds.shape[:-1]:
points, _ = np.broadcast_arrays(points, bounds[..., 0])
points, bounds = _collapse_degenerate_points_and_bounds(points, bounds)
used_dims = tuple(i_dim for i_dim in range(points.ndim)
if points.shape[i_dim] > 1)
reshape_inds = tuple([points.shape[dim] for dim in used_dims])
points = points.reshape(reshape_inds)
points = points.astype(orig_points_dtype)
if bounds is not None:
bounds = bounds.reshape(reshape_inds + (2,))
bounds = bounds.astype(orig_bounds_dtype)
if not used_dims:
used_dims = None
return used_dims, points, bounds
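# Hedged worked example (not part of the original source): the degenerate
# leading dimension is dropped entirely and the surviving dimension index is
# reported back.
#
#   >>> pts = np.array([[1., 2., 3.],
#   ...                 [1., 2., 3.]])
#   >>> dims, reduced, bounds = _reduce_points_and_bounds(pts)
#   >>> dims, reduced.tolist(), bounds
#   ((1,), [1.0, 2.0, 3.0], None)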
def _new_coord_and_dims(is_vector_operation,
name, units,
points, lower_and_upper_bounds=None):
"""
Make a new (coordinate, cube_dims) pair with the given points, name, units
and optional bounds.
In 'vector' style operation, the data arrays must have same number of
dimensions as the target cube, and additional operations are performed :
* dimensions with all points and bounds values the same are removed.
* the result coordinate may be an AuxCoord if a DimCoord cannot be made
(e.g. if values are non-monotonic).
Args:
* is_vector_operation (bool):
If True, perform 'vector' style operation.
* points (array-like):
Coordinate point values.
* name (string):
Standard name of coordinate.
* units (string or cf_unit.Unit):
Units of coordinate.
Kwargs:
* lower_and_upper_bounds (pair of array-like, or None):
Corresponding bounds values (lower, upper), if any.
Returns:
a new (coordinate, dims) pair.
"""
bounds = lower_and_upper_bounds
if is_vector_operation:
dims, points, bounds = _reduce_points_and_bounds(points, bounds)
else:
dims = None
coord = _dim_or_aux(points, bounds=bounds, standard_name=name, units=units)
return (coord, dims)
_HOURS_UNIT = cf_units.Unit('hours')
def _convert_time_coords(lbcode, lbtim, epoch_hours_unit,
t1, t2, lbft,
t1_dims=(), t2_dims=(), lbft_dims=()):
"""
Make time coordinates from the time metadata.
Args:
* lbcode(:class:`iris.fileformats.pp.SplittableInt`):
Scalar field value.
* lbtim (:class:`iris.fileformats.pp.SplittableInt`):
Scalar field value.
* epoch_hours_unit (:class:`cf_units.Unit`):
Epoch time reference unit.
* t1 (array-like or scalar):
Scalar field value or an array of values.
* t2 (array-like or scalar):
Scalar field value or an array of values.
* lbft (array-like or scalar):
Scalar field value or an array of values.
Kwargs:
* t1_dims, t2_dims, lbft_dims (tuples of int):
        Cube dimension mappings for the array metadata. Each defaults to ().
        The length of each dims tuple should equal the dimensionality
of the corresponding array of values.
Returns:
        A list of (coordinate, dims) tuples. The coordinates are instances of
        :class:`iris.coords.DimCoord` if possible, otherwise they are instances
of :class:`iris.coords.AuxCoord`. When the coordinate is of length one,
the `dims` value is None rather than an empty tuple.
"""
def date2hours(t):
epoch_hours = epoch_hours_unit.date2num(t)
if t.minute == 0 and t.second == 0:
epoch_hours = round(epoch_hours)
return epoch_hours
def date2year(t_in):
return t_in.year
# Check whether inputs are all scalar, for faster handling of scalar cases.
do_vector = len(t1_dims) + len(t2_dims) + len(lbft_dims) > 0
if do_vector:
# Reform the input values so they have all the same number of
# dimensions, transposing where necessary (based on the dimension
# mappings) so that the dimensions are common across each array.
# Note: this does not _guarantee_ that the arrays are broadcastable,
# but subsequent arithmetic makes this assumption.
t1, t2, lbft = _reshape_vector_args([(t1, t1_dims), (t2, t2_dims),
(lbft, lbft_dims)])
date2hours = np.vectorize(date2hours)
date2year = np.vectorize(date2year)
t1_epoch_hours = date2hours(t1)
t2_epoch_hours = date2hours(t2)
hours_from_t1_to_t2 = t2_epoch_hours - t1_epoch_hours
hours_from_t2_to_t1 = t1_epoch_hours - t2_epoch_hours
coords_and_dims = []
if ((lbtim.ia == 0) and
(lbtim.ib == 0) and
(lbtim.ic in [1, 2, 3, 4]) and
(len(lbcode) != 5 or (len(lbcode) == 5 and
lbcode.ix not in [20, 21, 22, 23] and
lbcode.iy not in [20, 21, 22, 23]))):
coords_and_dims.append(_new_coord_and_dims(
do_vector, 'time', epoch_hours_unit, t1_epoch_hours))
if ((lbtim.ia == 0) and
(lbtim.ib == 1) and
(lbtim.ic in [1, 2, 3, 4]) and
(len(lbcode) != 5 or (len(lbcode) == 5
and lbcode.ix not in [20, 21, 22, 23]
and lbcode.iy not in [20, 21, 22, 23]))):
coords_and_dims.append(_new_coord_and_dims(
do_vector, 'forecast_period', _HOURS_UNIT, hours_from_t2_to_t1))
coords_and_dims.append(_new_coord_and_dims(
do_vector, 'time', epoch_hours_unit, t1_epoch_hours))
coords_and_dims.append(_new_coord_and_dims(
do_vector, 'forecast_reference_time', epoch_hours_unit,
t2_epoch_hours))
if ((lbtim.ib == 2) and
(lbtim.ic in [1, 2, 4]) and
(np.any(date2year(t1) != 0) and np.any(date2year(t2) != 0)) and
# Note: don't add time coordinates when years are zero and
# lbtim.ib == 2. These are handled elsewhere.
((len(lbcode) != 5) or (len(lbcode) == 5 and
lbcode.ix not in [20, 21, 22, 23]
and lbcode.iy not in [20, 21, 22, 23]))):
coords_and_dims.append(_new_coord_and_dims(
do_vector, 'forecast_period', _HOURS_UNIT,
lbft - 0.5 * hours_from_t1_to_t2,
[lbft - hours_from_t1_to_t2, lbft]))
coords_and_dims.append(_new_coord_and_dims(
do_vector, 'time', epoch_hours_unit,
0.5 * (t1_epoch_hours + t2_epoch_hours),
[t1_epoch_hours, t2_epoch_hours]))
coords_and_dims.append(_new_coord_and_dims(
do_vector, 'forecast_reference_time', epoch_hours_unit,
t2_epoch_hours - lbft))
if ((lbtim.ib == 3) and
(lbtim.ic in [1, 2, 4]) and
((len(lbcode) != 5) or (len(lbcode) == 5 and
lbcode.ix not in [20, 21, 22, 23] and
lbcode.iy not in [20, 21, 22, 23]))):
coords_and_dims.append(_new_coord_and_dims(
do_vector, 'forecast_period', _HOURS_UNIT,
lbft, [lbft - hours_from_t1_to_t2, lbft]))
coords_and_dims.append(_new_coord_and_dims(
do_vector, 'time', epoch_hours_unit,
t2_epoch_hours, [t1_epoch_hours, t2_epoch_hours]))
coords_and_dims.append(_new_coord_and_dims(
do_vector, 'forecast_reference_time', epoch_hours_unit,
t2_epoch_hours - lbft))
if \
(len(lbcode) == 5) and \
(lbcode[-1] == 3) and \
(lbtim.ib == 2) and (lbtim.ic == 2):
coords_and_dims.append(_new_coord_and_dims(
do_vector, 'forecast_reference_time', epoch_hours_unit,
t2_epoch_hours - lbft))
return coords_and_dims
###############################################################################
def _model_level_number(lblev):
"""
Return model level number for an LBLEV value.
Args:
* lblev (int):
PP field LBLEV value.
Returns:
Model level number (integer).
"""
# See Word no. 33 (LBLEV) in section 4 of UM Model Docs (F3).
SURFACE_AND_ZEROTH_RHO_LEVEL_LBLEV = 9999
if lblev == SURFACE_AND_ZEROTH_RHO_LEVEL_LBLEV:
model_level_number = 0
else:
model_level_number = lblev
return model_level_number
def _convert_scalar_realization_coords(lbrsvd4):
"""
Encode scalar 'realization' (aka ensemble) numbers as CM data.
Returns a list of coords_and_dims.
"""
# Realization (aka ensemble) (--> scalar coordinates)
coords_and_dims = []
if lbrsvd4 != 0:
coords_and_dims.append(
(DimCoord(lbrsvd4, standard_name='realization'), None))
return coords_and_dims
def _convert_scalar_pseudo_level_coords(lbuser5):
"""
Encode scalar pseudo-level values as CM data.
Returns a list of coords_and_dims.
"""
coords_and_dims = []
if lbuser5 != 0:
coords_and_dims.append(
(DimCoord(lbuser5, long_name='pseudo_level', units='1'), None))
return coords_and_dims
def convert(f):
"""
Converts a PP field into the corresponding items of Cube metadata.
Args:
* f:
A :class:`iris.fileformats.pp.PPField` object.
Returns:
A :class:`iris.fileformats.rules.ConversionMetadata` object.
"""
factories = []
aux_coords_and_dims = []
# "Normal" (non-cross-sectional) Time values (--> scalar coordinates)
time_coords_and_dims = _convert_time_coords(
lbcode=f.lbcode, lbtim=f.lbtim,
epoch_hours_unit=f.time_unit('hours'),
t1=f.t1, t2=f.t2, lbft=f.lbft)
aux_coords_and_dims.extend(time_coords_and_dims)
# "Normal" (non-cross-sectional) Vertical levels
# (--> scalar coordinates and factories)
vertical_coords_and_dims, vertical_factories = \
_convert_vertical_coords(
lbcode=f.lbcode,
lbvc=f.lbvc,
blev=f.blev,
lblev=f.lblev,
stash=f.stash,
bhlev=f.bhlev,
bhrlev=f.bhrlev,
brsvd1=f.brsvd[0],
brsvd2=f.brsvd[1],
brlev=f.brlev)
aux_coords_and_dims.extend(vertical_coords_and_dims)
factories.extend(vertical_factories)
# Realization (aka ensemble) (--> scalar coordinates)
aux_coords_and_dims.extend(_convert_scalar_realization_coords(
lbrsvd4=f.lbrsvd[3]))
# Pseudo-level coordinate (--> scalar coordinates)
aux_coords_and_dims.extend(_convert_scalar_pseudo_level_coords(
lbuser5=f.lbuser[4]))
# All the other rules.
references, standard_name, long_name, units, attributes, cell_methods, \
dim_coords_and_dims, other_aux_coords_and_dims = _all_other_rules(f)
aux_coords_and_dims.extend(other_aux_coords_and_dims)
return ConversionMetadata(factories, references, standard_name, long_name,
units, attributes, cell_methods,
dim_coords_and_dims, aux_coords_and_dims)
def _all_other_rules(f):
"""
This deals with all the other rules that have not been factored into any of
the other convert_scalar_coordinate functions above.
"""
references = []
standard_name = None
long_name = None
units = None
attributes = {}
cell_methods = []
dim_coords_and_dims = []
aux_coords_and_dims = []
# Season coordinates (--> scalar coordinates)
if (f.lbtim.ib == 3 and f.lbtim.ic in [1, 2, 4] and
(len(f.lbcode) != 5 or
(len(f.lbcode) == 5 and
(f.lbcode.ix not in [20, 21, 22, 23] and
f.lbcode.iy not in [20, 21, 22, 23]))) and
f.lbmon == 12 and f.lbdat == 1 and f.lbhr == 0 and f.lbmin == 0 and
f.lbmond == 3 and f.lbdatd == 1 and f.lbhrd == 0 and
f.lbmind == 0):
aux_coords_and_dims.append(
(AuxCoord('djf', long_name='season', units='no_unit'),
None))
if (f.lbtim.ib == 3 and f.lbtim.ic in [1, 2, 4] and
((len(f.lbcode) != 5) or
(len(f.lbcode) == 5 and
f.lbcode.ix not in [20, 21, 22, 23]
and f.lbcode.iy not in [20, 21, 22, 23])) and
f.lbmon == 3 and f.lbdat == 1 and f.lbhr == 0 and f.lbmin == 0 and
f.lbmond == 6 and f.lbdatd == 1 and f.lbhrd == 0 and
f.lbmind == 0):
aux_coords_and_dims.append(
(AuxCoord('mam', long_name='season', units='no_unit'),
None))
if (f.lbtim.ib == 3 and f.lbtim.ic in [1, 2, 4] and
((len(f.lbcode) != 5) or
(len(f.lbcode) == 5 and
f.lbcode.ix not in [20, 21, 22, 23] and
f.lbcode.iy not in [20, 21, 22, 23])) and
f.lbmon == 6 and f.lbdat == 1 and f.lbhr == 0 and f.lbmin == 0 and
f.lbmond == 9 and f.lbdatd == 1 and f.lbhrd == 0 and
f.lbmind == 0):
aux_coords_and_dims.append(
(AuxCoord('jja', long_name='season', units='no_unit'),
None))
if (f.lbtim.ib == 3 and f.lbtim.ic in [1, 2, 4] and
((len(f.lbcode) != 5) or
(len(f.lbcode) == 5 and
f.lbcode.ix not in [20, 21, 22, 23] and
f.lbcode.iy not in [20, 21, 22, 23])) and
f.lbmon == 9 and f.lbdat == 1 and f.lbhr == 0 and f.lbmin == 0 and
f.lbmond == 12 and f.lbdatd == 1 and f.lbhrd == 0 and
f.lbmind == 0):
aux_coords_and_dims.append(
(AuxCoord('son', long_name='season', units='no_unit'),
None))
# Special case where year is zero and months match.
# Month coordinates (--> scalar coordinates)
if (f.lbtim.ib == 2 and f.lbtim.ic in [1, 2, 4] and
((len(f.lbcode) != 5) or
(len(f.lbcode) == 5 and
f.lbcode.ix not in [20, 21, 22, 23] and
f.lbcode.iy not in [20, 21, 22, 23])) and
f.lbyr == 0 and f.lbyrd == 0 and
f.lbmon == f.lbmond):
aux_coords_and_dims.append(
(AuxCoord(f.lbmon, long_name='month_number'),
None))
aux_coords_and_dims.append(
(AuxCoord(calendar.month_abbr[f.lbmon], long_name='month',
units='no_unit'),
None))
aux_coords_and_dims.append(
(DimCoord(points=f.lbft, standard_name='forecast_period', units='hours'),
None))
# "Normal" (i.e. not cross-sectional) lats+lons (--> vector coordinates)
if (f.bdx != 0.0 and f.bdx != f.bmdi and len(f.lbcode) != 5 and
f.lbcode[0] == 1):
dim_coords_and_dims.append(
(DimCoord.from_regular(f.bzx, f.bdx, f.lbnpt,
standard_name=f._x_coord_name(),
units='degrees',
circular=(f.lbhem in [0, 4]),
coord_system=f.coord_system()),
1))
if (f.bdx != 0.0 and f.bdx != f.bmdi and len(f.lbcode) != 5 and
f.lbcode[0] == 2):
dim_coords_and_dims.append(
(DimCoord.from_regular(f.bzx, f.bdx, f.lbnpt,
standard_name=f._x_coord_name(),
units='degrees',
circular=(f.lbhem in [0, 4]),
coord_system=f.coord_system(),
with_bounds=True),
1))
if (f.bdy != 0.0 and f.bdy != f.bmdi and len(f.lbcode) != 5 and
f.lbcode[0] == 1):
dim_coords_and_dims.append(
(DimCoord.from_regular(f.bzy, f.bdy, f.lbrow,
standard_name=f._y_coord_name(),
units='degrees',
coord_system=f.coord_system()),
0))
if (f.bdy != 0.0 and f.bdy != f.bmdi and len(f.lbcode) != 5 and
f.lbcode[0] == 2):
dim_coords_and_dims.append(
(DimCoord.from_regular(f.bzy, f.bdy, f.lbrow,
standard_name=f._y_coord_name(),
units='degrees',
coord_system=f.coord_system(),
with_bounds=True),
0))
if ((f.bdy == 0.0 or f.bdy == f.bmdi) and
(len(f.lbcode) != 5 or
(len(f.lbcode) == 5 and f.lbcode.iy == 10))):
dim_coords_and_dims.append(
(DimCoord(f.y, standard_name=f._y_coord_name(), units='degrees',
bounds=f.y_bounds, coord_system=f.coord_system()),
0))
if ((f.bdx == 0.0 or f.bdx == f.bmdi) and
(len(f.lbcode) != 5 or
(len(f.lbcode) == 5 and f.lbcode.ix == 11))):
dim_coords_and_dims.append(
(DimCoord(f.x, standard_name=f._x_coord_name(), units='degrees',
bounds=f.x_bounds, circular=(f.lbhem in [0, 4]),
coord_system=f.coord_system()),
1))
# Cross-sectional vertical level types (--> vector coordinates)
if (len(f.lbcode) == 5 and f.lbcode.iy == 2 and
(f.bdy == 0 or f.bdy == f.bmdi)):
dim_coords_and_dims.append(
(DimCoord(f.y, standard_name='height', units='km',
bounds=f.y_bounds, attributes={'positive': 'up'}),
0))
if (len(f.lbcode) == 5 and f.lbcode[-1] == 1 and f.lbcode.iy == 4):
dim_coords_and_dims.append(
(DimCoord(f.y, standard_name='depth', units='m',
bounds=f.y_bounds, attributes={'positive': 'down'}),
0))
if (len(f.lbcode) == 5 and f.lbcode.ix == 10 and f.bdx != 0 and
f.bdx != f.bmdi):
dim_coords_and_dims.append(
(DimCoord.from_regular(f.bzx, f.bdx, f.lbnpt,
standard_name=f._y_coord_name(),
units='degrees',
coord_system=f.coord_system()),
1))
if (len(f.lbcode) == 5 and
f.lbcode.iy == 1 and
(f.bdy == 0 or f.bdy == f.bmdi)):
dim_coords_and_dims.append(
(DimCoord(f.y, long_name='pressure', units='hPa',
bounds=f.y_bounds),
0))
if (len(f.lbcode) == 5 and f.lbcode.ix == 1 and
(f.bdx == 0 or f.bdx == f.bmdi)):
dim_coords_and_dims.append((DimCoord(f.x, long_name='pressure',
units='hPa', bounds=f.x_bounds),
1))
# Cross-sectional time values (--> vector coordinates)
if (len(f.lbcode) == 5 and f.lbcode[-1] == 1 and f.lbcode.iy == 23):
dim_coords_and_dims.append(
(DimCoord(
f.y,
standard_name='time',
units=cf_units.Unit('days since 0000-01-01 00:00:00',
calendar=cf_units.CALENDAR_360_DAY),
bounds=f.y_bounds),
0))
if (len(f.lbcode) == 5 and f.lbcode[-1] == 1 and f.lbcode.ix == 23):
dim_coords_and_dims.append(
(DimCoord(
f.x,
standard_name='time',
units=cf_units.Unit('days since 0000-01-01 00:00:00',
calendar=cf_units.CALENDAR_360_DAY),
bounds=f.x_bounds),
1))
if (len(f.lbcode) == 5 and f.lbcode[-1] == 3 and f.lbcode.iy == 23 and
f.lbtim.ib == 2 and f.lbtim.ic == 2):
epoch_days_unit = cf_units.Unit('days since 0000-01-01 00:00:00',
calendar=cf_units.CALENDAR_360_DAY)
t1_epoch_days = epoch_days_unit.date2num(f.t1)
t2_epoch_days = epoch_days_unit.date2num(f.t2)
# The end time is exclusive, not inclusive.
dim_coords_and_dims.append(
(DimCoord(
np.linspace(t1_epoch_days, t2_epoch_days, f.lbrow,
endpoint=False),
standard_name='time',
units=epoch_days_unit,
bounds=f.y_bounds),
0))
# Site number (--> scalar coordinate)
if (len(f.lbcode) == 5 and f.lbcode[-1] == 1 and f.lbcode.ix == 13 and
f.bdx != 0):
dim_coords_and_dims.append(
(DimCoord.from_regular(f.bzx, f.bdx, f.lbnpt,
long_name='site_number', units='1'),
1))
# Site number cross-sections (???)
if (len(f.lbcode) == 5 and
13 in [f.lbcode.ix, f.lbcode.iy] and
11 not in [f.lbcode.ix, f.lbcode.iy] and
hasattr(f, 'lower_x_domain') and
hasattr(f, 'upper_x_domain') and
all(f.lower_x_domain != -1.e+30) and
all(f.upper_x_domain != -1.e+30)):
aux_coords_and_dims.append(
(AuxCoord((f.lower_x_domain + f.upper_x_domain) / 2.0,
standard_name=f._x_coord_name(), units='degrees',
bounds=np.array([f.lower_x_domain, f.upper_x_domain]).T,
coord_system=f.coord_system()),
1 if f.lbcode.ix == 13 else 0))
if (len(f.lbcode) == 5 and
13 in [f.lbcode.ix, f.lbcode.iy] and
10 not in [f.lbcode.ix, f.lbcode.iy] and
hasattr(f, 'lower_y_domain') and
hasattr(f, 'upper_y_domain') and
all(f.lower_y_domain != -1.e+30) and
all(f.upper_y_domain != -1.e+30)):
aux_coords_and_dims.append(
(AuxCoord((f.lower_y_domain + f.upper_y_domain) / 2.0,
standard_name=f._y_coord_name(), units='degrees',
bounds=np.array([f.lower_y_domain, f.upper_y_domain]).T,
coord_system=f.coord_system()),
1 if f.lbcode.ix == 13 else 0))
# LBPROC codings (--> cell method + attributes)
unhandled_lbproc = True
zone_method = None
time_method = None
if f.lbproc == 0:
unhandled_lbproc = False
elif f.lbproc == 64:
zone_method = 'mean'
elif f.lbproc == 128:
time_method = 'mean'
elif f.lbproc == 4096:
time_method = 'minimum'
elif f.lbproc == 8192:
time_method = 'maximum'
elif f.lbproc == 192:
time_method = 'mean'
zone_method = 'mean'
if time_method is not None:
if f.lbtim.ia != 0:
intervals = '{} hour'.format(f.lbtim.ia)
else:
intervals = None
if f.lbtim.ib == 2:
# Aggregation over a period of time.
cell_methods.append(CellMethod(time_method,
coords='time',
intervals=intervals))
unhandled_lbproc = False
elif f.lbtim.ib == 3 and f.lbproc == 128:
# Aggregation over a period of time within a year, over a number
# of years.
# Only mean (lbproc of 128) is handled as the min/max
# interpretation is ambiguous e.g. decadal mean of daily max,
# decadal max of daily mean, decadal mean of max daily mean etc.
cell_methods.append(
CellMethod('{} within years'.format(time_method),
coords='time', intervals=intervals))
cell_methods.append(
CellMethod('{} over years'.format(time_method),
coords='time'))
unhandled_lbproc = False
else:
# Generic cell method to indicate a time aggregation.
cell_methods.append(CellMethod(time_method,
coords='time'))
unhandled_lbproc = False
if zone_method is not None:
if f.lbcode == 1:
cell_methods.append(CellMethod(zone_method, coords='longitude'))
unhandled_lbproc = False
elif f.lbcode == 101:
cell_methods.append(CellMethod(zone_method,
coords='grid_longitude'))
unhandled_lbproc = False
else:
unhandled_lbproc = True
if unhandled_lbproc:
attributes["ukmo__process_flags"] = tuple(sorted(
[name
for value, name in six.iteritems(iris.fileformats.pp.lbproc_map)
if isinstance(value, int) and f.lbproc & value]))
if (f.lbsrce % 10000) == 1111:
attributes['source'] = 'Data from Met Office Unified Model'
# Also define MO-netCDF compliant UM version.
um_major = (f.lbsrce // 10000) // 100
if um_major != 0:
um_minor = (f.lbsrce // 10000) % 100
attributes['um_version'] = '{:d}.{:d}'.format(um_major, um_minor)
if (f.lbuser[6] != 0 or
(f.lbuser[3] // 1000) != 0 or
(f.lbuser[3] % 1000) != 0):
attributes['STASH'] = f.stash
if str(f.stash) in STASH_TO_CF:
standard_name = STASH_TO_CF[str(f.stash)].standard_name
units = STASH_TO_CF[str(f.stash)].units
long_name = STASH_TO_CF[str(f.stash)].long_name
if (not f.stash.is_valid and f.lbfc in LBFC_TO_CF):
standard_name = LBFC_TO_CF[f.lbfc].standard_name
units = LBFC_TO_CF[f.lbfc].units
long_name = LBFC_TO_CF[f.lbfc].long_name
# Orography reference field (--> reference target)
if f.lbuser[3] == 33:
references.append(ReferenceTarget('orography', None))
# Surface pressure reference field (--> reference target)
if f.lbuser[3] == 409 or f.lbuser[3] == 1:
references.append(ReferenceTarget('surface_air_pressure', None))
return (references, standard_name, long_name, units, attributes,
cell_methods, dim_coords_and_dims, aux_coords_and_dims)
|
SusanJL/iris
|
lib/iris/fileformats/pp_rules.py
|
Python
|
gpl-3.0
| 40,034
|
[
"NetCDF"
] |
8411e5bd196222358e2038f48ad9a726f05ade0d9a44099774f5267bbbec5274
|
from builtins import zip
import math
import sympy
from itertools import chain
from .base import BaseVisitor, BaseDualVisitor, DualWithContextMixin
from nineml.exceptions import (NineMLDualVisitException,
NineMLDualVisitValueException,
NineMLDualVisitTypeException,
NineMLDualVisitKeysMismatchException,
NineMLDualVisitNoneChildException,
NineMLNotBoundException,
NineMLDualVisitAnnotationsMismatchException,
NineMLNameError)
NEARLY_EQUAL_PLACES_DEFAULT = 15
class EqualityChecker(BaseDualVisitor):
def __init__(self, annotations_ns=[], check_urls=True,
nearly_equal_places=NEARLY_EQUAL_PLACES_DEFAULT, **kwargs): # @UnusedVariable @IgnorePep8
super(EqualityChecker, self).__init__(**kwargs)
self.annotations_ns = annotations_ns
self.check_urls = check_urls
self.nearly_equal_places = nearly_equal_places
def check(self, obj1, obj2, **kwargs):
try:
self.visit(obj1, obj2, **kwargs)
except NineMLDualVisitException:
return False
return True
def action(self, obj1, obj2, nineml_cls, **kwargs):
if self.annotations_ns:
try:
annotations_keys = set(chain(obj1.annotations.branch_keys,
obj2.annotations.branch_keys))
skip_annotations = False
except AttributeError:
skip_annotations = True
if not skip_annotations:
for key in annotations_keys:
if key[1] in self.annotations_ns:
try:
annot1 = obj1.annotations.branch(key)
except NineMLNameError:
self._raise_annotations_exception(
nineml_cls, obj1, obj2, key)
try:
annot2 = obj2.annotations.branch(key)
except NineMLNameError:
self._raise_annotations_exception(
nineml_cls, obj1, obj2, key)
self.visit(annot1, annot2, **kwargs)
return super(EqualityChecker, self).action(obj1, obj2, nineml_cls,
**kwargs)
def default_action(self, obj1, obj2, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
for attr_name in nineml_cls.nineml_attr:
if attr_name == 'rhs': # need to use Sympy equality checking
self._check_rhs(obj1, obj2, nineml_cls)
else:
self._check_attr(obj1, obj2, attr_name, nineml_cls)
def action_reference(self, ref1, ref2, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
if self.check_urls:
self._check_attr(ref1, ref2, 'url', nineml_cls)
def action_definition(self, def1, def2, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
if self.check_urls:
self._check_attr(def1, def2, 'url', nineml_cls)
def action_singlevalue(self, val1, val2, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
if self._not_nearly_equal(val1.value, val2.value):
self._raise_value_exception('value', val1, val2, nineml_cls)
def action_arrayvalue(self, val1, val2, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
if len(val1.values) != len(val2.values):
self._raise_value_exception('values', val1, val2, nineml_cls)
if any(self._not_nearly_equal(s, o)
for s, o in zip(val1.values, val2.values)):
self._raise_value_exception('values', val1, val2, nineml_cls)
def action_unit(self, unit1, unit2, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
# Ignore name
self._check_attr(unit1, unit2, 'power', nineml_cls)
self._check_attr(unit1, unit2, 'offset', nineml_cls)
def action_dimension(self, dim1, dim2, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
# Ignore name
for sym in nineml_cls.dimension_symbols:
self._check_attr(dim1, dim2, sym, nineml_cls)
def action__annotationsbranch(self, branch1, branch2, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
for attr in nineml_cls.nineml_attr:
if attr != 'abs_index':
self._check_attr(branch1, branch2, attr, nineml_cls)
def _check_rhs(self, expr1, expr2, nineml_cls):
try:
expr_eq = (sympy.expand(expr1.rhs - expr2.rhs) == 0)
except TypeError:
expr_eq = sympy.Equivalent(expr1.rhs, expr2.rhs) == sympy.true
if not expr_eq:
self._raise_value_exception('rhs', expr1, expr2, nineml_cls)
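    # Illustrative sketch (not part of the original source) of the sympy check
    # above: two algebraically equal right-hand sides expand to a zero
    # difference, so they are treated as equal.
    #
    #   >>> import sympy
    #   >>> x = sympy.Symbol('x')
    #   >>> sympy.expand((x + 1)**2 - (x**2 + 2*x + 1)) == 0
    #   True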
def _check_attr(self, obj1, obj2, attr_name, nineml_cls):
try:
attr1 = getattr(obj1, attr_name)
except NineMLNotBoundException:
attr1 = None
try:
attr2 = getattr(obj2, attr_name)
except NineMLNotBoundException:
attr2 = None
if attr1 != attr2:
self._raise_value_exception(attr_name, obj1, obj2, nineml_cls)
def _raise_annotations_exception(self, nineml_cls, obj1, obj2, key):
raise NineMLDualVisitException()
def _raise_value_exception(self, attr_name, obj1, obj2, nineml_cls):
raise NineMLDualVisitException()
def _not_nearly_equal(self, float1, float2):
"""
        Determines whether two floating point numbers are NOT nearly equal
        (i.e. whether they differ by more than reasonable rounding error).
"""
mantissa1, exp1 = math.frexp(float1)
mantissa2, exp2 = math.frexp(float2)
return not ((round(mantissa1, self.nearly_equal_places) ==
round(mantissa2, self.nearly_equal_places)) and
exp1 == exp2)
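    # Illustrative sketch (not part of the original source): 0.1 + 0.2 and 0.3
    # differ only in their last bits, so rounding the mantissas makes them
    # compare equal under the scheme above.
    #
    #   >>> import math
    #   >>> m1, e1 = math.frexp(0.1 + 0.2)
    #   >>> m2, e2 = math.frexp(0.3)
    #   >>> (round(m1, 15), e1) == (round(m2, 15), e2)
    #   True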
class Hasher(BaseVisitor):
seed = 0x9e3779b97f4a7c17
def __init__(self, nearly_equal_places=NEARLY_EQUAL_PLACES_DEFAULT,
**kwargs): # @UnusedVariable @IgnorePep8
super(Hasher, self).__init__(**kwargs)
self.nearly_equal_places = nearly_equal_places
def hash(self, nineml_obj):
self._hash = None
self.visit(nineml_obj)
return self._hash
def default_action(self, obj, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
for attr_name in nineml_cls.nineml_attr:
try:
if attr_name == 'rhs': # need to use Sympy equality checking
self._hash_rhs(obj.rhs)
else:
self._hash_attr(getattr(obj, attr_name))
except NineMLNotBoundException:
continue
def _hash_attr(self, attr):
attr_hash = hash(attr)
if self._hash is None:
self._hash = attr_hash
else:
# The rationale behind this equation is explained here
# https://stackoverflow.com/questions/5889238/why-is-xor-the-default-way-to-combine-hashes
self._hash ^= (attr_hash + self.seed + (self._hash << 6) +
(self._hash >> 2))
def action_reference(self, ref, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
self._hash_attr(ref.url)
def action_definition(self, defn, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
self._hash_attr(defn.url)
def action_singlevalue(self, val, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
self._hash_value(val.value)
def action_arrayvalue(self, val, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
for v in val.values:
self._hash_value(v)
def _hash_rhs(self, rhs, **kwargs): # @UnusedVariable
try:
rhs = sympy.expand(rhs)
        except Exception:
pass
self._hash_attr(rhs)
def action_unit(self, unit, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
# Ignore name
self._hash_attr(unit.power)
self._hash_attr(unit.offset)
def action_dimension(self, dim, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
for sym in nineml_cls.dimension_symbols:
self._hash_attr(getattr(dim, sym))
def _hash_value(self, val):
mantissa, exp = math.frexp(val)
rounded_val = math.ldexp(round(mantissa, self.nearly_equal_places),
exp)
self._hash_attr(rounded_val)
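

# A standalone sketch (not part of the original class) of the hash-combining
# scheme used by _hash_attr above: fold each value's hash into a running,
# order-sensitive combination. The helper name is illustrative only.
def _combine_hashes(values, seed=0x9e3779b97f4a7c17):
    """Combine the hashes of `values` the same way Hasher._hash_attr does."""
    combined = None
    for value in values:
        value_hash = hash(value)
        if combined is None:
            combined = value_hash
        else:
            combined ^= value_hash + seed + (combined << 6) + (combined >> 2)
    return combined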
class MismatchFinder(DualWithContextMixin, EqualityChecker):
def __init__(self, **kwargs):
EqualityChecker.__init__(self, **kwargs)
DualWithContextMixin.__init__(self)
def find(self, obj1, obj2, **kwargs): # @UnusedVariable
self.mismatch = []
self.visit(obj1, obj2)
assert not self.contexts1
assert not self.contexts2
return '\n'.join(str(e) for e in self.mismatch)
def visit(self, *args, **kwargs):
try:
super(MismatchFinder, self).visit(*args, **kwargs)
except NineMLDualVisitException as e:
self.mismatch.append(e)
def visit_child(self, child_name, child_type, parent1, parent2,
parent_cls, parent_result, **kwargs):
try:
super(MismatchFinder, self).visit_child(
child_name, child_type, parent1, parent2, parent_cls,
parent_result, **kwargs)
except NineMLDualVisitException as e:
self.mismatch.append(e)
self._pop_contexts()
def visit_children(self, children_type, parent1, parent2,
parent_cls, parent_result, **kwargs):
try:
super(MismatchFinder, self).visit_children(
children_type, parent1, parent2, parent_cls, parent_result,
**kwargs)
except NineMLDualVisitException as e:
self.mismatch.append(e)
self._pop_contexts()
def _check_attr(self, obj1, obj2, attr_name, nineml_cls, **kwargs):
try:
super(MismatchFinder, self)._check_attr(
obj1, obj2, attr_name, nineml_cls, **kwargs)
except NineMLDualVisitException as e:
self.mismatch.append(e)
def _check_rhs(self, obj1, obj2, attr_name, **kwargs):
try:
super(MismatchFinder, self)._check_rhs(
obj1, obj2, attr_name, **kwargs)
except NineMLDualVisitException as e:
self.mismatch.append(e)
def action_singlevalue(self, val1, val2, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
try:
super(MismatchFinder, self).action_singlevalue(
val1, val2, nineml_cls, **kwargs)
except NineMLDualVisitException as e:
self.mismatch.append(e)
def action_arrayvalue(self, val1, val2, nineml_cls, **kwargs): # @UnusedVariable @IgnorePep8
try:
super(MismatchFinder, self).action_arrayvalue(
val1, val2, nineml_cls, **kwargs)
except NineMLDualVisitException as e:
self.mismatch.append(e)
def _raise_annotations_exception(self, nineml_cls, obj1, obj2, key):
raise NineMLDualVisitAnnotationsMismatchException(
nineml_cls, obj1, obj2, key, self.contexts1, self.contexts2)
def _raise_value_exception(self, attr_name, obj1, obj2, nineml_cls):
raise NineMLDualVisitValueException(
attr_name, obj1, obj2, nineml_cls, self.contexts1, self.contexts2)
def _raise_type_exception(self, obj1, obj2):
raise NineMLDualVisitTypeException(
obj1, obj2, self.contexts1, self.contexts2)
def _raise_none_child_exception(self, child_name, child1, child2):
raise NineMLDualVisitNoneChildException(
child_name, child1, child2, self.contexts1, self.contexts2)
def _raise_keys_mismatch_exception(self, children_type, obj1, obj2):
raise NineMLDualVisitKeysMismatchException(
children_type, obj1, obj2, self.contexts1, self.contexts2)
def _pop_contexts(self):
self.contexts1.pop()
self.contexts2.pop()
|
INCF/lib9ML
|
nineml/visitors/equality.py
|
Python
|
bsd-3-clause
| 12,226
|
[
"VisIt"
] |
cd4bf0f75f043707a88522baec5a8674f85e5d81be0a5532586cb994c53264d1
|
""" This is a test of the chain
ProductionClient -> ProductionManagerHandler -> ProductionDB
It supposes that the ProductionDB, TransformationDB and FileCatalogDB are present.
It supposes that the ProductionManager, TransformationManager and DataManagement/FileCatalog services are running.
"""
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
import unittest
import json
from DIRAC.ProductionSystem.Client.ProductionClient import ProductionClient
from DIRAC.ProductionSystem.Client.Production import Production
from DIRAC.ProductionSystem.Client.ProductionStep import ProductionStep
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
class TestClientProductionTestCase(unittest.TestCase):
def setUp(self):
self.prodClient = ProductionClient()
self.transClient = TransformationClient()
self.fc = FileCatalog()
# ## Add metadata fields to the DFC
self.MDFieldDict = {
'particle': 'VARCHAR(128)',
'analysis_prog': 'VARCHAR(128)',
'tel_sim_prog': 'VARCHAR(128)',
'outputType': 'VARCHAR(128)',
'zenith': 'int',
'data_level': 'int'}
for MDField in self.MDFieldDict:
MDFieldType = self.MDFieldDict[MDField]
res = self.fc.addMetadataField(MDField, MDFieldType)
            self.assertTrue(res['OK'])
def tearDown(self):
# Delete meta data fields
for MDField in self.MDFieldDict:
res = self.fc.deleteMetadataField(MDField)
            self.assertTrue(res['OK'])
class ProductionClientChain(TestClientProductionTestCase):
def test_SeqProduction(self):
# Define the production
prod = Production()
# Define the first step of the production
prodStep1 = ProductionStep()
prodStep1.Name = 'Sim_prog'
prodStep1.Type = 'MCSimulation'
outputquery = {
'zenith': {
'in': [
20,
40]},
'particle': 'gamma',
'tel_sim_prog': 'simtel',
'outputType': {
'in': [
'Data',
'Log']}}
prodStep1.Outputquery = outputquery
# Add the step to the production
res = prod.addStep(prodStep1)
self.assertTrue(res['OK'])
# Define the second step of the production
prodStep2 = ProductionStep()
prodStep2.Name = 'Reco_prog'
prodStep2.Type = 'DataProcessing'
prodStep2.ParentStep = prodStep1
inputquery = {'zenith': 20, 'particle': 'gamma', 'tel_sim_prog': 'simtel', 'outputType': 'Data'}
outputquery = {
'zenith': 20,
'particle': 'gamma',
'analysis_prog': 'evndisp',
'data_level': 1,
'outputType': {
'in': [
'Data',
'Log']}}
prodStep2.Inputquery = inputquery
prodStep2.Outputquery = outputquery
# Add the step to the production
res = prod.addStep(prodStep2)
self.assertTrue(res['OK'])
# Define the third step of the production
prodStep3 = ProductionStep()
        prodStep3.Name = 'Analysis_prog'
prodStep3.Type = 'DataProcessing'
prodStep3.ParentStep = prodStep2
inputquery = {'zenith': 20, 'particle': 'gamma', 'analysis_prog': 'evndisp', 'data_level': 1, 'outputType': 'Data'}
outputquery = {
'zenith': 20,
'particle': 'gamma',
'analysis_prog': 'evndisp',
'data_level': 2,
'outputType': {
'in': [
'Data',
'Log']}}
prodStep3.Inputquery = inputquery
prodStep3.Outputquery = outputquery
# Add the step to the production
res = prod.addStep(prodStep3)
self.assertTrue(res['OK'])
# Get the production description
prodDescription = prod.prodDescription
# Create the production
prodName = 'SeqProd'
res = self.prodClient.addProduction(prodName, json.dumps(prodDescription))
self.assertTrue(res['OK'])
        # Start the production, i.e. instantiate the transformation steps
res = self.prodClient.startProduction(prodName)
self.assertTrue(res['OK'])
# Get the transformations of the production
res = self.prodClient.getProduction(prodName)
self.assertTrue(res['OK'])
prodID = res['Value']['ProductionID']
res = self.prodClient.getProductionTransformations(prodID)
self.assertTrue(res['OK'])
self.assertEqual(len(res['Value']), 3)
# Delete the production
res = self.prodClient.deleteProduction(prodName)
self.assertTrue(res['OK'])
def test_MergeProduction(self):
# Define the production
prod = Production()
# Define the first step of the production
prodStep1 = ProductionStep()
prodStep1.Name = 'Sim_prog'
prodStep1.Type = 'MCSimulation'
outputquery = {'zenith': 20, 'particle': 'gamma', 'tel_sim_prog': 'simtel', 'outputType': {'in': ['Data', 'Log']}}
prodStep1.Outputquery = outputquery
# Add the step to the production
res = prod.addStep(prodStep1)
self.assertTrue(res['OK'])
# Define the second step of the production
prodStep2 = ProductionStep()
prodStep2.Name = 'Sim_prog'
prodStep2.Type = 'MCSimulation'
outputquery = {'zenith': 40, 'particle': 'gamma', 'tel_sim_prog': 'simtel', 'outputType': {'in': ['Data', 'Log']}}
prodStep2.Outputquery = outputquery
# Add the step to the production
res = prod.addStep(prodStep2)
self.assertTrue(res['OK'])
# Define the third step of the production
prodStep3 = ProductionStep()
prodStep3.Name = 'Reco_prog'
prodStep3.Type = 'DataProcessing'
prodStep3.ParentStep = [prodStep1, prodStep2]
inputquery = {'zenith': {'in': [20, 40]}, 'particle': 'gamma', 'tel_sim_prog': 'simtel', 'outputType': 'Data'}
outputquery = {
'zenith': {
'in': [
20,
40]},
'particle': 'gamma',
'analysis_prog': 'evndisp',
'data_level': 1,
'outputType': {
'in': [
'Data',
'Log']}}
prodStep3.Inputquery = inputquery
prodStep3.Outputquery = outputquery
# Add the steps to the production
res = prod.addStep(prodStep3)
self.assertTrue(res['OK'])
# Get the production description
prodDescription = prod.prodDescription
# Create the production
prodName = 'MergeProd'
res = self.prodClient.addProduction(prodName, json.dumps(prodDescription))
self.assertTrue(res['OK'])
        # Start the production, i.e. instantiate the transformation steps
res = self.prodClient.startProduction(prodName)
self.assertTrue(res['OK'])
# Get the transformations of the production
res = self.prodClient.getProduction(prodName)
self.assertTrue(res['OK'])
prodID = res['Value']['ProductionID']
res = self.prodClient.getProductionTransformations(prodID)
self.assertTrue(res['OK'])
self.assertEqual(len(res['Value']), 3)
# Delete the production
res = self.prodClient.deleteProduction(prodName)
self.assertTrue(res['OK'])
def test_SplitProduction(self):
# Define the production
prod = Production()
# Define the first step of the production
prodStep1 = ProductionStep()
prodStep1.Name = 'Sim_prog'
prodStep1.Type = 'MCSimulation'
outputquery = {
'zenith': {
'in': [
20,
40]},
'particle': 'gamma',
'tel_sim_prog': 'simtel',
'outputType': {
'in': [
'Data',
'Log']}}
prodStep1.Outputquery = outputquery
# Add the step to the production
res = prod.addStep(prodStep1)
self.assertTrue(res['OK'])
# Define the second step of the production
prodStep2 = ProductionStep()
prodStep2.Name = 'Reco_prog'
prodStep2.Type = 'DataProcessing'
prodStep2.ParentStep = prodStep1
inputquery = {'zenith': 20, 'particle': 'gamma', 'tel_sim_prog': 'simtel', 'outputType': 'Data'}
outputquery = {
'zenith': 20,
'particle': 'gamma',
'analysis_prog': 'evndisp',
'data_level': 1,
'outputType': {
'in': [
'Data',
'Log']}}
prodStep2.Inputquery = inputquery
prodStep2.Outputquery = outputquery
# Add the step to the production
res = prod.addStep(prodStep2)
self.assertTrue(res['OK'])
# Define the third step of the production
prodStep3 = ProductionStep()
prodStep3.Name = 'Reco_prog'
prodStep3.Type = 'DataProcessing'
prodStep3.ParentStep = prodStep1
inputquery = {'zenith': 40, 'particle': 'gamma', 'tel_sim_prog': 'simtel', 'outputType': 'Data'}
outputquery = {
'zenith': 40,
'particle': 'gamma',
'analysis_prog': 'evndisp',
'data_level': 1,
'outputType': {
'in': [
'Data',
'Log']}}
prodStep3.Inputquery = inputquery
prodStep3.Outputquery = outputquery
# Add the steps to the production
res = prod.addStep(prodStep3)
self.assertTrue(res['OK'])
# Get the production description
prodDescription = prod.prodDescription
# Create the production
prodName = 'SplitProd'
res = self.prodClient.addProduction(prodName, json.dumps(prodDescription))
self.assertTrue(res['OK'])
        # Start the production, i.e. instantiate the transformation steps
res = self.prodClient.startProduction(prodName)
self.assertTrue(res['OK'])
# Get the transformations of the production
res = self.prodClient.getProduction(prodName)
self.assertTrue(res['OK'])
prodID = res['Value']['ProductionID']
res = self.prodClient.getProductionTransformations(prodID)
self.assertTrue(res['OK'])
self.assertEqual(len(res['Value']), 3)
# Delete the production
res = self.prodClient.deleteProduction(prodName)
self.assertTrue(res['OK'])
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestClientProductionTestCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ProductionClientChain))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
|
petricm/DIRAC
|
tests/Integration/ProductionSystem/Test_Client_TS_Prod.py
|
Python
|
gpl-3.0
| 10,151
|
[
"DIRAC"
] |
2299967525261722566e7fd75410633030d5d0b22e40bd283715f629e61dd1b3
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Wrapper classes for Cif input and output from Structures.
"""
import math
import re
import os
import textwrap
import warnings
from collections import OrderedDict, deque
from io import StringIO
import numpy as np
from functools import partial
from pathlib import Path
from inspect import getfullargspec as getargspec
from itertools import groupby
from pymatgen.core.periodic_table import Element, Specie, get_el_sp, DummySpecie
from monty.io import zopen
from pymatgen.util.coord import in_coord_list_pbc, find_in_coord_list_pbc
from monty.string import remove_non_ascii
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.groups import SpaceGroup, SYMM_DATA
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.electronic_structure.core import Magmom
from pymatgen.core.operations import MagSymmOp
from pymatgen.symmetry.maggroups import MagneticSpaceGroup
__author__ = "Shyue Ping Ong, Will Richards, Matthew Horton"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "4.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
sub_spgrp = partial(re.sub, r"[\s_]", "")
space_groups = {sub_spgrp(k): k for k in SYMM_DATA['space_group_encoding'].keys()} # type: ignore
space_groups.update({sub_spgrp(k): k for k in SYMM_DATA['space_group_encoding'].keys()}) # type: ignore
_COD_DATA = None
def _get_cod_data():
global _COD_DATA
if _COD_DATA is None:
import pymatgen
with open(os.path.join(pymatgen.symmetry.__path__[0],
"symm_ops.json")) \
as f:
import json
_COD_DATA = json.load(f)
return _COD_DATA
class CifBlock:
"""
Object for storing cif data. All data is stored in a single dictionary.
Data inside loops are stored in lists in the data dictionary, and
    information on which keys are grouped together is stored in the loops
attribute.
"""
maxlen = 70 # not quite 80 so we can deal with semicolons and things
def __init__(self, data, loops, header):
"""
Args:
data: dict or OrderedDict of data to go into the cif. Values should
be convertible to string, or lists of these if the key is
in a loop
loops: list of lists of keys, grouped by which loop they should
appear in
header: name of the block (appears after the data_ on the first
line)
"""
self.loops = loops
self.data = data
# AJ says: CIF Block names cannot be more than 75 characters or you
# get an Exception
self.header = header[:74]
def __eq__(self, other):
return self.loops == other.loops \
and self.data == other.data \
and self.header == other.header
def __getitem__(self, key):
return self.data[key]
def __str__(self):
"""
Returns the cif string for the data block
"""
s = ["data_{}".format(self.header)]
keys = self.data.keys()
written = []
for k in keys:
if k in written:
continue
for l in self.loops:
# search for a corresponding loop
if k in l:
s.append(self._loop_to_string(l))
written.extend(l)
break
if k not in written:
# k didn't belong to a loop
v = self._format_field(self.data[k])
if len(k) + len(v) + 3 < self.maxlen:
s.append("{} {}".format(k, v))
else:
s.extend([k, v])
return "\n".join(s)
def _loop_to_string(self, loop):
s = "loop_"
for l in loop:
s += '\n ' + l
for fields in zip(*[self.data[k] for k in loop]):
line = "\n"
for val in map(self._format_field, fields):
if val[0] == ";":
s += line + "\n" + val
line = "\n"
elif len(line) + len(val) + 2 < self.maxlen:
line += " " + val
else:
s += line
line = '\n ' + val
s += line
return s
def _format_field(self, v):
v = v.__str__().strip()
if len(v) > self.maxlen:
return ';\n' + textwrap.fill(v, self.maxlen) + '\n;'
# add quotes if necessary
if v == '':
return '""'
if (" " in v or v[0] == "_") \
and not (v[0] == "'" and v[-1] == "'") \
and not (v[0] == '"' and v[-1] == '"'):
if "'" in v:
q = '"'
else:
q = "'"
v = q + v + q
return v
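    # e.g. _format_field("O1 2") -> "'O1 2'": values containing spaces or a
    # leading underscore are quoted, and values longer than ``maxlen`` are
    # wrapped in a semicolon-delimited text block.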
@classmethod
def _process_string(cls, string):
# remove comments
string = re.sub(r"(\s|^)#.*$", "", string, flags=re.MULTILINE)
# remove empty lines
string = re.sub(r"^\s*\n", "", string, flags=re.MULTILINE)
# remove non_ascii
string = remove_non_ascii(string)
# since line breaks in .cif files are mostly meaningless,
# break up into a stream of tokens to parse, rejoining multiline
# strings (between semicolons)
q = deque()
multiline = False
ml = []
# this regex splits on spaces, except when in quotes.
# starting quotes must not be preceded by non-whitespace
# (these get eaten by the first expression)
# ending quotes must not be followed by non-whitespace
p = re.compile(r'''([^'"\s][\S]*)|'(.*?)'(?!\S)|"(.*?)"(?!\S)''')
for l in string.splitlines():
if multiline:
if l.startswith(";"):
multiline = False
q.append(('', '', '', ' '.join(ml)))
ml = []
l = l[1:].strip()
else:
ml.append(l)
continue
if l.startswith(";"):
multiline = True
ml.append(l[1:].strip())
else:
for s in p.findall(l):
# s is tuple. location of the data in the tuple
# depends on whether it was quoted in the input
q.append(s)
return q
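        # Note: tokens produced by the regex are 3-tuples of capture groups,
        # while rejoined multi-line (semicolon-delimited) values are appended
        # as 4-tuples; both are consumed downstream with "".join().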
@classmethod
def from_string(cls, string):
"""
Reads CifBlock from string.
:param string: String representation.
:return: CifBlock
"""
q = cls._process_string(string)
header = q.popleft()[0][5:]
data = OrderedDict()
loops = []
while q:
s = q.popleft()
# cif keys aren't in quotes, so show up in s[0]
if s[0] == "_eof":
break
if s[0].startswith("_"):
try:
data[s[0]] = "".join(q.popleft())
except IndexError:
data[s[0]] = ""
elif s[0].startswith("loop_"):
columns = []
items = []
while q:
s = q[0]
if s[0].startswith("loop_") or not s[0].startswith("_"):
break
columns.append("".join(q.popleft()))
data[columns[-1]] = []
while q:
s = q[0]
if s[0].startswith("loop_") or s[0].startswith("_"):
break
items.append("".join(q.popleft()))
n = len(items) // len(columns)
assert len(items) % n == 0
loops.append(columns)
for k, v in zip(columns * n, items):
data[k].append(v.strip())
elif "".join(s).strip() != "":
warnings.warn("Possible issue in cif file"
" at line: {}".format("".join(s).strip()))
return cls(data, loops, header)
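        # Usage sketch (hypothetical input):
        #   CifBlock.from_string("data_ex\n_a 1\n")
        # yields a block with header "ex" and data {"_a": "1"}.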
class CifFile:
"""
Reads and parses CifBlocks from a .cif file or string
"""
def __init__(self, data, orig_string=None, comment=None):
"""
Args:
            data (OrderedDict): Of CifBlock objects.
orig_string (str): The original cif string.
comment (str): Comment string.
"""
self.data = data
self.orig_string = orig_string
self.comment = comment or "# generated using pymatgen"
def __str__(self):
s = ["%s" % v for v in self.data.values()]
return self.comment + "\n" + "\n".join(s) + "\n"
@classmethod
def from_string(cls, string):
"""
Reads CifFile from a string.
:param string: String representation.
:return: CifFile
"""
d = OrderedDict()
for x in re.split(r"^\s*data_", "x\n" + string,
flags=re.MULTILINE | re.DOTALL)[1:]:
# Skip over Cif block that contains powder diffraction data.
# Some elements in this block were missing from CIF files in
# Springer materials/Pauling file DBs.
# This block anyway does not contain any structure information, and
# CifParser was also not parsing it.
if 'powder_pattern' in re.split(r"\n", x, 1)[0]:
continue
c = CifBlock.from_string("data_" + x)
d[c.header] = c
return cls(d, string)
@classmethod
def from_file(cls, filename):
"""
Reads CifFile from a filename.
:param filename: Filename
:return: CifFile
"""
with zopen(str(filename), "rt", errors="replace") as f:
return cls.from_string(f.read())
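        # e.g. CifFile.from_file("NaCl.cif") (hypothetical path) transparently
        # handles plain, gzipped or bzipped files via zopen and returns one
        # CifBlock per data_ block, keyed by header.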
class CifParser:
"""
Parses a CIF file. Attempts to fix CIFs that are out-of-spec, but will
issue warnings if corrections applied. These are also stored in the
CifParser's errors attribute.
"""
def __init__(self, filename, occupancy_tolerance=1., site_tolerance=1e-4):
"""
Args:
filename (str): CIF filename, bzipped or gzipped CIF files are fine too.
occupancy_tolerance (float): If total occupancy of a site is between 1
and occupancy_tolerance, the occupancies will be scaled down to 1.
site_tolerance (float): This tolerance is used to determine if two
sites are sitting in the same position, in which case they will be
combined to a single disordered site. Defaults to 1e-4.
"""
self._occupancy_tolerance = occupancy_tolerance
self._site_tolerance = site_tolerance
if isinstance(filename, (str, Path)):
self._cif = CifFile.from_file(filename)
else:
self._cif = CifFile.from_string(filename.read())
# store if CIF contains features from non-core CIF dictionaries
# e.g. magCIF
self.feature_flags = {}
self.warnings = []
def is_magcif():
"""
Checks to see if file appears to be a magCIF file (heuristic).
"""
# Doesn't seem to be a canonical way to test if file is magCIF or
# not, so instead check for magnetic symmetry datanames
prefixes = ['_space_group_magn', '_atom_site_moment',
'_space_group_symop_magn']
for d in self._cif.data.values():
for k in d.data.keys():
for prefix in prefixes:
if prefix in k:
return True
return False
self.feature_flags['magcif'] = is_magcif()
def is_magcif_incommensurate():
"""
Checks to see if file contains an incommensurate magnetic
structure (heuristic).
"""
# Doesn't seem to be a canonical way to test if magCIF file
            # describes an incommensurate structure or not, so instead check
# for common datanames
if not self.feature_flags["magcif"]:
return False
prefixes = ['_cell_modulation_dimension', '_cell_wave_vector']
for d in self._cif.data.values():
for k in d.data.keys():
for prefix in prefixes:
if prefix in k:
return True
return False
self.feature_flags['magcif_incommensurate'] = is_magcif_incommensurate()
for k in self._cif.data.keys():
# pass individual CifBlocks to _sanitize_data
self._cif.data[k] = self._sanitize_data(self._cif.data[k])
@staticmethod
def from_string(cif_string, occupancy_tolerance=1.):
"""
Creates a CifParser from a string.
Args:
cif_string (str): String representation of a CIF.
occupancy_tolerance (float): If total occupancy of a site is
between 1 and occupancy_tolerance, the occupancies will be
scaled down to 1.
Returns:
CifParser
"""
stream = StringIO(cif_string)
return CifParser(stream, occupancy_tolerance)
def _sanitize_data(self, data):
"""
Some CIF files do not conform to spec. This function corrects
        known issues, particularly in regard to Springer Materials/
Pauling files.
This function is here so that CifParser can assume its
input conforms to spec, simplifying its implementation.
:param data: CifBlock
:return: data CifBlock
"""
"""
This part of the code deals with handling formats of data as found in
CIF files extracted from the Springer Materials/Pauling File
databases, and that are different from standard ICSD formats.
"""
# check for implicit hydrogens, warn if any present
if "_atom_site_attached_hydrogens" in data.data.keys():
attached_hydrogens = [str2float(x) for x in data.data['_atom_site_attached_hydrogens']
if str2float(x) != 0]
if len(attached_hydrogens) > 0:
self.warnings.append("Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added.")
# Check to see if "_atom_site_type_symbol" exists, as some test CIFs do
# not contain this key.
if "_atom_site_type_symbol" in data.data.keys():
# Keep a track of which data row needs to be removed.
# Example of a row: Nb,Zr '0.8Nb + 0.2Zr' .2a .m-3m 0 0 0 1 14
# 'rhombic dodecahedron, Nb<sub>14</sub>'
# Without this code, the above row in a structure would be parsed
# as an ordered site with only Nb (since
# CifParser would try to parse the first two characters of the
# label "Nb,Zr") and occupancy=1.
# However, this site is meant to be a disordered site with 0.8 of
# Nb and 0.2 of Zr.
idxs_to_remove = []
new_atom_site_label = []
new_atom_site_type_symbol = []
new_atom_site_occupancy = []
new_fract_x = []
new_fract_y = []
new_fract_z = []
for idx, el_row in enumerate(data["_atom_site_label"]):
# CIF files from the Springer Materials/Pauling File have
# switched the label and symbol. Thus, in the
# above shown example row, '0.8Nb + 0.2Zr' is the symbol.
# Below, we split the strings on ' + ' to
# check if the length (or number of elements) in the label and
# symbol are equal.
if len(data["_atom_site_type_symbol"][idx].split(' + ')) > \
len(data["_atom_site_label"][idx].split(' + ')):
# Dictionary to hold extracted elements and occupancies
els_occu = {}
# parse symbol to get element names and occupancy and store
# in "els_occu"
symbol_str = data["_atom_site_type_symbol"][idx]
symbol_str_lst = symbol_str.split(' + ')
for elocc_idx in range(len(symbol_str_lst)):
# Remove any bracketed items in the string
symbol_str_lst[elocc_idx] = re.sub(
r'\([0-9]*\)', '',
symbol_str_lst[elocc_idx].strip())
# Extract element name and its occupancy from the
# string, and store it as a
# key-value pair in "els_occ".
els_occu[str(re.findall(r'\D+', symbol_str_lst[
elocc_idx].strip())[1]).replace('<sup>', '')] = \
float('0' + re.findall(r'\.?\d+', symbol_str_lst[
elocc_idx].strip())[1])
x = str2float(data["_atom_site_fract_x"][idx])
y = str2float(data["_atom_site_fract_y"][idx])
z = str2float(data["_atom_site_fract_z"][idx])
for et, occu in els_occu.items():
# new atom site labels have 'fix' appended
new_atom_site_label.append(
et + '_fix' + str(len(new_atom_site_label)))
new_atom_site_type_symbol.append(et)
new_atom_site_occupancy.append(str(occu))
new_fract_x.append(str(x))
new_fract_y.append(str(y))
new_fract_z.append(str(z))
idxs_to_remove.append(idx)
# Remove the original row by iterating over all keys in the CIF
# data looking for lists, which indicates
# multiple data items, one for each row, and remove items from the
# list that corresponds to the removed row,
# so that it's not processed by the rest of this function (which
# would result in an error).
for original_key in data.data:
if isinstance(data.data[original_key], list):
for id in sorted(idxs_to_remove, reverse=True):
del data.data[original_key][id]
if len(idxs_to_remove) > 0:
self.warnings.append("Pauling file corrections applied.")
data.data["_atom_site_label"] += new_atom_site_label
data.data["_atom_site_type_symbol"] += new_atom_site_type_symbol
data.data["_atom_site_occupancy"] += new_atom_site_occupancy
data.data["_atom_site_fract_x"] += new_fract_x
data.data["_atom_site_fract_y"] += new_fract_y
data.data["_atom_site_fract_z"] += new_fract_z
"""
This fixes inconsistencies in naming of several magCIF tags
as a result of magCIF being in widespread use prior to
specification being finalized (on advice of Branton Campbell).
"""
if self.feature_flags["magcif"]:
# CIF-1 style has all underscores, interim standard
# had period before magn instead of before the final
# component (e.g. xyz)
# we want to standardize on a specific key, to simplify
# parsing code
correct_keys = ["_space_group_symop_magn_operation.xyz",
"_space_group_symop_magn_centering.xyz",
"_space_group_magn.name_BNS",
"_space_group_magn.number_BNS",
"_atom_site_moment_crystalaxis_x",
"_atom_site_moment_crystalaxis_y",
"_atom_site_moment_crystalaxis_z",
"_atom_site_moment_label"]
# cannot mutate OrderedDict during enumeration,
# so store changes we want to make
changes_to_make = {}
for original_key in data.data:
for correct_key in correct_keys:
# convert to all underscore
trial_key = "_".join(correct_key.split("."))
test_key = "_".join(original_key.split("."))
if trial_key == test_key:
changes_to_make[correct_key] = original_key
# make changes
for correct_key, original_key in changes_to_make.items():
data.data[correct_key] = data.data[original_key]
# renamed_keys maps interim_keys to final_keys
renamed_keys = {
"_magnetic_space_group.transform_to_standard_Pp_abc":
"_space_group_magn.transform_BNS_Pp_abc"}
changes_to_make = {}
for interim_key, final_key in renamed_keys.items():
if data.data.get(interim_key):
changes_to_make[final_key] = interim_key
if len(changes_to_make) > 0:
self.warnings.append("Keys changed to match new magCIF specification.")
for final_key, interim_key in changes_to_make.items():
data.data[final_key] = data.data[interim_key]
# check for finite precision frac co-ordinates (e.g. 0.6667 instead of 0.6666666...7)
# this can sometimes cause serious issues when applying symmetry operations
important_fracs = (1 / 3., 2 / 3.)
fracs_to_change = {}
for label in ('_atom_site_fract_x', '_atom_site_fract_y', '_atom_site_fract_z'):
if label in data.data.keys():
for idx, frac in enumerate(data.data[label]):
try:
frac = str2float(frac)
except Exception:
# co-ordinate might not be defined e.g. '?'
continue
for comparison_frac in important_fracs:
if abs(1 - frac / comparison_frac) < 1e-4:
fracs_to_change[(label, idx)] = str(comparison_frac)
if fracs_to_change:
self.warnings.append("Some fractional co-ordinates rounded to ideal values to "
"avoid issues with finite precision.")
for (label, idx), val in fracs_to_change.items():
data.data[label][idx] = val
return data
def _unique_coords(self, coords_in, magmoms_in=None, lattice=None):
"""
Generate unique coordinates using coord and symmetry positions
and also their corresponding magnetic moments, if supplied.
"""
coords = []
if magmoms_in:
magmoms = []
if len(magmoms_in) != len(coords_in):
raise ValueError
for tmp_coord, tmp_magmom in zip(coords_in, magmoms_in):
for op in self.symmetry_operations:
coord = op.operate(tmp_coord)
coord = np.array([i - math.floor(i) for i in coord])
if isinstance(op, MagSymmOp):
# Up to this point, magmoms have been defined relative
# to crystal axis. Now convert to Cartesian and into
# a Magmom object.
magmom = Magmom.from_moment_relative_to_crystal_axes(
op.operate_magmom(tmp_magmom),
lattice=lattice
)
else:
magmom = Magmom(tmp_magmom)
if not in_coord_list_pbc(coords, coord,
atol=self._site_tolerance):
coords.append(coord)
magmoms.append(magmom)
return coords, magmoms
else:
for tmp_coord in coords_in:
for op in self.symmetry_operations:
coord = op.operate(tmp_coord)
coord = np.array([i - math.floor(i) for i in coord])
if not in_coord_list_pbc(coords, coord,
atol=self._site_tolerance):
coords.append(coord)
return coords, [Magmom(0)] * len(coords) # return dummy magmoms
def get_lattice(self, data, length_strings=("a", "b", "c"),
angle_strings=("alpha", "beta", "gamma"),
lattice_type=None):
"""
Generate the lattice from the provided lattice parameters. In
the absence of all six lattice parameters, the crystal system
and necessary parameters are parsed
"""
try:
lengths = [str2float(data["_cell_length_" + i])
for i in length_strings]
angles = [str2float(data["_cell_angle_" + i])
for i in angle_strings]
if not lattice_type:
return Lattice.from_parameters(*lengths, *angles)
else:
return getattr(Lattice, lattice_type)(*(lengths + angles))
except KeyError:
# Missing Key search for cell setting
            for lattice_label in ["_symmetry_cell_setting",
"_space_group_crystal_system"]:
                if data.data.get(lattice_label):
                    lattice_type = data.data.get(lattice_label).lower()
try:
required_args = getargspec(
getattr(Lattice, lattice_type)).args
lengths = (l for l in length_strings
if l in required_args)
angles = (a for a in angle_strings
if a in required_args)
return self.get_lattice(data, lengths, angles,
lattice_type=lattice_type)
except AttributeError as exc:
self.warnings.append(str(exc))
warnings.warn(exc)
else:
return None
def get_symops(self, data):
"""
In order to generate symmetry equivalent positions, the symmetry
operations are parsed. If the symops are not present, the space
group symbol is parsed, and symops are generated.
"""
symops = []
for symmetry_label in ["_symmetry_equiv_pos_as_xyz",
"_symmetry_equiv_pos_as_xyz_",
"_space_group_symop_operation_xyz",
"_space_group_symop_operation_xyz_"]:
if data.data.get(symmetry_label):
xyz = data.data.get(symmetry_label)
if isinstance(xyz, str):
msg = "A 1-line symmetry op P1 CIF is detected!"
warnings.warn(msg)
self.warnings.append(msg)
xyz = [xyz]
try:
symops = [SymmOp.from_xyz_string(s)
for s in xyz]
break
except ValueError:
continue
if not symops:
# Try to parse symbol
for symmetry_label in ["_symmetry_space_group_name_H-M",
"_symmetry_space_group_name_H_M",
"_symmetry_space_group_name_H-M_",
"_symmetry_space_group_name_H_M_",
"_space_group_name_Hall",
"_space_group_name_Hall_",
"_space_group_name_H-M_alt",
"_space_group_name_H-M_alt_",
"_symmetry_space_group_name_hall",
"_symmetry_space_group_name_hall_",
"_symmetry_space_group_name_h-m",
"_symmetry_space_group_name_h-m_"]:
sg = data.data.get(symmetry_label)
if sg:
sg = sub_spgrp(sg)
try:
spg = space_groups.get(sg)
if spg:
symops = SpaceGroup(spg).symmetry_ops
msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
"Spacegroup from %s used." % symmetry_label
warnings.warn(msg)
self.warnings.append(msg)
break
except ValueError:
# Ignore any errors
pass
try:
for d in _get_cod_data():
if sg == re.sub(r"\s+", "",
d["hermann_mauguin"]):
xyz = d["symops"]
symops = [SymmOp.from_xyz_string(s)
for s in xyz]
msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
"Spacegroup from %s used." % symmetry_label
warnings.warn(msg)
self.warnings.append(msg)
break
except Exception:
continue
if symops:
break
if not symops:
# Try to parse International number
for symmetry_label in ["_space_group_IT_number",
"_space_group_IT_number_",
"_symmetry_Int_Tables_number",
"_symmetry_Int_Tables_number_"]:
if data.data.get(symmetry_label):
try:
i = int(str2float(data.data.get(symmetry_label)))
symops = SpaceGroup.from_int_number(i).symmetry_ops
break
except ValueError:
continue
if not symops:
msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
"Defaulting to P1."
warnings.warn(msg)
self.warnings.append(msg)
symops = [SymmOp.from_xyz_string(s) for s in ['x', 'y', 'z']]
return symops
def get_magsymops(self, data):
"""
Equivalent to get_symops except for magnetic symmetry groups.
Separate function since additional operation for time reversal symmetry
(which changes magnetic moments on sites) needs to be returned.
"""
magsymmops = []
# check to see if magCIF file explicitly contains magnetic symmetry operations
if data.data.get("_space_group_symop_magn_operation.xyz"):
xyzt = data.data.get("_space_group_symop_magn_operation.xyz")
if isinstance(xyzt, str):
xyzt = [xyzt]
magsymmops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]
if data.data.get("_space_group_symop_magn_centering.xyz"):
xyzt = data.data.get("_space_group_symop_magn_centering.xyz")
if isinstance(xyzt, str):
xyzt = [xyzt]
centering_symops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]
all_ops = []
for op in magsymmops:
for centering_op in centering_symops:
new_translation = [i - np.floor(i) for i
in
op.translation_vector + centering_op.translation_vector]
new_time_reversal = op.time_reversal * centering_op.time_reversal
all_ops.append(
MagSymmOp.from_rotation_and_translation_and_time_reversal(
rotation_matrix=op.rotation_matrix,
translation_vec=new_translation,
time_reversal=new_time_reversal))
magsymmops = all_ops
# else check to see if it specifies a magnetic space group
elif data.data.get("_space_group_magn.name_BNS") or data.data.get(
"_space_group_magn.number_BNS"):
if data.data.get("_space_group_magn.name_BNS"):
# get BNS label for MagneticSpaceGroup()
id = data.data.get("_space_group_magn.name_BNS")
else:
# get BNS number for MagneticSpaceGroup()
# by converting string to list of ints
id = list(map(int, (
data.data.get("_space_group_magn.number_BNS").split("."))))
if data.data.get("_space_group_magn.transform_BNS_Pp_abc"):
if data.data.get(
"_space_group_magn.transform_BNS_Pp_abc") != "a,b,c;0,0,0":
jf = data.data.get("_space_group_magn.transform_BNS_Pp_abc")
msg = MagneticSpaceGroup(id, jf)
elif data.data.get("_space_group_magn.transform_BNS_Pp"):
return NotImplementedError(
"Incomplete specification to implement.")
else:
msg = MagneticSpaceGroup(id)
magsymmops = msg.symmetry_ops
if not magsymmops:
msg = "No magnetic symmetry detected, using primitive symmetry."
warnings.warn(msg)
self.warnings.append(msg)
magsymmops = [MagSymmOp.from_xyzt_string("x, y, z, 1")]
return magsymmops
def parse_oxi_states(self, data):
"""
Parse oxidation states from data dictionary
"""
try:
oxi_states = {
data["_atom_type_symbol"][i]:
str2float(data["_atom_type_oxidation_number"][i])
for i in range(len(data["_atom_type_symbol"]))}
# attempt to strip oxidation state from _atom_type_symbol
# in case the label does not contain an oxidation state
for i, symbol in enumerate(data["_atom_type_symbol"]):
oxi_states[re.sub(r"\d?[\+,\-]?$", "", symbol)] = \
str2float(data["_atom_type_oxidation_number"][i])
except (ValueError, KeyError):
oxi_states = None
return oxi_states
def parse_magmoms(self, data, lattice=None):
"""
Parse atomic magnetic moments from data dictionary
"""
if lattice is None:
raise Exception(
'Magmoms given in terms of crystal axes in magCIF spec.')
try:
magmoms = {
data["_atom_site_moment_label"][i]:
np.array(
[str2float(data["_atom_site_moment_crystalaxis_x"][i]),
str2float(data["_atom_site_moment_crystalaxis_y"][i]),
str2float(data["_atom_site_moment_crystalaxis_z"][i])]
)
for i in range(len(data["_atom_site_moment_label"]))
}
except (ValueError, KeyError):
return None
return magmoms
def _parse_symbol(self, sym):
"""
Parse a string with a symbol to extract a string representing an element.
Args:
sym (str): A symbol to be parsed.
Returns:
A string with the parsed symbol. None if no parsing was possible.
"""
# Common representations for elements/water in cif files
# TODO: fix inconsistent handling of water
special = {"Hw": "H", "Ow": "O", "Wat": "O",
"wat": "O", "OH": "", "OH2": "", "NO3": "N"}
parsed_sym = None
# try with special symbols, otherwise check the first two letters,
# then the first letter alone. If everything fails try extracting the
# first letters.
m_sp = re.match("|".join(special.keys()), sym)
if m_sp:
parsed_sym = special[m_sp.group()]
elif Element.is_valid_symbol(sym[:2].title()):
parsed_sym = sym[:2].title()
elif Element.is_valid_symbol(sym[0].upper()):
parsed_sym = sym[0].upper()
else:
m = re.match(r"w?[A-Z][a-z]*", sym)
if m:
parsed_sym = m.group()
if parsed_sym is not None and (m_sp or not re.match(r"{}\d*".format(parsed_sym), sym)):
msg = "{} parsed as {}".format(sym, parsed_sym)
warnings.warn(msg)
self.warnings.append(msg)
return parsed_sym
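        # Illustrative results: "Fe2+" -> "Fe", "Wat" -> "O", "Hw" -> "H";
        # a symbol that cannot be matched returns None.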
def _get_structure(self, data, primitive):
"""
Generate structure from part of the cif.
"""
def get_num_implicit_hydrogens(sym):
num_h = {"Wat": 2, "wat": 2, "O-H": 1}
return num_h.get(sym[:3], 0)
lattice = self.get_lattice(data)
# if magCIF, get magnetic symmetry moments and magmoms
# else standard CIF, and use empty magmom dict
if self.feature_flags["magcif_incommensurate"]:
raise NotImplementedError(
"Incommensurate structures not currently supported.")
elif self.feature_flags["magcif"]:
self.symmetry_operations = self.get_magsymops(data)
magmoms = self.parse_magmoms(data, lattice=lattice)
else:
self.symmetry_operations = self.get_symops(data)
magmoms = {}
oxi_states = self.parse_oxi_states(data)
coord_to_species = OrderedDict()
coord_to_magmoms = OrderedDict()
def get_matching_coord(coord):
keys = list(coord_to_species.keys())
coords = np.array(keys)
for op in self.symmetry_operations:
c = op.operate(coord)
inds = find_in_coord_list_pbc(coords, c,
atol=self._site_tolerance)
# cant use if inds, because python is dumb and np.array([0]) evaluates
# to False
if len(inds):
return keys[inds[0]]
return False
for i in range(len(data["_atom_site_label"])):
try:
# If site type symbol exists, use it. Otherwise, we use the
# label.
symbol = self._parse_symbol(data["_atom_site_type_symbol"][i])
num_h = get_num_implicit_hydrogens(
data["_atom_site_type_symbol"][i])
except KeyError:
symbol = self._parse_symbol(data["_atom_site_label"][i])
num_h = get_num_implicit_hydrogens(data["_atom_site_label"][i])
if not symbol:
continue
if oxi_states is not None:
o_s = oxi_states.get(symbol, 0)
# use _atom_site_type_symbol if possible for oxidation state
if "_atom_site_type_symbol" in data.data.keys():
oxi_symbol = data["_atom_site_type_symbol"][i]
o_s = oxi_states.get(oxi_symbol, o_s)
try:
el = Specie(symbol, o_s)
except Exception:
el = DummySpecie(symbol, o_s)
else:
el = get_el_sp(symbol)
x = str2float(data["_atom_site_fract_x"][i])
y = str2float(data["_atom_site_fract_y"][i])
z = str2float(data["_atom_site_fract_z"][i])
magmom = magmoms.get(data["_atom_site_label"][i],
np.array([0, 0, 0]))
try:
occu = str2float(data["_atom_site_occupancy"][i])
except (KeyError, ValueError):
occu = 1
if occu > 0:
coord = (x, y, z)
match = get_matching_coord(coord)
comp_d = {el: occu}
if num_h > 0:
comp_d["H"] = num_h
self.warnings.append("Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added.")
comp = Composition(comp_d)
if not match:
coord_to_species[coord] = comp
coord_to_magmoms[coord] = magmom
else:
coord_to_species[match] += comp
# disordered magnetic not currently supported
coord_to_magmoms[match] = None
sum_occu = [sum(c.values()) for c in coord_to_species.values()
if not set(c.elements) == {Element("O"), Element("H")}]
if any([o > 1 for o in sum_occu]):
msg = "Some occupancies (%s) sum to > 1! If they are within " \
"the tolerance, they will be rescaled." % str(sum_occu)
warnings.warn(msg)
self.warnings.append(msg)
allspecies = []
allcoords = []
allmagmoms = []
allhydrogens = []
# check to see if magCIF file is disordered
if self.feature_flags["magcif"]:
for k, v in coord_to_magmoms.items():
if v is None:
# Proposed solution to this is to instead store magnetic
# moments as Specie 'spin' property, instead of site
# property, but this introduces ambiguities for end user
                    # (such as unintended use of `spin`, and Specie would
                    # carry a fictitious oxidation state).
raise NotImplementedError(
'Disordered magnetic structures not currently supported.')
if coord_to_species.items():
for comp, group in groupby(
sorted(list(coord_to_species.items()), key=lambda x: x[1]),
key=lambda x: x[1]):
tmp_coords = [site[0] for site in group]
tmp_magmom = [coord_to_magmoms[tmp_coord] for tmp_coord in
tmp_coords]
if self.feature_flags["magcif"]:
coords, magmoms = self._unique_coords(tmp_coords,
magmoms_in=tmp_magmom,
lattice=lattice)
else:
coords, magmoms = self._unique_coords(tmp_coords)
if set(comp.elements) == {Element("O"), Element("H")}:
# O with implicit hydrogens
im_h = comp["H"]
species = Composition({"O": comp["O"]})
else:
im_h = 0
species = comp
allhydrogens.extend(len(coords) * [im_h])
allcoords.extend(coords)
allspecies.extend(len(coords) * [species])
allmagmoms.extend(magmoms)
# rescale occupancies if necessary
for i, species in enumerate(allspecies):
totaloccu = sum(species.values())
if 1 < totaloccu <= self._occupancy_tolerance:
allspecies[i] = species / totaloccu
if allspecies and len(allspecies) == len(allcoords) \
and len(allspecies) == len(allmagmoms):
site_properties = dict()
if any(allhydrogens):
assert len(allhydrogens) == len(allcoords)
site_properties["implicit_hydrogens"] = allhydrogens
if self.feature_flags["magcif"]:
site_properties["magmom"] = allmagmoms
if len(site_properties) == 0:
site_properties = None
struct = Structure(lattice, allspecies, allcoords,
site_properties=site_properties)
struct = struct.get_sorted_structure()
if primitive and self.feature_flags['magcif']:
struct = struct.get_primitive_structure(use_site_props=True)
elif primitive:
struct = struct.get_primitive_structure()
struct = struct.get_reduced_structure()
return struct
def get_structures(self, primitive=True):
"""
Return list of structures in CIF file. primitive boolean sets whether a
conventional cell structure or primitive cell structure is returned.
Args:
primitive (bool): Set to False to return conventional unit cells.
Defaults to True. With magnetic CIF files, will return primitive
magnetic cell which may be larger than nuclear primitive cell.
Returns:
List of Structures.
"""
structures = []
for d in self._cif.data.values():
try:
s = self._get_structure(d, primitive)
if s:
structures.append(s)
except (KeyError, ValueError) as exc:
# Warn the user (Errors should never pass silently)
# A user reported a problem with cif files produced by Avogadro
# in which the atomic coordinates are in Cartesian coords.
self.warnings.append(str(exc))
warnings.warn(str(exc))
if self.warnings:
warnings.warn("Issues encountered while parsing CIF: %s" % "\n".join(self.warnings))
if len(structures) == 0:
raise ValueError("Invalid cif file with no structures!")
return structures
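        # Typical use (hypothetical file): CifParser("NaCl.cif").get_structures()
        # returns one pymatgen Structure per parseable data_ block.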
def get_bibtex_string(self):
"""
Get BibTeX reference from CIF file.
:param data:
:return: BibTeX string
"""
try:
from pybtex.database import BibliographyData, Entry
except ImportError:
raise RuntimeError("Bibliographic data extraction requires pybtex.")
bibtex_keys = {'author': ('_publ_author_name', '_citation_author_name'),
'title': ('_publ_section_title', '_citation_title'),
'journal': ('_journal_name_full', '_journal_name_abbrev',
'_citation_journal_full', '_citation_journal_abbrev'),
'volume': ('_journal_volume', '_citation_journal_volume'),
'year': ('_journal_year', '_citation_year'),
'number': ('_journal_number', '_citation_number'),
'page_first': ('_journal_page_first', '_citation_page_first'),
'page_last': ('_journal_page_last', '_citation_page_last'),
'doi': ('_journal_DOI', '_citation_DOI')}
entries = {}
# TODO: parse '_publ_section_references' when it exists?
# TODO: CIF specification supports multiple citations.
for idx, data in enumerate(self._cif.data.values()):
# convert to lower-case keys, some cif files inconsistent
data = {k.lower(): v for k, v in data.data.items()}
bibtex_entry = {}
for field, tags in bibtex_keys.items():
for tag in tags:
if tag in data:
if isinstance(data[tag], list):
bibtex_entry[field] = data[tag][0]
else:
bibtex_entry[field] = data[tag]
# convert to bibtex author format ('and' delimited)
if 'author' in bibtex_entry:
# separate out semicolon authors
if isinstance(bibtex_entry["author"], str):
if ";" in bibtex_entry["author"]:
bibtex_entry["author"] = bibtex_entry["author"].split(";")
if isinstance(bibtex_entry['author'], list):
bibtex_entry['author'] = ' and '.join(bibtex_entry['author'])
# convert to bibtex page range format, use empty string if not specified
if ('page_first' in bibtex_entry) or ('page_last' in bibtex_entry):
bibtex_entry['pages'] = '{0}--{1}'.format(bibtex_entry.get('page_first', ''),
bibtex_entry.get('page_last', ''))
bibtex_entry.pop('page_first', None) # and remove page_first, page_list if present
bibtex_entry.pop('page_last', None)
# cite keys are given as cif-reference-idx in order they are found
entries['cifref{}'.format(idx)] = Entry('article', list(bibtex_entry.items()))
return BibliographyData(entries).to_string(bib_format='bibtex')
def as_dict(self):
"""
:return: MSONable dict
"""
d = OrderedDict()
for k, v in self._cif.data.items():
d[k] = {}
for k2, v2 in v.data.items():
d[k][k2] = v2
return d
@property
def has_errors(self):
"""
:return: Whether there are errors/warnings detected in CIF parsing.
"""
return len(self.warnings) > 0
class CifWriter:
"""
A wrapper around CifFile to write CIF files from pymatgen structures.
"""
def __init__(self, struct, symprec=None, write_magmoms=False,
significant_figures=8, angle_tolerance=5.0):
"""
Args:
struct (Structure): structure to write
symprec (float): If not none, finds the symmetry of the structure
and writes the cif with symmetry information. Passes symprec
to the SpacegroupAnalyzer.
write_magmoms (bool): If True, will write magCIF file. Incompatible
with symprec
significant_figures (int): Specifies precision for formatting of floats.
Defaults to 8.
angle_tolerance (float): Angle tolerance for symmetry finding. Passes
angle_tolerance to the SpacegroupAnalyzer. Used only if symprec
is not None.
"""
if write_magmoms and symprec:
warnings.warn(
"Magnetic symmetry cannot currently be detected by pymatgen,"
"disabling symmetry detection.")
symprec = None
format_str = "{:.%df}" % significant_figures
block = OrderedDict()
loops = []
spacegroup = ("P 1", 1)
if symprec is not None:
sf = SpacegroupAnalyzer(struct, symprec, angle_tolerance=angle_tolerance)
spacegroup = (sf.get_space_group_symbol(),
sf.get_space_group_number())
            # Needs the refined structure when using symprec. This converts
# primitive to conventional structures, the standard for CIF.
struct = sf.get_refined_structure()
latt = struct.lattice
comp = struct.composition
no_oxi_comp = comp.element_composition
block["_symmetry_space_group_name_H-M"] = spacegroup[0]
for cell_attr in ['a', 'b', 'c']:
block["_cell_length_" + cell_attr] = format_str.format(
getattr(latt, cell_attr))
for cell_attr in ['alpha', 'beta', 'gamma']:
block["_cell_angle_" + cell_attr] = format_str.format(
getattr(latt, cell_attr))
block["_symmetry_Int_Tables_number"] = spacegroup[1]
block["_chemical_formula_structural"] = no_oxi_comp.reduced_formula
block["_chemical_formula_sum"] = no_oxi_comp.formula
block["_cell_volume"] = format_str.format(latt.volume)
reduced_comp, fu = no_oxi_comp.get_reduced_composition_and_factor()
block["_cell_formula_units_Z"] = str(int(fu))
if symprec is None:
block["_symmetry_equiv_pos_site_id"] = ["1"]
block["_symmetry_equiv_pos_as_xyz"] = ["x, y, z"]
else:
sf = SpacegroupAnalyzer(struct, symprec)
symmops = []
for op in sf.get_symmetry_operations():
v = op.translation_vector
symmops.append(SymmOp.from_rotation_and_translation(
op.rotation_matrix, v))
ops = [op.as_xyz_string() for op in symmops]
block["_symmetry_equiv_pos_site_id"] = \
["%d" % i for i in range(1, len(ops) + 1)]
block["_symmetry_equiv_pos_as_xyz"] = ops
loops.append(["_symmetry_equiv_pos_site_id",
"_symmetry_equiv_pos_as_xyz"])
try:
symbol_to_oxinum = OrderedDict([
(el.__str__(),
float(el.oxi_state))
for el in sorted(comp.elements)])
block["_atom_type_symbol"] = symbol_to_oxinum.keys()
block["_atom_type_oxidation_number"] = symbol_to_oxinum.values()
loops.append(["_atom_type_symbol", "_atom_type_oxidation_number"])
except (TypeError, AttributeError):
symbol_to_oxinum = OrderedDict([(el.symbol, 0) for el in
sorted(comp.elements)])
atom_site_type_symbol = []
atom_site_symmetry_multiplicity = []
atom_site_fract_x = []
atom_site_fract_y = []
atom_site_fract_z = []
atom_site_label = []
atom_site_occupancy = []
atom_site_moment_label = []
atom_site_moment_crystalaxis_x = []
atom_site_moment_crystalaxis_y = []
atom_site_moment_crystalaxis_z = []
count = 0
if symprec is None:
for site in struct:
for sp, occu in sorted(site.species.items()):
atom_site_type_symbol.append(sp.__str__())
atom_site_symmetry_multiplicity.append("1")
atom_site_fract_x.append(format_str.format(site.a))
atom_site_fract_y.append(format_str.format(site.b))
atom_site_fract_z.append(format_str.format(site.c))
atom_site_label.append("{}{}".format(sp.symbol, count))
atom_site_occupancy.append(occu.__str__())
magmom = Magmom(
site.properties.get('magmom', getattr(sp, 'spin', 0)))
if write_magmoms and abs(magmom) > 0:
moment = Magmom.get_moment_relative_to_crystal_axes(
magmom, latt)
atom_site_moment_label.append(
"{}{}".format(sp.symbol, count))
atom_site_moment_crystalaxis_x.append(format_str.format(moment[0]))
atom_site_moment_crystalaxis_y.append(format_str.format(moment[1]))
atom_site_moment_crystalaxis_z.append(format_str.format(moment[2]))
count += 1
else:
# The following just presents a deterministic ordering.
unique_sites = [
(sorted(sites, key=lambda s: tuple([abs(x) for x in
s.frac_coords]))[0],
len(sites))
for sites in sf.get_symmetrized_structure().equivalent_sites
]
for site, mult in sorted(
unique_sites,
key=lambda t: (t[0].species.average_electroneg,
-t[1], t[0].a, t[0].b, t[0].c)):
for sp, occu in site.species.items():
atom_site_type_symbol.append(sp.__str__())
atom_site_symmetry_multiplicity.append("%d" % mult)
atom_site_fract_x.append(format_str.format(site.a))
atom_site_fract_y.append(format_str.format(site.b))
atom_site_fract_z.append(format_str.format(site.c))
atom_site_label.append("{}{}".format(sp.symbol, count))
atom_site_occupancy.append(occu.__str__())
count += 1
block["_atom_site_type_symbol"] = atom_site_type_symbol
block["_atom_site_label"] = atom_site_label
block["_atom_site_symmetry_multiplicity"] = \
atom_site_symmetry_multiplicity
block["_atom_site_fract_x"] = atom_site_fract_x
block["_atom_site_fract_y"] = atom_site_fract_y
block["_atom_site_fract_z"] = atom_site_fract_z
block["_atom_site_occupancy"] = atom_site_occupancy
loops.append(["_atom_site_type_symbol",
"_atom_site_label",
"_atom_site_symmetry_multiplicity",
"_atom_site_fract_x",
"_atom_site_fract_y",
"_atom_site_fract_z",
"_atom_site_occupancy"])
if write_magmoms:
block["_atom_site_moment_label"] = atom_site_moment_label
block[
"_atom_site_moment_crystalaxis_x"] = atom_site_moment_crystalaxis_x
block[
"_atom_site_moment_crystalaxis_y"] = atom_site_moment_crystalaxis_y
block[
"_atom_site_moment_crystalaxis_z"] = atom_site_moment_crystalaxis_z
loops.append(["_atom_site_moment_label",
"_atom_site_moment_crystalaxis_x",
"_atom_site_moment_crystalaxis_y",
"_atom_site_moment_crystalaxis_z"])
d = OrderedDict()
d[comp.reduced_formula] = CifBlock(block, loops, comp.reduced_formula)
self._cf = CifFile(d)
@property
def ciffile(self):
"""
Returns: CifFile associated with the CifWriter.
"""
return self._cf
def __str__(self):
"""
Returns the cif as a string.
"""
return self._cf.__str__()
def write_file(self, filename):
"""
Write the cif file.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
def str2float(text):
"""
Remove uncertainty brackets from strings and return the float.
"""
try:
# Note that the ending ) is sometimes missing. That is why the code has
# been modified to treat it as optional. Same logic applies to lists.
return float(re.sub(r"\(.+\)*", "", text))
except TypeError:
if isinstance(text, list) and len(text) == 1:
return float(re.sub(r"\(.+\)*", "", text[0]))
except ValueError as ex:
if text.strip() == ".":
return 0
raise ex
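# Illustrative behaviour: str2float("0.3333(1)") -> 0.3333, str2float(".") -> 0.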
|
gVallverdu/pymatgen
|
pymatgen/io/cif.py
|
Python
|
mit
| 59,382
|
[
"Avogadro",
"CRYSTAL",
"pymatgen"
] |
5d9bd7b76369af70cbe94421dfc493d948dc697b740b0bd08294ee23c959ebf7
|
#!/usr/bin/env python
############################################################
from vtk import *
############################################################
# Create sources
line1 = vtkLineSource()
line1.SetPoint1( 1, 0, 0 )
line1.SetPoint2( -1, 0, 0 )
line1.SetResolution( 32 )
points = vtkPoints()
points.InsertNextPoint( 1, 0, 0 )
points.InsertNextPoint( -.5, 1, 0 )
points.InsertNextPoint( 0, 1, 2 )
points.InsertNextPoint( 2, 1, -1 )
points.InsertNextPoint( -1, 0, 0 )
line2 = vtkLineSource()
line2.SetPoints( points )
line2.SetResolution( 16 )
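# Supplying an explicit vtkPoints set makes vtkLineSource generate a broken
# line (polyline) through the points rather than a single segment, which is
# what gives the second actor its bent shape.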
# Create mappers
mapper1 = vtkPolyDataMapper()
mapper1.SetInputConnection( line1.GetOutputPort() )
mapper2 = vtkPolyDataMapper()
mapper2.SetInputConnection( line2.GetOutputPort() )
# Create actors
actor1 = vtkActor()
actor1.SetMapper( mapper1 )
actor1.GetProperty().SetColor( 1., 0., 0. )
actor2 = vtkActor()
actor2.SetMapper( mapper2 )
actor2.GetProperty().SetColor( 0., 0., 1. )
actor2.GetProperty().SetLineWidth( 2.5 )
# Create renderer
renderer = vtkRenderer()
renderer.AddViewProp( actor1 )
renderer.AddViewProp( actor2 )
renderer.SetBackground( .3, .4 ,.5 )
# Create render window
window = vtkRenderWindow()
window.AddRenderer( renderer )
window.SetSize( 500, 500 )
# Create interactor
interactor = vtkRenderWindowInteractor()
interactor.SetRenderWindow( window )
# Start interaction
window.Render()
interactor.Start()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Examples/Graphics/Python/SegmentAndBrokenLineSources.py
|
Python
|
gpl-3.0
| 1,389
|
[
"VTK"
] |
3d34c1ffcc884f93c773f6b90d3f6483747b35d4bb885d464c7a632d33ebd66d
|
# -*- coding: utf-8 -*-
#
# one-neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
One neuron example
------------------
This script simulates a neuron driven by a constant external current
and records its membrane potential.
'''
# First, we import all necessary modules for simulation, analysis and
# plotting. Additionally, we set the verbosity to suppress info
# messages and reset the kernel.
# Resetting the kernel allows you to execute the script several
# times in a Python shell without interferences from previous NEST
# simulations. Thus, without resetting the kernel the network status
# including connections between nodes, status of neurons, devices and
# intrinsic time clocks, is kept and influences the next simulations.
import nest
import nest.voltage_trace
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
# Second, the nodes (neurons and devices) are created using `Create()`.
# We store the returned handles in variables for later reference.
# The `Create` function also allows you to create multiple nodes
# e.g. nest.Create('iaf_neuron',5)
# The default parameters of the model can also be configured using 'Create'
# by including a list of parameter dictionaries
# e.g. `nest.Create("iaf_neuron", params=[{'I_e':376.0}])`
# or `nest.Create("voltmeter", [{"withgid": True, "withtime": True}])`.
# In this example we will configure these parameters in an additional
# step, which is explained in the third section.
neuron = nest.Create("iaf_neuron")
voltmeter = nest.Create("voltmeter")
# Third, the neuron and the voltmeter are configured using
# `SetStatus()`, which expects a list of node handles and a list of
# parameter dictionaries.
# In this example we use `SetStatus()` to configure the constant
# current input to the neuron. We also want to record the global id of
# the observed nodes and set the withgid flag of the voltmeter to
# True.
nest.SetStatus(neuron, "I_e", 376.0)
nest.SetStatus(voltmeter, [{"withgid": True}])
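# (If desired, nest.GetStatus(neuron, "I_e") can be used to confirm the value
# that was just set.)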
# Fourth, the neuron is connected to the voltmeter. The command
# `Connect()` has different variants. Plain `Connect()` just takes the
# handles of pre- and post-synaptic nodes and uses the default values
# for weight and delay. `ConvergentConnect()` takes four arguments:
# lists of pre- and post-synaptic nodes and lists of weights and
# delays. Note that the connection direction for the voltmeter is
# reversed compared to the spike detector, because it observes the
# neuron instead of receiving events from it. Thus, `Connect()`
# reflects the direction of signal flow in the simulation kernel
# rather than the physical process of inserting an electrode into the
# neuron. The latter semantics is presently not available in NEST.
nest.Connect(voltmeter, neuron)
# Now we simulate the network using `Simulate()`, which takes the
# desired simulation time in milliseconds.
nest.Simulate(1000.0)
# Finally, we plot the neuron's membrane potential as a function of
# time.
nest.voltage_trace.from_device(voltmeter)
|
INM-6/nest-git-migration
|
pynest/examples/one-neuron.py
|
Python
|
gpl-2.0
| 3,633
|
[
"NEURON"
] |
8d35118f3b9997c12040f436280c732c769c681ac5e84760ca673e4af4726a4a
|
# sql/compiler.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`.compiler.SQLCompiler` - renders SQL
strings
:class:`.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:doc:`/ext/compiler`.
"""
import contextlib
import re
from . import schema, sqltypes, operators, functions, visitors, \
elements, selectable, crud
from .. import util, exc
import itertools
RESERVED_WORDS = set([
'all', 'analyse', 'analyze', 'and', 'any', 'array',
'as', 'asc', 'asymmetric', 'authorization', 'between',
'binary', 'both', 'case', 'cast', 'check', 'collate',
'column', 'constraint', 'create', 'cross', 'current_date',
'current_role', 'current_time', 'current_timestamp',
'current_user', 'default', 'deferrable', 'desc',
'distinct', 'do', 'else', 'end', 'except', 'false',
'for', 'foreign', 'freeze', 'from', 'full', 'grant',
'group', 'having', 'ilike', 'in', 'initially', 'inner',
'intersect', 'into', 'is', 'isnull', 'join', 'leading',
'left', 'like', 'limit', 'localtime', 'localtimestamp',
'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset',
'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps',
'placing', 'primary', 'references', 'right', 'select',
'session_user', 'set', 'similar', 'some', 'symmetric', 'table',
'then', 'to', 'trailing', 'true', 'union', 'unique', 'user',
'using', 'verbose', 'when', 'where'])
LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I)
ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in range(0, 10)]).union(['$'])
BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE)
BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]*)(?![:\w\$])', re.UNICODE)
BIND_TEMPLATES = {
'pyformat': "%%(%(name)s)s",
'qmark': "?",
'format': "%%s",
'numeric': ":[_POSITION]",
'named': ":%(name)s"
}
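# Illustration (assumption, not part of the original source): for a bind
# parameter named "x", bindparam_string() fills these templates roughly as
#   'named'    -> ":x"
#   'pyformat' -> "%(x)s"
#   'qmark'    -> "?"
#   'format'   -> "%s"
#   'numeric'  -> ":[_POSITION]"  (rewritten to ":1", ":2", ... afterwards by
#                                  _apply_numbered_params())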
OPERATORS = {
# binary
operators.and_: ' AND ',
operators.or_: ' OR ',
operators.add: ' + ',
operators.mul: ' * ',
operators.sub: ' - ',
operators.div: ' / ',
operators.mod: ' % ',
operators.truediv: ' / ',
operators.neg: '-',
operators.lt: ' < ',
operators.le: ' <= ',
operators.ne: ' != ',
operators.gt: ' > ',
operators.ge: ' >= ',
operators.eq: ' = ',
operators.is_distinct_from: ' IS DISTINCT FROM ',
operators.isnot_distinct_from: ' IS NOT DISTINCT FROM ',
operators.concat_op: ' || ',
operators.match_op: ' MATCH ',
operators.notmatch_op: ' NOT MATCH ',
operators.in_op: ' IN ',
operators.notin_op: ' NOT IN ',
operators.comma_op: ', ',
operators.from_: ' FROM ',
operators.as_: ' AS ',
operators.is_: ' IS ',
operators.isnot: ' IS NOT ',
operators.collate: ' COLLATE ',
# unary
operators.exists: 'EXISTS ',
operators.distinct_op: 'DISTINCT ',
operators.inv: 'NOT ',
operators.any_op: 'ANY ',
operators.all_op: 'ALL ',
# modifiers
operators.desc_op: ' DESC',
operators.asc_op: ' ASC',
operators.nullsfirst_op: ' NULLS FIRST',
operators.nullslast_op: ' NULLS LAST',
}
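# Illustration (assumption, not part of the original source): these strings are
# what _generate_generic_binary() / _generate_generic_unary_*() splice between
# operands, so e.g. column('x') > 5 compiles roughly to "x > :x_1" via
# operators.gt, and ~exists(...) to "NOT EXISTS (...)" via operators.inv.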
FUNCTIONS = {
functions.coalesce: 'coalesce%(expr)s',
functions.current_date: 'CURRENT_DATE',
functions.current_time: 'CURRENT_TIME',
functions.current_timestamp: 'CURRENT_TIMESTAMP',
functions.current_user: 'CURRENT_USER',
functions.localtime: 'LOCALTIME',
functions.localtimestamp: 'LOCALTIMESTAMP',
functions.random: 'random%(expr)s',
functions.sysdate: 'sysdate',
functions.session_user: 'SESSION_USER',
functions.user: 'USER'
}
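# Illustration (assumption, not part of the original source): visit_function()
# consults this map for known generic functions and otherwise falls back to
# "name(args)", so func.current_timestamp() renders as "CURRENT_TIMESTAMP"
# (no parentheses) while an unknown function such as func.lower(...) renders
# as "lower(...)".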
EXTRACT_MAP = {
'month': 'month',
'day': 'day',
'year': 'year',
'second': 'second',
'hour': 'hour',
'doy': 'doy',
'minute': 'minute',
'quarter': 'quarter',
'dow': 'dow',
'week': 'week',
'epoch': 'epoch',
'milliseconds': 'milliseconds',
'microseconds': 'microseconds',
'timezone_hour': 'timezone_hour',
'timezone_minute': 'timezone_minute'
}
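# Illustration (assumption, not part of the original source): visit_extract()
# translates field names through this map, so extract('year', some_col)
# compiles to "EXTRACT(year FROM some_col)"; dialects override the map where
# their field names differ.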
COMPOUND_KEYWORDS = {
selectable.CompoundSelect.UNION: 'UNION',
selectable.CompoundSelect.UNION_ALL: 'UNION ALL',
selectable.CompoundSelect.EXCEPT: 'EXCEPT',
selectable.CompoundSelect.EXCEPT_ALL: 'EXCEPT ALL',
selectable.CompoundSelect.INTERSECT: 'INTERSECT',
selectable.CompoundSelect.INTERSECT_ALL: 'INTERSECT ALL'
}
class Compiled(object):
"""Represent a compiled SQL or DDL expression.
The ``__str__`` method of the ``Compiled`` object should produce
the actual text of the statement. ``Compiled`` objects are
specific to their underlying database dialect, and also may
or may not be specific to the columns referenced within a
particular set of bind parameters. In no case should the
``Compiled`` object be dependent on the actual values of those
bind parameters, even though it may reference those values as
defaults.
"""
_cached_metadata = None
execution_options = util.immutabledict()
"""
Execution options propagated from the statement. In some cases,
sub-elements of the statement can modify these.
"""
def __init__(self, dialect, statement, bind=None,
schema_translate_map=None,
compile_kwargs=util.immutabledict()):
"""Construct a new :class:`.Compiled` object.
:param dialect: :class:`.Dialect` to compile against.
:param statement: :class:`.ClauseElement` to be compiled.
:param bind: Optional Engine or Connection to compile this
statement against.
:param schema_translate_map: dictionary of schema names to be
translated when forming the resultant SQL
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
:param compile_kwargs: additional kwargs that will be
passed to the initial call to :meth:`.Compiled.process`.
"""
self.dialect = dialect
self.bind = bind
self.preparer = self.dialect.identifier_preparer
if schema_translate_map:
self.preparer = self.preparer._with_schema_translate(
schema_translate_map)
if statement is not None:
self.statement = statement
self.can_execute = statement.supports_execution
if self.can_execute:
self.execution_options = statement._execution_options
self.string = self.process(self.statement, **compile_kwargs)
@util.deprecated("0.7", ":class:`.Compiled` objects now compile "
"within the constructor.")
def compile(self):
"""Produce the internal string representation of this element.
"""
pass
def _execute_on_connection(self, connection, multiparams, params):
if self.can_execute:
return connection._execute_compiled(self, multiparams, params)
else:
raise exc.ObjectNotExecutableError(self.statement)
@property
def sql_compiler(self):
"""Return a Compiled that is capable of processing SQL expressions.
If this compiler is one, it would likely just return 'self'.
"""
raise NotImplementedError()
def process(self, obj, **kwargs):
return obj._compiler_dispatch(self, **kwargs)
def __str__(self):
"""Return the string text of the generated SQL or DDL."""
return self.string or ''
def construct_params(self, params=None):
"""Return the bind params for this compiled object.
:param params: a dict of string/object pairs whose values will
override bind values compiled in to the
statement.
"""
raise NotImplementedError()
@property
def params(self):
"""Return the bind params for this compiled object."""
return self.construct_params()
def execute(self, *multiparams, **params):
"""Execute this compiled object."""
e = self.bind
if e is None:
raise exc.UnboundExecutionError(
"This Compiled object is not bound to any Engine "
"or Connection.")
return e._execute_compiled(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Execute this compiled object and return the result's
scalar value."""
return self.execute(*multiparams, **params).scalar()
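# Usage sketch (an assumption for illustration, not part of the original
# source): a Compiled object is normally obtained via ClauseElement.compile()
# rather than constructed directly, e.g.
#
#   from sqlalchemy.sql import table, column, select
#   stmt = select([column('x')]).select_from(table('t')).where(column('x') > 5)
#   compiled = stmt.compile()   # SQLCompiler for the default dialect
#   str(compiled)               # roughly "SELECT x \nFROM t \nWHERE x > :x_1"
#   compiled.params             # {'x_1': 5}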
class TypeCompiler(util.with_metaclass(util.EnsureKWArgType, object)):
"""Produces DDL specification for TypeEngine objects."""
ensure_kwarg = r'visit_\w+'
def __init__(self, dialect):
self.dialect = dialect
def process(self, type_, **kw):
return type_._compiler_dispatch(self, **kw)
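# Usage sketch (assumption, not part of the original source): a dialect
# instance owns one TypeCompiler instance, so type rendering goes through e.g.
#   dialect.type_compiler.process(sqltypes.Integer())   # -> "INTEGER"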
class _CompileLabel(visitors.Visitable):
"""lightweight label object which acts as an expression.Label."""
__visit_name__ = 'label'
__slots__ = 'element', 'name'
def __init__(self, col, name, alt_names=()):
self.element = col
self.name = name
self._alt_names = (col,) + alt_names
@property
def proxy_set(self):
return self.element.proxy_set
@property
def type(self):
return self.element.type
def self_group(self, **kw):
return self
class SQLCompiler(Compiled):
"""Default implementation of :class:`.Compiled`.
Compiles :class:`.ClauseElement` objects into SQL strings.
"""
extract_map = EXTRACT_MAP
compound_keywords = COMPOUND_KEYWORDS
isdelete = isinsert = isupdate = False
"""class-level defaults which can be set at the instance
level to define if this Compiled instance represents
INSERT/UPDATE/DELETE
"""
isplaintext = False
returning = None
"""holds the "returning" collection of columns if
the statement is CRUD and defines returning columns
either implicitly or explicitly
"""
returning_precedes_values = False
"""set to True classwide to generate RETURNING
clauses before the VALUES or WHERE clause (i.e. MSSQL)
"""
render_table_with_column_in_update_from = False
"""set to True classwide to indicate the SET clause
in a multi-table UPDATE statement should qualify
columns with the table name (i.e. MySQL only)
"""
ansi_bind_rules = False
"""SQL 92 doesn't allow bind parameters to be used
in the columns clause of a SELECT, nor does it allow
ambiguous expressions like "? = ?". A compiler
    subclass can set this flag to True if the target
driver/DB enforces this
"""
_textual_ordered_columns = False
"""tell the result object that the column names as rendered are important,
but they are also "ordered" vs. what is in the compiled object here.
"""
_ordered_columns = True
"""
if False, means we can't be sure the list of entries
in _result_columns is actually the rendered order. Usually
True unless using an unordered TextAsFrom.
"""
insert_prefetch = update_prefetch = ()
def __init__(self, dialect, statement, column_keys=None,
inline=False, **kwargs):
"""Construct a new :class:`.SQLCompiler` object.
:param dialect: :class:`.Dialect` to be used
:param statement: :class:`.ClauseElement` to be compiled
:param column_keys: a list of column names to be compiled into an
INSERT or UPDATE statement.
:param inline: whether to generate INSERT statements as "inline", e.g.
not formatted to return any generated defaults
:param kwargs: additional keyword arguments to be consumed by the
superclass.
"""
self.column_keys = column_keys
# compile INSERT/UPDATE defaults/sequences inlined (no pre-
# execute)
self.inline = inline or getattr(statement, 'inline', False)
# a dictionary of bind parameter keys to BindParameter
# instances.
self.binds = {}
# a dictionary of BindParameter instances to "compiled" names
# that are actually present in the generated SQL
self.bind_names = util.column_dict()
# stack which keeps track of nested SELECT statements
self.stack = []
# relates label names in the final SQL to a tuple of local
# column/label name, ColumnElement object (if any) and
# TypeEngine. ResultProxy uses this for type processing and
# column targeting
self._result_columns = []
# true if the paramstyle is positional
self.positional = dialect.positional
if self.positional:
self.positiontup = []
self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
self.ctes = None
self.label_length = dialect.label_length \
or dialect.max_identifier_length
# a map which tracks "anonymous" identifiers that are created on
# the fly here
self.anon_map = util.PopulateDict(self._process_anon)
# a map which tracks "truncated" names based on
# dialect.label_length or dialect.max_identifier_length
self.truncated_names = {}
Compiled.__init__(self, dialect, statement, **kwargs)
if (
self.isinsert or self.isupdate or self.isdelete
) and statement._returning:
self.returning = statement._returning
if self.positional and dialect.paramstyle == 'numeric':
self._apply_numbered_params()
@property
def prefetch(self):
return list(self.insert_prefetch + self.update_prefetch)
@util.memoized_instancemethod
def _init_cte_state(self):
"""Initialize collections related to CTEs only if
a CTE is located, to save on the overhead of
these collections otherwise.
"""
# collect CTEs to tack on top of a SELECT
self.ctes = util.OrderedDict()
self.ctes_by_name = {}
self.ctes_recursive = False
if self.positional:
self.cte_positional = {}
@contextlib.contextmanager
def _nested_result(self):
"""special API to support the use case of 'nested result sets'"""
result_columns, ordered_columns = (
self._result_columns, self._ordered_columns)
self._result_columns, self._ordered_columns = [], False
try:
if self.stack:
entry = self.stack[-1]
entry['need_result_map_for_nested'] = True
else:
entry = None
yield self._result_columns, self._ordered_columns
finally:
if entry:
entry.pop('need_result_map_for_nested')
self._result_columns, self._ordered_columns = (
result_columns, ordered_columns)
def _apply_numbered_params(self):
poscount = itertools.count(1)
self.string = re.sub(
r'\[_POSITION\]',
lambda m: str(util.next(poscount)),
self.string)
@util.memoized_property
def _bind_processors(self):
return dict(
(key, value) for key, value in
((self.bind_names[bindparam],
bindparam.type._cached_bind_processor(self.dialect))
for bindparam in self.bind_names)
if value is not None
)
def is_subquery(self):
return len(self.stack) > 1
@property
def sql_compiler(self):
return self
def construct_params(self, params=None, _group_number=None, _check=True):
"""return a dictionary of bind parameter keys and values"""
if params:
pd = {}
for bindparam in self.bind_names:
name = self.bind_names[bindparam]
if bindparam.key in params:
pd[name] = params[bindparam.key]
elif name in params:
pd[name] = params[name]
elif _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d" %
(bindparam.key, _group_number))
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key)
elif bindparam.callable:
pd[name] = bindparam.effective_value
else:
pd[name] = bindparam.value
return pd
else:
pd = {}
for bindparam in self.bind_names:
if _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d" %
(bindparam.key, _group_number))
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key)
if bindparam.callable:
pd[self.bind_names[bindparam]] = bindparam.effective_value
else:
pd[self.bind_names[bindparam]] = bindparam.value
return pd
@property
def params(self):
"""Return the bind param dictionary embedded into this
compiled object, for those values that are present."""
return self.construct_params(_check=False)
@util.dependencies("sqlalchemy.engine.result")
def _create_result_map(self, result):
"""utility method used for unit tests only."""
return result.ResultMetaData._create_result_map(self._result_columns)
def default_from(self):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
"""
return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
def visit_label_reference(
self, element, within_columns_clause=False, **kwargs):
if self.stack and self.dialect.supports_simple_order_by_label:
selectable = self.stack[-1]['selectable']
with_cols, only_froms, only_cols = selectable._label_resolve_dict
if within_columns_clause:
resolve_dict = only_froms
else:
resolve_dict = only_cols
# this can be None in the case that a _label_reference()
# were subject to a replacement operation, in which case
# the replacement of the Label element may have changed
# to something else like a ColumnClause expression.
order_by_elem = element.element._order_by_label_element
if order_by_elem is not None and order_by_elem.name in \
resolve_dict and \
order_by_elem.shares_lineage(
resolve_dict[order_by_elem.name]):
kwargs['render_label_as_label'] = \
element.element._order_by_label_element
return self.process(
element.element, within_columns_clause=within_columns_clause,
**kwargs)
def visit_textual_label_reference(
self, element, within_columns_clause=False, **kwargs):
if not self.stack:
# compiling the element outside of the context of a SELECT
return self.process(
element._text_clause
)
selectable = self.stack[-1]['selectable']
with_cols, only_froms, only_cols = selectable._label_resolve_dict
try:
if within_columns_clause:
col = only_froms[element.element]
else:
col = with_cols[element.element]
except KeyError:
# treat it like text()
util.warn_limited(
"Can't resolve label reference %r; converting to text()",
util.ellipses_string(element.element))
return self.process(
element._text_clause
)
else:
kwargs['render_label_as_label'] = col
return self.process(
col, within_columns_clause=within_columns_clause, **kwargs)
def visit_label(self, label,
add_to_result_map=None,
within_label_clause=False,
within_columns_clause=False,
render_label_as_label=None,
**kw):
# only render labels within the columns clause
# or ORDER BY clause of a select. dialect-specific compilers
# can modify this behavior.
render_label_with_as = (within_columns_clause and not
within_label_clause)
render_label_only = render_label_as_label is label
if render_label_only or render_label_with_as:
if isinstance(label.name, elements._truncated_label):
labelname = self._truncated_identifier("colident", label.name)
else:
labelname = label.name
if render_label_with_as:
if add_to_result_map is not None:
add_to_result_map(
labelname,
label.name,
(label, labelname, ) + label._alt_names,
label.type
)
return label.element._compiler_dispatch(
self, within_columns_clause=True,
within_label_clause=True, **kw) + \
OPERATORS[operators.as_] + \
self.preparer.format_label(label, labelname)
elif render_label_only:
return self.preparer.format_label(label, labelname)
else:
return label.element._compiler_dispatch(
self, within_columns_clause=False, **kw)
def _fallback_column_name(self, column):
raise exc.CompileError("Cannot compile Column object until "
"its 'name' is assigned.")
def visit_column(self, column, add_to_result_map=None,
include_table=True, **kwargs):
name = orig_name = column.name
if name is None:
name = self._fallback_column_name(column)
is_literal = column.is_literal
if not is_literal and isinstance(name, elements._truncated_label):
name = self._truncated_identifier("colident", name)
if add_to_result_map is not None:
add_to_result_map(
name,
orig_name,
(column, name, column.key),
column.type
)
if is_literal:
name = self.escape_literal_column(name)
else:
name = self.preparer.quote(name)
table = column.table
if table is None or not include_table or not table.named_with_column:
return name
else:
effective_schema = self.preparer.schema_for_object(table)
if effective_schema:
schema_prefix = self.preparer.quote_schema(
effective_schema) + '.'
else:
schema_prefix = ''
tablename = table.name
if isinstance(tablename, elements._truncated_label):
tablename = self._truncated_identifier("alias", tablename)
return schema_prefix + \
self.preparer.quote(tablename) + \
"." + name
def escape_literal_column(self, text):
"""provide escaping for the literal_column() construct."""
# TODO: some dialects might need different behavior here
return text.replace('%', '%%')
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kw):
kw['type_expression'] = typeclause
return self.dialect.type_compiler.process(typeclause.type, **kw)
def post_process_text(self, text):
return text
def visit_textclause(self, textclause, **kw):
def do_bindparam(m):
name = m.group(1)
if name in textclause._bindparams:
return self.process(textclause._bindparams[name], **kw)
else:
return self.bindparam_string(name, **kw)
if not self.stack:
self.isplaintext = True
# un-escape any \:params
return BIND_PARAMS_ESC.sub(
lambda m: m.group(1),
BIND_PARAMS.sub(
do_bindparam,
self.post_process_text(textclause.text))
)
def visit_text_as_from(self, taf,
compound_index=None,
asfrom=False,
parens=True, **kw):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = toplevel or \
(
compound_index == 0 and entry.get(
'need_result_map_for_compound', False)
) or entry.get('need_result_map_for_nested', False)
if populate_result_map:
self._ordered_columns = \
self._textual_ordered_columns = taf.positional
for c in taf.column_args:
self.process(c, within_columns_clause=True,
add_to_result_map=self._add_to_result_map)
text = self.process(taf.element, **kw)
if asfrom and parens:
text = "(%s)" % text
return text
def visit_null(self, expr, **kw):
return 'NULL'
def visit_true(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'true'
else:
return "1"
def visit_false(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'false'
else:
return "0"
def visit_clauselist(self, clauselist, **kw):
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
return sep.join(
s for s in
(
c._compiler_dispatch(self, **kw)
for c in clauselist.clauses)
if s)
def visit_case(self, clause, **kwargs):
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += "WHEN " + cond._compiler_dispatch(
self, **kwargs
) + " THEN " + result._compiler_dispatch(
self, **kwargs) + " "
if clause.else_ is not None:
x += "ELSE " + clause.else_._compiler_dispatch(
self, **kwargs
) + " "
x += "END"
return x
def visit_type_coerce(self, type_coerce, **kw):
return type_coerce.typed_expression._compiler_dispatch(self, **kw)
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % \
(cast.clause._compiler_dispatch(self, **kwargs),
cast.typeclause._compiler_dispatch(self, **kwargs))
def _format_frame_clause(self, range_, **kw):
return '%s AND %s' % (
"UNBOUNDED PRECEDING"
if range_[0] is elements.RANGE_UNBOUNDED
else "CURRENT ROW" if range_[0] is elements.RANGE_CURRENT
else "%s PRECEDING" % (self.process(range_[0], **kw), ),
"UNBOUNDED FOLLOWING"
if range_[1] is elements.RANGE_UNBOUNDED
else "CURRENT ROW" if range_[1] is elements.RANGE_CURRENT
else "%s FOLLOWING" % (self.process(range_[1], **kw), )
)
def visit_over(self, over, **kwargs):
if over.range_:
range_ = "RANGE BETWEEN %s" % self._format_frame_clause(
over.range_, **kwargs)
elif over.rows:
range_ = "ROWS BETWEEN %s" % self._format_frame_clause(
over.rows, **kwargs)
else:
range_ = None
return "%s OVER (%s)" % (
over.element._compiler_dispatch(self, **kwargs),
' '.join([
'%s BY %s' % (
word, clause._compiler_dispatch(self, **kwargs)
)
for word, clause in (
('PARTITION', over.partition_by),
('ORDER', over.order_by)
)
if clause is not None and len(clause)
] + ([range_] if range_ else [])
)
)
def visit_withingroup(self, withingroup, **kwargs):
return "%s WITHIN GROUP (ORDER BY %s)" % (
withingroup.element._compiler_dispatch(self, **kwargs),
withingroup.order_by._compiler_dispatch(self, **kwargs)
)
def visit_funcfilter(self, funcfilter, **kwargs):
return "%s FILTER (WHERE %s)" % (
funcfilter.func._compiler_dispatch(self, **kwargs),
funcfilter.criterion._compiler_dispatch(self, **kwargs)
)
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (
field, extract.expr._compiler_dispatch(self, **kwargs))
def visit_function(self, func, add_to_result_map=None, **kwargs):
if add_to_result_map is not None:
add_to_result_map(
func.name, func.name, (), func.type
)
disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
if disp:
return disp(func, **kwargs)
else:
name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s")
return ".".join(list(func.packagenames) + [name]) % \
{'expr': self.function_argspec(func, **kwargs)}
def visit_next_value_func(self, next_value, **kw):
return self.visit_sequence(next_value.sequence)
def visit_sequence(self, sequence):
raise NotImplementedError(
"Dialect '%s' does not support sequence increments." %
self.dialect.name
)
def function_argspec(self, func, **kwargs):
return func.clause_expr._compiler_dispatch(self, **kwargs)
def visit_compound_select(self, cs, asfrom=False,
parens=True, compound_index=0, **kwargs):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
need_result_map = toplevel or \
(compound_index == 0
and entry.get('need_result_map_for_compound', False))
self.stack.append(
{
'correlate_froms': entry['correlate_froms'],
'asfrom_froms': entry['asfrom_froms'],
'selectable': cs,
'need_result_map_for_compound': need_result_map
})
keyword = self.compound_keywords.get(cs.keyword)
text = (" " + keyword + " ").join(
(c._compiler_dispatch(self,
asfrom=asfrom, parens=False,
compound_index=i, **kwargs)
for i, c in enumerate(cs.selects))
)
group_by = cs._group_by_clause._compiler_dispatch(
self, asfrom=asfrom, **kwargs)
if group_by:
text += " GROUP BY " + group_by
text += self.order_by_clause(cs, **kwargs)
text += (cs._limit_clause is not None
or cs._offset_clause is not None) and \
self.limit_clause(cs, **kwargs) or ""
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom and parens:
return "(" + text + ")"
else:
return text
def _get_operator_dispatch(self, operator_, qualifier1, qualifier2):
attrname = "visit_%s_%s%s" % (
operator_.__name__, qualifier1,
"_" + qualifier2 if qualifier2 else "")
return getattr(self, attrname, None)
def visit_unary(self, unary, **kw):
if unary.operator:
if unary.modifier:
raise exc.CompileError(
"Unary expression does not support operator "
"and modifier simultaneously")
disp = self._get_operator_dispatch(
unary.operator, "unary", "operator")
if disp:
return disp(unary, unary.operator, **kw)
else:
return self._generate_generic_unary_operator(
unary, OPERATORS[unary.operator], **kw)
elif unary.modifier:
disp = self._get_operator_dispatch(
unary.modifier, "unary", "modifier")
if disp:
return disp(unary, unary.modifier, **kw)
else:
return self._generate_generic_unary_modifier(
unary, OPERATORS[unary.modifier], **kw)
else:
raise exc.CompileError(
"Unary expression has no operator or modifier")
def visit_istrue_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return self.process(element.element, **kw)
else:
return "%s = 1" % self.process(element.element, **kw)
def visit_isfalse_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return "NOT %s" % self.process(element.element, **kw)
else:
return "%s = 0" % self.process(element.element, **kw)
def visit_notmatch_op_binary(self, binary, operator, **kw):
return "NOT %s" % self.visit_binary(
binary, override_operator=operators.match_op)
def visit_binary(self, binary, override_operator=None,
eager_grouping=False, **kw):
# don't allow "? = ?" to render
if self.ansi_bind_rules and \
isinstance(binary.left, elements.BindParameter) and \
isinstance(binary.right, elements.BindParameter):
kw['literal_binds'] = True
operator_ = override_operator or binary.operator
disp = self._get_operator_dispatch(operator_, "binary", None)
if disp:
return disp(binary, operator_, **kw)
else:
try:
opstring = OPERATORS[operator_]
except KeyError:
raise exc.UnsupportedCompilationError(self, operator_)
else:
return self._generate_generic_binary(binary, opstring, **kw)
def visit_custom_op_binary(self, element, operator, **kw):
kw['eager_grouping'] = operator.eager_grouping
return self._generate_generic_binary(
element, " " + operator.opstring + " ", **kw)
def visit_custom_op_unary_operator(self, element, operator, **kw):
return self._generate_generic_unary_operator(
element, operator.opstring + " ", **kw)
def visit_custom_op_unary_modifier(self, element, operator, **kw):
return self._generate_generic_unary_modifier(
element, " " + operator.opstring, **kw)
def _generate_generic_binary(
self, binary, opstring, eager_grouping=False, **kw):
_in_binary = kw.get('_in_binary', False)
kw['_in_binary'] = True
text = binary.left._compiler_dispatch(
self, eager_grouping=eager_grouping, **kw) + \
opstring + \
binary.right._compiler_dispatch(
self, eager_grouping=eager_grouping, **kw)
if _in_binary and eager_grouping:
text = "(%s)" % text
return text
def _generate_generic_unary_operator(self, unary, opstring, **kw):
return opstring + unary.element._compiler_dispatch(self, **kw)
def _generate_generic_unary_modifier(self, unary, opstring, **kw):
return unary.element._compiler_dispatch(self, **kw) + opstring
@util.memoized_property
def _like_percent_literal(self):
return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE)
def visit_contains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notcontains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_startswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(
binary.right
)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notstartswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(
binary.right
)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_endswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notendswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_like_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
# TODO: use ternary here, not "and"/ "or"
return '%s LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notlike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) NOT LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_between_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " BETWEEN SYMMETRIC "
if symmetric else " BETWEEN ", **kw)
def visit_notbetween_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " NOT BETWEEN SYMMETRIC "
if symmetric else " NOT BETWEEN ", **kw)
def visit_bindparam(self, bindparam, within_columns_clause=False,
literal_binds=False,
skip_bind_expression=False,
**kwargs):
if not skip_bind_expression and bindparam.type._has_bind_expression:
bind_expression = bindparam.type.bind_expression(bindparam)
return self.process(bind_expression,
skip_bind_expression=True)
if literal_binds or \
(within_columns_clause and
self.ansi_bind_rules):
if bindparam.value is None and bindparam.callable is None:
raise exc.CompileError("Bind parameter '%s' without a "
"renderable value not allowed here."
% bindparam.key)
return self.render_literal_bindparam(
bindparam, within_columns_clause=True, **kwargs)
name = self._truncate_bindparam(bindparam)
if name in self.binds:
existing = self.binds[name]
if existing is not bindparam:
if (existing.unique or bindparam.unique) and \
not existing.proxy_set.intersection(
bindparam.proxy_set):
raise exc.CompileError(
"Bind parameter '%s' conflicts with "
"unique bind parameter of the same name" %
bindparam.key
)
elif existing._is_crud or bindparam._is_crud:
raise exc.CompileError(
"bindparam() name '%s' is reserved "
"for automatic usage in the VALUES or SET "
"clause of this "
"insert/update statement. Please use a "
"name other than column name when using bindparam() "
"with insert() or update() (for example, 'b_%s')." %
(bindparam.key, bindparam.key)
)
self.binds[bindparam.key] = self.binds[name] = bindparam
return self.bindparam_string(name, **kwargs)
def render_literal_bindparam(self, bindparam, **kw):
value = bindparam.effective_value
return self.render_literal_value(value, bindparam.type)
def render_literal_value(self, value, type_):
"""Render the value of a bind parameter as a quoted literal.
This is used for statement sections that do not accept bind parameters
on the target driver/database.
This should be implemented by subclasses using the quoting services
of the DBAPI.
"""
processor = type_._cached_literal_processor(self.dialect)
if processor:
return processor(value)
else:
raise NotImplementedError(
"Don't know how to literal-quote value %r" % value)
def _truncate_bindparam(self, bindparam):
if bindparam in self.bind_names:
return self.bind_names[bindparam]
bind_name = bindparam.key
if isinstance(bind_name, elements._truncated_label):
bind_name = self._truncated_identifier("bindparam", bind_name)
# add to bind_names for translation
self.bind_names[bindparam] = bind_name
return bind_name
def _truncated_identifier(self, ident_class, name):
if (ident_class, name) in self.truncated_names:
return self.truncated_names[(ident_class, name)]
anonname = name.apply_map(self.anon_map)
if len(anonname) > self.label_length - 6:
counter = self.truncated_names.get(ident_class, 1)
truncname = anonname[0:max(self.label_length - 6, 0)] + \
"_" + hex(counter)[2:]
self.truncated_names[ident_class] = counter + 1
else:
truncname = anonname
self.truncated_names[(ident_class, name)] = truncname
return truncname
def _anonymize(self, name):
return name % self.anon_map
def _process_anon(self, key):
(ident, derived) = key.split(' ', 1)
anonymous_counter = self.anon_map.get(derived, 1)
self.anon_map[derived] = anonymous_counter + 1
return derived + "_" + str(anonymous_counter)
def bindparam_string(self, name, positional_names=None, **kw):
if self.positional:
if positional_names is not None:
positional_names.append(name)
else:
self.positiontup.append(name)
return self.bindtemplate % {'name': name}
def visit_cte(self, cte, asfrom=False, ashint=False,
fromhints=None,
**kwargs):
self._init_cte_state()
if isinstance(cte.name, elements._truncated_label):
cte_name = self._truncated_identifier("alias", cte.name)
else:
cte_name = cte.name
if cte_name in self.ctes_by_name:
existing_cte = self.ctes_by_name[cte_name]
# we've generated a same-named CTE that we are enclosed in,
# or this is the same CTE. just return the name.
if cte in existing_cte._restates or cte is existing_cte:
return self.preparer.format_alias(cte, cte_name)
elif existing_cte in cte._restates:
# we've generated a same-named CTE that is
# enclosed in us - we take precedence, so
# discard the text for the "inner".
del self.ctes[existing_cte]
else:
raise exc.CompileError(
"Multiple, unrelated CTEs found with "
"the same name: %r" %
cte_name)
self.ctes_by_name[cte_name] = cte
# look for embedded DML ctes and propagate autocommit
if 'autocommit' in cte.element._execution_options and \
'autocommit' not in self.execution_options:
self.execution_options = self.execution_options.union(
{"autocommit": cte.element._execution_options['autocommit']})
if cte._cte_alias is not None:
orig_cte = cte._cte_alias
if orig_cte not in self.ctes:
self.visit_cte(orig_cte, **kwargs)
cte_alias_name = cte._cte_alias.name
if isinstance(cte_alias_name, elements._truncated_label):
cte_alias_name = self._truncated_identifier(
"alias", cte_alias_name)
else:
orig_cte = cte
cte_alias_name = None
if not cte_alias_name and cte not in self.ctes:
if cte.recursive:
self.ctes_recursive = True
text = self.preparer.format_alias(cte, cte_name)
if cte.recursive:
if isinstance(cte.original, selectable.Select):
col_source = cte.original
elif isinstance(cte.original, selectable.CompoundSelect):
col_source = cte.original.selects[0]
else:
assert False
recur_cols = [c for c in
util.unique_list(col_source.inner_columns)
if c is not None]
text += "(%s)" % (", ".join(
self.preparer.format_column(ident)
for ident in recur_cols))
if self.positional:
kwargs['positional_names'] = self.cte_positional[cte] = []
text += " AS \n" + \
cte.original._compiler_dispatch(
self, asfrom=True, **kwargs
)
if cte._suffixes:
text += " " + self._generate_prefixes(
cte, cte._suffixes, **kwargs)
self.ctes[cte] = text
if asfrom:
if cte_alias_name:
text = self.preparer.format_alias(cte, cte_alias_name)
text += self.get_render_as_alias_suffix(cte_name)
else:
return self.preparer.format_alias(cte, cte_name)
return text
def visit_alias(self, alias, asfrom=False, ashint=False,
iscrud=False,
fromhints=None, **kwargs):
if asfrom or ashint:
if isinstance(alias.name, elements._truncated_label):
alias_name = self._truncated_identifier("alias", alias.name)
else:
alias_name = alias.name
if ashint:
return self.preparer.format_alias(alias, alias_name)
elif asfrom:
ret = alias.original._compiler_dispatch(self,
asfrom=True, **kwargs) + \
self.get_render_as_alias_suffix(
self.preparer.format_alias(alias, alias_name))
if fromhints and alias in fromhints:
ret = self.format_from_hint_text(ret, alias,
fromhints[alias], iscrud)
return ret
else:
return alias.original._compiler_dispatch(self, **kwargs)
def visit_lateral(self, lateral, **kw):
kw['lateral'] = True
return "LATERAL %s" % self.visit_alias(lateral, **kw)
def visit_tablesample(self, tablesample, asfrom=False, **kw):
text = "%s TABLESAMPLE %s" % (
self.visit_alias(tablesample, asfrom=True, **kw),
tablesample._get_method()._compiler_dispatch(self, **kw))
if tablesample.seed is not None:
text += " REPEATABLE (%s)" % (
tablesample.seed._compiler_dispatch(self, **kw))
return text
def get_render_as_alias_suffix(self, alias_name_text):
return " AS " + alias_name_text
def _add_to_result_map(self, keyname, name, objects, type_):
self._result_columns.append((keyname, name, objects, type_))
def _label_select_column(self, select, column,
populate_result_map,
asfrom, column_clause_args,
name=None,
within_columns_clause=True):
"""produce labeled columns present in a select()."""
if column.type._has_column_expression and \
populate_result_map:
col_expr = column.type.column_expression(column)
add_to_result_map = lambda keyname, name, objects, type_: \
self._add_to_result_map(
keyname, name,
(column,) + objects, type_)
else:
col_expr = column
if populate_result_map:
add_to_result_map = self._add_to_result_map
else:
add_to_result_map = None
if not within_columns_clause:
result_expr = col_expr
elif isinstance(column, elements.Label):
if col_expr is not column:
result_expr = _CompileLabel(
col_expr,
column.name,
alt_names=(column.element,)
)
else:
result_expr = col_expr
elif select is not None and name:
result_expr = _CompileLabel(
col_expr,
name,
alt_names=(column._key_label,)
)
elif \
asfrom and \
isinstance(column, elements.ColumnClause) and \
not column.is_literal and \
column.table is not None and \
not isinstance(column.table, selectable.Select):
result_expr = _CompileLabel(col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,))
elif (
not isinstance(column, elements.TextClause) and
(
not isinstance(column, elements.UnaryExpression) or
column.wraps_column_expression
) and
(
not hasattr(column, 'name') or
isinstance(column, functions.Function)
)
):
result_expr = _CompileLabel(col_expr, column.anon_label)
elif col_expr is not column:
# TODO: are we sure "column" has a .name and .key here ?
# assert isinstance(column, elements.ColumnClause)
result_expr = _CompileLabel(col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,))
else:
result_expr = col_expr
column_clause_args.update(
within_columns_clause=within_columns_clause,
add_to_result_map=add_to_result_map
)
return result_expr._compiler_dispatch(
self,
**column_clause_args
)
def format_from_hint_text(self, sqltext, table, hint, iscrud):
hinttext = self.get_from_hint_text(table, hint)
if hinttext:
sqltext += " " + hinttext
return sqltext
def get_select_hint_text(self, byfroms):
return None
def get_from_hint_text(self, table, text):
return None
def get_crud_hint_text(self, table, text):
return None
def get_statement_hint_text(self, hint_texts):
return " ".join(hint_texts)
def _transform_select_for_nested_joins(self, select):
"""Rewrite any "a JOIN (b JOIN c)" expression as
"a JOIN (select * from b JOIN c) AS anon", to support
databases that can't parse a parenthesized join correctly
(i.e. sqlite < 3.7.16).
"""
cloned = {}
column_translate = [{}]
def visit(element, **kw):
if element in column_translate[-1]:
return column_translate[-1][element]
elif element in cloned:
return cloned[element]
newelem = cloned[element] = element._clone()
if newelem.is_selectable and newelem._is_join and \
isinstance(newelem.right, selectable.FromGrouping):
newelem._reset_exported()
newelem.left = visit(newelem.left, **kw)
right = visit(newelem.right, **kw)
selectable_ = selectable.Select(
[right.element],
use_labels=True).alias()
for c in selectable_.c:
c._key_label = c.key
c._label = c.name
translate_dict = dict(
zip(newelem.right.element.c, selectable_.c)
)
# translating from both the old and the new
# because different select() structures will lead us
# to traverse differently
translate_dict[right.element.left] = selectable_
translate_dict[right.element.right] = selectable_
translate_dict[newelem.right.element.left] = selectable_
translate_dict[newelem.right.element.right] = selectable_
# propagate translations that we've gained
# from nested visit(newelem.right) outwards
# to the enclosing select here. this happens
# only when we have more than one level of right
# join nesting, i.e. "a JOIN (b JOIN (c JOIN d))"
for k, v in list(column_translate[-1].items()):
if v in translate_dict:
# remarkably, no current ORM tests (May 2013)
# hit this condition, only test_join_rewriting
# does.
column_translate[-1][k] = translate_dict[v]
column_translate[-1].update(translate_dict)
newelem.right = selectable_
newelem.onclause = visit(newelem.onclause, **kw)
elif newelem._is_from_container:
# if we hit an Alias, CompoundSelect or ScalarSelect, put a
# marker in the stack.
kw['transform_clue'] = 'select_container'
newelem._copy_internals(clone=visit, **kw)
elif newelem.is_selectable and newelem._is_select:
barrier_select = kw.get('transform_clue', None) == \
'select_container'
# if we're still descended from an
# Alias/CompoundSelect/ScalarSelect, we're
# in a FROM clause, so start with a new translate collection
if barrier_select:
column_translate.append({})
kw['transform_clue'] = 'inside_select'
newelem._copy_internals(clone=visit, **kw)
if barrier_select:
del column_translate[-1]
else:
newelem._copy_internals(clone=visit, **kw)
return newelem
return visit(select)
def _transform_result_map_for_nested_joins(
self, select, transformed_select):
inner_col = dict((c._key_label, c) for
c in transformed_select.inner_columns)
d = dict(
(inner_col[c._key_label], c)
for c in select.inner_columns
)
self._result_columns = [
(key, name, tuple([d.get(col, col) for col in objs]), typ)
for key, name, objs, typ in self._result_columns
]
_default_stack_entry = util.immutabledict([
('correlate_froms', frozenset()),
('asfrom_froms', frozenset())
])
def _display_froms_for_select(self, select, asfrom, lateral=False):
# utility method to help external dialects
# get the correct from list for a select.
# specifically the oracle dialect needs this feature
# right now.
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
correlate_froms = entry['correlate_froms']
asfrom_froms = entry['asfrom_froms']
if asfrom and not lateral:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms),
implicit_correlate_froms=())
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms)
return froms
def visit_select(self, select, asfrom=False, parens=True,
fromhints=None,
compound_index=0,
nested_join_translation=False,
select_wraps_for=None,
lateral=False,
**kwargs):
needs_nested_translation = \
select.use_labels and \
not nested_join_translation and \
not self.stack and \
not self.dialect.supports_right_nested_joins
if needs_nested_translation:
transformed_select = self._transform_select_for_nested_joins(
select)
text = self.visit_select(
transformed_select, asfrom=asfrom, parens=parens,
fromhints=fromhints,
compound_index=compound_index,
nested_join_translation=True, **kwargs
)
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = toplevel or \
(
compound_index == 0 and entry.get(
'need_result_map_for_compound', False)
) or entry.get('need_result_map_for_nested', False)
# this was first proposed as part of #3372; however, it is not
# reached in current tests and could possibly be an assertion
# instead.
if not populate_result_map and 'add_to_result_map' in kwargs:
del kwargs['add_to_result_map']
if needs_nested_translation:
if populate_result_map:
self._transform_result_map_for_nested_joins(
select, transformed_select)
return text
froms = self._setup_select_stack(select, entry, asfrom, lateral)
column_clause_args = kwargs.copy()
column_clause_args.update({
'within_label_clause': False,
'within_columns_clause': False
})
text = "SELECT " # we're off to a good start !
if select._hints:
hint_text, byfrom = self._setup_select_hints(select)
if hint_text:
text += hint_text + " "
else:
byfrom = None
if select._prefixes:
text += self._generate_prefixes(
select, select._prefixes, **kwargs)
text += self.get_select_precolumns(select, **kwargs)
# the actual list of columns to print in the SELECT column list.
inner_columns = [
c for c in [
self._label_select_column(
select,
column,
populate_result_map, asfrom,
column_clause_args,
name=name)
for name, column in select._columns_plus_names
]
if c is not None
]
if populate_result_map and select_wraps_for is not None:
# if this select is a compiler-generated wrapper,
# rewrite the targeted columns in the result map
translate = dict(
zip(
[name for (key, name) in select._columns_plus_names],
[name for (key, name) in
select_wraps_for._columns_plus_names])
)
self._result_columns = [
(key, name, tuple(translate.get(o, o) for o in obj), type_)
for key, name, obj, type_ in self._result_columns
]
text = self._compose_select_body(
text, select, inner_columns, froms, byfrom, kwargs)
if select._statement_hints:
per_dialect = [
ht for (dialect_name, ht)
in select._statement_hints
if dialect_name in ('*', self.dialect.name)
]
if per_dialect:
text += " " + self.get_statement_hint_text(per_dialect)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
if select._suffixes:
text += " " + self._generate_prefixes(
select, select._suffixes, **kwargs)
self.stack.pop(-1)
if (asfrom or lateral) and parens:
return "(" + text + ")"
else:
return text
def _setup_select_hints(self, select):
byfrom = dict([
(from_, hinttext % {
'name': from_._compiler_dispatch(
self, ashint=True)
})
for (from_, dialect), hinttext in
select._hints.items()
if dialect in ('*', self.dialect.name)
])
hint_text = self.get_select_hint_text(byfrom)
return hint_text, byfrom
def _setup_select_stack(self, select, entry, asfrom, lateral):
correlate_froms = entry['correlate_froms']
asfrom_froms = entry['asfrom_froms']
if asfrom and not lateral:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms),
implicit_correlate_froms=())
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms)
new_correlate_froms = set(selectable._from_objects(*froms))
all_correlate_froms = new_correlate_froms.union(correlate_froms)
new_entry = {
'asfrom_froms': new_correlate_froms,
'correlate_froms': all_correlate_froms,
'selectable': select,
}
self.stack.append(new_entry)
return froms
def _compose_select_body(
self, text, select, inner_columns, froms, byfrom, kwargs):
text += ', '.join(inner_columns)
if froms:
text += " \nFROM "
if select._hints:
text += ', '.join(
[f._compiler_dispatch(self, asfrom=True,
fromhints=byfrom, **kwargs)
for f in froms])
else:
text += ', '.join(
[f._compiler_dispatch(self, asfrom=True, **kwargs)
for f in froms])
else:
text += self.default_from()
if select._whereclause is not None:
t = select._whereclause._compiler_dispatch(self, **kwargs)
if t:
text += " \nWHERE " + t
if select._group_by_clause.clauses:
group_by = select._group_by_clause._compiler_dispatch(
self, **kwargs)
if group_by:
text += " GROUP BY " + group_by
if select._having is not None:
t = select._having._compiler_dispatch(self, **kwargs)
if t:
text += " \nHAVING " + t
if select._order_by_clause.clauses:
text += self.order_by_clause(select, **kwargs)
if (select._limit_clause is not None or
select._offset_clause is not None):
text += self.limit_clause(select, **kwargs)
if select._for_update_arg is not None:
text += self.for_update_clause(select, **kwargs)
return text
def _generate_prefixes(self, stmt, prefixes, **kw):
clause = " ".join(
prefix._compiler_dispatch(self, **kw)
for prefix, dialect_name in prefixes
if dialect_name is None or
dialect_name == self.dialect.name
)
if clause:
clause += " "
return clause
def _render_cte_clause(self):
if self.positional:
self.positiontup = sum([
self.cte_positional[cte]
for cte in self.ctes], []) + \
self.positiontup
cte_text = self.get_cte_preamble(self.ctes_recursive) + " "
cte_text += ", \n".join(
[txt for txt in self.ctes.values()]
)
cte_text += "\n "
return cte_text
def get_cte_preamble(self, recursive):
if recursive:
return "WITH RECURSIVE"
else:
return "WITH"
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list.
"""
return select._distinct and "DISTINCT " or ""
def order_by_clause(self, select, **kw):
order_by = select._order_by_clause._compiler_dispatch(self, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def for_update_clause(self, select, **kw):
return " FOR UPDATE"
def returning_clause(self, stmt, returning_cols):
raise exc.CompileError(
"RETURNING is not supported by this "
"dialect's statement compiler.")
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += "\n LIMIT -1"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
def visit_table(self, table, asfrom=False, iscrud=False, ashint=False,
fromhints=None, use_schema=True, **kwargs):
if asfrom or ashint:
effective_schema = self.preparer.schema_for_object(table)
if use_schema and effective_schema:
ret = self.preparer.quote_schema(effective_schema) + \
"." + self.preparer.quote(table.name)
else:
ret = self.preparer.quote(table.name)
if fromhints and table in fromhints:
ret = self.format_from_hint_text(ret, table,
fromhints[table], iscrud)
return ret
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
if join.full:
join_type = " FULL OUTER JOIN "
elif join.isouter:
join_type = " LEFT OUTER JOIN "
else:
join_type = " JOIN "
return (
join.left._compiler_dispatch(self, asfrom=True, **kwargs) +
join_type +
join.right._compiler_dispatch(self, asfrom=True, **kwargs) +
" ON " +
join.onclause._compiler_dispatch(self, **kwargs)
)
def _setup_crud_hints(self, stmt, table_text):
dialect_hints = dict([
(table, hint_text)
for (table, dialect), hint_text in
stmt._hints.items()
if dialect in ('*', self.dialect.name)
])
if stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text,
stmt.table,
dialect_hints[stmt.table],
True
)
return dialect_hints, table_text
def visit_insert(self, insert_stmt, asfrom=False, **kw):
toplevel = not self.stack
self.stack.append(
{'correlate_froms': set(),
"asfrom_froms": set(),
"selectable": insert_stmt})
crud_params = crud._setup_crud_params(
self, insert_stmt, crud.ISINSERT, **kw)
if not crud_params and \
not self.dialect.supports_default_values and \
not self.dialect.supports_empty_insert:
raise exc.CompileError("The '%s' dialect with current database "
"version settings does not support empty "
"inserts." %
self.dialect.name)
if insert_stmt._has_multi_parameters:
if not self.dialect.supports_multivalues_insert:
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support "
"in-place multirow inserts." %
self.dialect.name)
crud_params_single = crud_params[0]
else:
crud_params_single = crud_params
preparer = self.preparer
supports_default_values = self.dialect.supports_default_values
text = "INSERT "
if insert_stmt._prefixes:
text += self._generate_prefixes(insert_stmt,
insert_stmt._prefixes, **kw)
text += "INTO "
table_text = preparer.format_table(insert_stmt.table)
if insert_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
insert_stmt, table_text)
else:
dialect_hints = None
text += table_text
if crud_params_single or not supports_default_values:
text += " (%s)" % ', '.join([preparer.format_column(c[0])
for c in crud_params_single])
if self.returning or insert_stmt._returning:
returning_clause = self.returning_clause(
insert_stmt, self.returning or insert_stmt._returning)
if self.returning_precedes_values:
text += " " + returning_clause
else:
returning_clause = None
if insert_stmt.select is not None:
text += " %s" % self.process(self._insert_from_select, **kw)
elif not crud_params and supports_default_values:
text += " DEFAULT VALUES"
elif insert_stmt._has_multi_parameters:
text += " VALUES %s" % (
", ".join(
"(%s)" % (
', '.join(c[1] for c in crud_param_set)
)
for crud_param_set in crud_params
)
)
else:
text += " VALUES (%s)" % \
', '.join([c[1] for c in crud_params])
if insert_stmt._post_values_clause is not None:
post_values_clause = self.process(
insert_stmt._post_values_clause, **kw)
if post_values_clause:
text += " " + post_values_clause
if returning_clause and not self.returning_precedes_values:
text += " " + returning_clause
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom:
return "(" + text + ")"
else:
return text
def update_limit_clause(self, update_stmt):
"""Provide a hook for MySQL to add LIMIT to the UPDATE"""
return None
def update_tables_clause(self, update_stmt, from_table,
extra_froms, **kw):
"""Provide a hook to override the initial table clause
in an UPDATE statement.
MySQL overrides this.
"""
kw['asfrom'] = True
return from_table._compiler_dispatch(self, iscrud=True, **kw)
def update_from_clause(self, update_stmt,
from_table, extra_froms,
from_hints,
**kw):
"""Provide a hook to override the generation of an
UPDATE..FROM clause.
MySQL and MSSQL override this.
"""
return "FROM " + ', '.join(
t._compiler_dispatch(self, asfrom=True,
fromhints=from_hints, **kw)
for t in extra_froms)
def visit_update(self, update_stmt, asfrom=False, **kw):
toplevel = not self.stack
self.stack.append(
{'correlate_froms': set([update_stmt.table]),
"asfrom_froms": set([update_stmt.table]),
"selectable": update_stmt})
extra_froms = update_stmt._extra_froms
text = "UPDATE "
if update_stmt._prefixes:
text += self._generate_prefixes(update_stmt,
update_stmt._prefixes, **kw)
table_text = self.update_tables_clause(update_stmt, update_stmt.table,
extra_froms, **kw)
crud_params = crud._setup_crud_params(
self, update_stmt, crud.ISUPDATE, **kw)
if update_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
update_stmt, table_text)
else:
dialect_hints = None
text += table_text
text += ' SET '
include_table = extra_froms and \
self.render_table_with_column_in_update_from
text += ', '.join(
c[0]._compiler_dispatch(self,
include_table=include_table) +
'=' + c[1] for c in crud_params
)
if self.returning or update_stmt._returning:
if self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning or update_stmt._returning)
if extra_froms:
extra_from_text = self.update_from_clause(
update_stmt,
update_stmt.table,
extra_froms,
dialect_hints, **kw)
if extra_from_text:
text += " " + extra_from_text
if update_stmt._whereclause is not None:
t = self.process(update_stmt._whereclause, **kw)
if t:
text += " WHERE " + t
limit_clause = self.update_limit_clause(update_stmt)
if limit_clause:
text += " " + limit_clause
if (self.returning or update_stmt._returning) and \
not self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning or update_stmt._returning)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom:
return "(" + text + ")"
else:
return text
@util.memoized_property
def _key_getters_for_crud_column(self):
return crud._key_getters_for_crud_column(self, self.statement)
def visit_delete(self, delete_stmt, asfrom=False, **kw):
toplevel = not self.stack
self.stack.append({'correlate_froms': set([delete_stmt.table]),
"asfrom_froms": set([delete_stmt.table]),
"selectable": delete_stmt})
crud._setup_crud_params(self, delete_stmt, crud.ISDELETE, **kw)
text = "DELETE "
if delete_stmt._prefixes:
text += self._generate_prefixes(delete_stmt,
delete_stmt._prefixes, **kw)
text += "FROM "
table_text = delete_stmt.table._compiler_dispatch(
self, asfrom=True, iscrud=True)
if delete_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
delete_stmt, table_text)
text += table_text
if delete_stmt._returning:
if self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning)
if delete_stmt._whereclause is not None:
t = delete_stmt._whereclause._compiler_dispatch(self, **kw)
if t:
text += " WHERE " + t
if delete_stmt._returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom:
return "(" + text + ")"
else:
return text
def visit_savepoint(self, savepoint_stmt):
return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TO SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
def visit_release_savepoint(self, savepoint_stmt):
return "RELEASE SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
class StrSQLCompiler(SQLCompiler):
""""a compiler subclass with a few non-standard SQL features allowed.
Used for stringification of SQL statements when a real dialect is not
available.
"""
def _fallback_column_name(self, column):
return "<name unknown>"
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def visit_json_getitem_op_binary(self, binary, operator, **kw):
return self.visit_getitem_binary(binary, operator, **kw)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
return self.visit_getitem_binary(binary, operator, **kw)
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in elements._select_iterables(returning_cols)
]
return 'RETURNING ' + ', '.join(columns)
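# Editor's note: a minimal usage sketch, assuming a stock SQLAlchemy install;
# the table and column names are hypothetical.  Plain stringification of a
# statement (no engine, no real dialect) is the situation this string-oriented
# compiler exists to support.
def _example_stringification():
    from sqlalchemy import table, column, select
    t = table('t', column('id'), column('x'))
    return str(select([t.c.id, t.c.x]))
    # -> "SELECT t.id, t.x \nFROM t"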
class DDLCompiler(Compiled):
@util.memoized_property
def sql_compiler(self):
return self.dialect.statement_compiler(self.dialect, None)
@util.memoized_property
def type_compiler(self):
return self.dialect.type_compiler
def construct_params(self, params=None):
return None
def visit_ddl(self, ddl, **kwargs):
# table events can substitute table and schema name
context = ddl.context
if isinstance(ddl.target, schema.Table):
context = context.copy()
preparer = self.preparer
path = preparer.format_table_seq(ddl.target)
if len(path) == 1:
table, sch = path[0], ''
else:
table, sch = path[-1], path[0]
context.setdefault('table', table)
context.setdefault('schema', sch)
context.setdefault('fullname', preparer.format_table(ddl.target))
return self.sql_compiler.post_process_text(ddl.statement % context)
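# Editor's note: an illustrative sketch of the substitution described above;
# the table name is hypothetical.  A DDL() string attached to a table event
# may use %(table)s, %(schema)s and %(fullname)s placeholders, which
# visit_ddl() fills in from the event's target table.
def _example_ddl_placeholders():
    from sqlalchemy import MetaData, Table, Column, Integer, DDL, event
    m = MetaData()
    t = Table('accounts', m, Column('id', Integer, primary_key=True))
    event.listen(
        t, 'after_create',
        DDL("ALTER TABLE %(fullname)s ADD CONSTRAINT ck_positive CHECK (id > 0)"))
    return t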
def visit_create_schema(self, create):
schema = self.preparer.format_schema(create.element)
return "CREATE SCHEMA " + schema
def visit_drop_schema(self, drop):
schema = self.preparer.format_schema(drop.element)
text = "DROP SCHEMA " + schema
if drop.cascade:
text += " CASCADE"
return text
def visit_create_table(self, create):
table = create.element
preparer = self.preparer
text = "\nCREATE "
if table._prefixes:
text += " ".join(table._prefixes) + " "
text += "TABLE " + preparer.format_table(table) + " "
create_table_suffix = self.create_table_suffix(table)
if create_table_suffix:
text += create_table_suffix + " "
text += "("
separator = "\n"
# if only one primary key, specify it along with the column
first_pk = False
for create_column in create.columns:
column = create_column.element
try:
processed = self.process(create_column,
first_pk=column.primary_key
and not first_pk)
if processed is not None:
text += separator
separator = ", \n"
text += "\t" + processed
if column.primary_key:
first_pk = True
except exc.CompileError as ce:
util.raise_from_cause(
exc.CompileError(
util.u("(in table '%s', column '%s'): %s") %
(table.description, column.name, ce.args[0])
))
const = self.create_table_constraints(
table, _include_foreign_key_constraints= # noqa
create.include_foreign_key_constraints)
if const:
text += separator + "\t" + const
text += "\n)%s\n\n" % self.post_create_table(table)
return text
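# Editor's note: a minimal sketch (hypothetical table and column names) of the
# output produced by visit_create_table() above, rendered via the CreateTable
# construct.
def _example_create_table():
    from sqlalchemy import MetaData, Table, Column, Integer, String
    from sqlalchemy.schema import CreateTable
    m = MetaData()
    t = Table('users', m,
              Column('id', Integer, primary_key=True),
              Column('name', String(50), nullable=False))
    # Roughly:
    # CREATE TABLE users (
    #     id INTEGER NOT NULL,
    #     name VARCHAR(50) NOT NULL,
    #     PRIMARY KEY (id)
    # )
    return str(CreateTable(t))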
def visit_create_column(self, create, first_pk=False):
column = create.element
if column.system:
return None
text = self.get_column_specification(
column,
first_pk=first_pk
)
const = " ".join(self.process(constraint)
for constraint in column.constraints)
if const:
text += " " + const
return text
def create_table_constraints(
self, table,
_include_foreign_key_constraints=None):
# On some DB order is significant: visit PK first, then the
# other constraints (engine.ReflectionTest.testbasic failed on FB2)
constraints = []
if table.primary_key:
constraints.append(table.primary_key)
all_fkcs = table.foreign_key_constraints
if _include_foreign_key_constraints is not None:
omit_fkcs = all_fkcs.difference(_include_foreign_key_constraints)
else:
omit_fkcs = set()
constraints.extend([c for c in table._sorted_constraints
if c is not table.primary_key and
c not in omit_fkcs])
return ", \n\t".join(
p for p in
(self.process(constraint)
for constraint in constraints
if (
constraint._create_rule is None or
constraint._create_rule(self))
and (
not self.dialect.supports_alter or
not getattr(constraint, 'use_alter', False)
)) if p is not None
)
def visit_drop_table(self, drop):
return "\nDROP TABLE " + self.preparer.format_table(drop.element)
def visit_drop_view(self, drop):
return "\nDROP VIEW " + self.preparer.format_table(drop.element)
def _verify_index_table(self, index):
if index.table is None:
raise exc.CompileError("Index '%s' is not associated "
"with any table." % index.name)
def visit_create_index(self, create, include_schema=False,
include_table_schema=True):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s (%s)" \
% (
self._prepared_index_name(index,
include_schema=include_schema),
preparer.format_table(index.table,
use_schema=include_table_schema),
', '.join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True) for
expr in index.expressions)
)
return text
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX " + self._prepared_index_name(
index, include_schema=True)
def _prepared_index_name(self, index, include_schema=False):
if index.table is not None:
effective_schema = self.preparer.schema_for_object(index.table)
else:
effective_schema = None
if include_schema and effective_schema:
schema_name = self.preparer.quote_schema(effective_schema)
else:
schema_name = None
ident = index.name
if isinstance(ident, elements._truncated_label):
max_ = self.dialect.max_index_name_length or \
self.dialect.max_identifier_length
if len(ident) > max_:
ident = ident[0:max_ - 8] + \
"_" + util.md5_hex(ident)[-4:]
else:
self.dialect.validate_identifier(ident)
index_name = self.preparer.quote(ident)
if schema_name:
index_name = schema_name + "." + index_name
return index_name
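# Editor's note: a worked illustration of the truncation above (assuming a
# 30-character identifier limit, as on Oracle): a generated index name of,
# say, 40 characters is cut to its first 22 characters (max_ - 8), then "_"
# plus the last 4 hex digits of the full name's md5 are appended, keeping the
# result distinguishable while staying under the limit.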
def visit_add_constraint(self, create):
return "ALTER TABLE %s ADD %s" % (
self.preparer.format_table(create.element.table),
self.process(create.element)
)
def visit_create_sequence(self, create):
text = "CREATE SEQUENCE %s" % \
self.preparer.format_sequence(create.element)
if create.element.increment is not None:
text += " INCREMENT BY %d" % create.element.increment
if create.element.start is not None:
text += " START WITH %d" % create.element.start
if create.element.minvalue is not None:
text += " MINVALUE %d" % create.element.minvalue
if create.element.maxvalue is not None:
text += " MAXVALUE %d" % create.element.maxvalue
if create.element.nominvalue is not None:
text += " NO MINVALUE"
if create.element.nomaxvalue is not None:
text += " NO MAXVALUE"
if create.element.cycle is not None:
text += " CYCLE"
return text
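# Editor's note: a minimal sketch (hypothetical sequence name) showing how the
# options handled above map onto the emitted clause.
def _example_create_sequence():
    from sqlalchemy import Sequence
    from sqlalchemy.schema import CreateSequence
    seq = Sequence('order_id_seq', start=1000, increment=5)
    # Roughly: CREATE SEQUENCE order_id_seq INCREMENT BY 5 START WITH 1000
    return str(CreateSequence(seq))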
def visit_drop_sequence(self, drop):
return "DROP SEQUENCE %s" % \
self.preparer.format_sequence(drop.element)
def visit_drop_constraint(self, drop):
constraint = drop.element
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
else:
formatted_name = None
if formatted_name is None:
raise exc.CompileError(
"Can't emit DROP CONSTRAINT for constraint %r; "
"it has no name" % drop.element)
return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
self.preparer.format_table(drop.element.table),
formatted_name,
drop.cascade and " CASCADE" or ""
)
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
self.dialect.type_compiler.process(
column.type, type_expression=column)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
return colspec
def create_table_suffix(self, table):
return ''
def post_create_table(self, table):
return ''
def get_column_default_string(self, column):
if isinstance(column.server_default, schema.DefaultClause):
if isinstance(column.server_default.arg, util.string_types):
return self.sql_compiler.render_literal_value(
column.server_default.arg, sqltypes.STRINGTYPE)
else:
return self.sql_compiler.process(
column.server_default.arg, literal_binds=True)
else:
return None
def visit_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext,
include_table=False,
literal_binds=True)
text += self.define_constraint_deferrability(constraint)
return text
def visit_column_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % constraint.sqltext
text += self.define_constraint_deferrability(constraint)
return text
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "PRIMARY KEY "
text += "(%s)" % ', '.join(self.preparer.quote(c.name)
for c in (constraint.columns_autoinc_first
if constraint._implicit_generated
else constraint.columns))
text += self.define_constraint_deferrability(constraint)
return text
def visit_foreign_key_constraint(self, constraint):
preparer = self.preparer
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
remote_table = list(constraint.elements)[0].column.table
text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
', '.join(preparer.quote(f.parent.name)
for f in constraint.elements),
self.define_constraint_remote_table(
constraint, remote_table, preparer),
', '.join(preparer.quote(f.column.name)
for f in constraint.elements)
)
text += self.define_constraint_match(constraint)
text += self.define_constraint_cascades(constraint)
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table)
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
text += "CONSTRAINT %s " % formatted_name
text += "UNIQUE (%s)" % (
', '.join(self.preparer.quote(c.name)
for c in constraint))
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % constraint.ondelete
if constraint.onupdate is not None:
text += " ON UPDATE %s" % constraint.onupdate
return text
def define_constraint_deferrability(self, constraint):
text = ""
if constraint.deferrable is not None:
if constraint.deferrable:
text += " DEFERRABLE"
else:
text += " NOT DEFERRABLE"
if constraint.initially is not None:
text += " INITIALLY %s" % constraint.initially
return text
def define_constraint_match(self, constraint):
text = ""
if constraint.match is not None:
text += " MATCH %s" % constraint.match
return text
class GenericTypeCompiler(TypeCompiler):
def visit_FLOAT(self, type_, **kw):
return "FLOAT"
def visit_REAL(self, type_, **kw):
return "REAL"
def visit_NUMERIC(self, type_, **kw):
if type_.precision is None:
return "NUMERIC"
elif type_.scale is None:
return "NUMERIC(%(precision)s)" % \
{'precision': type_.precision}
else:
return "NUMERIC(%(precision)s, %(scale)s)" % \
{'precision': type_.precision,
'scale': type_.scale}
def visit_DECIMAL(self, type_, **kw):
if type_.precision is None:
return "DECIMAL"
elif type_.scale is None:
return "DECIMAL(%(precision)s)" % \
{'precision': type_.precision}
else:
return "DECIMAL(%(precision)s, %(scale)s)" % \
{'precision': type_.precision,
'scale': type_.scale}
def visit_INTEGER(self, type_, **kw):
return "INTEGER"
def visit_SMALLINT(self, type_, **kw):
return "SMALLINT"
def visit_BIGINT(self, type_, **kw):
return "BIGINT"
def visit_TIMESTAMP(self, type_, **kw):
return 'TIMESTAMP'
def visit_DATETIME(self, type_, **kw):
return "DATETIME"
def visit_DATE(self, type_, **kw):
return "DATE"
def visit_TIME(self, type_, **kw):
return "TIME"
def visit_CLOB(self, type_, **kw):
return "CLOB"
def visit_NCLOB(self, type_, **kw):
return "NCLOB"
def _render_string_type(self, type_, name):
text = name
if type_.length:
text += "(%d)" % type_.length
if type_.collation:
text += ' COLLATE "%s"' % type_.collation
return text
def visit_CHAR(self, type_, **kw):
return self._render_string_type(type_, "CHAR")
def visit_NCHAR(self, type_, **kw):
return self._render_string_type(type_, "NCHAR")
def visit_VARCHAR(self, type_, **kw):
return self._render_string_type(type_, "VARCHAR")
def visit_NVARCHAR(self, type_, **kw):
return self._render_string_type(type_, "NVARCHAR")
def visit_TEXT(self, type_, **kw):
return self._render_string_type(type_, "TEXT")
def visit_BLOB(self, type_, **kw):
return "BLOB"
def visit_BINARY(self, type_, **kw):
return "BINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_VARBINARY(self, type_, **kw):
return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_BOOLEAN(self, type_, **kw):
return "BOOLEAN"
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_, **kw)
def visit_boolean(self, type_, **kw):
return self.visit_BOOLEAN(type_, **kw)
def visit_time(self, type_, **kw):
return self.visit_TIME(type_, **kw)
def visit_datetime(self, type_, **kw):
return self.visit_DATETIME(type_, **kw)
def visit_date(self, type_, **kw):
return self.visit_DATE(type_, **kw)
def visit_big_integer(self, type_, **kw):
return self.visit_BIGINT(type_, **kw)
def visit_small_integer(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_integer(self, type_, **kw):
return self.visit_INTEGER(type_, **kw)
def visit_real(self, type_, **kw):
return self.visit_REAL(type_, **kw)
def visit_float(self, type_, **kw):
return self.visit_FLOAT(type_, **kw)
def visit_numeric(self, type_, **kw):
return self.visit_NUMERIC(type_, **kw)
def visit_string(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_unicode(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_unicode_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_enum(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_null(self, type_, **kw):
raise exc.CompileError("Can't generate DDL for %r; "
"did you forget to specify a "
"type on this Column?" % type_)
def visit_type_decorator(self, type_, **kw):
return self.process(type_.type_engine(self.dialect), **kw)
def visit_user_defined(self, type_, **kw):
return type_.get_col_spec(**kw)
class StrSQLTypeCompiler(GenericTypeCompiler):
def __getattr__(self, key):
if key.startswith("visit_"):
return self._visit_unknown
else:
raise AttributeError(key)
def _visit_unknown(self, type_, **kw):
return "%s" % type_.__class__.__name__
class IdentifierPreparer(object):
"""Handle quoting and case-folding of identifiers based on options."""
reserved_words = RESERVED_WORDS
legal_characters = LEGAL_CHARACTERS
illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS
schema_for_object = schema._schema_getter(None)
def __init__(self, dialect, initial_quote='"',
final_quote=None, escape_quote='"', omit_schema=False):
"""Construct a new ``IdentifierPreparer`` object.
initial_quote
Character that begins a delimited identifier.
final_quote
Character that ends a delimited identifier. Defaults to
`initial_quote`.
omit_schema
Prevent prepending schema name. Useful for databases that do
not support schemae.
"""
self.dialect = dialect
self.initial_quote = initial_quote
self.final_quote = final_quote or self.initial_quote
self.escape_quote = escape_quote
self.escape_to_quote = self.escape_quote * 2
self.omit_schema = omit_schema
self._strings = {}
def _with_schema_translate(self, schema_translate_map):
prep = self.__class__.__new__(self.__class__)
prep.__dict__.update(self.__dict__)
prep.schema_for_object = schema._schema_getter(schema_translate_map)
return prep
def _escape_identifier(self, value):
"""Escape an identifier.
Subclasses should override this to provide database-dependent
escaping behavior.
"""
return value.replace(self.escape_quote, self.escape_to_quote)
def _unescape_identifier(self, value):
"""Canonicalize an escaped identifier.
Subclasses should override this to provide database-dependent
unescaping behavior that reverses _escape_identifier.
"""
return value.replace(self.escape_to_quote, self.escape_quote)
def quote_identifier(self, value):
"""Quote an identifier.
Subclasses should override this to provide database-dependent
quoting behavior.
"""
return self.initial_quote + \
self._escape_identifier(value) + \
self.final_quote
def _requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(util.text_type(value))
or (lc_value != value))
def quote_schema(self, schema, force=None):
"""Conditionally quote a schema.
Subclasses can override this to provide database-dependent
quoting behavior for schema names.
the 'force' flag should be considered deprecated.
"""
return self.quote(schema, force)
def quote(self, ident, force=None):
"""Conditionally quote an identifier.
the 'force' flag should be considered deprecated.
"""
force = getattr(ident, "quote", None)
if force is None:
if ident in self._strings:
return self._strings[ident]
else:
if self._requires_quotes(ident):
self._strings[ident] = self.quote_identifier(ident)
else:
self._strings[ident] = ident
return self._strings[ident]
elif force:
return self.quote_identifier(ident)
else:
return ident
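# Editor's note (illustration of the rules above, using arbitrary example
# identifiers): quote('select') returns '"select"' (reserved word),
# quote('MyTable') returns '"MyTable"' (mixed case), and quote('plain')
# returns 'plain' unquoted.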
def format_sequence(self, sequence, use_schema=True):
name = self.quote(sequence.name)
effective_schema = self.schema_for_object(sequence)
if (not self.omit_schema and use_schema and
effective_schema is not None):
name = self.quote_schema(effective_schema) + "." + name
return name
def format_label(self, label, name=None):
return self.quote(name or label.name)
def format_alias(self, alias, name=None):
return self.quote(name or alias.name)
def format_savepoint(self, savepoint, name=None):
# Running the savepoint name through quoting is unnecessary
# for all known dialects. This is here to support potential
# third party use cases
ident = name or savepoint.ident
if self._requires_quotes(ident):
ident = self.quote_identifier(ident)
return ident
@util.dependencies("sqlalchemy.sql.naming")
def format_constraint(self, naming, constraint):
if isinstance(constraint.name, elements._defer_name):
name = naming._constraint_name_for_table(
constraint, constraint.table)
if name:
return self.quote(name)
elif isinstance(constraint.name, elements._defer_none_name):
return None
return self.quote(constraint.name)
def format_table(self, table, use_schema=True, name=None):
"""Prepare a quoted table and schema name."""
if name is None:
name = table.name
result = self.quote(name)
effective_schema = self.schema_for_object(table)
if not self.omit_schema and use_schema \
and effective_schema:
result = self.quote_schema(effective_schema) + "." + result
return result
def format_schema(self, name, quote=None):
"""Prepare a quoted schema name."""
return self.quote(name, quote)
def format_column(self, column, use_table=False,
name=None, table_name=None):
"""Prepare a quoted column name."""
if name is None:
name = column.name
if not getattr(column, 'is_literal', False):
if use_table:
return self.format_table(
column.table, use_schema=False,
name=table_name) + "." + self.quote(name)
else:
return self.quote(name)
else:
# literal textual elements get stuck into ColumnClause a lot,
# which shouldn't get quoted
if use_table:
return self.format_table(
column.table, use_schema=False,
name=table_name) + '.' + name
else:
return name
def format_table_seq(self, table, use_schema=True):
"""Format table name and schema as a tuple."""
# Dialects with more levels in their fully qualified references
# ('database', 'owner', etc.) could override this and return
# a longer sequence.
effective_schema = self.schema_for_object(table)
if not self.omit_schema and use_schema and \
effective_schema:
return (self.quote_schema(effective_schema),
self.format_table(table, use_schema=False))
else:
return (self.format_table(table, use_schema=False), )
@util.memoized_property
def _r_identifiers(self):
initial, final, escaped_final = \
[re.escape(s) for s in
(self.initial_quote, self.final_quote,
self._escape_identifier(self.final_quote))]
r = re.compile(
r'(?:'
r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s'
r'|([^\.]+))(?=\.|$))+' %
{'initial': initial,
'final': final,
'escaped': escaped_final})
return r
def unformat_identifiers(self, identifiers):
"""Unpack 'schema.table.column'-like strings into components."""
r = self._r_identifiers
return [self._unescape_identifier(i)
for i in [a or b for a, b in r.findall(identifiers)]]
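# Editor's note: a minimal, self-contained sketch exercising the preparer with
# the generic DefaultDialect (an assumption; any dialect's preparer behaves
# analogously).  The identifier strings are arbitrary examples.
def _example_identifier_preparer():
    from sqlalchemy.sql.compiler import IdentifierPreparer
    from sqlalchemy.engine import default
    prep = IdentifierPreparer(default.DefaultDialect())
    quoted = prep.quote_identifier('my table')      # -> '"my table"'
    parts = prep.unformat_identifiers('myschema."my table".id')
    # parts -> ['myschema', 'my table', 'id']
    return quoted, parts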
| amisrs/one-eighty | venv2/lib/python2.7/site-packages/sqlalchemy/sql/compiler.py | Python | mit | 107,732 | ["VisIt"] | d789ef142e0ffe6e6df2f404e816f5ab947662b30457e6013dda60978c48fa18 |
#!/usr/bin/python
"""Test of line nav after loading a same-page link."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"1. Line Down to what should be the text below the About heading",
["BRAILLE LINE: 'Orca is a free, open source, flexible, extensible, and'",
" VISIBLE: 'Orca is a free, open source, fle', cursor=1",
"SPEECH OUTPUT: 'Orca is a free, open source, flexible, extensible, and'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| chrys87/orca-beep | test/keystrokes/firefox/line_nav_follow_same_page_link_3.py | Python | lgpl-2.1 | 721 | ["ORCA"] | 3e790d8b1fad883d1e3d175d00d4de319931c77eba499b36177c064ffc69f29c |
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Class advice.
This module was adapted from 'protocols.advice', part of the Python
Enterprise Application Kit (PEAK). Please notify the PEAK authors
(pje@telecommunity.com and tsarna@sarna.org) if bugs are found or
Zope-specific changes are required, so that the PEAK version of this module
can be kept in sync.
PEAK is a Python application framework that interoperates with (but does
not require) Zope 3 and Twisted. It provides tools for manipulating UML
models, object-relational persistence, aspect-oriented programming, and more.
Visit the PEAK home page at http://peak.telecommunity.com for more information.
$Id: advice.py 25177 2004-06-02 13:17:31Z jim $
"""
import inspect
import sys
def getFrameInfo(frame):
"""Return (kind,module,locals,globals) for a frame
'kind' is one of "exec", "module", "class", "function call", or "unknown".
"""
f_locals = frame.f_locals
f_globals = frame.f_globals
sameNamespace = f_locals is f_globals
hasModule = '__module__' in f_locals
hasName = '__name__' in f_globals
sameName = hasModule and hasName
sameName = sameName and f_globals['__name__']==f_locals['__module__']
module = hasName and sys.modules.get(f_globals['__name__']) or None
namespaceIsModule = module and module.__dict__ is f_globals
frameinfo = inspect.getframeinfo(frame)
try:
sourceline = frameinfo[3][0].strip()
except: #pragma NO COVER
# circumstances unclear; third-party code without an explanatory comment
sourceline = frameinfo[3]
codeinfo = frameinfo[0], frameinfo[1], frameinfo[2], sourceline
if not namespaceIsModule: #pragma no COVER
# some kind of funky exec
kind = "exec" # don't know how to repeat this scenario
elif sameNamespace and not hasModule:
kind = "module"
elif sameName and not sameNamespace:
kind = "class"
elif not sameNamespace:
kind = "function call"
else: #pragma NO COVER
# How can you have f_locals is f_globals, and have '__module__' set?
# This is probably module-level code, but with a '__module__' variable.
kind = "unknown"
return kind, module, f_locals, f_globals, codeinfo
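# Editor's note: a minimal usage sketch (an editor's assumption about typical
# use, not part of the original module): inspecting the calling frame from
# module-level script code reports kind == "module".
if __name__ == '__main__':
    import sys as _sys
    _kind, _module, _locals, _globals, _codeinfo = getFrameInfo(_sys._getframe())
    print(_kind)   # -> "module" when executed as a script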
| anakinsolo/backend | Lib/site-packages/venusian-1.0-py2.7.egg/venusian/advice.py | Python | mit | 2,846 | ["VisIt"] | ca9508fa62104239fbd44c3bb96ca672fcfa0ae12c1f679f6c129b911e549d3b |
#!/usr/bin/env python
import pyemma
import numpy as np
import mdtraj
import time
import os
# Source directory
source_directory = '/cbio/jclab/projects/fah/fah-data/munged3/no-solvent/11401' # Src ensembler
################################################################################
# Load reference topology
################################################################################
print ('loading reference topology...')
reference_pdb_filename = 'protein.pdb'
reference_trajectory = os.path.join(source_directory, 'run0-clone0.h5')
traj = mdtraj.load(reference_trajectory)
traj[0].save_pdb(reference_pdb_filename)
################################################################################
# Initialize featurizer
################################################################################
print('Initializing featurizer...')
import pyemma.coordinates
featurizer = pyemma.coordinates.featurizer(reference_pdb_filename)
#featurizer.add_all() # all atoms
featurizer.add_selection( featurizer.select_Backbone() )
print('Featurizer has %d features.' % featurizer.dimension())
################################################################################
# Define coordinates source
################################################################################
nskip = 40 # number of initial frames to skip
import pyemma.coordinates
from glob import glob
trajectory_filenames = glob(os.path.join(source_directory, 'run*-clone*.h5'))
coordinates_source = pyemma.coordinates.source(trajectory_filenames, features=featurizer)
print("There are %d frames total in %d trajectories." % (coordinates_source.n_frames_total(), coordinates_source.number_of_trajectories()))
################################################################################
# Cluster
################################################################################
print('Clustering...')
generator_ratio = 250
nframes = coordinates_source.n_frames_total()
nstates = int(nframes / generator_ratio)
stride = 1
metric = 'minRMSD'
initial_time = time.time()
clustering = pyemma.coordinates.cluster_uniform_time(data=coordinates_source, k=nstates, stride=stride, metric=metric)
#clustering = pyemma.coordinates.cluster_kmeans(data=coordinates_source, k=nstates, stride=stride, metric=metric, max_iter=10)
#clustering = pyemma.coordinates.cluster_mini_batch_kmeans(data=coordinates_source, batch_size=0.1, k=nstates, stride=stride, metric=metric, max_iter=10)
final_time = time.time()
elapsed_time = final_time - initial_time
print('Elapsed time %.3f s' % elapsed_time)
# Save cluster centers
np.save('clustercenters', clustering.clustercenters)
# Save discrete trajectories.
dtrajs = clustering.dtrajs
dtrajs_dir = 'dtrajs'
clustering.save_dtrajs(output_dir=dtrajs_dir, output_format='npy', extension='.npy')
################################################################################
# Make timescale plots
################################################################################
import matplotlib as mpl
mpl.use('Agg') # Don't use display
import matplotlib.pyplot as plt
from pyemma import msm
from pyemma import plots
lags = [1,2,5,10,20,50]
#its = msm.its(dtrajs, lags=lags, errors='bayes')
its = msm.its(dtrajs, lags=lags)
plots.plot_implied_timescales(its)
plt.savefig('plot.pdf')
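# Editor's note: an illustrative addendum (assumes the files written above now
# exist on disk): the saved cluster centers and discrete trajectories can be
# reloaded later without repeating the clustering step.
reloaded_centers = np.load('clustercenters.npy')
reloaded_dtrajs = [np.load(os.path.join(dtrajs_dir, fn))
                   for fn in sorted(os.listdir(dtrajs_dir))]
print('Reloaded %d cluster centers and %d discrete trajectories.'
      % (len(reloaded_centers), len(reloaded_dtrajs)))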
| jchodera/MSMs | jchodera/src-11401/pyemma/cluster.py | Python | gpl-2.0 | 3,309 | ["MDTraj"] | 7f4a331da9b3e69e0e81f05c84f65a37be7eb746b84c3e7fa9620ee6693f4588 |
#
# co_co_function_have_rhs.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from pynestml.cocos.co_co import CoCo
from pynestml.meta_model.ast_neuron import ASTNeuron
from pynestml.utils.logger import Logger, LoggingLevel
from pynestml.utils.messages import Messages
from pynestml.visitors.ast_visitor import ASTVisitor
class CoCoFunctionHaveRhs(CoCo):
"""
This coco ensures that all function declarations, e.g., function V_rest mV = V_m - 55mV, have a rhs.
"""
name = 'functions have rhs'
description = 'Ensures that all function declarations define a right-hand side.'
def check_co_co(self, node):
"""
Ensures the coco for the handed over neuron.
:param node: a single neuron instance.
:type node: ASTNeuron
"""
node.accept(FunctionRhsVisitor())
class FunctionRhsVisitor(ASTVisitor):
"""
This visitor ensures that everything declared as function has a rhs.
"""
def visit_declaration(self, node):
"""
Checks if the coco applies.
:param node: a single declaration.
:type node: ASTDeclaration.
"""
if node.is_function and not node.has_expression():
code, message = Messages.get_no_rhs(node.get_variables()[0].get_name())
Logger.log_message(error_position=node.get_source_position(), log_level=LoggingLevel.ERROR,
code=code, message=message)
return
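# Editor's note (illustrative, hypothetical NESTML fragment): the visitor above
# accepts an inline function declaration that carries a right-hand side, e.g.
#     function V_rest mV = E_L + 10 mV
# and logs an error for a declaration that omits it, e.g.
#     function V_rest mV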
| kperun/nestml | pynestml/cocos/co_co_function_have_rhs.py | Python | gpl-2.0 | 2,035 | ["NEURON"] | 70d50c2074d66dc086eeaa61169a6220dae5cabbe2202fd5de87b4d80fbf4a70 |
# -*- Mode: Python; coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2011-2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
"""
stoq/gui/financial/financial.py:
Implementation of financial application.
"""
import datetime
import decimal
from dateutil.relativedelta import relativedelta
import gobject
import gtk
from kiwi.currency import currency
from kiwi.python import Settable
from kiwi.ui.dialogs import selectfile
from kiwi.ui.objectlist import ColoredColumn, Column
import pango
from stoqlib.api import api
from stoqlib.database.expr import Date
from stoqlib.database.queryexecuter import DateQueryState, DateIntervalQueryState
from stoqlib.domain.account import Account, AccountTransaction, AccountTransactionView
from stoqlib.domain.payment.method import PaymentMethod
from stoqlib.domain.payment.views import InPaymentView, OutPaymentView
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.editors.accounteditor import AccountEditor
from stoqlib.gui.editors.accounttransactioneditor import AccountTransactionEditor
from stoqlib.gui.dialogs.spreadsheetexporterdialog import SpreadSheetExporter
from stoqlib.gui.dialogs.importerdialog import ImporterDialog
from stoqlib.gui.dialogs.financialreportdialog import FinancialReportDialog
from stoqlib.gui.search.searchcolumns import IdentifierColumn, SearchColumn
from stoqlib.gui.search.searchoptions import Any, DateSearchOption
from stoqlib.gui.search.searchfilters import DateSearchFilter
from stoqlib.gui.search.searchresultview import SearchResultListView
from stoqlib.gui.search.searchslave import SearchSlave
from stoqlib.gui.utils.keybindings import get_accels
from stoqlib.gui.utils.printing import print_report
from stoqlib.gui.widgets.accounttree import AccountTree
from stoqlib.gui.widgets.notebookbutton import NotebookCloseButton
from stoqlib.lib.dateutils import get_month_names
from stoqlib.lib.message import yesno
from stoqlib.lib.translation import stoqlib_gettext as _
from stoqlib.reporting.payment import AccountTransactionReport
from storm.expr import And, Or
from stoq.gui.shell.shellapp import ShellApp
class FinancialSearchResults(SearchResultListView):
def search_completed(self, results):
page = self.page
executer = page.search.get_query_executer()
if executer.search_spec == AccountTransactionView:
page.append_transactions(results)
else:
super(FinancialSearchResults, self).search_completed(results)
gobject.type_register(FinancialSearchResults)
class MonthOption(DateSearchOption):
name = None
year = None
month = None
def get_interval(self):
start = datetime.date(self.year, self.month, 1)
end = start + relativedelta(months=1, days=-1)
return start, end
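# Editor's note (worked example): with year=2013 and month=2 the option yields
# the closed interval (date(2013, 2, 1), date(2013, 2, 28)); relativedelta's
# months=1, days=-1 lands on the last day of the month regardless of its length.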
class TransactionPage(object):
# shows either a list of:
# - transactions
# - payments
def __init__(self, model, app, parent):
self.model = model
self.app = app
self.parent_window = parent
self._block = False
self._create_search()
self._add_date_filter()
self._setup_search()
self.refresh()
def get_toplevel(self):
return self.parent_window
def _create_search(self):
self.search = SearchSlave(self._get_columns(self.model.kind),
store=self.app.store)
self.search.connect('result-item-activated',
self._on_search__item_activated)
self.search.enable_advanced_search()
self.search.set_result_view(FinancialSearchResults)
self.result_view = self.search.result_view
self.result_view.page = self
self.result_view.set_cell_data_func(self._on_result_view__cell_data_func)
tree_view = self.search.result_view.get_treeview()
tree_view.set_rules_hint(True)
tree_view.set_grid_lines(gtk.TREE_VIEW_GRID_LINES_BOTH)
def _add_date_filter(self):
self.date_filter = DateSearchFilter(_('Date:'))
self.date_filter.clear_options()
self.date_filter.add_option(Any, 0)
year = datetime.datetime.today().year
month_names = get_month_names()
for i, month in enumerate(month_names):
name = month_names[i]
option = type(name + 'Option', (MonthOption, ),
{'name': _(name),
'month': i + 1,
'year': year})
self.date_filter.add_option(option, i + 1)
self.date_filter.add_custom_options()
self.date_filter.select(Any)
self.search.add_filter(self.date_filter)
def _append_date_query(self, field):
date = self.date_filter.get_state()
queries = []
if isinstance(date, DateQueryState) and date.date is not None:
queries.append(Date(field) == date.date)
elif isinstance(date, DateIntervalQueryState):
queries.append(Date(field) >= date.start)
queries.append(Date(field) <= date.end)
return queries
def _payment_query(self, store):
executer = self.search.get_query_executer()
search_spec = executer.search_spec
queries = self._append_date_query(search_spec.due_date)
if queries:
return store.find(search_spec, And(*queries))
return store.find(search_spec)
def _transaction_query(self, store):
queries = [Or(self.model.id == AccountTransaction.account_id,
self.model.id == AccountTransaction.source_account_id)]
queries.extend(self._append_date_query(AccountTransaction.date))
return store.find(AccountTransactionView, And(*queries))
def show(self):
self.search.show()
def _setup_search(self):
if self.model.kind == 'account':
self.search.set_search_spec(AccountTransactionView)
self.search.set_text_field_columns(['description'])
self.search.set_query(self._transaction_query)
elif self.model.kind == 'payable':
self.search.set_text_field_columns(['description', 'supplier_name'])
self.search.set_search_spec(OutPaymentView)
self.search.set_query(self._payment_query)
elif self.model.kind == 'receivable':
self.search.set_text_field_columns(['description', 'drawee'])
self.search.set_search_spec(InPaymentView)
self.search.set_query(self._payment_query)
else:
raise TypeError("unknown model kind: %r" % (self.model.kind, ))
def refresh(self):
self.search.result_view.clear()
if self.model.kind == 'account':
transactions = AccountTransactionView.get_for_account(self.model, self.app.store)
self.append_transactions(transactions)
elif self.model.kind == 'payable':
self._populate_payable_payments(OutPaymentView)
elif self.model.kind == 'receivable':
self._populate_payable_payments(InPaymentView)
else:
raise TypeError("unknown model kind: %r" % (self.model.kind, ))
def _get_columns(self, kind):
if kind in ['payable', 'receivable']:
return self._get_payment_columns()
else:
return self._get_account_columns()
def _on_result_view__cell_data_func(self, column, renderer, account_view, text):
if not isinstance(renderer, gtk.CellRendererText):
return text
if self.model.kind != 'account':
return text
trans = account_view.transaction
is_imbalance = self.app._imbalance_account_id in [
trans.dest_account_id,
trans.source_account_id]
renderer.set_property('weight-set', is_imbalance)
if is_imbalance:
renderer.set_property('weight', pango.WEIGHT_BOLD)
return text
def _get_account_columns(self):
def format_withdrawal(value):
if value < 0:
return currency(abs(value)).format(symbol=True, precision=2)
def format_deposit(value):
if value > 0:
return currency(value).format(symbol=True, precision=2)
if self.model.account_type == Account.TYPE_INCOME:
color_func = lambda x: False
else:
color_func = lambda x: x < 0
return [Column('date', title=_("Date"), data_type=datetime.date, sorted=True),
Column('code', title=_("Code"), data_type=unicode),
Column('description', title=_("Description"),
data_type=unicode, expand=True),
Column('account', title=_("Account"), data_type=unicode),
Column('value',
title=self.model.account.get_type_label(out=False),
data_type=currency,
format_func=format_deposit),
Column('value',
title=self.model.account.get_type_label(out=True),
data_type=currency,
format_func=format_withdrawal),
ColoredColumn('total', title=_("Total"), data_type=currency,
color='red',
data_func=color_func)]
def _get_payment_columns(self):
return [SearchColumn('due_date', title=_("Due date"), data_type=datetime.date, sorted=True),
IdentifierColumn('identifier', title=_("Payment #")),
SearchColumn('description', title=_("Description"), data_type=unicode, expand=True),
SearchColumn('value', title=_("Value"),
data_type=currency)]
def append_transactions(self, transactions):
for transaction in transactions:
description = transaction.get_account_description(self.model)
value = transaction.get_value(self.model)
# If a transaction's source account equals its destination account,
# show the same transaction twice, once with the value reversed.
if transaction.source_account_id == transaction.dest_account_id:
self._add_transaction(transaction, description, -value)
self._add_transaction(transaction, description, value)
self.update_totals()
def _populate_payable_payments(self, view_class):
for view in self.app.store.find(view_class):
self.search.result_view.append(view)
def _add_transaction(self, transaction, description, value):
item = Settable(transaction=transaction)
self._update_transaction(item, transaction, description, value)
self.search.result_view.append(item)
return item
def _update_transaction(self, item, transaction, description, value):
item.account = description
item.date = transaction.date
item.description = transaction.description
item.value = value
item.code = transaction.code
def update_totals(self):
total = decimal.Decimal('0')
for item in self.search.result_view:
total += item.value
item.total = total
def _edit_transaction_dialog(self, item):
store = api.new_store()
if isinstance(item.transaction, AccountTransactionView):
account_transaction = store.fetch(item.transaction.transaction)
else:
account_transaction = store.fetch(item.transaction)
model = getattr(self.model, 'account', self.model)
transaction = run_dialog(AccountTransactionEditor, self.app,
store, account_transaction, model)
store.confirm(transaction)
if transaction:
self.app.refresh_pages()
self.update_totals()
self.app.accounts.refresh_accounts(self.app.store)
store.close()
def on_dialog__opened(self, dialog):
dialog.connect('account-added', self.on_dialog__account_added)
def on_dialog__account_added(self, dialog):
self.app.accounts.refresh_accounts(self.app.store)
def add_transaction_dialog(self):
store = api.new_store()
model = getattr(self.model, 'account', self.model)
model = store.fetch(model)
transaction = run_dialog(AccountTransactionEditor, self.app,
store, None, model)
store.confirm(transaction)
if transaction:
self.app.refresh_pages()
self.update_totals()
self.app.accounts.refresh_accounts(self.app.store)
store.close()
def _on_search__item_activated(self, objectlist, item):
if self.model.kind == 'account':
self._edit_transaction_dialog(item)
class FinancialApp(ShellApp):
app_title = _('Financial')
gladefile = 'financial'
def __init__(self, window, store=None):
# Account id -> TransactionPage
self._pages = {}
self.accounts = AccountTree()
ShellApp.__init__(self, window, store=store)
self._tills_account_id = api.sysparam.get_object_id('TILLS_ACCOUNT')
self._imbalance_account_id = api.sysparam.get_object_id('IMBALANCE_ACCOUNT')
self._banks_account_id = api.sysparam.get_object_id('BANKS_ACCOUNT')
#
# ShellApp overrides
#
def create_actions(self):
group = get_accels('app.financial')
actions = [
('TransactionMenu', None, _('Transaction')),
('AccountMenu', None, _('Account')),
('Import', gtk.STOCK_ADD, _('Import...'),
group.get('import'), _('Import a GnuCash or OFX file')),
('ConfigurePaymentMethods', None,
_('Payment methods'),
group.get('configure_payment_methods'),
_('Select accounts for the payment methods on the system')),
('DeleteAccount', gtk.STOCK_DELETE, _('Delete...'),
group.get('delete_account'),
_('Delete the selected account')),
('DeleteTransaction', gtk.STOCK_DELETE, _('Delete...'),
group.get('delete_transaction'),
_('Delete the selected transaction')),
("NewAccount", gtk.STOCK_NEW, _("Account..."),
group.get('new_account'),
_("Add a new account")),
("NewTransaction", gtk.STOCK_NEW, _("Transaction..."),
group.get('new_store'),
_("Add a new transaction")),
("Edit", gtk.STOCK_EDIT, _("Edit..."),
group.get('edit')),
]
self.financial_ui = self.add_ui_actions('', actions,
filename='financial.xml')
self.set_help_section(_("Financial help"), 'app-financial')
self.Edit.set_short_label(_('Edit'))
self.DeleteAccount.set_short_label(_('Delete'))
self.DeleteTransaction.set_short_label(_('Delete'))
user = api.get_current_user(self.store)
if not user.profile.check_app_permission(u'admin'):
self.ConfigurePaymentMethods.set_sensitive(False)
def create_ui(self):
self.trans_popup = self.uimanager.get_widget('/TransactionSelection')
self.acc_popup = self.uimanager.get_widget('/AccountSelection')
self.window.add_new_items([self.NewAccount,
self.NewTransaction])
self.search_holder.add(self.accounts)
self.accounts.show()
self._create_initial_page()
self._refresh_accounts()
def activate(self, refresh=True):
if refresh:
self.refresh_pages()
self._update_actions()
self._update_tooltips()
self.window.SearchToolItem.set_sensitive(False)
def deactivate(self):
self.uimanager.remove_ui(self.financial_ui)
self.window.SearchToolItem.set_sensitive(True)
def print_activate(self):
self._print_transaction_report()
def export_spreadsheet_activate(self):
self._export_spreadsheet()
def get_current_page(self):
widget = self._get_current_page_widget()
if hasattr(widget, 'page'):
return widget.page
#
# Private
#
def _update_actions(self):
is_accounts_tab = self._is_accounts_tab()
self.AccountMenu.set_visible(is_accounts_tab)
self.TransactionMenu.set_visible(not is_accounts_tab)
self.DeleteAccount.set_visible(is_accounts_tab)
self.DeleteTransaction.set_visible(not is_accounts_tab)
self.window.ExportSpreadSheet.set_sensitive(True)
self.window.Print.set_sensitive(not is_accounts_tab)
self.NewAccount.set_sensitive(self._can_add_account())
self.DeleteAccount.set_sensitive(self._can_delete_account())
self.NewTransaction.set_sensitive(self._can_add_transaction())
self.DeleteTransaction.set_sensitive(self._can_delete_transaction())
self.Edit.set_sensitive(self._can_edit_account() or
self._can_edit_transaction())
def _update_tooltips(self):
if self._is_accounts_tab():
self.Edit.set_tooltip(_("Edit the selected account"))
self.window.Print.set_tooltip("")
else:
self.Edit.set_tooltip(_("Edit the selected transaction"))
self.window.Print.set_tooltip(
_("Print a report of these transactions"))
def _create_initial_page(self):
pixbuf = self.accounts.render_icon('stoq-money', gtk.ICON_SIZE_MENU)
page = self.notebook.get_nth_page(0)
hbox = self._create_tab_label(_('Accounts'), pixbuf)
self.notebook.set_tab_label(page, hbox)
def _create_new_account(self):
parent_view = None
if self._is_accounts_tab():
parent_view = self.accounts.get_selected()
else:
page_id = self.notebook.get_current_page()
widget = self.notebook.get_nth_page(page_id)
page = widget.page
if page.account_view.kind == 'account':
parent_view = page.account_view
retval = self._run_account_editor(None, parent_view)
if retval:
self.accounts.refresh_accounts(self.store)
def _refresh_accounts(self):
self.accounts.clear()
self.accounts.insert_initial(self.store)
def _edit_existing_account(self, account_view):
assert account_view.kind == 'account'
retval = self._run_account_editor(account_view,
self.accounts.get_parent(account_view))
if not retval:
return
self.accounts.refresh_accounts(self.store)
def _run_account_editor(self, model, parent_account):
store = api.new_store()
if model:
model = store.fetch(model.account)
if parent_account:
if parent_account.kind in ['payable', 'receivable']:
parent_account = None
if api.sysparam.compare_object('IMBALANCE_ACCOUNT', parent_account):
parent_account = None
retval = self.run_dialog(AccountEditor, store, model=model,
parent_account=parent_account)
if store.confirm(retval):
self.accounts.refresh_accounts(self.store)
store.close()
return retval
def _close_current_page(self):
assert self._can_close_tab()
page = self.get_current_page()
self._close_page(page)
def _get_current_page_widget(self):
page_id = self.notebook.get_current_page()
widget = self.notebook.get_children()[page_id]
return widget
def _close_page(self, page):
for page_id, child in enumerate(self.notebook.get_children()):
if getattr(child, 'page', None) == page:
self.notebook.remove_page(page_id)
del self._pages[page.account_view.id]
break
else:
raise AssertionError(page)
def _is_accounts_tab(self):
page_id = self.notebook.get_current_page()
return page_id == 0
def _is_transaction_tab(self):
page = self.get_current_page()
if not isinstance(page, TransactionPage):
return False
if page.model.kind != 'account':
return False
if (api.sysparam.compare_object('TILLS_ACCOUNT', page.model.account) or
page.model.parent_id == self._tills_account_id):
return False
return True
def _can_close_tab(self):
# The first tab is not closable
return not self._is_accounts_tab()
def _create_tab_label(self, title, pixbuf, account_view_id=None, page=None):
hbox = gtk.HBox()
image = gtk.image_new_from_pixbuf(pixbuf)
hbox.pack_start(image, False, False)
label = gtk.Label(title)
hbox.pack_start(label, True, False)
if account_view_id:
button = NotebookCloseButton()
if page:
button.connect('clicked', lambda button: self._close_page(page))
hbox.pack_end(button, False, False)
hbox.show_all()
return hbox
def _new_page(self, account_view):
if account_view.id in self._pages:
page = self._pages[account_view.id]
page_id = self.notebook.page_num(page.search.vbox)
else:
pixbuf = self.accounts.get_pixbuf(account_view)
page = TransactionPage(account_view,
self, self.get_toplevel())
page.search.connect('result-selection-changed',
self._on_search__result_selection_changed)
page.search.connect('result-item-popup-menu',
self._on_search__result_item_popup_menu)
hbox = self._create_tab_label(account_view.description, pixbuf,
account_view.id, page)
widget = page.search.vbox
widget.page = page
page_id = self.notebook.append_page(widget, hbox)
page.show()
page.account_view = account_view
self._pages[account_view.id] = page
self.notebook.set_current_page(page_id)
self._update_actions()
def refresh_pages(self):
for page in self._pages.values():
page.refresh()
def _import(self):
ffilters = []
all_filter = gtk.FileFilter()
all_filter.set_name(_('All supported formats'))
all_filter.add_pattern('*.ofx')
all_filter.add_mime_type('application/xml')
all_filter.add_mime_type('application/x-gzip')
ffilters.append(all_filter)
ofx_filter = gtk.FileFilter()
ofx_filter.set_name(_('Open Financial Exchange (OFX)'))
ofx_filter.add_pattern('*.ofx')
ffilters.append(ofx_filter)
gnucash_filter = gtk.FileFilter()
gnucash_filter.set_name(_('GNUCash xml format'))
gnucash_filter.add_mime_type('application/xml')
gnucash_filter.add_mime_type('application/x-gzip')
ffilters.append(gnucash_filter)
with selectfile("Import", parent=self.get_toplevel(),
filters=ffilters) as file_chooser:
file_chooser.run()
filename = file_chooser.get_filename()
if not filename:
return
ffilter = file_chooser.get_filter()
if ffilter == gnucash_filter:
format = 'gnucash.xml'
elif ffilter == ofx_filter:
format = 'account.ofx'
else:
# Guess
if filename.endswith('.ofx'):
format = 'account.ofx'
else:
format = 'gnucash.xml'
run_dialog(ImporterDialog, self, format, filename)
# Refresh everything after an import
self.accounts.refresh_accounts(self.store)
self.refresh_pages()
def _export_spreadsheet(self):
"""Runs a dialog to export the current search results to a CSV file.
"""
if self._is_accounts_tab():
run_dialog(FinancialReportDialog, self, self.store)
else:
page = self.get_current_page()
sse = SpreadSheetExporter()
sse.export(object_list=page.result_view,
name=self.app_title,
filename_prefix=self.app_name)
def _can_add_account(self):
if self._is_accounts_tab():
return True
return False
def _can_edit_account(self):
if not self._is_accounts_tab():
return False
account_view = self.accounts.get_selected()
if account_view is None:
return False
# Can only remove real accounts
if account_view.kind != 'account':
return False
if account_view.id in [self._banks_account_id,
self._imbalance_account_id,
self._tills_account_id]:
return False
return True
def _can_delete_account(self):
if not self._is_accounts_tab():
return False
account_view = self.accounts.get_selected()
if account_view is None:
return False
# Can only remove real accounts
if account_view.kind != 'account':
return False
return account_view.account.can_remove()
def _can_add_transaction(self):
if self._is_transaction_tab():
return True
return False
def _can_delete_transaction(self):
if not self._is_transaction_tab():
return False
page = self.get_current_page()
transaction = page.result_view.get_selected()
if transaction is None:
return False
return True
def _can_edit_transaction(self):
if not self._is_transaction_tab():
return False
page = self.get_current_page()
transaction = page.result_view.get_selected()
if transaction is None:
return False
return True
def _add_transaction(self):
page = self.get_current_page()
page.add_transaction_dialog()
self._refresh_accounts()
def _delete_account(self, account_view):
store = api.new_store()
account = store.fetch(account_view.account)
methods = PaymentMethod.get_by_account(store, account)
if methods.count() > 0:
if not yesno(
_('This account is used in at least one payment method.\n'
'To be able to delete it, the payment methods need to be '
're-configured first'), gtk.RESPONSE_NO,
_("Configure payment methods"), _("Keep account")):
store.close()
return
elif not yesno(
_('Are you sure you want to remove account "%s" ?') % (
(account_view.description, )), gtk.RESPONSE_NO,
_("Remove account"), _("Keep account")):
store.close()
return
if account_view.id in self._pages:
account_page = self._pages[account_view.id]
self._close_page(account_page)
self.accounts.remove(account_view)
self.accounts.flush()
imbalance_id = api.sysparam.get_object_id('IMBALANCE_ACCOUNT')
for method in methods:
method.destination_account_id = imbalance_id
account.remove(store)
store.commit(close=True)
def _delete_transaction(self, item):
msg = _('Are you sure you want to remove transaction "%s" ?') % (
(item.description))
if not yesno(msg, gtk.RESPONSE_YES,
_(u"Remove transaction"), _(u"Keep transaction")):
return
account_transactions = self.get_current_page()
account_transactions.result_view.remove(item)
store = api.new_store()
if isinstance(item.transaction, AccountTransactionView):
account_transaction = store.fetch(item.transaction.transaction)
else:
account_transaction = store.fetch(item.transaction)
account_transaction.delete(account_transaction.id, store=store)
store.commit(close=True)
account_transactions.update_totals()
def _print_transaction_report(self):
assert not self._is_accounts_tab()
page = self.get_current_page()
print_report(AccountTransactionReport, page.result_view,
list(page.result_view),
account=page.model,
filters=page.search.get_search_filters())
#
# Kiwi callbacks
#
def key_escape(self):
if self._can_close_tab():
self._close_current_page()
return True
def key_control_w(self):
if self._can_close_tab():
self._close_current_page()
return True
def on_accounts__row_activated(self, ktree, account_view):
self._new_page(account_view)
def on_accounts__selection_changed(self, ktree, account_view):
self._update_actions()
def on_accounts__right_click(self, results, result, event):
self.acc_popup.popup(None, None, None, event.button, event.time)
def on_Edit__activate(self, button):
if self._is_accounts_tab():
account_view = self.accounts.get_selected()
self._edit_existing_account(account_view)
elif self._is_transaction_tab():
page = self.get_current_page()
transaction = page.result_view.get_selected()
page._edit_transaction_dialog(transaction)
def after_notebook__switch_page(self, notebook, page, page_id):
self._update_actions()
self._update_tooltips()
def _on_search__result_selection_changed(self, search):
self._update_actions()
def _on_search__result_item_popup_menu(self, search, result, event):
self.trans_popup.popup(None, None, None, event.button, event.time)
# Toolbar
def new_activate(self):
if self._is_accounts_tab() and self._can_add_account():
self._create_new_account()
elif self._is_transaction_tab() and self._can_add_transaction():
self._add_transaction()
def on_NewAccount__activate(self, action):
self._create_new_account()
def on_NewTransaction__activate(self, action):
self._add_transaction()
def on_DeleteAccount__activate(self, action):
account_view = self.accounts.get_selected()
self._delete_account(account_view)
def on_DeleteTransaction__activate(self, action):
transactions = self.get_current_page()
transaction = transactions.result_view.get_selected()
self._delete_transaction(transaction)
self.refresh_pages()
self._refresh_accounts()
# Financial
def on_Import__activate(self, action):
self._import()
# Edit
def on_ConfigurePaymentMethods__activate(self, action):
from stoqlib.gui.dialogs.paymentmethod import PaymentMethodsDialog
store = api.new_store()
model = self.run_dialog(PaymentMethodsDialog, store)
store.confirm(model)
store.close()
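# --- Illustrative sketch, not part of the original file ---
# The deletion handlers above all repeat one store lifecycle: open a new
# store, re-fetch the selected object inside it, mutate or remove it, then
# commit and close. A minimal sketch of that pattern, using only calls
# already present above (api.new_store, store.fetch, store.commit);
# the helper name below is hypothetical.
def _with_store(obj, mutate):
    store = api.new_store()
    db_obj = store.fetch(obj)         # re-attach the object to this store
    mutate(store, db_obj)             # e.g. lambda s, o: o.remove(s)
    store.commit(close=True)          # persist the change and release the store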
|
tiagocardosos/stoq
|
stoq/gui/financial.py
|
Python
|
gpl-2.0
| 32,037
|
[
"VisIt"
] |
99efd0d362ab859d4bfe59477b695a53177df3411402c977add603c752eb5e6f
|
# Copyright (c) 2012, GlaxoSmithKline Research & Development Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of GlaxoSmithKline Research & Development Ltd.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Jameed Hussain, September 2012
import sys
import re
from rdkit import Chem
def find_correct(f_array):
core = ""
side_chains = ""
for f in f_array:
attachments = f.count("*")
if (attachments == 1):
side_chains = "%s.%s" % (side_chains,f)
else:
core = f
side_chains = side_chains.lstrip('.')
#cansmi the side chains
temp = Chem.MolFromSmiles(side_chains)
side_chains = Chem.MolToSmiles( temp, isomericSmiles=True )
#and cansmi the core
temp = Chem.MolFromSmiles(core)
core = Chem.MolToSmiles( temp, isomericSmiles=True )
return core,side_chains
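# Illustrative sketch, not part of the original script: canonicalising the
# dot-separated side chains (as find_correct does above) should make the
# resulting string independent of the order in which the fragments were
# generated. The SMILES used here are hypothetical.
def _sidechains_order_invariant():
    a = Chem.MolToSmiles(Chem.MolFromSmiles('[*]O.[*]CC'), isomericSmiles=True)
    b = Chem.MolToSmiles(Chem.MolFromSmiles('[*]CC.[*]O'), isomericSmiles=True)
    return a == b   # expected True: canonical SMILES orders fragments canonically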
def delete_bonds(bonds):
#use the same parent mol object and create editable mol
em = Chem.EditableMol(mol)
#loop through the bonds to delete
isotope = 0
isotope_track = {}
for i in bonds:
isotope += 1
#remove the bond
em.RemoveBond(i[0],i[1])
#now add attachment points
newAtomA = em.AddAtom(Chem.Atom(0))
em.AddBond(i[0],newAtomA,Chem.BondType.SINGLE)
newAtomB = em.AddAtom(Chem.Atom(0))
em.AddBond(i[1],newAtomB,Chem.BondType.SINGLE)
#keep track of where to put isotopes
isotope_track[newAtomA] = isotope
isotope_track[newAtomB] = isotope
#should be able to get away without sanitising mol
#as the existing valencies/atoms not changed
modifiedMol = em.GetMol()
#canonical smiles can be different with and without the isotopes
#hence to keep track of duplicates use fragmented_smi_noIsotopes
fragmented_smi_noIsotopes = Chem.MolToSmiles(modifiedMol,isomericSmiles=True)
valid = True
fragments = fragmented_smi_noIsotopes.split(".")
#check if its a valid triple cut
if(isotope == 3):
valid = False
for f in fragments:
matchObj = re.search( '\*.*\*.*\*', f)
if matchObj:
valid = True
break
if valid:
if(isotope == 1):
fragmented_smi_noIsotopes = re.sub('\[\*\]', '[*:1]', fragmented_smi_noIsotopes)
fragments = fragmented_smi_noIsotopes.split(".")
#print fragmented_smi_noIsotopes
s1 = Chem.MolFromSmiles(fragments[0])
s2 = Chem.MolFromSmiles(fragments[1])
#need to cansmi again as smiles can be different
output = '%s,%s,,%s.%s' % (smi,id,Chem.MolToSmiles(s1,isomericSmiles=True),Chem.MolToSmiles(s2,isomericSmiles=True) )
if( (output in outlines) == False):
print output
#print
outlines[output]=None
elif (isotope >= 2):
#add the isotope labels
for key in isotope_track:
#to add isotope labels
modifiedMol.GetAtomWithIdx(key).SetIsotope(isotope_track[key])
fragmented_smi = Chem.MolToSmiles(modifiedMol,isomericSmiles=True)
#change the isotopes into labels - currently can't add SMARTS or labels to mol
fragmented_smi = re.sub('\[1\*\]', '[*:1]', fragmented_smi)
fragmented_smi = re.sub('\[2\*\]', '[*:2]', fragmented_smi)
fragmented_smi = re.sub('\[3\*\]', '[*:3]', fragmented_smi)
fragments = fragmented_smi.split(".")
#identify core/side chains and cansmi them
core,side_chains = find_correct(fragments)
#now change the labels on sidechains and core
#to get the new labels, cansmi the dot-disconnected side chains
#the first fragment in the side chains has attachment label 1, 2nd: 2, 3rd: 3
#then change the labels accordingly in the core
#this is required by the indexing script, as the side-chains are "keys" in the index
#this ensures the side-chains always have the same numbering
isotope_track = {}
side_chain_fragments = side_chains.split(".")
for s in xrange( len(side_chain_fragments) ):
matchObj = re.search( '\[\*\:([123])\]', side_chain_fragments[s] )
if matchObj:
#add to isotope_track with key: old attachment label, value: new label (position in the side-chain string)
isotope_track[matchObj.group(1)] = str(s+1)
#change the labels if required
if(isotope_track['1'] != '1'):
core = re.sub('\[\*\:1\]', '[*:XX' + isotope_track['1'] + 'XX]' , core)
side_chains = re.sub('\[\*\:1\]', '[*:XX' + isotope_track['1'] + 'XX]' , side_chains)
if(isotope_track['2'] != '2'):
core = re.sub('\[\*\:2\]', '[*:XX' + isotope_track['2'] + 'XX]' , core)
side_chains = re.sub('\[\*\:2\]', '[*:XX' + isotope_track['2'] + 'XX]' , side_chains)
if(isotope == 3):
if(isotope_track['3'] != '3'):
core = re.sub('\[\*\:3\]', '[*:XX' + isotope_track['3'] + 'XX]' , core)
side_chains = re.sub('\[\*\:3\]', '[*:XX' + isotope_track['3'] + 'XX]' , side_chains)
#now remove the XX
core = re.sub('XX', '' , core)
side_chains = re.sub('XX', '' , side_chains)
output = '%s,%s,%s,%s' % (smi,id,core,side_chains)
if( (output in outlines) == False):
print output
outlines[output]=None
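# Illustrative sketch, not part of the original script: a single acyclic
# bond cut on a hypothetical molecule, using the same EditableMol calls as
# delete_bonds above (RemoveBond, AddAtom, AddBond with dummy attachment atoms).
def _cut_one_bond(smiles, begin_idx, end_idx):
    m = Chem.MolFromSmiles(smiles)
    em = Chem.EditableMol(m)
    em.RemoveBond(begin_idx, end_idx)
    a = em.AddAtom(Chem.Atom(0))                      # dummy attachment atom
    em.AddBond(begin_idx, a, Chem.BondType.SINGLE)
    b = em.AddAtom(Chem.Atom(0))
    em.AddBond(end_idx, b, Chem.BondType.SINGLE)
    return Chem.MolToSmiles(em.GetMol(), isomericSmiles=True)
# e.g. _cut_one_bond('CCO', 0, 1) should give two dot-separated fragments,
# each carrying a [*] attachment point.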
if __name__=='__main__':
if (len(sys.argv) >= 2):
print "Program that fragments a user input set of smiles.";
print "The program enumerates every single,double and triple acyclic single bond cuts in a molecule.\n";
print "USAGE: ./rfrag.py <file_of_smiles";
print "Format of smiles file: SMILES ID (space separated)";
print "Output: whole mol smiles,ID,core,context\n";
sys.exit(1)
#read the STDIN
for line in sys.stdin:
line = line.rstrip()
line_fields = re.split('\s|,',line)
smi = line_fields[0]
id = line_fields[1]
mol = Chem.MolFromSmiles(smi)
if(mol == None):
sys.stderr.write("Can't generate mol for: %s\n" % (smi) )
continue
#different cuts can give the same fragments
#use the outlines dict to remove the duplicates
outlines={}
#SMARTS for "acyclic and not in a functional group"
smarts = Chem.MolFromSmarts("[#6+0;!$(*=,#[!#6])]!@!=!#[*]")
#finds the relevant bonds to break
#find the atoms maches
matching_atoms = mol.GetSubstructMatches(smarts)
total = len(matching_atoms)
#catch case where there are no bonds to fragment
if(total == 0):
output = '%s,%s,,' % (smi,id)
if( (output in outlines) == False ):
print output
outlines[output]=None
bonds_selected = []
#loop to generate every single, double and triple cut in the molecule
for x in xrange( total ):
#print matches[x]
bonds_selected.append(matching_atoms[x])
delete_bonds(bonds_selected)
bonds_selected = []
for y in xrange(x+1,total):
#print matching_atoms[x],matching_atoms[y]
bonds_selected.append(matching_atoms[x])
bonds_selected.append(matching_atoms[y])
delete_bonds(bonds_selected)
bonds_selected = []
for z in xrange(y+1, total):
#print matching_atoms[x],matching_atoms[y],matching_atoms[z]
bonds_selected.append(matching_atoms[x])
bonds_selected.append(matching_atoms[y])
bonds_selected.append(matching_atoms[z])
delete_bonds(bonds_selected)
bonds_selected = []
#right, we are done.
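# Illustrative usage, not part of the original script: the program reads
# "SMILES ID" pairs from stdin and writes "smiles,ID,core,context" lines to
# stdout, e.g. (hypothetical input):
#
#   echo "c1ccccc1CCO mol1" | python rfrag.py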
|
rdkit/rdkit-orig
|
Contrib/mmpa/rfrag.py
|
Python
|
bsd-3-clause
| 9,430
|
[
"RDKit"
] |
1931c5b018f3f9094d7bf142fb22010203d246cc73744f7907956ef43d8fa261
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import scipy.io
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
# Returns mu,sigma of the Gaussian emission for the (single) hidden state, computed from the feature-vector matrix, for the Smooth, Moderate, and Rough surface models
def feature_to_mu_sigma(fvec):
index = 0
m,n = np.shape(fvec)
#print m,n
mu = np.matrix(np.zeros((1,1)))
sigma = np.matrix(np.zeros((1,1)))
DIVS = m/1
while (index < 1):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),0:]
#if index == 1:
#print temp_fvec
mu[index] = scp.mean(temp_fvec)
sigma[index] = scp.std(temp_fvec)
index = index+1
return mu,sigma
# Returns sequence given raw data
def create_seq(fvec):
m,n = np.shape(fvec)
#print m,n
seq = np.matrix(np.zeros((1,n)))
DIVS = m/1
for i in range(n):
index = 0
while (index < 1):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),i]
#if index == 1:
#print temp_fvec
seq[index,i] = scp.mean(temp_fvec)
index = index+1
return seq
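# Illustrative sketch, not part of the original script: the main block below
# builds and trains one-state Gaussian HMMs with ghmm. A minimal version of
# that recipe, using only ghmm calls that appear in this file; the helper
# name and example values are hypothetical.
def _train_one_state_hmm(observations, mu, sigma):
    F = ghmm.Float()                                   # continuous emission domain
    A = [[1.0]]                                        # single state, self-transition only
    B = [[mu, sigma]]                                  # Gaussian emission (mean, std)
    pi = [1.0]                                         # start in the only state
    model = ghmm.HMMFromMatrices(F, ghmm.GaussianDistribution(F), A, B, pi)
    model.baumWelch(ghmm.SequenceSet(F, observations)) # re-estimate mu, sigma from the data
    return model
# e.g. model = _train_one_state_hmm([[0.1, 0.2, 0.15], [0.12, 0.18]], 0.15, 0.05)
# and a new sequence seq can be scored with
# model.viterbi(ghmm.EmissionSequence(ghmm.Float(), seq))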
if __name__ == '__main__':
### Simulation Data
tSamples = 400
datasmooth = scipy.io.loadmat('smooth.mat')
datamoderate = scipy.io.loadmat('medium.mat')
datarough = scipy.io.loadmat('rough.mat')
simulforce = np.zeros((tSamples,150))
datatime = np.arange(0,4,0.01)
dataforceSmooth = np.transpose(datasmooth['force'])
dataforceModerate = np.transpose(datamoderate['force'])
dataforceRough = np.transpose(datarough['force'])
j = 0
for i in dataforceSmooth:
simulforce[:,j] = i
j = j+1
j = 50
for i in dataforceModerate:
simulforce[:,j] = i
j = j+1
j = 100
for i in dataforceRough:
simulforce[:,j] = i
j = j+1
Fmat = np.matrix(simulforce)
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
#print " "
#print 'Total_Matrix_Shape:',m_tot,n_tot
mu_smooth,sigma_smooth = feature_to_mu_sigma(Fmat[0:tSamples,0:50])
mu_moderate,sigma_moderate = feature_to_mu_sigma(Fmat[0:tSamples,50:100])
mu_rough,sigma_rough = feature_to_mu_sigma(Fmat[0:tSamples,100:150])
#print [mu_smooth, sigma_smooth]
# HMM - Implementation:
# 1 Hidden State (this file is the single-state cross-validation variant)
# Force as Continuous Gaussian Observations from each hidden state
# Three HMM-Models for Smooth, Moderate, Rough Surfaces
# Transition probabilities obtained as an upper-triangular matrix (to be trained using Baum-Welch)
# A new object is classified according to which model it resembles most closely.
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[1.0]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_smooth = np.zeros((1,2))
B_moderate = np.zeros((1,2))
B_rough = np.zeros((1,2))
for num_states in range(1):
B_smooth[num_states,0] = mu_smooth[num_states]
B_smooth[num_states,1] = sigma_smooth[num_states]
B_moderate[num_states,0] = mu_moderate[num_states]
B_moderate[num_states,1] = sigma_moderate[num_states]
B_rough[num_states,0] = mu_rough[num_states]
B_rough[num_states,1] = sigma_rough[num_states]
B_smooth = B_smooth.tolist()
B_moderate = B_moderate.tolist()
B_rough = B_rough.tolist()
# pi - initial probabilities per state
pi = [1.0] * 1
# generate Smooth, Moderate, Rough Surface models from parameters
model_smooth = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_smooth, pi) # Will be Trained
model_moderate = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_moderate, pi) # Will be Trained
model_rough = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rough, pi) # Will be Trained
trial_number = 1
smooth_final = np.matrix(np.zeros((30,1)))
moderate_final = np.matrix(np.zeros((30,1)))
rough_final = np.matrix(np.zeros((30,1)))
while (trial_number < 6):
# For Training
total_seq = Fmat[0:tSamples,:]
m_total, n_total = np.shape(total_seq)
#print 'Total_Sequence_Shape:', m_total, n_total
if (trial_number == 1):
j = 5
total_seq_smooth = total_seq[0:tSamples,1:5]
total_seq_moderate = total_seq[0:tSamples,51:55]
total_seq_rough = total_seq[0:tSamples,101:105]
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+1:j+5]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+51:j+55]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+101:j+105]))
j = j+5
if (trial_number == 2):
j = 5
total_seq_smooth = np.column_stack((total_seq[0:tSamples,0],total_seq[0:tSamples,2:5]))
total_seq_moderate = np.column_stack((total_seq[0:tSamples,50],total_seq[0:tSamples,52:55]))
total_seq_rough = np.column_stack((total_seq[0:tSamples,100],total_seq[0:tSamples,102:105]))
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0],total_seq[0:tSamples,j+2:j+5]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50],total_seq[0:tSamples,j+52:j+55]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100],total_seq[0:tSamples,j+102:j+105]))
j = j+5
if (trial_number == 3):
j = 5
total_seq_smooth = np.column_stack((total_seq[0:tSamples,0:2],total_seq[0:tSamples,3:5]))
total_seq_moderate = np.column_stack((total_seq[0:tSamples,50:52],total_seq[0:tSamples,53:55]))
total_seq_rough = np.column_stack((total_seq[0:tSamples,100:102],total_seq[0:tSamples,103:105]))
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0:j+2],total_seq[0:tSamples,j+3:j+5]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50:j+52],total_seq[0:tSamples,j+53:j+55]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100:j+102],total_seq[0:tSamples,j+103:j+105]))
j = j+5
if (trial_number == 4):
j = 5
total_seq_smooth = np.column_stack((total_seq[0:tSamples,0:3],total_seq[0:tSamples,4:5]))
total_seq_moderate = np.column_stack((total_seq[0:tSamples,50:53],total_seq[0:tSamples,54:55]))
total_seq_rough = np.column_stack((total_seq[0:tSamples,100:103],total_seq[0:tSamples,104:105]))
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0:j+3],total_seq[0:tSamples,j+4:j+5]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50:j+53],total_seq[0:tSamples,j+54:j+55]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100:j+103],total_seq[0:tSamples,j+104:j+105]))
j = j+5
if (trial_number == 5):
j = 5
total_seq_smooth = total_seq[0:tSamples,0:4]
total_seq_moderate = total_seq[0:tSamples,50:54]
total_seq_rough = total_seq[0:tSamples,100:104]
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0:j+4]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50:j+54]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100:j+104]))
j = j+5
train_seq_smooth = (np.array(total_seq_smooth).T).tolist()
train_seq_moderate = (np.array(total_seq_moderate).T).tolist()
train_seq_rough = (np.array(total_seq_rough).T).tolist()
#m,n = np.shape(train_seq_smooth)
#print m,n
#print train_seq_smooth
final_ts_smooth = ghmm.SequenceSet(F,train_seq_smooth)
final_ts_moderate = ghmm.SequenceSet(F,train_seq_moderate)
final_ts_rough = ghmm.SequenceSet(F,train_seq_rough)
model_smooth.baumWelch(final_ts_smooth)
model_moderate.baumWelch(final_ts_moderate)
model_rough.baumWelch(final_ts_rough)
# For Testing
if (trial_number == 1):
j = 5
total_seq_smooth = total_seq[0:tSamples,0]
total_seq_moderate = total_seq[0:tSamples,50]
total_seq_rough = total_seq[0:tSamples,100]
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100]))
j = j+5
if (trial_number == 2):
j = 5
total_seq_smooth = total_seq[0:tSamples,1]
total_seq_moderate = total_seq[0:tSamples,51]
total_seq_rough = total_seq[0:tSamples,101]
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+1]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+51]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+101]))
j = j+5
if (trial_number == 3):
j = 5
total_seq_smooth = total_seq[0:tSamples,2]
total_seq_moderate = total_seq[0:tSamples,52]
total_seq_rough = total_seq[0:tSamples,102]
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+2]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+52]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+102]))
j = j+5
if (trial_number == 4):
j = 5
total_seq_smooth = total_seq[0:tSamples,3]
total_seq_moderate = total_seq[0:tSamples,53]
total_seq_rough = total_seq[0:tSamples,103]
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+3]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+53]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+103]))
j = j+5
if (trial_number == 5):
j = 5
total_seq_smooth = total_seq[0:tSamples,4]
total_seq_moderate = total_seq[0:tSamples,54]
total_seq_rough = total_seq[0:tSamples,104]
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+4]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+54]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+104]))
j = j+5
total_seq_obj = np.matrix(np.column_stack((total_seq_smooth,total_seq_moderate,total_seq_rough)))
smooth = np.matrix(np.zeros(np.size(total_seq_obj,1)))
moderate = np.matrix(np.zeros(np.size(total_seq_obj,1)))
rough = np.matrix(np.zeros(np.size(total_seq_obj,1)))
m,n = np.shape(smooth)
print m,n
k = 0
while (k < np.size(total_seq_obj,1)):
test_seq_obj = (np.array(total_seq_obj[0:tSamples,k]).T).tolist()
new_test_seq_obj = np.array(sum(test_seq_obj,[]))
ts_obj = new_test_seq_obj
final_ts_obj = ghmm.EmissionSequence(F,ts_obj.tolist())
# Find Viterbi Path
path_smooth_obj = model_smooth.viterbi(final_ts_obj)
path_moderate_obj = model_moderate.viterbi(final_ts_obj)
path_rough_obj = model_rough.viterbi(final_ts_obj)
obj = max(path_smooth_obj[1],path_moderate_obj[1],path_rough_obj[1])
if obj == path_smooth_obj[1]:
smooth[0,k] = 1
elif obj == path_moderate_obj[1]:
moderate[0,k] = 1
else:
rough[0,k] = 1
k = k+1
#print smooth.T
smooth_final = smooth_final + smooth.T
moderate_final = moderate_final + moderate.T
rough_final = rough_final + rough.T
trial_number = trial_number + 1
#print smooth_final
#print moderate_final
#print rough_final
# Confusion Matrix
cmat = np.zeros((3,3))
arrsum_smooth = np.zeros((3,1))
arrsum_moderate = np.zeros((3,1))
arrsum_rough= np.zeros((3,1))
k = 10
i = 0
while (k < 31):
arrsum_smooth[i] = np.sum(smooth_final[k-10:k,0])
arrsum_moderate[i] = np.sum(moderate_final[k-10:k,0])
arrsum_rough[i] = np.sum(rough_final[k-10:k,0])
i = i+1
k = k+10
i=0
while (i < 3):
j=0
while (j < 3):
if (i == 0):
cmat[i][j] = arrsum_smooth[j]
elif (i == 1):
cmat[i][j] = arrsum_moderate[j]
else:
cmat[i][j] = arrsum_rough[j]
j = j+1
i = i+1
#print cmat
# Plot Confusion Matrix
Nlabels = 3
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5])
ax.set_xticklabels(['Smooth', 'Moderate', 'Rough'])
ax.set_yticks([2.5,1.5,0.5])
ax.set_yticklabels(['Smooth', 'Moderate', 'Rough'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 3):
j = 0
while (j < 3):
pp.text(j+0.5,2.5-i,cmat[i][j])
j = j+1
i = i+1
pp.show()
|
tapomayukh/projects_in_python
|
sandbox_tapo/src/AI/Code for Project-3/HMM Code/hmm_crossvalidation_force_1_states.py
|
Python
|
mit
| 14,803
|
[
"Gaussian",
"Mayavi"
] |
e23d446b03917258c309293b2fa12ba52e4055b331388775845387e0130729ae
|
#----------------------------------------------------------------------
# This file was generated by encode_bitmaps.py
#
from wx.lib.embeddedimage import PyEmbeddedImage
AutoRefresh = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAABmJLR0QA/wD/AP+gvaeTAAAA"
"CXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH1wUIEhkj6H6O3AAABNZJREFUOMvFlVtsVEUc"
"xr+ZOWfPdk+3LdvL0hXaAqESBQRayrW1pRAfUBKNIQ2LCS8GfFSJCdEYjTHEoCEBDaAEL7Dl"
"+mBAFB+E3gKUpTcapEXUdun2BnTbvZ/bjA9um22BxDcn+ZLJzH9+Z+abb3KA/7NtPbOb/Ze6"
"6lOrS9afXeMCAPLEgh8qSpkiv8eADYZpFgpwmRIWpZSOcmFd5pyfJVHedGVnmzG5pvLkKjfj"
"/KoGc8M1b0f/NDARXlJ7+u+jAOrWza20PVvwnORUVDDCkDQ0xI0Y7k/0W51DHbHR2AiDIJ+Z"
"tuh+h55JkxR+WFYpEaK44Y22wDTwxlNrvy3Onrd129LtDs3SEE6OQ1WccMgqKCgoIRAAKCGI"
"aBG09DUkW4OtukXE4Pqi9fNvj3QbocTEokZv64A0dfzjZatUWX19w/yNjsP+r/ho4gGVQGFy"
"LigRplstTJTPqVCXz17BDHBoIoHqBbX2FXPK7X+O3XPWFG8it4Y7TZ3qHADoJJhJ9l1OW5Z6"
"rP2omDDDRJUzkpZlnWn0tlJLJNwDseCmn/+4eG5v8yfx9iG/yGAqBhNBjJtjKPdUEIlK0C0L"
"alKaDqbAS/3hAHE5c0Ol7oUdSVMPgWMfADR5u0Mt227euFx3tU6zjHW/3r3kP9b2TTyb5uAZ"
"RxFaRhsgUxsgLKIzbTrYIkJx2BzBkoKiI6o96wQo7nLFCs1MTLPX36mb+p6IHmYO2YnhRBAA"
"h0LtMIRFIMscAKY8FpYVz8rMO6Dasy7EFe2hTsT37vys8Exwla9sCYN0/s2ytxS3owAetRBr"
"C6qhMAWcm4TLtulgl8cxz2w8zz96+Tx/2gOoOl5WyAm5RLjlOOQ/qAkhYBEBKgS4EDDBbWbs"
"38ubilu1b+UDTkjmk4BCiIgkyKaka6xHjGZmPO3DVMjKtR2lo4L4hJQ2nvPdZp8kUxsEEaCg"
"0MwkPm5+XwuE+965sv1GV6pOA4CaI2Wy4YJj+bLKZJ4+a1bP/a61Q4/ufy2IL2+aFRZAFMmO"
"lpHfQATByvx1OH3Hpw1EA182eP0nZu6OZ0puu8UOZUWkHxN8Ij8cC+1kks1KS1kqxylfdG7C"
"o86FAND9oEsQ0CWrT66cOxPMgCwurDXheHh7b/D27mgiUsIE+WtyXkrzkRBQLM5ZiqH4ACQq"
"YW/NF/YzPfW1v9z7qaemvuKgwflNUAQUk0oW+OcZckZOT/BOtV2yCZeaazxKhI4+BgYAQgji"
"ehwLs5+HYWm4FWpDZVEVK/dUONqG/O8OR4aSg5EgeZh4pCqEoSDDjUUFi5Cn5pP6zhNJqmf6"
"HgNTSkl/uA8fNO4Zp6B084JXbC8W19jHzTEIcCwrXC4ZsxdnMlAQUJjCgMFNTGghnOzwRQ3D"
"ert5x+VkurUAwEpe83x4Y+BarK8psKXrQO/+YHZg8HqoecFEckKVmY06bU7ilJ3g4DC5iage"
"QfvATX6x94IWHorsu7qr3ZfiCQDWZI7tVb6yUOj36NbuT3tvA8hIyV6yxfOCp9Zdx2bJKyQK"
"xSGrRsKMMy4EjChvGWkaPXSvPtAFIJFSHEB8EkxWH1766vVdtxoB2AEoaZJTYnIuk3MXu1zR"
"4Xgo2hsLAdBTudZS0EiqL570a2JpsElJ6cdMyUhJB2CmxqbaP0R8PsUa4mUWAAAAAElFTkSu"
"QmCC")
#----------------------------------------------------------------------
Copy = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAABmJLR0QA/wD/AP+gvaeTAAAA"
"CXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH1wUJAiwtCW2leAAAAmpJREFUOI2Vlc9PE0EU"
"x79vZrctPbS3Xrm0dOtfoQcTjVf/Ae9ESxSMxLMJYEEx/oga/TM8mRACJURSJQHa1DMegIMQ"
"SLqddp6HYXc7dbuUubTpe/uZz3vzdkq1V0sPWOsKrrlIiObj6uzXUXFHdbve09n563LxenXZ"
"S4o7QhAA4PT079jQXC4PIkrMcZKCzEzdblcO/YZUKg1m5rHB7fZvC9r4tXP/4uJ8EsAAhEBk"
"Nni5vPAohklEtGaBp6ZK4Xff9+Xa+vfJuSfPiJmT67apqK0s3hxprHoKl6Z0dnY6Lhe5XB4A"
"eKRxp9PBZn19bCBg2hOsGGMTVKprJTabzSuhnhe9Dha4VCrCsNgYb22EMc+LH9tgc2aON2Zm"
"tNvtEKyUshJbrVYsNEphVCo3InAQYGYUiwPGvo+t7c0wsVKJyrQtTT4RJRmbHjMzlOr9ZzwI"
"NJ/Bph6YgXQ6bYO11qGxAQO+38H2j3qYWC6XQ7thYyIBx5FQSkFrHYH7/b5VFrM9OsyMg4Ng"
"Kuy8bDaLfD4PACgUCiFrABzAAguywOXylAUEGEJISCmhtYYQAkH1l4dnJmBiYiJsCRHBdd2h"
"wyIQRVPgui6IhLFznPDw6lsb0FrDcVOp1rsPb2IveiIKNxu0FUKASEBrjY+f38c92nSS/gUW"
"ll48HO5/UD4zmzaAcff2PQDA8ckRGj93wIRPiffxcP9d14GU5hFjTZDSXNeHfw6xt7+LTDb7"
"pTo9s3olWCmFTCZjvbJSRtb9vsbxyRH29ncBQdPV6Zm3AJB4z9ZWFr/1er07gD1+wWIwGNAC"
"1NDgW/Nzz8+D2D8qwJznbf5BFgAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
Cut = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAABmJLR0QA/wD/AP+gvaeTAAAA"
"CXBIWXMAABwgAAAcIAHND5ueAAAAB3RJTUUH1gcMCjIxQXznpwAABCZJREFUOMuFk11IXEcU"
"x/8zO3fv3awm8WNjzK66hTaxNDG2JdWIhiTa0pY+hIC0FElfBKU2rfRBBUuLhUCSBrQUIaQh"
"DyEUP4KxhpIWU4lpirUPNSUmaaQVY9VtNKvu9917Z+70ISvIsq5/uAzcc/jNOf85hyBJnV+f"
"G7Msq9w0zaq2lvY72ECnz55qBtCpaY755o8/9STHaXIypbTswIEy5ObkDiONNE376u233gGl"
"1H367KlracFOp/PDN15/k3gLn4ON2rRvL57vSAX9prurx73LzbK2Z+NQ1WHszNu5Ly2Yc+7O"
"zXEBAKoOHQaA9hQWFMXj8XfLyyoghICmaYhEIwVpwfF4fGro+jXLbrdDYQry8vJtV767PLI+"
"JzMzc/BgeQU4F6CU4tbozwiFQq1pwQCOLT1dIroeg5QSJfv2IxwOHVlXbaUQotTjLoSUEk8W"
"nyAeN1bbWtq70oLbWtofA7g0ODQgnU4npJR4ef+r6OvveQQAGRkZPxw9UgPOORhjGL09glgs"
"WprqHZIrRltLe72u64GHf92XhBDk5roQCgV3X7x0oWv7tqytmuqAlBITd/+Aqqq9iWI2ByfU"
"+Nv4GNmy5RnkYHklMjMzPykoLIKUEpQSzM3PGiebmt/baBxTgtta2nstyxr95c5ty2azQVEU"
"qKoGwTkIIfhp+EeEQqHqdHNO08Q++Gf6bwkiIaVE8e4XEYmEMT0zDWazPUi3lQBg2yhwc3gk"
"UF1z1PHf1FRlcUkpTNOEYRjwP3yAE41NO7CJ0lWM4s86jPjMDCb/nEBxcTGJhUJYmnqEQbu9"
"YzMwSf5xBfBu07QOSkhtnHNt796XiN/hwL/jv6Og7DXkxGKYnLwvVcZ0S8r+gK5/UQfMpAUP"
"2u2Nis32eVFWVr6mKFDos4bGZ2dlIleWFRYSADAtC7pp4vHKis8U4stjhnE+pRUDHneNkLJ1"
"j8uVvxqL+Sd9vhV/NIrFcBiGEGdY7fELhhBnFsNh+KNRTPp8K6uxmH+Py5UvpGwd8LhrUnsc"
"jdZXeL1efySCxXC4zxCi26EoWAgGYQF3j/f0NwCYWAgG4VAUGEJ0L4bDff5IBBVerxfRaH1q"
"sGk+rzGGiGHwJSGGVMYanHY7KCHzJ4DrALQ64HtKyLzTbofKWMOSEEMRw+AaY5CG8cJ6a9na"
"BcTknqCuY6umoYSxG57sbEwsLCzNct4HwA1AAsAs573w+epe8Xh2zC0v31AY40FdB+XCDUAD"
"YAIQNHELtTN2eS4QQIaiMIeq4p7Ph2Upf/2I834A2YnP1cT51adSjt3z+eBQVWQoCpsLBGAx"
"1gNATewGIesWhV3VtBYqZbUlJfFJefOkaQ4kYlZSh7yTsdpdlFYzQkSckFvv6/o5AHytYpI0"
"eiThO01AlKSRXOvQSkDEuhMJuywA+B+n69WfhO60vAAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
Locate = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAABmJLR0QA/wD/AP+gvaeTAAAA"
"CXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH1wUJAwcewQ9TMAAAAypJREFUOMuF1cFvVFUU"
"x/HPDDMtdBKZAk8gMZGLCcaA6RhNWMwCCAZ2bo1JTf0TgIUrVBL32q0LI5jGsDESNgbcdOEs"
"XDFGTEQSbokaC8PQFnmvpULrYt4MM0OLJ3nJu++d+73n/O459xY8x0I9zCYHkiMb/cvameuX"
"rxc2m1vYBDiNGibsVDWygVMbq5pYio149H/BOXQqeTOpKtGab3HbLPblzwXjpoyRvJTI5jLp"
"nbQZG/GNTcH90Na9Fre9hz8wjy9xBIdRRsW4K5W9FdqegZeeB42NeLHvfy+A2IiN/NvJVHql"
"sreiolIL9XCtCy/1BVxLDifVPPUeNEzmeq+asIayL0IISzgVZ+LVYXgXVuyLdqJ1d0NoE+cs"
"mLOI1AWcw6kwGWqxEa9acDL9O5U+SIV6uNYDd6O1xgbQS3EmzsosWUZbM87EWUwPwGEHyYGk"
"1g/u2G1T/dJgLs7ExXx8CsfyxcSZ2MyrpAoWnJQ9nVwaqrb3Qz0cx8tWTVjweajnejZi85k6"
"H/QZsGHw2723NazrbkZ1yK+GI0M+A1YcGk/FRizERiwoaxpzuj/9PuvIMuhzbPOIx13A12CL"
"JS9oxstxdqjNe7KEEHo+4XhYt3WDiLO5jDFCPZzIN+coPgiToTaQfi5LXjXne9lsQ9Y5nHot"
"Heqhhq/sVbPS2eFuCYXJcB778gapKmvaYimHXoozcTEcD+vGsYhlp2MjTheGj8hW2iLDPe+4"
"40esY6tX/IBDyj62xfdW/eWmtv0eGadSqpDpHaXFvio407rR+iWpJIxhl8t2OZHvQ0nmH8tY"
"8JNf3XLTyjA0vZeeCfVQHQTP+9OKD1s3Wtd78N0uOqjlRW/lkbNm1H5tB90fgN5Nz3rou9jo"
"NNRTKULYjVE7HDDqs+TV5HXoSdNvWzvAsW1jsnbWgaa+8dDDGGNrGLwnT7tgp/2Kqko+7S6w"
"0dWU3k3PeuI3y36WeoDHMcb7w+ARHQG25wf5qqpDirYrWDPiEwWveeIj/7ql4LHU71bMYwQP"
"kMUYV5+9QUIo5LqXMZpPKKBsj28VHfbIu9qudgRRxCMsYD3GuN5l/Qe3YXJJdwMq5QAAAABJ"
"RU5ErkJggg==")
#----------------------------------------------------------------------
LocateArmed = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAABmJLR0QA/wD/AP+gvaeTAAAA"
"CXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH1wQaExMj1CO6xQAAAWRJREFUSMe1lTFuwzAM"
"RSmje87g3XsM9BwZvGeXjiLt2TP4HAGcPbvP4BOoQ/DbX5qSG6PhaBHvi5+kLPLmcFsJQ9fm"
"2vn1MbtdAgz2BzsnLdtCTQ3uDz/w6eQlLU/odPKiz0uVNjU4wABy9LfZsVBJpNmCi4iEEFwI"
"YWVBCMH1t6c1JRGnBSz49GmXD7iICHLSInI8e8GFGqupGg47uAfTyQsLs9j9kmyLeFoYHmPM"
"2iLYo6sDI8aYzSbrhjL8+pjd8bxuOGBcBeJDf0B5sOx+STJ0bcacQ4wtRc6f98AK3PLVWFXw"
"y4IxiWUJ7Iox5lXOmOoC/Zi+vUQDtS1sF+dY49xYbws3rVc34nNMWel9qi6atUTWomHK9KKh"
"Kld7KlgkxpgxYey5huN8tcnsLRL55taileDVMbVELIv4u4bzZf7th2PBN3+Z7HspdD9e/ifz"
"WFpNLoF3xdC1eejavPfZeEt8AZHHDNdIUA3RAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
MoveDown = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAACXBIWXMAAAsTAAALEwEAmpwY"
"AAAAB3RJTUUH1wUJAjEUqQRBbAAABBVJREFUOI2NlE1oXFUYht9z5t7JnZuZyZ+pBnpmJhis"
"goR02UWCacnAJF0UERRsF+JCcRErdKFgcSGou6iIKPiD1EV1K+3oSCC6kZqNSrUVtLl3TshM"
"TYK5c+f+ZO7P58IZHJKb4gcHDi/veXjPd757WaVSMarVagkH6tSpU59MTk6eHRkZSTUaDVdK"
"+dX6+voLB31HlZIkzszMvL24uPj4wsLCEBGh0WiMrq6uPsk5t+7evfuOYRjN/wMerFQq53uC"
"7/sPAJjN5XLZzc1N+L4PRVEwMTGRz+fzJ3Z2dlI1IR7JMKYxICZgd7Ze30wCqwCu9ARN0wAA"
"pmlGnuchnU7DcRxIKdHpdMAYywKYV4ATnLFnCfjjh0LhpRD4e7Ze/6kfvFetVocPtOI9VVUv"
"2Lad1zQNtm2j2WzarVbrzyAI7LKU73etL64VCstZxj7LAPb3hcITc/X6rcQeF4tFtd1ufyql"
"HLcs63Qmk0m5ruu1Wq1V13W/NE1zq9/fIfqwDbSzjH08xNiba4XCGx2i24fApmkGpVLp51ar"
"9Zrv+18oiqKEYegHQfB7FEUbB/1lKfdrQlxtERWHOX+ZATfLUv6YOBVEFAPocM73VVUN4zj2"
"AQSMsTjJX5bSrQnx+m4cP3Qf5+dXhVAPgUulkpJOpx8+fvz4K1NTU+WhoSFla2vLNQzjm729"
"vXcnJydvbmxshAnwsCZECKDIgJlDYMMwwpMnTz5/5syZs/Pz8/k4jrG9vT28trZ27saNG7ue"
"560A2Dp4rr8YUFYA5CuVysWe2Ol0xoloLpPJDPZGjHOOsbGxXC6Xe3B3dzdfLBYbpmnSveC9"
"xCs9IZ1O95JHnudhYGAA7XYbUkrs7+/fiwXWXdQFt6rV6mi/YXp6eoVz/oxt20MDAwNwHAfN"
"ZtOyLOt2GIbWUWl7KQnJc6w4jvORlHLMsqyyruuK4ziubdtfe553NY7j7aMS825ingQ2TTMs"
"Fou3bNt+1XXdK6lUikVR1Imi6E4cx1umaR6aCACoCaEyQOlexUuc4+5V692VWDUhUmUpo+5+"
"WAHeynL+lEWEeSn1JLDGGNOIaE/XdXJdd6mrX+Ocl4ioQUSdspRRTYjfAMylGbucZey51L9/"
"u3UktQKAT0Q+gB70eldf0jTN8H1fENFmTYgWB9qjnF+OgWUFgEX03WP1+mkAUIIgGAaw2D18"
"vW9/vg+K8fHxwPf9ix8cO3aupKqXcoxlVcZyCmPLThzDA76dq9fLPT/nnAe6rl/Tdf0aAPTt"
"tf5r2LbdcBzn2F9RtA2AQoB1iGDF8S820YXZPigAoFKpGEe8D/WlB2PsaV3XiXN+PwDUhNir"
"CWHUhHg06XBSjzmAGP99REsAQESfu67LLo2OaguDg6mylMPdx/s1CfwPBWAC+CiCmmwAAAAA"
"SUVORK5CYII=")
#----------------------------------------------------------------------
MoveLeft = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAACXBIWXMAAAsTAAALEwEAmpwY"
"AAAAB3RJTUUH1wUJAjQCIKcAeAAAA5tJREFUOI2dlO9rHEUYx7+zP87drZduLngm6PZ2EVFC"
"iecLkRR8U4i4qUjavOgbhfpKLL7whb7wvdo3ohV9UxFFFAVBQbQ5mph/QAMhBUUazO3eBu7S"
"9NK7vcnc3u3urC+6FzbNNQk+MMzMd3Y/z3efnRli27ZTqVRMpGGapug4Tjw9Pf2VZVkvj46O"
"ivV6nXme9yul9LMgCG5Vq9UYR4R0v+A4Tlwul6/Ozs5emJmZOZkkCer1emF5efniyspKu9fr"
"fQqgcRzwCdu2Xx0IQRCMAzibz+fzm5ubCIIAkiRhYmJiZGRk5Klms6mXSqUt13WTo8AygG8H"
"gqIoA+e82+0il8thd3cXnueh3++DEHKU2T1wq1Kp6FmxXC5/Lsvya5TSEUVR0Ol00Gg0Or7v"
"/xtFkX+U2wF4X5RKJZlS+rXneY+02+2zqqqKjLGu7/vLjLEfOed3juNYuF9wXTeMomhtdXX1"
"YqvVejeO4zvb29tvUko/CMNwzXXd/nHABxwDgOM4UalUIsVicUGSpLcYY78BSLIlME1TlmX5"
"aVVVT0mSRIIg6PT7/Vuc862NjQ0+FJw6TyYnJ3k65tk10zRlTdOeN03zQ9M0TyuKQmq1Wtvz"
"vO993//CsqzaA8GHBSHksXw+/97c3NyZqakpMQxD1Go1fWlp6fW1tbWtIAi+PA64aNv221kh"
"CILHc7ncM6Ioio7jIIoiAMDY2NiYoihPiqL40FHgXQAGgE+y4mCvr6+vQ9d1CIIA3/fRaDT2"
"khwKrlQqXQAHToRlWad0Xb+qquorhUJBFEURvu+jXq/fZoz9Hcdx8L9qzDmvU0o/qlarerPZ"
"fEKW5ROU0hal9Ider3edc34omKQtyfQJcG+vW5b1x0t3777/ws7OVgQYW4QUrgjCIud8x3Xd"
"B2+3LCjTDxLgWhQlItBRCenIhHyjxvHHG46zdyoPnDzcu5QAAJqm9QBcSNsgUS5dPhcDmzIh"
"IECRABtZyDDH4cAlY+wcgIV0Pp+CjRuG8SyA7wTgPIAracZ9P1kKw1AHMJvOF9LxLoBLGSgA"
"/Axg/tr4+BwB3skT8rBAyO9RkiAG/kyAf/aBBUEINU27DgCMMTIYd7vdy0ly8HbcjuNtS5aT"
"CCAkScCS5CaAN170vJv7HrRt2xlSDqSfPZuZD+r86A3DOLNoGK1Fw3AWDeP0sJeH1VgAwAEQ"
"TdMSxth8qv8EgJzP52UO3BaB5wD8AuCvYeD/AOQorv2N7OunAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
MoveRight = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAACXBIWXMAAAsTAAALEwEAmpwY"
"AAAAB3RJTUUH1wUJAjUzaGIxAwAAA2VJREFUOI2dkk1oI3UYxp//fOTjnyZtkq27WTfGiDWF"
"0kKgWuoXeAlMe1uKLos3PSsIelAPenPrwcPqag+rLOLJm+IGmkNFELbSg1C0UkyZ6Wyatmmr"
"ziRvMp1k/h5MpAzJNuxzGuZ5n987PPMyTdP0YrH4OHyampqKJxKJO9ls9tlIJMIqlYp1eHh4"
"o91u39nY2Gj65/1SBhmBQODjpaWlwvz8fLDdbsMwjESpVPpgZ2enDKA0DDiiadqrfsNxnDnO"
"eWB3dxeO40CWZaRSqQQR8UwmwwzDEOeBVQBf+41gMIhyuSzi8ThkWUa9XkelUkGz2QQACUDn"
"PPDfxWJxzG/k8/mvOOfXk8lkQFVVWJaFarX6l23bBOCBX9sD91Wj0XhT1/Urx8fHTyuKEm61"
"WgdEtOK67o+GYXgPDd7e3rYmJydf2dvbe9G2beRyuY1ms1nb2tpyz4MCADRN089bzjmvDgU7"
"I2mIGQbg0oBshHN+BGABwALnvNd9YGAVZ8FE9E73uTffliQpFAqF6kS0COAuABDRIudcEBEb"
"BixxzpeJ6BcAUQCPAPjO87wlImK+2btEtDg+Ph4fpor/T4tz/n04HL7dXfb5oIBt20P9vL5i"
"jPFupwtnXi9wzoUkSY8NU0VfCSFaRBTnnN8nopcBgHP+AxGNAmg9NBiAB8COnp4+8eXly9cU"
"xsbertVSL42OOtdiMfeB4JmZmUczmQyPRqOsWq12jo6Oapubm1bPX02nVQCLEnADQGjl4kUA"
"+KRgmk5fcCaTYSMjI7lsNntrYmIiHw6HJVVV64yxlbm5uU/X19dPVtNpGcDrAG6GGYMMwBLi"
"QwCN1XT6Vl+wYRhidnb2/UKh8Fw+nw+4rouDg4PY2traG6l7935aTqf/6AAveMBNBkAG4AJg"
"wJoAcgDCCoAxTdOu++Gu6z7DGFN1XYfrumCMIRaLxa54nhtibFJi7AKAjxhwVQBPWZ5XAvAW"
"gN8KpimU7rJv/GBVVVEul0UikYCiKLAsC5VKBdlOJ3kK3JaFuNA98FYH+BbAuwXT/LOXVwA0"
"isXiuB88PT39maIoryWTyaCqqrBtG/v7+7Vfhbj/pBBXGf47CwA1ARwXTLN2Nj/wKhzHeU/X"
"9UsnJyfPB4NB1mg0/iGi5Z8l6fcvTLM1KNfTv8MRYRfOZO7TAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
MoveUp = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAACXBIWXMAAAsTAAALEwEAmpwY"
"AAAAB3RJTUUH1wUJAjIEn54CywAABCZJREFUOI2FlE9oHFUcx7+/ebP/Znd2U5PUgx2ya9rG"
"ovjn5ClpQ+mWTXrwLKIHEQsq1oPSiuiheBBBIZ4KgkgoFAQRtV27IER7sLmUHoIVSdJNnolJ"
"szG7mZ2Z3Z198/OQ2bAm2/jgMb95j/n8vu/Ldx4KhUIZ/x2iUxiGwQAmwsmapvUBEEXLopJl"
"HStZ1u8lyyL0GHqPNRU+2XXdSQA3wvfJeDy+xZ73mAbkAFwHUAXwJIC5fWClVDJUhBDSqV/q"
"ggLADSHEhUsDA+MEfJwgygggo4CrJcv6FMDdvJS7DbQgCCKGYVw3DON6ePxO3d6rwnGcw/1C"
"HAqPylEiZDTtaZNoOkH0WcmyntoFRyKRquu65LouAUBXfa1LPYjovXg8/v7ra2vfMvCWzVyv"
"BsHfbeapCBFMojNxoqmSZR0DAK2Hx7us0McJABPM/EnYsHJWyh8AvBIA1UoQXK4zT7UBZIhO"
"RommS5Z1+CBwKJR+C72mcLbCve8BnMtLWWkxf7QdBFcUoBHwPIBEr1RgaGhIE0Jk4/H4iUQi"
"QZ7ntZrN5jwzLy8uLrYBIC+lArAY1lUA53+xrJGMpp3aYr63D5zNZvVoNPrEkSNHLh09ejSf"
"yWT01dVVt1wu36xWq1MA7j7seAw4Yaj3KyaigWQyef706dPnxsfH00EQYGNjo29mZuaF2dnZ"
"zYPACviTgckAOz9IulAoXAj3Ikopv91un0wkEkkpJVqtFjRNQ39/v2ma5vDDoKHidKfuKP68"
"syCEgO/7c+VyGZ7nIRaLoV6vQ0qJZrN5EBfclTIdwHaxWHyks5DL5R41TfPiwsKCZdt2JhaL"
"wXEcrK2t1Wq12h//AxaMHfo+j4Mg2HQc50spZX+tVssbhqE7juPatv2T53nXDpQMpMIGrX3g"
"paWldjabvVev1z/wPG9aCCGUUg2l1GIQBKsHUQkwQ/BqzxwzsyaESJumOWAYhnAcp+E4zoZS"
"SqDHHVKyrIQOjPdpWmybGQr4tVeOI7FY7Nnh4eGLIyMjp9LptL6ysuLNz8//XKlUvsjlcnfu"
"37/f6oLGo0THU0RnBDDGwFcA3umV48FUKvXq2NjYmdHRUVMphc3NzfStW7cmb9++/cB13b9u"
"WpZOwAMAiBI9kyJ6QwderDEXW8wf5qWs6QD6CoXCyx2w7/smM4/pum4sLS3B930QEdLpdDKZ"
"TA4JIVLwfcskGtGIjjNw3mc+4QHfNJjfzku5BuykwgfwdQcciUTged6d7hzbtg0pJRqNBpgZ"
"AC63geeIeZWB7zzmN7Fz0W91ODoAp1gsDnZ5PJhKpd5dWFh43Lbtvmg0CsdxsL6+vlWr1eaU"
"UlsAXvOYGwCaAOp5Kf/Za+k+j5l503XdK8vLy4eq1eqkYRgR13Xr29vbPzabzatBEFTOSrm+"
"97u9418tpvCNFrJAcwAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
New = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAABmJLR0QA/ADpAE8017ENAAAA"
"CXBIWXMAAA3XAAAN1wFCKJt4AAAAB3RJTUUH1QISDiUwykMmCQAAAkFJREFUOMutlc1rE2EQ"
"h5+Z3VSa4gdNmzUInjyIeBLEVvAf8NKDJ2/i1bMIYi+i4sc/4cGLeJDcRREENaUXC3qyudSQ"
"GCgiMabZZMfDfmZT7Wp9IWR32Pd5Z36/mV1hn2vUWRGgBPiuV7c4LvHF/Yd3bwOrfwNVCagc"
"GVBb7OGosTPUxxcvNK+5Xr3vZp5bvXH9ZmGomQ/+R4L+S3yrYJTZ/vrpiuG8GnVWnrr5Dc1m"
"E8dxMDNEBJGwqOw1GCVpUdYXOKVZbOYS4sxzaOE1Fjy7LPZtXfPgGBBDzAzMMDMsMMwAxqj0"
"ENnBZAF0HlDgADAuA4f3BCNAHFNBFUQcAjlIwCIy2gT/DQQdhoMGYj82gO3fglU1/BdBRNPD"
"RFFxCDjKgGWGdgKGa7j+czY/byEET4CWuxtYVVONEZCsxgIqCDOYHMOnwphTzM6VaGx858zJ"
"1gfg51TGa+uNSbMy2Sc/4iocTOcw5zgyc5pev4Tr1fuuV7cp8LmzS9RqtZympHKoRPdRdaJh"
"VZNjwa5StNvtcJNpKENSPkkVkadED0wt909dYWJJdvleTg4QQSkATjYBGpkVagqi0X0cj5IN"
"47lxzwfevX87lZFIKEUMzWossQl7gZeXzlOtVtMe1shENDJOEBTVbJdoMY273e5EuzHxnpDk"
"gGxlhTROT9GMeRGQeMRJ9C8MTrLR1LiIF1uZTqNJpo8LZpy4Ho82k8aFVVAsY8/z+B8rAQdB"
"cOfBo3u39gP7stW6OvXNy6vwj+zkY/oLI/uTo02YtuAAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
Open = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAABmJLR0QA/wD/AP+gvaeTAAAA"
"CXBIWXMAAA3XAAAN1wFCKJt4AAAAB3RJTUUH1QsVES80sHy0HwAAA3ZJREFUOMu1lV9oW1Uc"
"xz/n5ja9SW7Wptmytuuqc0Wn7TpkymQKguCrIM6JDwVBfFZh+KaC+qq++aAPg+nUsnUguIeB"
"Pvgg7sH+WWxruz92pSVJ2yymadI2ybnn58NN0si2yAp+4Rx+/H7nfn+/e35/juI+OPfN2dMo"
"Rj3tIQitoFAIggifvPnGW++3PHzh0nfyICgUCnLu/NlGBPbIyMhxg3dZKct3jiIcDlm66gHg"
"edpX3wfGGAC0rhIMBht6u1DIf/n2O+/ubz68spqmt/cAAJOTk6TSaSylfH7ZuQAQjAixWCdD"
"g0MNJwC2QXSxWGRifByAcCRMX38vJ595lnw+x8DAAEeOPIZlBXwiI4hIQwbBtm1S6Qxa6yZi"
"z6y3Ox3E43sBiCc6OXniOUqlEouLi/w+Po4TCjXFSIOgPRjkof5+BgeHWF3L4LruDrEVUJHK"
"dqHqOE7b6lqGPbEIoVCIhYWbVLXm6PAQSvw7VpZCjKAsUEphPCGxv5u5+Xnm5ueIx7s+bRA7"
"jpNMZ1JPh0MRIq7LC8+/SDa7xl8Lt9koboDgl5sxSD2NlsK2A4ScEIlEgj9mrtHVFfvs5ZdO"
"nWlk4NXXXnnv2PCxD/v6DoadcBudsR6+vZJkNr+vZTk6Ab9qtr3AXbZH3ZWrNnA7l/u7eujw"
"wzw5/BRjP1xm5PXTKFUrMaX8FlD+8hPHjv0e+Pyri4/bQK5crlQ9zyMajXJ1yeFoxTCbKrEb"
"9MYcimXRFjCTza5WPa1JZVIEA+Y/W7gVCrk7sq0Dy9aF0bG044QkGu2guFHg+BMHkF3ydoTb"
"KG2VTV/p5zMWQGF9vWxE8/2Pv9KTiJPf8hABI4IxYIzUFrUGabbR0IfbAyyls1VgxgaIuJHx"
"WzcWDvX0Hcbd108qX2Fb19qz1mVSm2HUJVW3+ehy29CekJy9ufTL6Fja9j9mslgsnup+5CC5"
"oiadKzfNRAHxx6Kqyb6uZgMQRcUTJiaS1cTmbx8D2ED78nIqGd0TNSd6E9bMqt6Jtua1vqt/"
"zd8dW6KjnapnmJtNrv00OvZ1nXjv9PR0pLun27ry5+bdGRH/GmjaVSNS36a1wEbGY3PlImAB"
"xgY2stns9WtTUx9F3NkPdltm+s71G1OXvjgPuECh/ncOEK4p3ZrXB4EAFWATyAFb9+pLq+WT"
"0Rqm+Sn4X/APqDnRZPJeirYAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
Paste = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAABmJLR0QA/wD/AP+gvaeTAAAA"
"CXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH1wUJAiwoeQdR9wAAAq1JREFUOI2lkk1IVFEU"
"gL/75ictbdRx/EmEQFByF+bCTVjkiLTIrE1gO3eR26ZFMNQgbbJFFIQTBAlua+PsykVJkC4k"
"EAoCMUNnxnTe8znTjOO9Leb/OaNYBx7v3nvu+d53z32CI2J0dPQD0G9Znpuenr50WJ0onvj9"
"aCtzOAFSzdfO20+cfiel8vT0dCFEZqtSisXFb2iaiKaTOyPO8NsFgLP9pPx+5AHw+IDttkI+"
"k5IagD/1fVpzZ59ob28ua7S2FmXj+7yq2pqXCtAEppLa3efv998A2HOm25/k05GLHa7wXhNf"
"jG4Sxj5ut4eOjs68Q0Y6804mYWOjS1S3nbP1nl7GRdg1O/9jEiiAV+Zw1tqpczg0Fra68T14"
"jB7TmZx8wtLSclnj2toafL771DfUM/HwHiMtm0glGkGRB+f7ogRmEtwNbuw2G4FAgHQ6XRZs"
"t9sRQuBy1RFPidLLsoKtIYTA4XBUzCuliiZHgIvzoVDoECgMDnor5g81HhoaKgNU2UcipSxT"
"VQlcpGw1LkAVoBgYGPg/44Jh5p2DG4ZOU1PL0WBL/5mdDQGq5Pherzc/NnaMirUW40JaSsng"
"oLfEUMpMX3Nr0Ui04mkrtqIYUNyK9fVfmOYu8XgcpY5zeWXBMmusWF39yfDwjfw+XY8dF7xf"
"As23Imup6zEMw8A0d/7FuHBJu6ZJJBJBZf9dwzAIhzeYejXF1ZbFyuDctZ1ySqKbUVpb2/Kb"
"2ixFuh4rgbqdu0cbt8uvTDzyEU9plQ6Tj5ttnzlpS5XN2QFez5Ecv8KWmUh7LpzR6VUfSzZZ"
"/9EDISBmpgEVKVrKxJ3LXBea9gKlPNacZVz2mwqxLaubA3st/S+DwZnEgYKxsVtVgAuoAxoA"
"N9CYHdcACSAGbAG/ge3sfBswg8EZCfAXXMGHi74Y/3sAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
Redo = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAABmJLR0QA/wD/AP+gvaeTAAAA"
"CXBIWXMAAA3XAAAN1wFCKJt4AAAAB3RJTUUH1gcaChMjSyiLTwAAArdJREFUOMutlE1rE1EU"
"ht9z5yOtqa39NNaKjSKiVAtWhYJYqShtlSwEEcGVyPwGl/4BF+ouGxf6C2bhR6GgoBQXxbYL"
"sdQWa2maJpiJzUczNzP3uknCZJiktXjgMHDP4TnvvHPmAv8YsbguY3H94G59DPuL7Vhc7/zv"
"4CsT5wEg0wy+X8W7wmkXP8MAHgJ47odul1No1/rw6d0iAHSZBrf2BI7F9UkAbyb1/kX38sDZ"
"YltJrdbKZQd5noGdE4hEIoFwagC9CeD97fFRFFpd/Mla4HIHjBFIJXDbBlMJVrIAYkA0Gq3C"
"w6bBi4HgWFw/AWDl+q1RlGURaStZkyAcCaYQpGbDzknYeQGqfCUPXDMN7qgBgl9cvXTaJlAo"
"ZW1CuvVFl2yoCsGxRe3MA+0yDe4AQBB4SvaGkMyvQlEYujsPA2DQKARZaZBwsS2WoIcJA5HB"
"QI+DwNCoFQqp6Ohph7ZVxMzscl392tRFqC1oCG2yx4S2jlZspRNyZnY5C+CMaXAyDU4AoJDW"
"FNoQrEBFRBmSS5+zLoC7psG/e+u2W6hCjwVBG1phy5xbkqoDIHQqwb/5t8cD3djrL00AVo/O"
"u7k/pTQA4OkTJCt9VB3AC/K4afCE96yZYgJAUsovv62FCcKwqDnjCdPguk+U9GVdsTqZ5Tfx"
"eHpH6XRcLtVSKDv5TLtfgau+1CpPxf9GXjVUKbKfH0Tx5A2WSqxlYhfGoyK5kRmzVmW8mAZV"
"+r3JPFB4FSs+xQSAlt+Kr4NjbC6VtB70dx9Ybx9y7x0ZYT/WPoqEB1YFKhWg8Nqh+C6jGnxl"
"WqwcitJLrjoaAUSMcpFhZod7ycn+krZwahARACbygZnHlprvI4+U7r5z7A4x9EkXWwuv3NfC"
"laJHKCLST2qoRYqNdbkzP+c61SEUsG5B2eiK9W5DnRV/AfIcOgCbYwEfAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
Refresh = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAABHNCSVQICAgIfAhkiAAAABl0"
"RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAATmSURBVDiNtZVpTBRnGMf/7zsz"
"O7uzSzmWW+QQa5GgFkWoaBuPmiatMWnaEIKamKYC1k+2TROTJv3SNDE1adJaFCQ1toCkNGm1"
"sUdiPEqtCgRBWwVCPRAWAXGBved4n35gIRx+8Esn+WeSmWd+85tnnpmXERH+j40/S1FZWYv0"
"LHUb9jdnl77bkgAA7GnGJZXfrlAU9SMwbDVNK00QFIkzP+ds1BJ0QZBoERT4o6O20pi55qV9"
"p1MEp79MC1s76ysezAMzBlb6XnM9wMpffnGpbWVOsuxyquCcQdcNBEIGHjyatG70egIj4wGJ"
"MRz2c+ULl6Rz6LzdEmIFMZHVUbtnYB5444Hmk9lpcWV7Xl+tRQwLE/4wYjQVmqpA4gDjDKBp"
"A38wgkud98PX/x7SScCzqTBr2a3+EWNyIph37cSuQXkGWlT1XUmCS3t7e/Ey7euWdjHmDXLO"
"AWEREYOZ6naFSgoynIUvpEpEQMggbF2fay/Kz7D3P3wS82pRFuvqHTZ1IYl5L88uK9XPuVRn"
"/ZlOmgoYzGFXwpYQ31+rreAhjhTPiH/7uda+Hz79pjXY1jNMTlXC0HgITwIm1uenM0XmsCwB"
"2R6eDwbDawPDk8wdF+Ndnp16Q9dNLyA+B4CbNRXe9vqKtis15eWGbm38/Upf+4kfO4LxDo7M"
"JA2Xbo1CkTgsIVhkoTEJqJpDHcrMSK6N1ewNYLzPIpt34cS01VV06YZ5aCqgSzEOBUPjIQgA"
"doXDsogpQhYAMNtjS4hgosvxZYzD/rOuq48pwk7FLE2eWgguqmpcpcjS2eq3itSkeA1piU5s"
"LkiGqkgwBTGbtQDsSEvPOYvL4szeNwQAoHLFovkuqmpIY8R/syzSjra0R0gQiADC9F4IsulW"
"QABzPpDiqqYxBrgW0QAQwce42P7E5u5x+UYcT6sBAIWgtp/aO0oEkuccj2v4ZIdskzkABsaA"
"sG7i47rWyP2RyfevH9/dHa2LTNvXKUC8VlpQGHbH6PHd/z4sHRx9XEeExHmtABGz22RcuDUC"
"BkJpXhKazt+JDIz6jrYd39Ww0E4WzhRJFcdUh++nybBImvD7q2yybM2cnzNu05aGKZCR6AQD"
"0N0/RpyxVcXvNC9d9NwSnrMssWHSF9x9+97gh75AOJtx3J298awwiDEAa3LiMDgexJCX48iB"
"Lfam8z3bzl3p7ympbvpKWNQBYgNcsWQi6YhDVeJ67g5ttikSuWM1wzsRql8EnpZmCIZ05C+J"
"RcS00HnPi1cKM6Xi/HSt487wB57HvrBnzMfGJ4JOycaQ4taQl5OMxDgna/ylK6yZeuMiMGeM"
"3X80hUM1lyY4A9+xablty7osuzdgwiKgMC9NLrBSXYxN988QBNMUmPBH0PjrDb9lGAcvntwb"
"npUkIjDGpPWVDUas0xbo77q4w9Nx5lHulsqd7iW51aufT8vIz01RMlNjmdNhQ8QQMEwLkwEd"
"Xb0ecfXmQz04MXK4+/TBGgBhAGEi0mfA9nX7Grx+z+2y3nOf/QPAEY09fe3ONSkrt5UrWvxa"
"cKY6VcUIRkyJCBAR35+jfa3HBq42dQMIRRMkouAMmK3efezNmw37LwOwA1DnRIlGkpxuJSGj"
"ICHoHfYGRvu8APToXEeiUB+ACBHRoqWJMSbNgc1EBiBh+jdvRWNEowMwiciay/kPKWlcmnLc"
"32AAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
Remove = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAFZJ"
"REFUOI1jYBgFAw8YkTkiIiLnGRgYDAjoufDmzRtDGIcFTdLgdUUAA8OXHwwMn78zMHz6xsDw"
"6SsDwwcY/sIg+vYHigVMGObj0czw/gv1vTAKBgMAAEXnKp/wjOxsAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
Save = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAABmJLR0QA/wD/AP+gvaeTAAAA"
"CXBIWXMAAA3XAAAN1wFCKJt4AAAAB3RJTUUH1QsKDTQVSGVyBgAABAtJREFUOMutlV1sFFUU"
"x38z+zmdbbvTZZfd1mVZPpYiBdqSVosfWBMfMEI0RnzQF9P4ADVIVVID+iBGAhiLaWwfRB80"
"ipLIiw9oDDE2xog0QVSsJbS1NsXW3bbTdr9nZ3d8aHdsqVBM/Cc35+aec/73f889M1dgAZ57"
"o/39PxKVj8amtIqF65LNSPpc8b6V8m9tna9+9h23AaE46fqgrfbsj+GLqyMbs6GVPjGeE51V"
"ZU7xz7jGVDrL9NXzQ6lkxhFW+tu7Xzv98XLEliLp+f7A2VVbHnAPD/TbB4eHbSO/D4ojE5PY"
"SitQnHac3nXKdLpQdj3m3LnrEc+li9/8OnArYhHgh2vO43essA4nBr662ui98HRPxz7xvlDv"
"ztzM9dnRwStGdDZOPJlA8QWQfUHHL7Gtn770+i55WWI1Ja2z5BMz968ffejoi+99AnDs4Edf"
"NgRHjyRUVUjOTqFlUgD4gutFr6yNTGjh7mVr/NShFz4PO3I/z6qFwwudDkng26kQqzfVL0py"
"p/uY+GucysL0jXztnW93nQCwAgTL4l+kJu3dx46+uWTnB9u6lqxlPHcxOtTDhycX6eDlQweP"
"AyfMUqTG7dcUt5dUau64yWTSHCZZ+p+5xWZfFDcyOkIymcTr9bL/QGutqRhY6/cHAEgkE7hk"
"178qLcIoFAB47MhpdE3jzOE9ACiKQiwWiwCXxfnY3XVb60mn0wgIptLqhh1UN+ygXPECkM1m"
"5mw6zrrau1lT0wBAiVQCwD1N9wJsNksBPFxZWbVI1bvPNzPw0/cAjI0OgWGQ13Nk0kniMyoA"
"Q1d6eWffdjMnGAwBvAIg7j/QWitJJdjtdjwejxkU8IboaNlGf28PFd5KCoYxXweDMreHgcsX"
"6GjZRqgyAoAsy/Pl8LD/QGutCERW+vwAqKqKLMvIsowoioSrNtC5t8kkFwUBV3kFA5cv0Lm3"
"iXDVBjNeVedOUV5WDhARgc13btxELpczrFYLMzMzqKqKJEnIskxNpJ5Tbc309/bgXhFguO8S"
"p9qaqYnUm4SqqpLP6yQSCWPtmrUATWI2m93i9/txuVyCrucpzN94MUFVVSpKA3Q820B/bw9v"
"PVNPRWnA9AHk8zoIoCiKEAyuIpPJllodDsfuSCRitsvNoCiNfH2ycdnfZXV1NU6no8UKEIvF"
"mJycvHnfFi/uNnxe71xrFj8QNE3D4/Fw7tw5HA4HsquEUlcZoigiSRKCICBJEi6XC1VV8fl8"
"RKPRJbZILC7cTdd1LBYLhmEgChay2SyapmEYBjab7ZbKb4SpWBAE0uk0dXW16Hoei8WCrutm"
"oN1u57/AChCNRg2/3z//THmWTXK73QCEw+EldmxszAAE4YknH98TCPjP8D9ibGx8+9/nYJ9T"
"cCLRagAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
Test = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAABmJLR0QA/wD/AP+gvaeTAAAA"
"CXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH1wUJAw4SGXukUgAABGlJREFUOI19lU1sVFUU"
"x3/3481M5810XssMSGmFWCJQbEXjUghRlJgQ44JE45qFCzcaExITY0JiiCwwxoXBhcTEGCwf"
"MYYYsXwtECuIkgZF1EhbPuw0xXY6M53O+7jXxZsZ2grc5OS93Hve7/zPOffeJ3jAsNaK7u7u"
"ji8OTpzWGk8piCKLMXy4eZv54EHfivvMe4Ofiz97elRe6wTdqxy0kmhtsTZidNxnairk3Pfm"
"yz177WvAzFKAvgd0l5TylSc3ibwVGaRoxw/awCosEIQ1OjvmKCwrc+MG26WcP2KMeQu4vBCi"
"lkBfEkJ83Nvb621Yb7OFQhqllqG0i1QplE5iTBJjoF4POH/BTY3fXJEul8tbjDFTwO/3Am9y"
"HOf9tWvXtjmOs3J6OsXmp32USqOkRCqBUpbIGKLQJwzm2LO3CyllxvM8WSqVBowxPwETC8Ee"
"8POaNWtcx3FWCiGYuuOglWX9o1WkjJByHilrBGGFwK8weDTNld+SGGMQQmRd11UzMzNPAF8D"
"803w5VwuF3ie1yNE3E9rLVevpdiwbppCvoSUVZSsEPoVgmCOd9/rxhjT8tVaZ4QQtWq1uhIY"
"kgBCiEyhUOiw1rLQwjDk0GGXKKxiojLYCsbUOHQkhzGGpf65XG6VEOLF5q7Y0d7enpFS5mwc"
"flE3r15z+eFHl3I1QSoleGrTDIPHlv8PCiCl8LTWc0EQ7NDAzlQqdcdamxPAQqy1FiEEr78Z"
"0tu7Gq01t29LUqky6XS6BbQLxHR2ds4Xi8WdEkBrnWw6NK2Z6uTkJPl8vlXPfD5PtVptrS8t"
"idY6mcvlEAc+YrSri9V+EMsVDdmW+P3EyQSnzj68qDzPbh1n+zY/zs4CAkTj6Thw6gyfycjE"
"ZQ0DCEPwfagHUK/DXA0e749It83j+z6VSoVEYo6N60PKFajVYj/fBz+AIIhZxoDa2MdfjsOr"
"jo4nIhMHiMKmo6X3kQrDF3xmZ31eeH6OFcsDrAVr7oKsBWPhzjSUy7wj9+3neBhBGMXAMIit"
"7jesDpd+dgGPbDbL5RGXej2er/ux2rChtilo336OS4DZWYpCxgtBQ2nrGcDwxfZWff+ZSDI2"
"nmytNX3DEKSAep1ZAAkwOcmuWi3uQxTG6qOGgvPD2UVbylrLV8fzcXbh3UytjXuiFHtad8Xw"
"Bf7Y2MfLrktBiEZKBsbGkwyd6Vi0T1tHHuju8hEClIwDfDuk3zh7rv+TYrEYSICBgYH2b74b"
"eO7vUW7/OwNaQxTB8MUs9xsXL7UTmTiAH8DYOBNjN/oOW2OdVimstRIQQ6cHtl4f1QfHxsHR"
"cPNWomyXHPEFp7I8eDR9q1SCySl17MTJx7YAQsg4pdavqb+//yGgzVrr9K27/kynV9194FOB"
"UiqtlEplMpmatZZKpZKOoqgWRVFCCPFLT0/P7mw2OyOECKSUtZGRkeIicAO+wlqbsNY6xBeU"
"LJVK7aVS6e1qtQqA67p4nncim82eASIhRCgEwZUrv95cyPoP7sJ02sxdq5AAAAAASUVORK5C"
"YII=")
#----------------------------------------------------------------------
ToolPanel_Controls = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAACXBIWXMAAA6cAAAOxAF19oSB"
"AAAAB3RJTUUH1wcFDyACNseopgAAA5BJREFUaN7tl09MHFUcxz87DsIMbJa5kE22GvGwrkCv"
"KKYlJsDGsIg3wlKSPah3TuVgPRA4UJWsWq81sZqIVBorWBtqICkHAyvWCHKwuIdaDhyWrOyy"
"3Z3uzs9DLSnL8kdamiGZT/KymZlfXr7f9/v99r0HDg4ODg4OxxhX8YvRsW9F13VbilVVFV3X"
"eL359JZu9dGAH69PSVPTK5SpZYhNV3zyh2vbTT36cC+bRVXLuJ/P27ZkpGhl1VIRllg2NiB7"
"GxAE7Ksf2M+AgPUYHfD91av8dusWemUl6dQG7517n/6zZ/nwoxG+vPQFBavA2++8+4TklzQg"
"OwvtgJimyY0bU1y48BkAN2dvcvnyGCB8cH6Y+oYGOjs7Dz1/KQdKqQhLDjfSm2mMamPr+cSJ"
"51hbW0OA27f/JPBy4NBzPxzFPaCU7PKHWfifo9pTTWI9QTz+F7lsls8vXqStLQhA9JNP+Tga"
"ZWlp8dDz/ydu/xKyHiPFw8PnGb8yzj/JJOFwmJcCAdqCQXRNY2QkytjYNwQCARTlmSPqgZJh"
"B0fTNXp7e7fNGAw+yELZsypnes/sIuVwDkrvAzb+G5X9SsgSC8vGDvbcyLSKCjKZLJnMPRun"
"YLuBHafRS199LffzecSy33FOLVN58YXnaT59ymXfFXY4Zuyopdgvv8rfd1dtKbaiopz2N4Ku"
"XQ38vvSHpDZSnGyos6WBbDbHz3Mx3nqzvfSV8s6duzSfasLtdtvSgNvtRtMqtr0rOszZ9Sa8"
"O8pBglKpFNPT0weacGJiYsdueZSou32Ym5tjZmaGlpYWfD4fo6OjLCwsEAqFqKurIx6PMz4+"
"Tk1NDZFIhFgsxuzsLPPz84RCIZaXl5mcnKSjo4P6+vqn42by2nXZ2NgQEZFEIiGmaUpXV5es"
"rq5Ke3u7ZDIZCYfDYpqmdHd3y+bmpiQSCcnlctLT0yOZTEYaGxslm81KJBLZ+n2STP00Lftm"
"wDRN+vv7qa2tZWVlBcuyCAQCaJqGpmmk02m8Xi+6rqPrOuvr63i9XjRNw+/3k06nicfjRKNR"
"/H4/IoLL5Xp6JVQoFEgmkyiKgqI8aJNYLMbg4CA+nw/DMDAMg4GBATweD319feTzeYaGhlhc"
"XMQwDFpbW8nn81RVVR2Z+D1LyK4Ul5DCMWebgfLycnI509aCiy9bO4rzyncTUlVVaUvxhUIB"
"j8fDa682OvcBBwcHBwcA/gWitAOivrbnzAAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
ToolPanel_Default = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAABmJLR0QA/wD/AP+gvaeTAAAM"
"nklEQVR42u2Ze4xc1X3HP+ece+/c2dnd2dm3bbAx5mlswMFOCI0hdUuI1FapmlZCav8h6SOq"
"WqmK+ohQm0r9I1QKJWlKURqpiVpKE6DUEGD9wBjbLMbEGNtgGxvvrr3e58zsY2Z2dmbu45zT"
"P/Yu2bg2sSGof9Q/6ejO/WPu/X7P+f6eF67YFbtiV+yK/X828TE9T1zCs22yWHL9PyOwCFie"
"t8QHgDfnLfthiIhfIPAU4ABNQEfyO7vkHXNACdBADETJCpP7+MMQEb8A4G4C+ubJgd2vCgHS"
"8wiDiDCo8/p/PU6jEdPd7qE1jBXn+ON/+PE3gJ1AMSFWS4hEl0tE/AKA/0ph6JUnpZL4mRYQ"
"HsNDpylPTdK/+yXu/cJvk2lqw6meozh8Ahk3mJiZY3qqwQMPbX0EeBk4B5SBOhBcDhHxUYDn"
"B19+0vF9Ul4GIR0mRs5Qmp2lf+uPEMrQ25ml59a7yXV2kct14LopTDDL6BsvIXRAJYgZOFPg"
"Kw8//yiwPSFSuhwi4jKB+8C6/NDufcpx8DNZBDA+OsJ0Mc+LT/w7YagByLWlWdbWwvKNdyMV"
"uK5DOt1ONpvFcSW1sTPMjQ+QkjWGJkrMlQMeeGjro8DzwMSlEhE/B7xMnDENfDo/vKfPUSk8"
"P4OxlvFzZ5nNT/LKM09SDqL3/+gpSXOzR64lw7Wf+ixS/TT2RGFEc7aTpqZmcrk24uIw00NH"
"cURAcTpgfLrMlx969m+AfmAqkVYtIRImQWCRyEUJCEAlkeWXCsN7dziuh+s2Yaxluljg3OBJ"
"Xtn6DI1Q4zoLCMMoxgqBEILWtEdXe4ZVGzejHBchBMaC1THGaOIwJJNpJdPaRi7XjporUDxz"
"HBHNMTMfMnB2ij/99gt/DewFZpNVAxrJiRjAXoyAC9w1dXbPHgCZSiFVM8X8KCODp+h/9jlC"
"a1BKIIwgshprwVoLFoQUNKUdsuk0q+/cjKNc9FQeffpdapPnqHg+rWvX43f1YKIGLS1t5Hp6"
"6Vm+BjMzwsTJN7GmzuDEDFOzAX/2yIt/lfjIDFBJSMSAURcAL4EvjZzc/kyjHhBEAYXJCYrF"
"PEdf/E/GBgZAgnIF1gikBC+lUEoilWCq0sNo7XNMldpo9c+RznWhT5+gfXKKbj9Lyk8TlAoU"
"3zuCrtfx29sJ5vKUx4eplmbwMlm6127i5MEDzNc1Uaz51M0r7t17+GwDOLEk3BoSff8vcxzn"
"0NU3ff5zCcubCgO7HmvOdZL2UgwdOUCleJb5akQYxDSMRkoYGslyaPAqcl1Z0n4F6yj2nugB"
"9xzXNSZIr7yJrmwPYmYU38uDmKJ65iReazNOJkNsYky5RtuqFqpBxODYLA9+bzfAEeAJIYRj"
"rfUTWdcW5X+hExDGmEKSZHrzQzueTqU9BILu3h5Wr72Nqk4RNwJ8L0RYwePbb+BsoRXlWrSp"
"EQYVypU8oQ7pnD+Kl07TnO1ASqjX55gtT1KtVwAwOsK/agM33PV5lq2+nmh2nLETb2DCkDtu"
"7GH/8YnnhBBVIcSItTYEqolDX1RCIonz6/JDO/b5fgbHSSEwGB1hjGXVNSu57rY7UC0dnDhe"
"ZnBYU5m3KGWwpkGjUWa+PkOtXqIzGqCBi8USR2XmqkUmy5M46Qydn7iXdV/8A6664QZEaZLS"
"6NvE5Qkc26DFd8j4KT5xfe/GvUdHTgEz1tp5IURgra0nUrLnE/CTU1k/Nbz7dS/l4jgppALl"
"+QuyMxptDHFYo6v3am7/5C1cv7rC8pZpitOW+UBhCYmiORqNKk2pBvX5CvVGmUZ9FusYujbc"
"wy1f/ENWrl1HPDtKafgoYX0KpUOsCSCOwUBL1seXLptu6t20680zo8A4MG+tXcwN+nwC8v77"
"7//lV3d+dw/WoKSLEgIhBUI6COkAJok4hjCYx+iQ1Wtu4vY7b2XV1XWWpYvMVBzKtRhsSFPa"
"IurTdORaWb95C7f/zu9zzS0biGdHqY4eJ67P4BJjtcaaGKwBbYhDcFKS1lwrbZ7L+tWdm3Ye"
"HBTW2ncTH4iAWJwnnTuO/eT5g9esWo50XUxcX2ClQCon0WyM0aDjCB3HGGvQkcFxPFLpDJGO"
"OfjqHt49PMTuQ2VW5KbZ8pnb2XjvfVg8CmeOYeJZnLhBHMVIqxEmJo41WIPRAYQao0A5DtJJ"
"Yxyf6ZFzDI7N8NXv7NgFfM1aOwmUzyeQBR54+h+/8si1167hls/ch5KSWIeYKPhpMS8kcVzH"
"xCwQ0NH77iSUIt3cxuxMmbf372LTlt+io72V4RP7qc2XicN5olhDHEEQgF54rtZmYfcBYTVG"
"OCgPqoUye48O0wg1Dz/R/0NgmzHmJDAJlMQFSocmoA3Y/NwPHnziuuXdrPnkFqS01GtzWBsv"
"nEQMFos2MdYYMIpYRzhuGmMMI2+9zMv97/Clv/hbfGWZOnuIOAgxcQOtQxqNCB2GGKNRYoFE"
"rB1MFKGFpDxe4PXjI2AF3/zRa9uttf3W2mljzHBSK+WBirhAEnOT2qcD6AXu3vpPf/KNmzdu"
"YsWa9RhrqddKCBv/TBCLdYBUaXQUUDy6m5cPDrP517/A6KF9/Orv/RGukoydfB0dzGMkhI2A"
"xlwVE9YxxmKNJtKWidFJXntrmNgVfP+5w3uBA8aYEpA3xhQT4MXF8lt9QK8aJdVg6cltP8nf"
"tcrdXClMkG5ycNNZhPIIgoWMbgEhJUG9xuSh59n/5gB33nkr3ctWMfTO6xQGB7hx46dpae+m"
"NHkWdIyIInQcMDVToVYLmRif4o0jA8xW6sw3Ir7/wlGEEE8ZYwrW2glr7Vgim6mkCWoAsbxI"
"r7oIvgwMAMG169biyzoHtm/l+L6d5E8ewXM8pJsmjjSN+Rpjrz3Lzv7TrFqeQ8oI31e4ymFm"
"eoK+f3kYIX1WrtsMcYySMUE9YqYwS/9bpzg5nKentYVl3a2s7M0ipdwNaGtt2Vo7nsimkJTZ"
"ix2clh/QcC+SqAIVhUEIwbVrVuDGU4y/9ybHD+xiZuAdgkbEqV1b2X9sgg03LqezqxllDQ4C"
"RwmEgFK1TN/3vo3X1MbKDVuYmyqz89XDHBucpLO5iY6WJrSw2NgyUaySlM0NIUQ1KeJmEyw/"
"U43KnzM10Ivx1sQar3sVeG04TVla29pxw1mOvNbPsVd/zL7DQ6xa1kZTswNIYiSx46IcB1dJ"
"sIapuTz/8c2/ozwf83z/abJ+ilyLTxjHGLsQ94UjWN7dgrVWG2OCJb1AsGQA8H4/4FxCO2kB"
"I6zF8TII5aCas+igBnMlVq6EWEdsuet6/KZmTFRbiAZuChMblADPdYgMiMhSi8o89dhDNDua"
"eKF5Q0mR1PAST7qMF4oYYyKgYoxpfFBH5lwCeAvY2kyBdFsvKpMjDGqoFBBFWKFImRArNZo0"
"jlo4VCEdrHRISYfIBkgJKU9CoDDWYFILjY8ORQJE0tnVRUdvDzNVjTEmstaWE4cNlu76UlOX"
"2NCf+7e+w7Xf/bUN99Co4fkZlN8K0kUqB6Ek0kkjlUIIhREC4aRo7bqa0vgptI4IIwMClJII"
"BFhQQuL5ivb2TlasXsnI2AxPHyjy3R/2PWKMOWitLSZRZ7GJ0eeTEJdIwEmSW04p+fCOf/3z"
"32jxU2S6l4HfTFArYaIIwoXSw0iwsWHl+ns4tee/mZnNY0ODNYLQGIxeyLheU5bW9nbeGzjH"
"ntMR27ZteyyKojFjzLS1tpCEzXzixIv1z4cisJjgMkCnkrIDIb5+4KkH71PKx2vNIVIZGvUy"
"WI21ljg2rLxhE6f3bmV2Ko8WAAYTAG6abHcPJ0+d5ZX3Qvr6+v7ZGDOptS4BpSRxFYHpJb1w"
"eCEZXc5cSC0ZrWSAdiVlm8XufeOpr6McD68li2jKMj9fJQpCrrn+Nk7t20plukAUamxTC9mO"
"HoYGh9n+dpUdO3Y8GkXRuLW2Yq0tWWtriWRKyZpPQnl0Iflc7mRu6ZjFS1q7NNCmlOo0xuze"
"/8TXEFZhc904fjPLV6zm1L4XqVRLyHSW4cExdr1XZ9u2bd+Jomgi0XbZWltJdnrRaeeXxPv4"
"YuA/6mjRWTwRIUTGcZw2oNPo+KWnv/VlfL+FtZ/9TcaPH+D40WO8cKRCX1/f30dRNJMkpBJQ"
"EUKUEgKLwC8U7z/yaPEDJ3ZKqbS11lNKNVlrW4wxPcaYr+74wYP37TtwjG89vusva7Xaoo4r"
"ySol12oik/qSHbcXC5sfx3hdAtJ13ZQxxrXWetbaNNBqrc0k/pL0ozSSXa4k19rlDnM/rg8c"
"Sz9oLEorlQD3k3uSE2gkux1+FOAf5yempT6yuBYTpj7PMT808Ct2xa7Ygv0PzFW8GpCLx+gA"
"AAAASUVORK5CYII=")
#----------------------------------------------------------------------
ToolPanel_Gizmos = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAACXBIWXMAAC4jAAAuIwF4pT92"
"AAAAB3RJTUUH1wcLFzgWXOxheQAACbJJREFUaN7t2VvIZtddx/HvWmufnr2fw3vMnDqTmRzM"
"ZGYEkUBFQ0ARVPBAKHrRGy9KL6xNtK1KqqAoHrAiaYPFGMQDVsSLWgtiDaaVWEGkmdKE2saM"
"k5nYTGbmnff4nPZhrfX/e/EQ8DbvOzzxIn/Yd8+C34f/3mvt/37gvXqvjlTmKIv/9Nm/KF7+"
"z8uXdsZ3Hhqd6J9XK2Vxj1svB9WxxNqimQVcbtAutnfeOrjeTdr9MOHK2VPnvvJ7v/371941"
"wCd/4+NnWuM/m5yMP7RypiqH6xXV8ZzJrZbRyRJSCFOlOWgpV1LqnUgInoObM3Zfn2JAwi5P"
"PfM7z/7BuwL44Ed++sULP3z2sWvfvE610ked0qtyNBqMsZhUMSIEIhKFMDVQdYQY6Pf7nPue"
"e3nj6zdDe9ldfPrpZ147CiB5pwt++alf+q6b/vpjONi4bwMvLb1jjrSCJLeEHShPFCSFo21a"
"QhOQfUd1epU717Y5deIE4xtTVs+UyfVvbT8JfPQoAPtOF7zyyisXhhsDsixh0K9o7ngGvRGT"
"7Zrxdk099ozvTDm4PaGdd7TTQONbdq8eYJoMR4JVi04t/eO9n/nn5/8pXWoHqmF5NobAbLsm"
"tobJVs21b7yFw2LTgImCG4PNHIoQglD0MmY3WzZPbLB/fUpeJkRR8uNm8/Nf+sL7gX9bWgeM"
"JcuqlHwlIx0ZTCIkFVAJwUZi4vEaCEmHuEg6hM62gLB6vMLlSt5PQWC0McAnzY8ttQPz+XxH"
"4ggVhaiAI3jFoCiCREOcCEWREjshBEUaxWlODIKI0rYRiYqI0j+WPnoUwDvuwGxab7dzj3oh"
"hIBNDKGOaADUEkIkdIqvA+3UE+ZCvd+R5znTvZqm9ogXktTS7Ld4Gx554YUXyqUBLp6/sDXb"
"nxOC0rUBohK7iHhFOoGoWGfQYNFOkSZi1bF6coR4g0RhPpkTfGB20FCu5uXffvGvzy8NcOGh"
"i9+YH7ShmXmyLCFJUlQAgdAGCNDNA2EeEBF8G+nlORalmwdSY8myFJc6sjSlGuYczCY/sDTA"
"E088WYdZvJ4XDpc4onhijEgQANQYTG4QDxhoph1lWRGiYgxEUQigQQGLjRn99eK7lwYAEM/l"
"8e0aP4s084A2EANohK72JAk0ex0mWEIj5HmJBJAgmM4x2W1p9wUN4KPHVVxYMiC+WDc1aVVg"
"jOLnim8ieIt24DTBiiM2kISM+qCj249oUJraYzMhH0C1mtHOW9KevbRUwKmTZ/5l58YeooJN"
"ISkMJolkA4vJFZsrXj3RNKT9lLRymEIpVnLER1QFFV1sxWKwmR19+o8+faid6FCAP/uTP391"
"sjV706rgfSCEgFEIXrDOEHzEZko9CwyHIyQoMSqqiwsxqIKIkGYZvUHO1Wv/9fDSAADR8/z8"
"oGXvzQnTOzV7b8yY3KoRlK4ViiqhO/BkWUoza2lnnvm0o5l52ibQtREVxdc1Rdljb2//7FIB"
"iU2/dOfGHnmRc/LSOqce2SDrp+xem6AqCFBWJUWZkWYJeZGQOEfeT7EkIJa2DszrGusc49n+"
"ylIBP/KDP/r81n9vt2qExDmw0N/ocfzhVXau7BNDwBi3+LEBRVEVsiKjHs+xzpDlOf1hn3pa"
"k/TS1aUCfvGJj02JyQsmgRAULGi09DYLkixn/NacsiqInaBRMMYsDjwHzrrF8yCKs8nipF4b"
"LbcDAKENfz/cqGj2G5yzGBSjhmMPrrDz1gHrJ1dpZh1RBLDI2yd2UCRGRAQJoDXM2kl2mAzv"
"+G30/9ZgNKyKviVqXJwDGKS22NyQ2hxrUpCAwYAqFoN4JbaRMAdjhKiCy2AwHK4tHSASTlvb"
"J82hmdQUvQKZgXaOalgx3ZqjIhg1JDaiURAU44BEManBeMEYh8Ecaj4/0i1kc3tq8/QKapVq"
"rUcbOmaThlkzpSgzVAWXJITOgwWTOVyWkGYpzaSDKKgscofgw9IBWZE+OOj3wUbECyvvK+nf"
"k5H3UtJsMerGGGin3WJrjYuLxBC7SIygalARfBsOlg4I0Z/0UUmzDJdZ1AJiiSESW6GrAxIW"
"70l+FnHO4lIDKCoGVQMqiCpdK8sFfOaZp51JOSbBMxxVoAZjlSR3uCQhSRwuMbSNx/vAdL+h"
"m3cYNRi3QKC6mBmawGx3dnOpAJfY40lprQQlLwpiK1hjURbzsbGWYlBQjgpGG31C1yKizPbm"
"0BlCq8R2MSPPpw2ba/d8Z6mAyy+9tDlYKZAAsVPCPCKtoi2AIYaIioARqtWKvMzJ8oS0ysiH"
"DpMoXdfQ7Hds3xhz5sy9l5cKuLV9Zz1Lc3wTyPIUErC5gRysMxT9nGI1Q1EQSDILzmCdwSUJ"
"eZHi8oT+So9+2bv9m7/2W7uHyXHoc2BlMDprswTrLMFHVBYTmXghyVNcP6FcyVEEZx2+TdCo"
"2NQQWk/Wc3gRfBdQXeKHrbcr0JUuc8QQ8fMOXweascfPPU4c1VqPtfcNWLt3QFI4jj04Yv34"
"KuoXz0o+TBAECUq7E75y2ByH7sBosF4RIcjildioZVAOkGgQNahrGN1fIa8L5UqBsRY/DqwV"
"Q3avj8lMhrWWnVt74d5T931+6YC6qaWaGWxqEQMPPHwf5T05e3HC2rmK6UENY8fm/SP8vuCT"
"QNd5zBQ2Hxhx+8oeqoCYL3/qdz91+7A5Dn0LTcbT3aarsYlldX2FEDwmU4YnSlxmWT83pGlb"
"1o6vMDjTQ7xAUHqrGWnpsLnlYGtK0mSfPWyGIwFUwuvzeYtLDf3NnKyXIEHIsoRqs6AbB4TA"
"1vU9wliZbXUMNkvy9ZwgQuoS5jvNS4//5Af+4V0BfP/3vf/b0inFKAc1+BhJnKOee6QF6ZR8"
"mNA7YQhFTXUyZbbfEJrI9GbH9o098TN98qd+/HF9VwCffOrXb4aJvKZGCW0k+AheKUrH3htj"
"+psF21cmdLXQ1RGXJnSTwM6VMdPdCX6bz/zd33zh348S/kgAgOnW7I/rg4aujpTDjNl+g6ow"
"326InZAWKdvfnLL77Zrp6zWTWw3ttGXvav3VR7/30aeOGv7IgIfuv/jsnasHr4VWwFgmuzWz"
"vZZilPHm13aRqDSzDnXKfF6z8509tr81+Y9zxy/+xIc//HPd3QAc6X9igJ/90IceaNPxv568"
"tHGiKBJ8F8nzDEWoxx3lao56uPbym+Rd+ZeXHn7kI5/42C/M70b4uwIA+MSvfPzs1etX/nB0"
"uny8v9Y33nvSNCFKYL7bMt1qXy5t/1c/91ef+8e7FfyuAt6ujz7x8+dfvfrqY84l54u8iNPZ"
"5H/uP/3Ai88999wrdzv4e/X/pf4XB3h4NJVf36kAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
ToolPanel_Menus = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAACXBIWXMAAA6cAAAOxAF19oSB"
"AAAAB3RJTUUH1wcFDx8Yk3t74AAABbZJREFUaN7tmM9vE+kZxz8z9oxnxjHeJE6cFJJNSTZG"
"hCK12hVBFNREQliCvSAOiDNVL6jNsf9BDxx65MSBU6hU5AuCSEFq6UoFyRACFcUWoZtkjddW"
"mjixEzyx533fHkzYZUXAAueXmq80mtHMq+d5v+/zfZ7nfQd2sYv/b2jvelkqldR2nXAoFHpr"
"zv53DTIMg+LyK/7yzXNWCaD7/A1wrYjaHl8f6W0ooXfOTClFqVzlm5cGfstp3Oopl9NfyY0n"
"oGkaChASNNk4h1LTUaqx6tTXj7hCSoEQgp4Wk98djxIKaPzx1F6UlPS0moQCGs22zt6wQYuj"
"02zrSCFotnU6Qn5+++sokaAPIWp2pJQopVBK8ejRIy5dukQ+n+fcuXMIIZiYmCCfz5PJZHj6"
"9Cmzs7NkMhmklGQyGdLpdH0ElFKvI6AQQiKkpOR6/Kq7iel5l192BYmGDE4ebGZwfwifrjjx"
"RZjB/SGkUgzuD/HdgsvETAkhZc2GkG8RUEoRDocZGxvj4MGDjI2N8eLFC65evUoikaBcLjM6"
"OkoikUAIQSKRoK+vrz4CmqahlEJIgZA1x5OzJTwhWXxVJWBoSCn5V6aElIrnuRXKFYFbETSZ"
"Gp/ZPoQUKCVRsrYAa9ePCZw4cQKA1tZWisUiSikGBwdRSnH48GFs2yYQCDA3N0cul2NxcbH+"
"JEaBFAqE5PvFVeaKFRZWqnzeavHdgsuXP9/DakWQ/HYJT0gefLtEvljhy549/HNqkfYmA8On"
"8XmrRWbBfZ0DCvk6p7q6uohGowwPD5NKpejv72d8fBzTNBkeHgZgaGiI7u5u7ty5QzweZ35+"
"vr4+UC6X1Wy+wO9H/4NmWA1LuBbD5c/nv/gkG9Fo9MN94AcJSTTZuKqxJqFN6QNNhuJPp5p4"
"WSij+82PdhC2fQT9kvb2dgAqlcrm9AHTNJkuVPnr8yC6z1hHbB9Gq1bkD7+J0NzczEbgvXuE"
"ivShGTafEvSqz2IjsX4Vqj0hhKA9ZBCyakNfzJXfazDW4dDfbnPzyTwKkJrcfAI/EAEhJLap"
"EYta7G0OEPBDZ9hkeVXQEjQolj06wyZzy1VWViUTsyX6oxaelLVSrG/sxnbdRrbGQAiJFIq5"
"UoXU9yv0tlm4VYnp09BR3E0XcKuSv6cKtDg+3FWB8CTyTffdAgJvIgAIIVhxq+QWXfZYPiam"
"i2gosgWX/NIqnifILbpUq4K5YoUDHTZhx0csatf2P2ILJLSWA0qBJyQz/63p/t8vlwFIZZff"
"Gv+3ZwsA/CNduz+eLb75JtUWEFiTkK48hAA07aMdKG9165L42C+6ibSVUbrxUcY1FJ2f/QzU"
"q62RUNAyOfXV3k92MjU1tUVVaAegrtP6w4cPefDgAfl8npGREZ49e0YsFmNpaYlCoUAkEiGX"
"y9Hb28vk5CRTU1MEg0EuXLiw4QT090nox7AsC8dxuHHjBtlslitXrnDt2jV0Xefy5cvous7o"
"6ChDQ0McOXIEn8+3KRGoW0LBYJBwOMzKygqapnH8+HEcx+HQoUN0dXUxMDCA53k8fvyYZDLJ"
"2bNnt47ATyPQ09PDsWPHaGtr4/z58yilCAaDxONxNE0jHo/j9/s5efIky8vLBAIBksnkphBY"
"90RWKpUAaGtra0gV6ujoaMiEf/pnbsdXobqTeEcR2EmoS0Lz8/PMzMyQyWQ+aPDJkydcv36d"
"bDa7KQT89Uhoenqau3fvkkqlGBkZIZlMcuDAAbLZLJ7n4XkeSin6+voYGBggn89z69YtLl68"
"uD0iALBv3z6OHj3KzZs3qVQq3L59m3Q6zenTp0mn05w5c4b79+9j2zadnZ0IIbZXDjiOw+Tk"
"JLFYDKj9NfP7/ViWRVNTE4FAAIDx8XHu3btHJBLZFAK7fWBbVqGdhHWrkGma5HI5FhYWPtmJ"
"4zgbRuCdWnFdV1Wr1W254nXlwC52sYudg/8BPBDU8FaMEoEAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
ToolPanel_Panels = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAACXBIWXMAAA6cAAAOxAF19oSB"
"AAAAB3RJTUUH1wcFEDkIWmgM7QAAA6pJREFUaN7tmU1IKl0Yx/9qZgaihhgVtOh7UQpC0ibU"
"jbRoGbRwI1EERQYRtH3fhYvAWhUERdDC3OTaTVAuYmhhaItaCC1skdGgMTPYMI1z7srheq3u"
"LebN8b3+YRjOB8z5zXPOc87zHKCppv5uaX6tODo6+qfOYzo3m83ey8vLqkqz2Qy/33/ucrnO"
"PwRIp9PE6XTWbfTRaBRDQ0MYGBiAKIqQJAmSJIFhGKRSKQQCgaoxa9U2JR4fHzE6OgqNpvrf"
"GgwG6HS6mv6qAwAgD75QKODq6qqm/me1qHVxsiwLiqIgCAIMBgP6+vre7PeuBTiOQzKZ/O2H"
"FhcXIYqi4gAmkwkzMzMIBAJwu93Q6/Vv9nvXAgzD4PT0FMfHx+jt7cXd3R2CwSBsNht2dnbQ"
"3d2NqakpAEAkEkE2m0UwGMTk5KRiEC8vL/Iifn19/RzAz1pbWwNN0zg8PESxWEQ4HIbFYpHb"
"V1dX5XYlAJLJpOyBCCEghEAUReTz+a8B6PV66HQ6SJKE/v5+UBQFv98PjuNq2pWQ3++vcqGi"
"KKJUKuHi4uLP14DJZILP58P09DS0Wi1MJhO8Xi+Wl5dRLBYRiURA03RNe92VTqdJPbW1tUUk"
"SSKCIBCe50mpVCIMw5B8Pk/i8ThpiH3gM2p4ANVuZIVCATRNy2WbzdZYAJIkoVwugxDyoXdT"
"LYDdbofdbpdd6fPzc2MB0DSt7BTiOA6pVAoej+dbADo6OmCxWKo2sk8BsCyLRCKBXC6H8fFx"
"uFwulMtl1VlK+xFANBpFKBTC7u4uWJbF2dlZ4wAAgMPhQGtrK6xWq2LnnG8FaASpLqjf3t6G"
"w+GQ/X/lLYoicrkcVlZWNF/yQt+prq4utLS01MTAPM///6ZQE6AJ0ARoAtRX7+4DPM9jYWEB"
"w8PD8Hg8cDqd2Nvbw+3tLZaWlnBwcFCV8FIyoaUIgEajgdFoRFtbG8bGxiAIAoxGI+x2OxKJ"
"BIDqhJfqAAAgHA7j5uYG+/v7yOfzmJubw/39PSiKAqB8Qquih4eHqqNEJTP39PT0OYBYLIZy"
"uYz5+XmwLItYLIaRkRF4vV5wHPefJbR8Pp8cSlYenuffjEfeBTAYDAiFQnLZarViY2Ojpl8l"
"g9f0Qr8om82ip6cH7e3tWF9fbzyAwcFBxONxzM7OYnNz82uLuN6amJiA2+2W7wcIIeq3QGdn"
"J66vr2vqeZ6HIAi/t0Amk/k3k8nUk+E8l8t5T05Oqv545Z5YzTOmqYbUD2LQSUJ+JkydAAAA"
"AElFTkSuQmCC")
#----------------------------------------------------------------------
ToolPanel_Sizers = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAACXBIWXMAAAsTAAALEwEAmpwY"
"AAAAB3RJTUUH1wcFEQkvIeblQgAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAAAmElEQVRo3u2XQQ6AIAwEWcP/v1wvHghRAtIUMTNX47aDFjElAAAAGMb+0LyL"
"hDqq2US46hy9eAxq9Jn71uyhlNS+dpcjJTNLKu8dzSk4wt+fq1EznzEIF/AGAQQQ2JzwD9ls"
"DgD8bIjjT6PD3X/sNOoNAqvJodWkzQXqIW5tAp3SfQKtEK09psRuowwxAgggUMMvJQAAbM0J"
"LEI1OD2LmAsAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
ToolPanel_Windows = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAACXBIWXMAAA6cAAAOxAF19oSB"
"AAAAB3RJTUUH1wcFDx4V9NE2HAAAAw5JREFUaN7tmM1rVFcYh59z7yDOkJAK0YkaKLaSdNQp"
"CDYj3VTaXQq1FItm3X2xKLhz4S4QUPwfTApCoJB2pyhoi00TPyijSEM+pNHY4kyuIcy993y4"
"uB9J0EkmM4sYOD+4zJ1zfrznfc77njMwYGVlta0lGjENDl02W5HchfM/bZhfZr3Jm7dumyuj"
"48x2n9qS3f3mR2XOfvcZX574QjQFcGV0nB2Ffl5WXje8aK0yT7v26s7rXCcq29mQb0ehn7ND"
"V5urwODQZfNP17csvIqS9/99hBDOWz5pBNn9h9Ne1EsvcQ/21V1w4e87fHS0m/lnjfnyfacY"
"PNFr6rXTuhVQWhOEEoC+D17RvqsTIRwcx4k/BY+fTjFd+xjHjUI5gJSSQ/tyHNqbS2OVny9T"
"nl+OY6oIPvYBlOeX17wnPqV182fAaINWUYCvjn9KT28PruPiZlwymQyu6/LrjbtMPdGATgEC"
"KendsxOpVBqrd89OHsx5GGPS8UBKQim5dPIA1//6j++P7ebiL9MEUqY+0xIABm2iAEIIHMfB"
"caMKJI8QAq01iJWFpJQorfn5zxfp2Jm+LmScmIo3RUrJwzmPkXsLDJTyjNxb4OFcdC4S30bX"
"n7MugIkAtNEIR6xJfDWAiT0JbBgqtNaEoUqf5LsxINWKr9CVY6CU59ofzxko5Sl05db4jDEt"
"VMBEbQTw+2SZmReVOHEXJwa6X57CmCLolYVCKZmcqXKmlE/HJmeqhFLCqgqEcQudG3nMo2ev"
"mZyppuOJryUAtMKoEIDfqj1QfZfpExxXp74kgYnpChPTlXdsikLG3sSXaPV76tOqlUOs0DLY"
"8O7XcqX/a2QJZifqejv2HcSrVsg06DMtAUgfXfM29esZAB0fHqk7v+RVgVrDPpRsDqCjLcvn"
"uVlKBcnm9X/9qf2b8/k1nzayzVXgh9NfEwQBW6nFxUXGxsaau0a3gyyABbAAFsACWAALYAEs"
"gAWwABbAAlgAC7BNte7fKr7v43nelibo+35zAMVikeHh4fdil4vF4nZvFCsrq3p6A8PNt8Q7"
"ja/MAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
ToolPin = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAACXBIWXMAAAsTAAALEwEAmpwY"
"AAAAB3RJTUUH1wcJAQIldDfJXwAAAvhJREFUSMftlVtIk2EYx//f9n1z0+k23fSbm5pFB6Uj"
"qWGZqIQJ2QGKCIJuiq6qu4qIIKKiu7roRoKuCgoNQxMPLQLNWWlqqeWpppMv59zZHb99h66C"
"itycIt30v3xf/s/v+T88Ly/wL8W2Pd7Ff2ovFB0TkuXWIBa74Hq71nuvnx+ThyyEtLggLG4/"
"Msjq15iVufmvCW3eK0kaHVoRgB8e0AW6W8Z5W5c6lXQBMjXEBRt4kgMKa72iStuUtLn6MZJp"
"k0Sl5xMGAIAw821dwGF7Ev0+VgT7JGSeESgEF0SvFfDPgVXQkBRUWiT5ZbeoHfseEcm5kYQA"
"AOCdHDJKJ8yf5X4mlXd+AEuqADcDGecDJQYguO0Q/AR4Y8k3Ymf5ZXnN1YaEAAAwef9SndL5"
"5ayUNgJyCghOQ4y6wBI0+AgHBTcLWO3gHIC/+PixDRfvPPvpJZcCsMm07GDXDEprjNCSgJLU"
"gVJngwy5kFRQCXs4DRLt6IJ3ZKAzxbixP+EE1o6HL4bfmw94PG6IiIKdZVCYrQZtzJzSVZy8"
"Nw/tc1KeZjVsLBT/9MZNwHwwZc321pcXHz4IadCBQDiI+bk5OC3TXkn65tq8bbUjsfwxE/AB"
"9w73YHszNdNm8LAColwUmVsrwKtohMPiTf2WI9fiNfjXBExPx1puqPFB4GOjkXcxBhkBRHkB"
"HmYa6ap+eB0eGGqvzC9lvH8F+Jmxc5nseFVoPAjCUIS5KR8WuDAiwSjCAR9S0rWwW8a+LBug"
"M+bXJaXXWJQyxf4ombLVPf89p8/8FUeraaQKs5AqsyAa6FMAXsYDxN0i0TeaMfGus37C1LGn"
"qlhOURQIAlIEI6Tg9GUdWnPmdkvCCX7rIG2TMy/XUKJRq1oGbrRe4G0jVQIzdNrR/WYvqc1Z"
"0juKqabmp5syMjRCb49J8+u5x9ywXbQOUSsGKJVKV9nuotEVF1pMGo06YmqtP7Fcf8yfSqFQ"
"vC0tLXJlG/KaVqV7vT7r7tRkn2zVxvNfAPADQ+E0joDDAqQAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
ToolPinDown = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAACXBIWXMAAAsTAAALEwEAmpwY"
"AAAAB3RJTUUH1wcJARYBZpr62wAAAsxJREFUSMftkltIUwEch39nO2dOnW7q1KObt6LUYaak"
"RmWiEiZkJRS9BL0UPVVvFRFBREWPPfQSQU8JhYahSWqLQHOS5iV13nM6OTp339zmPDuXXktT"
"tgx6qN/j/+H7+OAP/N+/Oba94SA/0qET7TOSnbKIjQeuv3uP5+7VKfmaiZCW5gfFovphNi3b"
"oMjM+Uiosz5I4um1HQn4saFkf0/bNG/pVsWRTkCmgrhqAU9ygK7OIyrVLVEFNQ2IofUSZRof"
"sQAAhMW53X675WVoaaoE1lnI3EZEC06IHjPgWwEbTUOSX2WS5JQ/oIqPvSBiMtcjEgCAZ3ZU"
"K50xjMt9TBzvGABLKgEXAxnnBSX6IbisEHwEeG3ZHHGg4qa89nZTRAIAmH1y46nCMXFZSmsB"
"OQUEFiCGnGAJGvw6h2huGTBbwdkBX+m5s3uvP3q9kUFuJ7DI1Oxw9yIO1WqhJgEFmQxKlQ5y"
"zYmo/CpYg/GQqCdXPcahrlht7mDEBebO52/H+gwn3G4XRITALjPQpatAa1PmkyvPP7ZB/YaU"
"x5s1uTpxK8aWBcyAPnW5v7Gi9PRJSAN2+IMB2FZW4DAteCSJBXVZ++uMCGO/LOD9rmLXcEcr"
"tdiucbMCQlwIKYWV4JU0gkHxftq++jvhwDcVML2du7jR5mf+r81a3sloZAQQ4gW4mQUkKgfh"
"sbuhqbtlCxe+SeBjpq6ksNPVa9MBEJoSrMx7scoFsR4IIej3IjZRDatpauK3BcnanKdRibUm"
"hSz6eIiMLXTZljK+GL7hTA2NOGEZUkUqRA19AcD7cAVbfpHonUya+dzVOKPvPFJdKqcoCgQB"
"KQLrpODwpp7KvvSwLeKCn8zxeY6sTE1ZgkrZNnTv3TXeYqwWmNGL9p5PR0l1BhkOfNu1tL7K"
"S0pKEPp79Qk/3t2GpiLRPErtWAAA5YdLJv8I6K9tfKQvZqeM79ccK3IFJvEiAAAAAElFTkSu"
"QmCC")
#----------------------------------------------------------------------
TreeBitmap = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAA4AAAASCAYAAABrXO8xAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDhwKdc1BSQAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAACsUlEQVQ4y2WTy49URRTGf/W4t4e2u8f00E0UZZgHCAECwWAiJsYHklE0UWNM"
"YAEJceGCsCNhR/gzSFy4IERhCSqJRCTIZgSJCS6E7llglBnnAZluw517q85hcRlH8CS1q993"
"qr7zHTOVTamiAIgqiqIqPF3GWAwGawwAfhmKKgQp+OrmWfpZn6zISFxC6lOiRN7f+h5rG2sB"
"V4LL0Ex/mtOTZ/hox4e0a61STCJBAlnIuHDrW0ZWr2fPhrdxxmNFlSw85PTkGT7bfZjnG8+R"
"upTEJiQuwVtPxVf4YNs+phdnuP7nDUQjVjTy9c1zfLLzYyq+QmITKm6A1KU443DWYY3FGssb"
"46/z0+1rKIoFyIqM5qrm489bvHVY4zCPjfhvDdWHuD13pwRDDIgKokLUSC/vEaRAVFDVlYMy"
"1hqlM9cpQW8dUUsjFpcWudz5kQfZA6JEugtdpnvTKyIoSyEvxyGqRIkAqCpRI1e6V2k+8yy9"
"rE+73qYx0CCqMNubY2RofdlRUQopKGKBotQqNTa2xzHGUh+o0661CBKIErk7f5eta7ZgDYa9"
"m9/h0u8/UMSCPOZU0yrNapOx5igDvgJAkMj9hwsMVht467HWOEabI0QJdOa75CEnSiAPOYUU"
"BIkUMZCHJS79dpkDO/djjSufao3j0K6D3JnpcOOPX8qLsSCPBUECs//Mcv7Xbzj82iESl2Aw"
"mG7WVVFFNCIqnDh/ksQnvLt9glpa47tbF7l6/RpfHv2CRqVRzhdTZrVMvENRhtvDDA+tY3Lq"
"ZxRlfM0Y+rJST+v/QtYYvMGwvCHWOF5svsBSyHlr05t46wkSmOvP46zHGVuu2HLHUgVE4ZV1"
"u/j81BE2jW1ksDrIX/P32Lt9zxMQUJqzAhta1RbHPz3G3wuzXLzyPbs3v8rESxP/y+wjHwdM"
"yMvIwOAAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
TreeBitmapButton = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAQCAYAAAAS7Y8mAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDhopgfCXvQAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAAB8klEQVQ4y62U3WpTQRSFv3POHJP0tJLTJMZISaJetDY0WrxReqH48w6CDyHe"
"iY8hPog1ir+INUK1YEvF2Iq1tETSqCUtITHmb7YXbSqScwQhezMMzLAWe69Ze4x7G7NSbVRR"
"hs0gwjAhaAdR1V9VLp68gEYzqHj66RnKEouWbjHQEAMlgNb91dbbdQpbK77YTOIUju34MWPu"
"bf05t5qnLU0iwy7RkQiVxjYL6wt8r3/DtAzmVvOeOEF6xN6XnW6HxOFjpNwUubcPeDL/nEAw"
"wIe1Arl3D+l0O77EAigAEelrRovGNEyKu0UKnz9y89oNYk6Uzd1NljaW0aIpVUskRhIeQuAv"
"BYAyLSr1HbKnp4gMjaJFEx+OkxmbBMANud5YEUxE2Jfl77XnStyhMKXSFpXGDrV2HWUq1r5+"
"odVqEbACvljVq9irH2VamHaQTHKSV+/zxGNHKJXLjMbCzOZzJKNJzh6f/j9X9NIyLK5mL3Pl"
"zCVs22Zm6hz55dfMTJ/nzv273ihhz8dejycihFSIdDh9cJY9muXF+kv0T2F+8Q0TJ8a9sf+S"
"YmJsnEcrj7HVoYOzZrvJj/I2t6/fIupE8JUR2Z886Z+8dCRFOpLqx2T/2NF3ohGUZZnUWrWB"
"fhVd0Sgn4LBYXEJ3uwMhNS2LpJvkN6H3I3eVaYw5AAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
TreeButton = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAASCAYAAABfJS4tAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDDc1tl9FcwAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABTklEQVQ4y7WUP0uCURTGf9f3SqKFlc0uDREtLg01lPQFQugThPRZgj5Bf0ax"
"gmiLoECHGlIjDVISg4SGaKtXwTTfexocXHqr4XoOl7uc58fhuQ9XNToN8cTDZjnKQVffargd"
"F62CVqAqAKFgCO1+uiRnVzEYaxtf1C/Rjjj0TM+qFYhCC2CMwTIZPbhkFGDxBedKeR6f60Rj"
"UVIr64SD4X9iIQAgIj+eXCnP1kaahbl5sudH5Ip5POPR6ra5qRR8dUOwTwPsnR6QPTtmeXGJ"
"3G0eI4Z2t0XhvuivE0Ejgp/FAqRTmzy8VilXKygUfdOnZ76GAz715+PtnuzzzgdriSSxyWm2"
"D3eYmBlHi/5FJ6hMLSOJeMJqJq6ergc5FhHLYWOEORbAiLGMFbTjBGj32lbBnhh0ZCzC3UsZ"
"49n5kwOOQ3wqjmp2m6KUsmuFCN99Z81uYi/9qQAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeCheckBox = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDQQLgpdX3wAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAAA2ElEQVQY043QMYrCUBDG8f97GFZEjYUaESwiSIolHsDGQrvXrkcQDyKeyBSK"
"iBZeQRZNIVjIKmtgQReEZ2IhLIRs4bTzG2a+Ef3xIOKFSgGM1DDRuN1vTPwpZjrPbD1H/jet"
"Q8340+Pr54gO7wBPqEMdg6v9ivM1wK2+0613nlAIgbfxWOyWABwuBzZHn4pp0aq14jcCbE8+"
"AMFvgBSCtt2ObZFRFKEchZUvsT35fF/O2EWbwpsZhwCGNFCOIpfOAtCsuImAf6kNadBzP3Cs"
"BuVMOQHFqw9/AAnUP+Hao6QqAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
TreeCheckListBox = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAIAAABPIytRAAAAAXNSR0IArs4c6QAAAAlwSFlz"
"AAANEgAADToB6N2Z3gAAAAd0SU1FB9sLHg8RIv7ecjgAAAAddEVYdENvbW1lbnQAQ3JlYXRl"
"ZCB3aXRoIFRoZSBHSU1Q72QlbgAAAO5JREFUOMtj3P54hyi3CAPp4PXXNyyi3CJyAnIMZAEm"
"BgaGX39/IQudvnn69os7WFXP3DUbmcvCwMDw7/+/zRe3SPNJGykaMjAwXLh/8cOXT6KiIjde"
"3ZDllP31/5eKnDJEVoxXFN1mBgYGV22Xl29eQdji/OIcnOxCvEKu2q6SghIKIvJw2Yu3L124"
"dxGumfHMuzPk+fnRh0dMDBSAgdVMfFTturL77ONzZEaVtZrVvuv7jWWNyImqplWtKmIqo1FF"
"dFQdvnnk2J3jZEYVDyvP/U/3yclVf/79eff9PQsj6yCIKpbXX9+Qp/P11zcAPRSkxqrj4tsA"
"AAAASUVORK5CYII=")
#----------------------------------------------------------------------
TreeChoice = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAQCAYAAAAS7Y8mAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDhc2uVbkBQAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABWElEQVQ4y7WUPUoDQRSAv+xOSCCFpIlN3MKIYqFX0D5Y5haCTYhYeAERb6Ao"
"FhaKRAsLrbxAUttFUmgCplgikXX3PYvNj4mQ3UCcxzA8+ObNzMfMJKqNO3V7LiaRZB4tYUE6"
"mca4Xy7bhS0EiZzk+R4pk0LRqdzjyxPGVhtPvFi7aX+2WVpw8OV7OqgJjAIiEquwqKBIDF4x"
"4aAzeYzmFSvE4oX0Cw7yQAP2zw6ovdYnSLAAVDVWRxURGeZH18dUr+7ZPdyj8dEYsTCbCp1Q"
"USmVqZTKfxWpYlAlrmJVHVvh5vmWVrMFwE6xiJN1hqz121lUMOF4c32Di4dL3oJ38tn8GDez"
"itFkWMkVOD85pZBbHlOhSniPh0eMVCFh2V/82uLquKb+Bv7tHocvT+O/vEGPWtjYtkXX68Yq"
"bCxDp9fBF38qF6hgMqkMtWYdCYK5fJuWbeNkHX4AhOgP4SlaxN0AAAAASUVORK5CYII=")
#----------------------------------------------------------------------
TreeComboBox = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAQCAYAAAAS7Y8mAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDhcL4T6oFAAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABFUlEQVQ4y7WSwUpCQRiFvzt3AhctrMBFxiXdRSC6k95B27btJaIH8AUC6QWK"
"NhG1cGUm9BhqKxdpBip0IRCdv0VUCl2die4/q2HOfBzOf7xatyb9txeU5xM1/XGP3c0MNuMp"
"SKwl0INwQHmvtFTcaDc4yBaxnXrrHg1qpVAEpmZqDUY8tJUOMMbgQLYDf0olDrA4gQUcHMsP"
"+Pbxjudu7/t+WCqzk0wvgJVLFF8nt5/jsn5F9eKcoT8kndxeeEdsoxBhPonsVoZq5YzrhxtO"
"j074LaU/L68Q5Ckc5yMWG1MrRLDv8fzy/rcVcfRYACMGFxMaVn8wMiOchNbgmRh0aj1F86mJ"
"VtHm268dxu8jK6jyfYKNgA+sz3ZB07qXmQAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeComment = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAALCAYAAAB24g05AAAAAXNSR0IArs4c6QAAAAZiS0dE"
"AP8A/wD/oL2nkwAAAAlwSFlzAAANEgAADToB6N2Z3gAAAAd0SU1FB9oEBggrF1QqReMAAAAd"
"dEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIFRoZSBHSU1Q72QlbgAAAN9JREFUKM9jvPfj3n8G"
"CgATLom5x+czbLi0kYGBgYFh3vEFcDZRBnz99Y3h8fsnDGpiagw/fv9gePzhMYOamBpWAxiR"
"vXDz5S2GZWdX4HQuPwc/Q5FTPm4DYGDludUMP//8ZIgzi2FYfX4Nw9df3xkSzGOJ98LTD08Z"
"pPmlGRgYGBgev3/KICsgjdNVcBf8/fePoWlHC94Qz7LNYBDnFcPvhXOPzzNsubqNodqtguHy"
"sysMm65sYah2q2BgZmImzgvPPj5jEOcVY2BmYkZhE50Onn58ziDJJ4nBJhgG5AIAiU9gp6jc"
"WfUAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
TreeDefault = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAALCAYAAAB24g05AAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDgoZ7eu1QAAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABtElEQVQoz4WQz2sTQRiGn5lsQhvUNkmLERvRQ38c9FQvVtGb/hXiXf9OBT2p"
"VFBiUgxIye5mm81md2Z3Zmc8RKNUwef2vfA+vHxirMeePwiLkPffP1DVFUWlUEahK8VGa5MX"
"x8+5jLwcDOMh9wfH7G3fpH/tOjtXemy1t5hlM86X538JgsuBtiXaalK9IFUpqUqZZQlpnmKc"
"+b9A/ZydqpQwiwjnIa2gxcnBA6ZZSFIkOO9w3tHZ7BBERcQ4OcM6g6ktw+lXOu1tvsUTkuUF"
"pdE8PDzh2f5TcpOvy847PkdfCE6nn7jXv0uUxyhTIBCMojFxNkOXGmN/z659jfce6yzWWZx3"
"BKUpcd6RV0vyqmDQ2+Pj5BRdKm7tDqi9oymbeDzOO0xt0FbjWYkCbTXaarJyybyYk+QJO1d7"
"mLbh1aOXALyevKGsS5RRWGfxeASCQDYI4ixmoTOmiykX+ZzKVhhrcN6tpwshKG2JdXZ1I5BC"
"0my0CPKyIFyGxNlsVa4NdV3TDJprwdHuIaNkhBQSgUAISUM2uNO5TXDQ3+ft6B2/fuGcQ0rJ"
"k6PHa0F3o0v3Rpd/8QPZ/Bl4qenM9gAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeDefaultContainer = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAIAAABL1vtsAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDiYubGd74QAAAfFJREFUOMvdlEtvEzEQx21nN2ojoHk0IoiGtqr6ONBT"
"uVAQ3OBTIO7wneCTwA0kOBVUKlBIadKiKPtqNptdv8ZjDgEURZtKCTf+N3tmfv7bHg19dfS6"
"UW6QRXUWnjnrtfXN+sbCCLSGkX/Wf4Nwcne9zDv6+UkZlSnONReKLxWXnx88m8NFK2jdax6s"
"lW83btxcvVZbKa2ESdgb9eZwIUAKELEYxjyOeRwmUZzGGvUcCK441zzmsZf43sArOsXDnfv9"
"xIuyCC2ixcpyZauyNY3wM/80+gGotYFW/3ulVO4E3Wh0KbV4sHv4dPtJqtNxPVr86n/LQRz3"
"v+w37vppwHVGCW37p0ESCik0/PZvrLHWAgIgoMWci0gt0WKqRqnKmrW1z91jIfmdetNYdJlr"
"iUWL2mgBwhILCDkIAUKASORokA2iNFq9XtMl/fLhC0LIu+57aSTXHBAssZRQhxVyEEESDEXS"
"H/Yv04ECpUH/dUsplSDHJ1NCGWVuoZiDSGXmjbwgCRUobbQxxnXccWivvtuO2owySiilrMAK"
"m5WNHMROY/tD++P4RRCRMfZ479E4VF2qVm9VZzU4fXP+NndeaITihFs78QWTOumdOLPYLnNm"
"lV3VnZTQyaUldtrzn4TJkHN1zZRyE5xO1BHAFx4W59HFL52bPckdnR6/AAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
TreeDialog = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAIAAABL1vtsAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDSwcXBl8sgAAAjtJREFUOMvtks1vEkEYxt8ZhgV2gX4wtKSCQNLU9KAH"
"Y62nJsZ4Uk82/iXGP0fvGtuzGo3Gr0ST2rQ1pmrirtCuLOxWWFhgd2deD2jVirHi1ecwmTfz"
"5jfzvPOQ+5UHK2ur8Vg8EAEcTn4YACIAIMKVU8uRxJI6xacopSktlUlP+hjkxqfTWpowmojF"
"IUJSakpRooRRFmUsypBIPpaZyczwMR5C+GT7KQMCrW6z7XXqe43ykRIAlidKZquWncjanr1Y"
"nHe7ru3ZqqLansPVzJb5uh/6ZwqLpvvJsD8iASpREkIXSgvFyaPT6WkhZavrRoDqDYMirTfr"
"dttpdd2e32s0G6EIASAMw7Xqq92mGYhAomRCCkS0OlYhV6h+rvrCf1F5iYiUULNtDswTIAhI"
"CLFcqx/2gzDY7G4BACEkEAG5vn4jN56DUaXbOitmiuVsaWSEREHhn/Uf8V1sf9fstTy/Q8mh"
"oL7wC+OFgwjP78zy2f3y2fZzwzEunbyYjCV/RXyw9SGvAACJcrDeeXNXr+k7nd2l3tKOs+O4"
"e1zlSGWe5+PROAAgyGGzQJAoJcqH7x9ZnqVosQRLqEpCN41yrnT78cpGZVNhyqBncNlBBAJI"
"lEKKuezc+rsNu2afnz+nRtVaw7p579bisdMnisdbPVdIIaSQiMONCCkQgCf5tQtXtZimRBSB"
"cvns5Z96vpkdglCYsl1/C/AVX+/84UcA8gcRXMtkNX7ILCD8xsiPB38RLcMxemF35GhWnOoX"
"dDYswVEEFVoAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
TreeFrame = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAIAAABL1vtsAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDSw5Fx2o9QAAAXBJREFUOMvtkM1OwlAQhWcutwg14E+LEA0CS/caXfkU"
"xsfTN9An0JVuNEETceGOCrEKtFoKFHp/xoUJQaOEYOLKk9lMzpkvM4NnjfOT6mlqISWUgNkU"
"SwFEAEAEh9sHifS+uWavMcYyixkruxqTKCzns4tZ5Cy9kIIEZsxMMmkgZ9zg3OCE2l6y1q11"
"e8mWIC8eLjkgdKOgN+i3XzuVjTIAVVbKbvclt5LzBt5uaSuMQm/gmUnTG/i2adXc+5GM94q7"
"bvjseI+EwDRpRLZT3imtbuazeaV1NwoTwOodhxFrB22v53ejcBgPO0FHKgkAUspq8+YpcIUS"
"mjRXWhFRq98qForNt2as4qvGNRExZG7P/TgeAQkIEVthayRHQoq7qAYAiCiUwKPb48JyAeZV"
"3avzklWq5MpzIzQpBr/WP+If8dcIoSUiG9cUBP/JMBgn0rNs8QmBgJMtAX1JjwOTFp8+80Xf"
"BrjjO0MZzf3Lht98B9fgtCT+vOW+AAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
TreeGauge = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAQCAIAAACdjxhxAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDiMlhsJWLAAAANpJREFUOMul0kFuwjAQBdD/x1bSQth0X27cnowjwAW6"
"oq0oQcnM7wKJxnRji9l5JD9/28O33ft5GlHW6XLaPG1QUV3u8nkaV/1q2V13z0O/3r681hD7"
"j0MGBGjZFUBaslxDGC1DiCgJCRKqKwsKRElEAwBkABEF0QYAWYDuUpTLqhTSv7eAmgjFHQFJ"
"eijF7E41XsTIv/0xS06kJoJmdjs/JNAgNhAEEg3A7LPkiVyGqhtQ0JhdcgVppBmNbZ9KhFzh"
"1yzXsXd6AzH0w9f4vWwdfz4v0+hepSRLv0Gpb9SsRdiOAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
TreeListBox = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAYAAADAQbwGAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDQ4JlnbeeQAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABBklEQVQ4y+2Uz0oCcRRGz8RIidofmZ8WMlPR0MZVtqtt82JBDxG9QS9QGNSi"
"qIUWTgQpSgO6KWcwkYZgsJmW7czFXRj0rS+Hj3sPVzvrnScqYyARPwzQVcbAWraQyhxA9BVR"
"a9Vov3YmDh9XT34F6gBxEtPwXIYfI5QyaPabmGmTKImwrS1KiyUqmzsUcmq6hgDFpSIL6Xny"
"uTxO2WFtZZUNYx2nfMBb0AfAbT/SeHEnArX6oJ5I7bA77P40FD3K7AOn1ab6dMF970FOm/3t"
"PS6fr9g1KzLaHJ4eYRfsf21mSZvr1g23nTs5bbKpLN7Ik/k243jM4PMdXUv9cW10PwzEYH4Y"
"8A3oJpVpLSV68QAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeListCtrl = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAALCAYAAAB24g05AAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDgoZ7eu1QAAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABtElEQVQoz4WQz2sTQRiGn5lsQhvUNkmLERvRQ38c9FQvVtGb/hXiXf9OBT2p"
"VFBiUgxIye5mm81md2Z3Zmc8RKNUwef2vfA+vHxirMeePwiLkPffP1DVFUWlUEahK8VGa5MX"
"x8+5jLwcDOMh9wfH7G3fpH/tOjtXemy1t5hlM86X538JgsuBtiXaalK9IFUpqUqZZQlpnmKc"
"+b9A/ZydqpQwiwjnIa2gxcnBA6ZZSFIkOO9w3tHZ7BBERcQ4OcM6g6ktw+lXOu1tvsUTkuUF"
"pdE8PDzh2f5TcpOvy847PkdfCE6nn7jXv0uUxyhTIBCMojFxNkOXGmN/z659jfce6yzWWZx3"
"BKUpcd6RV0vyqmDQ2+Pj5BRdKm7tDqi9oymbeDzOO0xt0FbjWYkCbTXaarJyybyYk+QJO1d7"
"mLbh1aOXALyevKGsS5RRWGfxeASCQDYI4ixmoTOmiykX+ZzKVhhrcN6tpwshKG2JdXZ1I5BC"
"0my0CPKyIFyGxNlsVa4NdV3TDJprwdHuIaNkhBQSgUAISUM2uNO5TXDQ3+ft6B2/fuGcQ0rJ"
"k6PHa0F3o0v3Rpd/8QPZ/Bl4qenM9gAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeMenu = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAWCAIAAABGyIsrAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDSMTSz597AAAAepJREFUOMvNlN1v0lAYxs9pTz+ASvnsAEeGum4kmpi0"
"zLF5oRd6rYlGr+a/qFeaODO50mic2YWRKmI719VBPxhtKaWU4QWEhGQmzCvfm/ecvM+bJ88v"
"JwfunewlI0mwWHX6HZSKpgpMYcEFCCE2OamOqnv6uaLBaFBTaqPxaHJFk/b2R03Rlbvrd1pO"
"u8Dmte5vP/QJjHh4/QEE8ND6tXHZjxExAAA28+Jz/NeTOgBANhVxWYAAJqKsPbBJnGQj8Znh"
"1EFcFizPqhTFn5ZcSq5kohmxKNCIZkjGDdwgHErGNzEvAADggX2weGjN1TBwwfrXhWaneewc"
"n6vwQ3+3+WY2nYZ+p7yXdeX26rbRM7JM1nCNXtAjcOLxjUc4hleLm8+/vHgm7MxhXcvxjXYD"
"YajttDeKFRIns0zGCRwI4Evp1f21e3MO26Ut07OWGO670biWusrFuK1SlcKpOBX3Q58m6Zbb"
"yjP5/xpr3ajLp/LfsL5u7kqmNBf609G+rCu3rlQ63mkiynb7drdvU4h8evMJjeh0NL1/9Lmc"
"Ls+/1qVVxTxEOLJ9p7qyeYlm8mzODdxgFJQz6+FZOFVOKKmOankWx3CSLvFpPhVJmX2TxMkE"
"lRieDT+oH4WCkKBYzdUujBVZnjUejxf/BP4At2vcLl9p4lgAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
TreeMenuBar = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAKCAYAAACwoK7bAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDS8jwVICTAAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABc0lEQVQoz7WSz2sTYRiEn939YrNFk+1nm8CGtoFEm0OUhOKlIFh6En/9iXqT"
"VJAiiNqLbamosYIN8SCua6BNSrCw0Ww23aJ5PRS8SI8752GYYR5js7spJCDj69hLJFgJExIJ"
"nsiErb0dgl8Bl7XGnXWxUzaFvPufOQgDnr9+gaMd1m6sYl+w+db1MQ0TS1mc/j6l7JYAMEUE"
"79Djwa17WKaF3/Pp/ejx9MMGjTdPGMUjRAQRYRD9JK9z3Fm5jWVaPN5t0Nhep3PU4e3+Ox6u"
"PyKMQkQEJQjRScTGzjOCKCCfzTEYDdhrfmRhfp7+cZ+iWwRARHj/uUlsxJQXSmTJUivViOKI"
"dHqKeqXOtD2NIGeNZzIO92/epTDjolQK56JD5fpVcnM5tNb/GpuGgTIVkz+CzmgORod88vdR"
"KsWldAb/+Dvh+Gyh0Q7b8nL7FdHJmOVqncXC4rmHeB2P1pc2AFeKZa4tVc/HrTVsJYPbMB4m"
"gttfwd+fUZxCmekAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
TreeMenuItem = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAICAIAAAB/FOjAAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDSMxnl48CAAAAM5JREFUGNONkM0OwVAQhWeqbahGVSwoDRuxsLuXNW/A"
"E9NHsPKTiJZI/ZQrqqkWTS0qVhadzZyTnMmXMzg+jNWcCunm+rjyJamkyVrKA0TkErW77xzf"
"+RsKo9CwjCiOEssna7IyLMcatPvH+0lTqvZtH7wDgRNGnSECbti2VwvyQh4AuB+rVWnNDnMA"
"MC8WrRMELEqKG7piRlRyhR/wS6B1wnzW1emamU21UZbKVCdZPiuLsvf0nu/X4rykVQIAOHWn"
"6Uvbns0zn8VxnP6tH1wDT0VKdioxAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
TreeNotebook = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDQ0SNz5EVgAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABXUlEQVQ4y+2RvU4CQRSFz+7O/gyYFZKNBYXGYKEGjT6ANjTG1tgb38j4CsbO"
"RGPhC4g/jQ004h+BAg1G0BCFgeHakAXdQZRoYziTae7JfHPvudrWxTYVygWoZBkWluNLsJmt"
"9KOhCFzbVXpsIjKO1ekVpSlJIpU7wZQXD3gE4OrxGrNjM+AmD4K5yeGYjhLcohYc5kDXdaXP"
"TQ4hhRKsAwSizs3cZSCaAkSEFhEGlU4AqH2EFDhIH+KmdAsCtQfuaOdoF3un+7ivPPQHA/C7"
"Pcuew3M8HKdTfi0QT1OCGawvmHV3Neq62ExuIJ3LtBf0sWvbssE5/zyIGkw+AEjEEgCA+ck5"
"P55urSfXvp0xA0E5sh/RgMtj/Z7WGjVU3p6VXrVexdNrGUI2VBn3lqEZWIwtoOfnI+ryZSkL"
"pkH7suNoKPrjGMJWGDr+SEPwEPwfwCxfyUPI+q9Ciy9FvAPrC5AiX0iARwAAAABJRU5ErkJg"
"gg==")
#----------------------------------------------------------------------
TreePanel = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAIAAABL1vtsAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDS0dMgV9ZQAAAGxJREFUOMtjnHdhvoSABAO54MHbByzywvKKogpkG/Hv"
"/18mBorBqBGjRowaMaSN+P3vDyMjExzhMYIFlwQrE8v///+IcQWKEYwMjMjc/wz/0VTDFSBL"
"seDXgwawKmB5+O7hjz/fyQ7Lx++eAACyLh8pK2aCxwAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeRadioBox = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAALCAYAAAB24g05AAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDgoZ7eu1QAAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABtElEQVQoz4WQz2sTQRiGn5lsQhvUNkmLERvRQ38c9FQvVtGb/hXiXf9OBT2p"
"VFBiUgxIye5mm81md2Z3Zmc8RKNUwef2vfA+vHxirMeePwiLkPffP1DVFUWlUEahK8VGa5MX"
"x8+5jLwcDOMh9wfH7G3fpH/tOjtXemy1t5hlM86X538JgsuBtiXaalK9IFUpqUqZZQlpnmKc"
"+b9A/ZydqpQwiwjnIa2gxcnBA6ZZSFIkOO9w3tHZ7BBERcQ4OcM6g6ktw+lXOu1tvsUTkuUF"
"pdE8PDzh2f5TcpOvy847PkdfCE6nn7jXv0uUxyhTIBCMojFxNkOXGmN/z659jfce6yzWWZx3"
"BKUpcd6RV0vyqmDQ2+Pj5BRdKm7tDqi9oymbeDzOO0xt0FbjWYkCbTXaarJyybyYk+QJO1d7"
"mLbh1aOXALyevKGsS5RRWGfxeASCQDYI4ixmoTOmiykX+ZzKVhhrcN6tpwshKG2JdXZ1I5BC"
"0my0CPKyIFyGxNlsVa4NdV3TDJprwdHuIaNkhBQSgUAISUM2uNO5TXDQ3+ft6B2/fuGcQ0rJ"
"k6PHa0F3o0v3Rpd/8QPZ/Bl4qenM9gAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeRadioButton = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDDYJgCsItQAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAAB50lEQVQ4y6WTzW8SURTFfw8Gi6mdptAE0cIYbJHUrxAjxUx0Af4NmujGj/+s"
"O43pyo+VLtRJLDERFkJrIgo0Qg1gHBJkaGeeCzOlU5CYeHfv5p2Te+45V1QHVcl/lHK0IZFY"
"toXt2AAIIVB8Cn6h4Be+6QRDZ0h/r0/xW4lq+wudXgeBIDwXIhlZIbOUIeALeAiEK8GyLdr9"
"Ns8+vmD+uMqNs9fR5jUAaj9rvP78BnNgcjt9C/WY6iWQSEzL5ElpgwunzpNP5CbqfVl9RblZ"
"5v7Ve14Jlm1R/l4hfCJEPpFj39nHqBt87dYAOBPS0OM6+USOptmk2CqSjqaRUuIDsB2bSmuL"
"rLYGgFE3WA4vc3Mlz2DPYmv3E0bdACCrrVHcKSEQABystdvrElNjfzR362Pju72YGqP5o4UQ"
"YiTBfRyup+XnU/33TKD4FMJzIRpmAwAtFB8DuL2G2SC6EEUiRwR+oXDp9EXe1TYB0OM6qUiS"
"YGCGYGCGVCSJHtcB2KwVSC9dRkrpzYEtHdbfr7MaXZ1qY6VV4WHmwSiph2/BHJo8+vAYNTgh"
"SNW39H6Z3L1yh9nALI50xgncSQo7BbZ3t2mbHQAW1UVSJ89xLZZFIA7AEwlcVwTC446U0gP8"
"6zW6nyUS/uHQfwM6BdufWiyrrgAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeRoot = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAALCAYAAAB24g05AAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDgoZ7eu1QAAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABtElEQVQoz4WQz2sTQRiGn5lsQhvUNkmLERvRQ38c9FQvVtGb/hXiXf9OBT2p"
"VFBiUgxIye5mm81md2Z3Zmc8RKNUwef2vfA+vHxirMeePwiLkPffP1DVFUWlUEahK8VGa5MX"
"x8+5jLwcDOMh9wfH7G3fpH/tOjtXemy1t5hlM86X538JgsuBtiXaalK9IFUpqUqZZQlpnmKc"
"+b9A/ZydqpQwiwjnIa2gxcnBA6ZZSFIkOO9w3tHZ7BBERcQ4OcM6g6ktw+lXOu1tvsUTkuUF"
"pdE8PDzh2f5TcpOvy847PkdfCE6nn7jXv0uUxyhTIBCMojFxNkOXGmN/z659jfce6yzWWZx3"
"BKUpcd6RV0vyqmDQ2+Pj5BRdKm7tDqi9oymbeDzOO0xt0FbjWYkCbTXaarJyybyYk+QJO1d7"
"mLbh1aOXALyevKGsS5RRWGfxeASCQDYI4ixmoTOmiykX+ZzKVhhrcN6tpwshKG2JdXZ1I5BC"
"0my0CPKyIFyGxNlsVa4NdV3TDJprwdHuIaNkhBQSgUAISUM2uNO5TXDQ3+ft6B2/fuGcQ0rJ"
"k6PHa0F3o0v3Rpd/8QPZ/Bl4qenM9gAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeScrollBar = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAICAYAAAD9aA/QAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDQEAaDJ6EgAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABeUlEQVQoz22SwWpTQRSGv7l3TIlgIriI1Aoqgi4K6tqK+ASCvoHgi/hQca3o"
"IkpX6s42tWmKKEkarZ17Z+ac4+LaVJqc1fB/Z37OP3Ncf9S3w/l3Lvo252v865DrlzdYVaOj"
"Aza6yyzkE652evjJyYQntx/jKJaatkfbbN16uNL43e77lUxNebP7lgJ1RInUUjGrZuzPv1FL"
"RS0VhqGmDKdDvk52ABhOh6gphiEmiAn9j68ZzQ4QEwzDO0+hKGLCcTxmsD+gSvXigpkhKkRJ"
"RKkxM6IkRBumqqgqnz994cWrl4yPxo1misea8dUU5xwOh5o2sdCzsxmGLeL+z8wM/qU71fxp"
"w5pf48G1+6jamZkqSRNlUSImZM2URUnStJgM4O7mHZ4/fUbvUm9h7rNmQgo45wBolReocgVA"
"0kTIgW67A8C8ntNtdwg5NEwCAI/ubTUbIaF5Lo349c46g70PtHxr6Yenf2ZEiSu34sfvn2TN"
"S3qVa25eucFfTP0TLifzTpkAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
TreeSeparator = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAACCAYAAABc8yy2AAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDTYU4u8OWwAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAAAF0lEQVQI12Nce3/tfwYaAMYXf1/QxGAAfsIHBx4nJ1QAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
TreeSizerFlexGrid = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsTAAALEwEAmpwY"
"AAAAB3RJTUUH1wsXDgwjfb3LdAAAAD9JREFUOMtjFBAQYPig8+E/A5mABcYQuCLAiCzxQefD"
"f3QxdPBB58N/JgYKwcAbwDgaiKMGDIqExCggIECRCwBFqxoCQOm6JQAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeSizerGrid = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsTAAALEwEAmpwY"
"AAAAB3RJTUUH1wsXDgw2EGAvnwAAADZJREFUOMtjFBAQYPig8+E/A5mABcYQuCLAiE3BB50P"
"//HJMTFQCEYNYGBgHI3G0WiEGUAJAABQ2RoC2hMJYQAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeSizerGridBag = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsSAAALEgHS3X78"
"AAAAB3RJTUUH1wsXDg0IyBoDdQAAAEtJREFUOMtjFBAQYPig8+E/A5mABcYQuCLAiCwREBLA"
"sGHNBryaP+h8+M/EQCEYeAMYaRaIH3Q+/EcXG6aBOGoAFRISo4CAAEUuAACOExkCdACh7AAA"
"AABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeSizerH = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsTAAALEwEAmpwY"
"AAAAB3RJTUUH1wsXDg0Y1a0TEQAAAEVJREFUOMtjFBAQYKAEMDFQCCg2gIWBgYHhg86H/xQZ"
"wMDAwCBwRYARm4IPOh/+45Mb+DAYNYAKBjAKCAhQlJAYh35mAgBtQQ7IJHdgMwAAAABJRU5E"
"rkJggg==")
#----------------------------------------------------------------------
TreeSizerV = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsTAAALEwEAmpwY"
"AAAAB3RJTUUH1wsXDg0ljcVfAAAAADlJREFUOMtjFBAQYEAGH3Q+/GcgAASuCDDC2CyEFKAD"
"dAuYGCgEowbgiAVionI0GodVNDKiZ2dSAQDuMhKd7SieJQAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeSlider = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAICAYAAAD9aA/QAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDiEeBf/digAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABeUlEQVQoz42Sz2tTQRSFv5l5816TxXuWxloXuhCrdVNQzLL/hP7L4jJSdKdk"
"YWKQgjGk5v2auXdchCatUPDu7rmHj8Phmmk7Tdya76sZV9dX/O9453lxfMogG9zRs9vLYr1g"
"EzaMn75l2Sw5PDi8F7hsloyGI+pQM/kxYfxkjLd+D57MP+2W2WrG+/N3OOvw1mONBaCXnqiR"
"siipQ03u8q3HeSpX4W3O5eKSgR/uwR+mHzmpjgG47v5gjEGTIknoNQCQAIyhk54E9BqIGtGk"
"3BhqadCtk/nvOdlBllMOSgCCBEQFAE1KLx0AhSsIKmgSrDF00hE17r0omc3wdtusNZbs4tnF"
"Lr5GpZceZx2SlCDbxKJCG1tsYQkS0KREFSTtQ1RFuavi/PEDslcnZztwNSz5/PMLp6PntKGl"
"M1t96IeklFi3a7zz1KGmCS2bvqaJDa20vHn4msIVO5b5992+/vrGfDUnaiSz2b1fcXPPXc7Z"
"o5ccDY7u3P8CCPa93kLRoaQAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
TreeSpacer = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsSAAALEgHS3X78"
"AAAAB3RJTUUH1wsXDwYJXSuAHwAAAG5JREFUOMvtk7ENgDAMBM+IAX6QDEPBmBQZJoN4g1Bg"
"kEBQQCgouOblL6z3SzZJePIKGHBbexZMRYQJYMM4kKe8zWfqya0Lo/KM2h0236U9gUWJPGUt"
"sapod4YnJ4q9xJN/oIM/wQsJrPUbTRItzCrJLwrVI82eAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
TreeSpinButton = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAgAAAAQCAYAAAArij59AAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDhkmOmLZ7wAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABBklEQVQoz22Rz0oCcRSFvxl/YwpjVgS5GRF7hAiindBTiBC9Sz1A+AARRO9Q"
"BG16CFu4NSVX/llM+LunxYyJTvfu7v3gnHNvcPtxp/bxKYUKRLVcxSWHCZfti8LezHj5fMUJ"
"WJkHtAVIAoEDYTJMnoe3RwBurq4JCBDC5ThP78/c9/sARKWIXqcLkAFC9Drdv+F6lktketr1"
"kLfLwCKwLifE12yMye8eYmOyUTv5V2IwGWxMjmYj5ukCgNpeTGO/AUC4Nlmv1BnPJ3wvpzQP"
"moSEGwkhKlGF8+SMuBzj5fHyWUzllxQiKkWkPt2OaTKWP0sMK/zCMNxwOmSRzncSBoRhSOuo"
"xS+qlIjuQyw/7wAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeSpinCtrl = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAQCAYAAAAS7Y8mAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDQMVN9n8ewAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABmUlEQVQ4y6WUTUsbURSGn7m5TkRNo6LiQkahKaXi0orob9CVSPEnCF22KriW"
"rqQ/oKsuxG0hmxYVsuiqVAy4UAkaVNQqCdQZdPIx97gRjUwWk+S93M058JyXwznHSp+m5cr9"
"h7JiNKLz4hmpvjehuKXA1jb62rtm9t0MjWrnaIfp19MEEoRyPw9/oUHRrAIJqJpq2LWoFqiA"
"iGCM4W9+l0/flqmYKsYYQNCtgvPFPIurHylcFkh2vWJlfukl2IhhI7OJbdvMT81Fhju9Dr+/"
"Z56LIQg1Dd7IbDLsOKBh62A7mmMEkTq/Fpzdz/LeGWfy7QR7f7LRwXUeUtOKREeC29Itbskj"
"2Z+MBHZ9l8JdoW7uCbww+4H1H1/xSz7Lc58jgRPxBLa2Q/HcTe4ZnOpL8WVhrbGpQHDLLhf/"
"LwGI6zjDPQ4itD7HnW2d3FfuOCmeMNIzgrIUUtuKpuEIY4NjjA6MElOKclBufUEMBiPm8fhY"
"+FX/qZgG0zTYr/h4Fa/uDbHSp2nxfA+tGjOfuzmmvS08ESoWY6h7iAfDaL5HCRgjkAAAAABJ"
"RU5ErkJggg==")
#----------------------------------------------------------------------
TreeSplitterWindow = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAIAAABL1vtsAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDiYubGd74QAAAfFJREFUOMvdlEtvEzEQx21nN2ojoHk0IoiGtqr6ONBT"
"uVAQ3OBTIO7wneCTwA0kOBVUKlBIadKiKPtqNptdv8ZjDgEURZtKCTf+N3tmfv7bHg19dfS6"
"UW6QRXUWnjnrtfXN+sbCCLSGkX/Wf4Nwcne9zDv6+UkZlSnONReKLxWXnx88m8NFK2jdax6s"
"lW83btxcvVZbKa2ESdgb9eZwIUAKELEYxjyOeRwmUZzGGvUcCK441zzmsZf43sArOsXDnfv9"
"xIuyCC2ixcpyZauyNY3wM/80+gGotYFW/3ulVO4E3Wh0KbV4sHv4dPtJqtNxPVr86n/LQRz3"
"v+w37vppwHVGCW37p0ESCik0/PZvrLHWAgIgoMWci0gt0WKqRqnKmrW1z91jIfmdetNYdJlr"
"iUWL2mgBwhILCDkIAUKASORokA2iNFq9XtMl/fLhC0LIu+57aSTXHBAssZRQhxVyEEESDEXS"
"H/Yv04ECpUH/dUsplSDHJ1NCGWVuoZiDSGXmjbwgCRUobbQxxnXccWivvtuO2owySiilrMAK"
"m5WNHMROY/tD++P4RRCRMfZ479E4VF2qVm9VZzU4fXP+NndeaITihFs78QWTOumdOLPYLnNm"
"lV3VnZTQyaUldtrzn4TJkHN1zZRyE5xO1BHAFx4W59HFL52bPckdnR6/AAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
TreeStaticBitmap = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAA4AAAASCAYAAABrXO8xAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDhwKdc1BSQAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAACsUlEQVQ4y2WTy49URRTGf/W4t4e2u8f00E0UZZgHCAECwWAiJsYHklE0UWNM"
"YAEJceGCsCNhR/gzSFy4IERhCSqJRCTIZgSJCS6E7llglBnnAZluw517q85hcRlH8CS1q993"
"qr7zHTOVTamiAIgqiqIqPF3GWAwGawwAfhmKKgQp+OrmWfpZn6zISFxC6lOiRN7f+h5rG2sB"
"V4LL0Ex/mtOTZ/hox4e0a61STCJBAlnIuHDrW0ZWr2fPhrdxxmNFlSw85PTkGT7bfZjnG8+R"
"upTEJiQuwVtPxVf4YNs+phdnuP7nDUQjVjTy9c1zfLLzYyq+QmITKm6A1KU443DWYY3FGssb"
"46/z0+1rKIoFyIqM5qrm489bvHVY4zCPjfhvDdWHuD13pwRDDIgKokLUSC/vEaRAVFDVlYMy"
"1hqlM9cpQW8dUUsjFpcWudz5kQfZA6JEugtdpnvTKyIoSyEvxyGqRIkAqCpRI1e6V2k+8yy9"
"rE+73qYx0CCqMNubY2RofdlRUQopKGKBotQqNTa2xzHGUh+o0661CBKIErk7f5eta7ZgDYa9"
"m9/h0u8/UMSCPOZU0yrNapOx5igDvgJAkMj9hwsMVht467HWOEabI0QJdOa75CEnSiAPOYUU"
"BIkUMZCHJS79dpkDO/djjSufao3j0K6D3JnpcOOPX8qLsSCPBUECs//Mcv7Xbzj82iESl2Aw"
"mG7WVVFFNCIqnDh/ksQnvLt9glpa47tbF7l6/RpfHv2CRqVRzhdTZrVMvENRhtvDDA+tY3Lq"
"ZxRlfM0Y+rJST+v/QtYYvMGwvCHWOF5svsBSyHlr05t46wkSmOvP46zHGVuu2HLHUgVE4ZV1"
"u/j81BE2jW1ksDrIX/P32Lt9zxMQUJqzAhta1RbHPz3G3wuzXLzyPbs3v8rESxP/y+wjHwdM"
"yMvIwOAAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
TreeStaticBox = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAIAAABL1vtsAAAAAXNSR0IArs4c6QAAAAlwSFlz"
"AAANEgAADToB6N2Z3gAAAAd0SU1FB9sLHg8XC+o2TdIAAACnSURBVDjLY7z34x4DZYCJgWJA"
"VSOqN9XCGXA2sjgBI6o31bb6NcNVw9kQcfxGsBy4cdBBwx7NNkybb7y4iVX/g7cPWOSF5dFs"
"RgMQh2hIqGM14t//v4x7H+9TFFUgOyyvPb822CJ11Ijhb8Tvf38YGZngCF82wyXBysTy//8/"
"YlyBYgQjAyMy9z/DfzTVcAXIUiz49aABrApYHr57+OPPd7LD8vG7JwCKb0Uo5BCtBwAAAABJ"
"RU5ErkJggg==")
#----------------------------------------------------------------------
TreeStaticBoxSizerH = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsTAAALEwEAmpwY"
"AAAAB3RJTUUH1wsXDg4M5FqUrwAAAFJJREFUOMtjFBAQYKAEMKELfND58J9sAz7ofPgvcEWA"
"kRRDWJA1oruAGINY0AWQXQBjwwzH5l0mdM3INFmBSHEsjBpAOmAUEBAgOfliGDC0wwAA8pIl"
"0wUY404AAAAASUVORK5CYII=")
#----------------------------------------------------------------------
TreeStaticBoxSizerV = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsTAAALEwEAmpwY"
"AAAAB3RJTUUH1wsXDg4eF+Pl5wAAAFRJREFUOMtjFBAQYMAFPuh8+C9wRYCRAQ9gIqT5g86H"
"/yQZgKwBnQ3DRLmAgYGBAdn5AlcEGLF5h4mQZrLDgFgwDAxgwZUGKDIAX8iTlA5GY4E4AABb"
"Fil3I9jn1wAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeStaticLine = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAACCAYAAABc8yy2AAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDTYHZlFPhQAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAAAF0lEQVQI12Nce3/tfwYaAMYXf1/QxGAAfsIHBx4nJ1QAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
TreeStaticText = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAICAYAAAD9aA/QAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDTsNMyrY1gAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAACAUlEQVQoz42QW0iTcRjGfxv/LxTL0oJBBFGODhdWFIWyrorUm6ItS2jpYlkZ"
"i0CoG4luUjPXASQ7iKUJCYElo9bhomarLMhsbjhP7cOg1tRAdKO21P27WH3QnT944YHnfR8e"
"Xp0aVyV/qX5ey536VgB63O/JTssGoC/Sh9m6n30OC05LHfNB/0+EJkO0NrRpxsD4oKb7I0EA"
"jMtzmC9a8I2XTSR/J7FWHgTg80RIWxr5ntKRyTG2nsxnfUkut941af5UYorzz2rYXLGNnKK1"
"5FVuTwV/+NbDw+udHHAUU5C7E4DR8S/aoX84AEDsZ5RTdgdSwsVzlwjHwiTmEtgaj9By+S4W"
"814u1NdQtKsAAdDgvgZCx54tu0kXaQCoX1UAZpIzBDwBstZkUWuuRtErvCjswtvuZSI2wevQ"
"G/xuP7bTpZwtrEo12QDCPfSEt/e7ATh02Ka1/OjthaMw+GOI2egs+SV5KHqFxFyCT90+xCKB"
"cakRl+8RAKsNq/77sXC2XUFkKlytc6LoFQAaXTcJPA0QjoUJRgYA8LhfcW9dO70hH9HRaSqq"
"jpGhZGBYYgCguaMFoRdMx6NsWrERMAlp67RLNa5qY+0ok5iEbA7elnZXucQkpN1VLheXLZNp"
"5oXy+OMTcvjXiFTjquyP9cvSBzaZXpwpdTsWyJVnjNIz5pF/AHm51hdHwnK2AAAAAElFTkSu"
"QmCC")
#----------------------------------------------------------------------
TreeTextCtrl = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAQCAYAAAAS7Y8mAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDQIL1M3wWQAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAAAYElEQVQ4y+3MoRWAMAwFwJ82AkEtzMAwMG00TIKpgloqENBgGCE8TG+AI4mi"
"27HDkYcF1YI+dOCUE6ZhhKV5XeAAB2tM/oP1VeMa1/inuJinl94giaL5zGDHNmm50DYBD+9q"
"HJo8ucgXAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
TreeToggleButton = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAQCAYAAAAS7Y8mAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDhgkzXeJggAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAAA80lEQVQ4y9XSvUoDYRCF4Xdnxywp1kWxW+ETK9Nkg3gTdgqCjT83JN6AP4Vg"
"IYIoFhZego0bJLFLhKhFUMEizW6+tbfQwknhaWd4OAwTXPavqt5bj7H3WERDZWHWof33J7K0"
"iYhglc5rFy3HJSJCGIRmsPceu5rfMjFYfxred3Pyxza+9IgK2VKTViP7e+NWI2N3fZvh55Cd"
"tS3iuZiT61MGH892pyh8wdntOZurGxzeHNvBo2LEdBkTaUStqNnBSZRQd3UOLo6YX0x/3Q/2"
"7varFbds+sf5oP0P/3hy8JSqPRoK6mYcDy8dqCoTNJCANEn5Am64QzC1J/rQAAAAAElFTkSu"
"QmCC")
#----------------------------------------------------------------------
TreeTool = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAsAAAAJCAYAAADkZNYtAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDRwXFI2TyQAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABPElEQVQY0z3QUUrjQACA4T+dSdMmCK6NiqmKstYK+moF8UHxBOJZ9Gh7lFUR"
"DS6UZWts09pMQiYzs0/6HeHzntWzq3TF0/sToR9xuDFAG01WZLwvM062jpEtCYB39+vehX7E"
"fn+Pl/Er1lq8NoStECdgPYhJegkOiwx0h+vTKxrT0P+RoE1DbWq00SzKBb//PPDxOeX1X4r0"
"e5K8nFPUBc5ZfOHT9UOidkgc9UhWE8bTMZ4AaZwhL3NKXXK0McQBZa1QusRYg3WGt+yNy4NL"
"ZCADZmpGHMXMqwXGNhhnsc7Q2AZVlyyXBbvxDjJfzrHOcbD+E1UrGttQG02lK3I15yF95GJ4"
"jvAEUgpBt92lqBUzNeXlb0qWZwSiTX9lm9vRDcPNQwCksJLJZEI6SVlrrXE2GDGIB3T84Pv3"
"y38ceKGMmSSP8gAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeToolBar = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAJCAYAAAA2NNx1AAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDRstncDcvAAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAACS0lEQVQoz1WSzW4TZxiFn/HMeDweUUHiJopT00AJCbRSs0i7KJUIYoGQYAHc"
"TgT3AZvS3EQr0bQLftQVqio5RmmwSJVWVSaJnXpm7O/nfVkQkDjS2RwdHZ3FE2yX2zq2Y17t"
"v6IZZ1ycWcR6S17k7I9yvpr7kqgW8V7rGw/o/tXlXGuBLMlOUkUBL57Xwz63r9wievjbI5px"
"xrn5Bf7o/8mz3nOCOjRrTTSE/DCnPd1GES7PXaYYF6xdv8q9lbucPd1BVXHicOpw4vjh9x/5"
"6cXPRIltcP2bazjvmD/TxnqH8QbrLcfVMd3dLQ7+P+T1f33O3zqPqDL7ySztU3MkYYITh6KI"
"CLWgRhxG6ESJ4umIQTWkMAWqQhzGpHGTrN6klU3TPt1m73CPIIQ0TlFVakGAIFixWLHvHn+w"
"R0SIvHoG1YDKVizPLKFAZUpKW+HFI+p5k79h7cIaAOI9xluMM9SCEHcy7NXjxGGdRVSIkijh"
"qDyilbUYjo/x4vAqyEmxNBWjUcHZVudkWKhMyebOr5STkuHoGCcOEUEDoZyU7x4PRkNElQuf"
"fkFpSpw4jLeM7ZhBOWSr3+P7pe8IgxAAZz0Ta9gf5VyaWWbX7bL6+SoH5QGPn26QNlK8eqIo"
"DEnrKYUpOSoP2fm3Tz7IScI686c+4963d1iavfgBt5tXbvDyn5d02h2cOrp7W1zqLJM0Ehpx"
"AxFBVAnWn9xXYwwFBVO1KVYWv2axtUgjTj7i971EhV+6mzzpbZLVMyJCwlrI2Ezo/d0jzVJS"
"Ut4Cv35XCWG/LuAAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
TreeTreeCtrl = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAALCAYAAAB24g05AAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDgoZ7eu1QAAAAB10RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJ"
"TVDvZCVuAAABtElEQVQoz4WQz2sTQRiGn5lsQhvUNkmLERvRQ38c9FQvVtGb/hXiXf9OBT2p"
"VFBiUgxIye5mm81md2Z3Zmc8RKNUwef2vfA+vHxirMeePwiLkPffP1DVFUWlUEahK8VGa5MX"
"x8+5jLwcDOMh9wfH7G3fpH/tOjtXemy1t5hlM86X538JgsuBtiXaalK9IFUpqUqZZQlpnmKc"
"+b9A/ZydqpQwiwjnIa2gxcnBA6ZZSFIkOO9w3tHZ7BBERcQ4OcM6g6ktw+lXOu1tvsUTkuUF"
"pdE8PDzh2f5TcpOvy847PkdfCE6nn7jXv0uUxyhTIBCMojFxNkOXGmN/z659jfce6yzWWZx3"
"BKUpcd6RV0vyqmDQ2+Pj5BRdKm7tDqi9oymbeDzOO0xt0FbjWYkCbTXaarJyybyYk+QJO1d7"
"mLbh1aOXALyevKGsS5RRWGfxeASCQDYI4ixmoTOmiykX+ZzKVhhrcN6tpwshKG2JdXZ1I5BC"
"0my0CPKyIFyGxNlsVa4NdV3TDJprwdHuIaNkhBQSgUAISUM2uNO5TXDQ3+ft6B2/fuGcQ0rJ"
"k6PHa0F3o0v3Rpd/8QPZ/Bl4qenM9gAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
TreeWizard = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAIAAABL1vtsAAAACXBIWXMAAA0SAAANOgHo3Zne"
"AAAAB3RJTUUH1wsXDwEvwGeTJQAAAqNJREFUOMudk0tPU1EQx+ece24LxT6gb2xpMSqPgisJ"
"JBoSH1E/AIJK3Lg00Y36BUB8xLXRjbhRF8bgiw0LXfhIBBSJoERQ6StcbHtb+6Dt7T33HBck"
"xGADlMmsZia//P8zGfQq8npk6mmVvkrVVNhalKgKnAMA59C7v0eo7jY4bA6MsbHGaDXVlbjq"
"sjhNNSZEcLW+CgRkNBh1OhERTERCRMIRs5mt9dZ6m9lGgb77/p4AgkwhncuvxFOJxp1+AN5Y"
"65cyv+21djkvd/pasoWsnJcNOoOcT9oM1lnpq0JLXd5OKbscksMcAWacIYQ7/B2+uganyakx"
"lilkBcDBRAhzHE/H5VwyU8gWS8VEOkE1CgCU0qno56W0pGoq44xoTOOcx1ZiXpc3+ida0koT"
"kUnOOUZYykmr5hEgDhwhFMvGFKqoVJ0pzAIAQkjVVDQ8fd9lccF2IygHic/qa7T7t41gXMNl"
"G6li6uXEaGwlzjijjG5MKY/QC/rbD+/cezOMEX4xPXr+7oUNEKRsVcBCg62hzRsYfD5k1pt3"
"6GoqQyiaMj4/7va4A55Au6ctmopykVWGePD20bw0f/jAoUwxQzBhwBbDIQCgGiUC2XwXX5Zm"
"EssJnUH/YXI8kook80lFU1qbmo9cPj7y8ZlClc1V7Ktvbz3ZomhK37V+i8NCNTofXggHIyc6"
"jvV29mzVCBGIIAjdXQe/heZ+zi06/HaRiFf6LlV2VATIX+vvP3r6+sXBjJw2u819t85UdlQA"
"WJPdtjvwQ/qVTqQrUwEAKqMIYYTwzbM3VKXk2eV5PPGEA68AIWLCOVvNq+cGcvnc2KexhdjC"
"JggE6N9cqzc7m/a69+SLhSVJWtdav4uyOgFAFMShUwMAwDn/f4aEkqEiLWz72SPJ6F8fukGl"
"454ftwAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
Undo = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABYAAAAWCAYAAADEtGw7AAAABmJLR0QA/wD/AP+gvaeTAAAA"
"CXBIWXMAAA3XAAAN1wFCKJt4AAAAB3RJTUUH1gcaCjgAnz8HVAAAA4BJREFUOMutlF9oW1Uc"
"x7/n/slNe5u0qY3NXeyfdOlcdT7MlyJbhdWiosiK+jKRMcqEPu1R8UVBpj6Kij44KKEvGxOh"
"ogzELVtjdOZh08kE3azapH+SdclC/t7knnuOD55bLlkaoXjgx7333B+f8z3f3+8coMO4dG7s"
"ZDIGjl0MaacfSwtDRzTzzzPY5ZB2gg5Imfjks8/gfwNvQ2fGAXtt12BlJ6jFR6Aq3QB+RTIG"
"3rBww7TwU4OR89c31MTp95rVTmC5HZTTVcDzOAih2DOqIzwWRjBshPr8cqWYL+195EH7xfCE"
"+kMiyUodwRcWRw4FkF6enBlH3RqD5D0IAOBQAckApAEo2h7oPWx4dN/Dj6oa3T/A7z0ZnlCv"
"JZJssy340tm9x3X611eTM+OoWSOQiAJZ0VwpNqxmGY16Dpn0VXB4ofv9xB8cDnuraXv0gHol"
"kWRWK5gAwJcLQ0ceEDaYNArAAogXtcoG8lu/3F8YBRh86Glc//7Hn+vlQuq5ecy3Bbd6nLvT"
"RKm0jvFoBJBDAADTrKNaycKsFEAUAk/XEBrWfqxcW7r51Ek8tmNXzM5lLi8tDE1fjW/Gn5g2"
"UBM1T33z3XZyifcgZAzz3r51gkYJeg8FPMFRYKtzH8/OZS4XaGA6dfE2IpHI9vzhEyCHT4AU"
"yt5jxfzdrXp1kNvUBocFi8k9NHfUoLmjRscDMjuXubJWN55PXbwNKGPuPOnVU3fPZwvktfX1"
"IrSuXk6tPGe2Ccq8oMz7nydRBqAufhR6IRkDF5eQxxXqt7FAMf/7AfuPVIhe+MzH3bVqVUwc"
"VcJ3z/FT2fjN3OBL4r8brMmEQ5LUZqnc3bTtxqYQI7UrngNWRZICQJp/I7cMYACA162qzrQs"
"UXSjnP9NsihZFQtSEcwBkzaKFZcK550AIGdO9876fCTUpOrG2iYLVouNcwC6AdQFlAPgSktP"
"ty6iunYh+3xQovv0NyPRWvaLz5fVOzn+7tsf4msA2r+nCpYDl1ugkvtiEnMyABKNSvriB4FP"
"An1mOb5csTdW2VvvfIqE2L4DdazgpMVjxeWxrKpQpg5pgamDsuHXWb9XsyeqNSq9/j6PiS3b"
"IpoiqPhmjsd45ZiHdHVxj2kq5NYtxmybEXDYL09Zen8/C66krfTfK7jx8VncE1AIdcy1gOPx"
"ff1H2nhNWu4V58ldwdyFA4B/ANW9dTHXQUUsAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
Icon = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAABOVJ"
"REFUWIWllmtsU2UYx3/n0na9nLbboLvgLt2F4RxCDEgmJIoxaDRigiwIJn7VqAkJMTFRPxpi"
"CDF+wEQTEmOIMUgEIsgHox/Q6GCMyyAhJDA3xmDretu605527TmvHxYqZd3Wdk9yPpzL8///"
"3ue87/O+SLLCctfR748JSVY4dfac+Hvgshi4OiRKySvpkmSFYvHH+T+FpmkAaLLAUbMaNyZx"
"Q8chZIYnw7z0wvNS0eQyQgU49uNxEQwG8apgU1WqqqowZDseyWQmJ0jGwtx+MIWu60QiYTrb"
"mlAdnpV6/w+wvrsDn8PJnbM/E54I0bp7D6HQJJLq4sqVqzQ01KO5bKiqiqIoxBNpFGVugdi5"
"334XbS31xBJpntu8Sfrh+Anx9p6+JaukAuiGyZyZ4fbQEPcGLxFb1YjhcBF/MEZjRxdpI03a"
"SOeTQpOTSPJC3YDbQbXTg9NTS2hsVETTOQBO/nJG7Nr5elEQSZIVTp0+IexOPyMj/y74IBSa"
"YmL0Lke/O7rkSD7/4pDY/uJ2XE4XfpvE7VCCC4NDjM1k2LGxia516/DKguC6bun02V+FkU6z"
"d/ebUn4SHvnmW/Go4IfvvZs37KqpFq11Dbj9Gif7LxYFOXjosOjt7cWyTCzL4vyFG1y6Oc6G"
"jtVs29yD01mFrus01fgIBALzcyuZXHwVPB4bV/lFY0MT527cWLQSXx35Wrjdbnw+H6mUQSwa"
"paGpjejEHaaiswRsMs0bnqGn5yncmKRSqdIBAHqfqBeramo4c/3mssvv/bf2Cq22msb2Lqrr"
"67DZbciShMej4a/243K6GB8fLw+g3esRwZpa7Jp30Urs3LxJaAhSCZ3QzDT/TIakTz46IDyB"
"RrxeDbvdjs/nQ9O8jI6OzK+CUmM4oUsuRRJtLuei33glmJyKEEkaXAtPSQAHD39ZAPvxp5+J"
"xtrV7D+wXyqrAgB1LqdY6/fz14OJBRXo27heROPTRHSD69FISV2ybACAbp9fdK6px6H5+Oni"
"RWnf1q0CXedeeIqZTLZk84oBmt120eKrpTPYiixLpBJJ7kcjhGeT3JyZLmt/KGsOPAwjJzGT"
"ThKPxKiSBNFkmnA6TTSbKVtLrgQgnMlIyZxFZDbB3FwWw8ySmcsQShll744VAQA4JBU9m8ay"
"2cmZJsMJvaKtuWIAI5dGtSRExkCSKj8WVAzgVKtI5ixSJihyxTKVAwAoioKqWNhWILMiANM0"
"mZ0zVyKxMgCbKtDsCmkrV7FGyX1gYLBfRGJJAJx2wQdv9GEJlWxOWtEcKKkTDgz2i472joJn"
"21o7sakCv9ODYoHZWM+ufe+wtrsdgGCwdYFOLBYnkUjwysuv5ZfNshUoZp6dna+ELMvYkTGx"
"sCwr/37Lli0AjIxNEgqFUJg/wGZyMplYtEBrSYBi5gA2zQ1AzpSxqQLdyGKXZVzVvrw5QLC5"
"nmBzfQH45VuFlkUBBgb7BUAx84dCANPZLHV4UBUFj8dDW1vbUuMhbui8uuOlgq61AGCxURck"
"eVzs6NsNgGbliKYydG9YT0+wedGc2VSOK0O3FjwvmISlmD8MIQQ5PZX/HcvF+PAIT296dkHP"
"zgM8ai6EWFF/fzwikQhrO58sKpj/BUPXhojH4kUFAg0tBfdOuyh8H6hbEmAxc6jwRLRU3L0/"
"UUDXsqZhyVL+B0Qv7WbNxR4hAAAAAElFTkSuQmCC")
|
garrettcap/Bulletproof-Backup
|
wx/tools/XRCed/images.py
|
Python
|
gpl-2.0
| 83,540
|
[
"VMD"
] |
0e1eb1fe093b0f3f5d37c6d27a868dcb7b5e4188658e791180ab7cb44d5710e7
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Hybrid LFP scheme example script, applying the methodology with the model of:
Potjans, T. and Diesmann, M. "The Cell-Type Specific Cortical Microcircuit:
Relating Structure and Activity in a Full-Scale Spiking Network Model".
Cereb. Cortex (2014) 24 (3): 785-806.
doi: 10.1093/cercor/bhs358
Synopsis of the main simulation procedure:
1. Loading of parameterset
a. network parameters
b. parameters for hybrid scheme
2. Set up file destinations for different simulation output
3. network simulation
a. execute network simulation using NEST (www.nest-simulator.org)
b. merge nest spike output from different MPI ranks
4. Create an sqlite3-backed object representation of all the spiking output
5. Iterate over post-synaptic populations:
a. Create Population object with appropriate parameters for
each specific population
b. Run all computations for populations
c. Postprocess simulation output of all cells in population
6. Postprocess all cell- and population-specific output data
7. Create a tarball for all non-redundant simulation output
The full simulation can be invoked by issuing an mpirun call, such as
mpirun -np 64 python example_microcircuit.py
where the number 64 is the desired number of MPI threads & CPU cores
Given the size of the network and demands for the multi-compartment LFP-
predictions using the present scheme, running the model on a large scale
compute facility is strongly encouraged.
'''
from example_plotting import *
import matplotlib.pyplot as plt
from example_microcircuit_params import multicompartment_params, \
point_neuron_network_params
import os
if 'DISPLAY' not in os.environ:
import matplotlib
matplotlib.use('Agg')
import numpy as np
from time import time
import neuron # NEURON compiled with MPI must be imported before NEST and mpi4py
# to avoid NEURON being aware of MPI.
import nest # Import not used, but done in order to ensure correct execution
from hybridLFPy import PostProcess, Population, CachedNetwork
from hybridLFPy import setup_file_dest, helpers
from glob import glob
import tarfile
import lfpykit
from mpi4py import MPI
# set some seed values
SEED = 12345678
SIMULATIONSEED = 12345678
np.random.seed(SEED)
################# Initialization of MPI stuff ############################
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
# if True, execute full model. If False, do only the plotting. Simulation results
# must exist.
properrun = True
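# A plotting-only rerun is just a matter of flipping this flag (sketch,
# assuming output from a previous full run already exists in params.savefolder):
#   properrun = False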
# check if mod file for synapse model specified in expisyn.mod is loaded
if not hasattr(neuron.h, 'ExpSynI'):
if RANK == 0:
os.system('nrnivmodl')
COMM.Barrier()
neuron.load_mechanisms('.')
##########################################################################
# PARAMETERS
##########################################################################
# Full set of parameters including network parameters
params = multicompartment_params()
##########################################################################
# Function declaration(s)
##########################################################################
def merge_gdf(model_params,
raw_label='spikes_',
file_type='gdf',
fileprefix='spikes',
skiprows=0):
'''
NEST produces one file per virtual process containing recorder output.
This function gathers and combines them into one single file per
network population.
Parameters
----------
model_params : object
network parameters object
    raw_label : str
        label prefix of the raw per-virtual-process recorder output files
    file_type : str
        file extension of the recorder output files
    fileprefix : str
        prefix of the merged per-population output files
    skiprows : int
        number of header lines to skip when reading each file
Returns
-------
None
'''
def get_raw_gids(model_params):
'''
        Reads the text file containing the gids of the neuron populations as
        created within the NEST simulation. These gids are not contiguous,
        because recording devices are created in between the populations.
Parameters
----------
model_params : object
network parameters object
Returns
-------
gids : list
            list of [first_gid, last_gid] pairs, one per network population
'''
gidfile = open(os.path.join(model_params.raw_nest_output_path,
model_params.GID_filename), 'r')
gids = []
for l in gidfile:
a = l.split()
gids.append([int(a[0]), int(a[1])])
return gids
# some preprocessing
raw_gids = get_raw_gids(model_params)
pop_sizes = [raw_gids[i][1] - raw_gids[i][0] + 1
for i in np.arange(model_params.Npops)]
raw_first_gids = [raw_gids[i][0] for i in np.arange(model_params.Npops)]
converted_first_gids = [int(1 + np.sum(pop_sizes[:i]))
for i in np.arange(model_params.Npops)]
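    # Illustrative sketch of the renumbering (numbers are hypothetical, not
    # taken from the model): with pop_sizes = [100, 50] and
    # raw_first_gids = [1, 103] (a recording device occupying gids 101-102),
    # converted_first_gids becomes [1, 101], so a raw gid of 103 is mapped to
    # 103 - 103 + 101 = 101 in the merged, contiguous numbering used below.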
for pop_idx in np.arange(model_params.Npops):
if pop_idx % SIZE == RANK:
files = glob(os.path.join(model_params.raw_nest_output_path,
raw_label + '{}*.{}'.format(pop_idx,
file_type)))
gdf = [] # init
for f in files:
new_gdf = helpers.read_gdf(f, skiprows)
for line in new_gdf:
line[0] = line[0] - raw_first_gids[pop_idx] + \
converted_first_gids[pop_idx]
gdf.append(line)
print(
'writing: {}'.format(
os.path.join(
model_params.spike_output_path,
fileprefix +
'_{}.{}'.format(
model_params.X[pop_idx],
file_type))))
helpers.write_gdf(
gdf,
os.path.join(
model_params.spike_output_path,
fileprefix +
'_{}.{}'.format(
model_params.X[pop_idx],
file_type)))
COMM.Barrier()
return
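# Usage note (file names are illustrative): for fileprefix='spikes' and
# file_type='gdf', the merged output for population index 0 is written to
# <spike_output_path>/spikes_<X[0]>.gdf, where X[0] is that population's label.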
def dict_of_numpyarray_to_dict_of_list(d):
'''
Convert dictionary containing numpy arrays to dictionary containing lists
Parameters
----------
d : dict
sli parameter name and value as dictionary key and value pairs
Returns
-------
d : dict
modified dictionary
'''
for key, value in d.items():
if isinstance(value, dict): # if value == dict
# recurse
d[key] = dict_of_numpyarray_to_dict_of_list(value)
elif isinstance(value, np.ndarray): # or isinstance(value,list) :
d[key] = value.tolist()
return d
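# Example (hypothetical input):
#   dict_of_numpyarray_to_dict_of_list(
#       {'tau_m': np.array([10., 20.]), 'sub': {'w': np.array([1.])}})
# returns {'tau_m': [10.0, 20.0], 'sub': {'w': [1.0]}}.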
def send_nest_params_to_sli(p):
'''
Read parameters and send them to SLI
Parameters
----------
p : dict
sli parameter name and value as dictionary key and value pairs
Returns
-------
None
'''
for name in list(p.keys()):
value = p[name]
if isinstance(value, np.ndarray):
value = value.tolist()
if isinstance(value, dict):
value = dict_of_numpyarray_to_dict_of_list(value)
if name == 'neuron_model': # special case as neuron_model is a
# NEST model and not a string
try:
nest.ll_api.sli_run('/' + name)
nest.ll_api.sli_push(value)
nest.ll_api.sli_run('eval')
nest.ll_api.sli_run('def')
except BaseException:
print('Could not put variable %s on SLI stack' % (name))
print(type(value))
else:
try:
nest.ll_api.sli_run('/' + name)
nest.ll_api.sli_push(value)
nest.ll_api.sli_run('def')
except BaseException:
print('Could not put variable %s on SLI stack' % (name))
print(type(value))
return
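# Sketch of the effect for a plain numeric parameter (names are hypothetical):
# for name='g' and value=4.0, the three calls above amount to executing the SLI
# statement '/g 4.0 def', i.e. the parameter becomes an SLI variable of the
# same name, visible to microcircuit.sli.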
def sli_run(parameters=object(),
fname='microcircuit.sli',
verbosity='M_INFO'):
'''
Takes parameter-class and name of main sli-script as input, initiating the
simulation.
Parameters
----------
parameters : object
parameter class instance
fname : str
path to sli codes to be executed
verbosity : str,
nest verbosity flag
Returns
-------
None
'''
# Load parameters from params file, and pass them to nest
# Python -> SLI
send_nest_params_to_sli(vars(parameters))
# set SLI verbosity
nest.ll_api.sli_run("%s setverbosity" % verbosity)
# Run NEST/SLI simulation
nest.ll_api.sli_run('(%s) run' % fname)
def tar_raw_nest_output(raw_nest_output_path,
delete_files=True,
filepatterns=['voltages*.dat',
'spikes*.dat',
                                      'weighted_input_spikes*.dat',
'*.gdf']):
'''
Create tar file of content in `raw_nest_output_path` and optionally
delete files matching given pattern.
Parameters
----------
raw_nest_output_path: path
params.raw_nest_output_path
delete_files: bool
if True, delete .dat files
filepatterns: list of str
patterns of files being deleted
'''
if RANK == 0:
# create tarfile
fname = raw_nest_output_path + '.tar'
with tarfile.open(fname, 'a') as t:
t.add(raw_nest_output_path)
# remove files from <raw_nest_output_path>
for pattern in filepatterns:
for f in glob(os.path.join(raw_nest_output_path, pattern)):
try:
os.remove(f)
except OSError:
print('Error while deleting {}'.format(f))
# sync
COMM.Barrier()
return
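# Usage note (shell command is illustrative): the archive written above can be
# unpacked again with e.g. `tar -xf <raw_nest_output_path>.tar`, should the raw
# per-process recorder files need to be inspected later.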
###############################################################################
# MAIN simulation procedure
###############################################################################
# tic toc
tic = time()
if properrun:
# set up the file destination
setup_file_dest(params, clearDestination=True)
######## Perform network simulation ######################################
if properrun:
# initiate nest simulation with only the point neuron network parameter
# class
networkParams = point_neuron_network_params()
sli_run(parameters=networkParams,
fname='microcircuit.sli',
verbosity='M_INFO')
# preprocess the gdf files containing spiking output, voltages, weighted and
# spatial input spikes and currents:
merge_gdf(networkParams,
raw_label=networkParams.spike_recorder_label,
file_type='dat',
fileprefix=params.networkSimParams['label'],
skiprows=3)
# create tar file archive of <raw_nest_output_path> folder as .dat files are
# no longer needed; remove the raw files after archiving.
tar_raw_nest_output(params.raw_nest_output_path, delete_files=True)
# Create an object representation of the simulation output that uses sqlite3
networkSim = CachedNetwork(**params.networkSimParams)
toc = time() - tic
print('NEST simulation and gdf file processing done in %.3f seconds' % toc)
# Set up LFPykit measurement probes for LFPs and CSDs
if properrun:
probes = []
probes.append(lfpykit.RecExtElectrode(cell=None, **params.electrodeParams))
probes.append(
lfpykit.LaminarCurrentSourceDensity(
cell=None,
**params.CSDParams))
probes.append(lfpykit.CurrentDipoleMoment(cell=None))
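# Note: the compound signal of each probe is written to a file named after the
# probe class (used by the plotting code below, e.g.
# 'LaminarCurrentSourceDensity_sum.h5' and 'RecExtElectrode_sum.h5').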
####### Set up populations ###############################################
if properrun:
# iterate over each cell type, run single-cell simulations and create
# population object
for i, y in enumerate(params.y):
# create population:
pop = Population(
# parent class parameters
cellParams=params.yCellParams[y],
rand_rot_axis=params.rand_rot_axis[y],
simulationParams=params.simulationParams,
populationParams=params.populationParams[y],
y=y,
layerBoundaries=params.layerBoundaries,
probes=probes,
savelist=params.savelist,
savefolder=params.savefolder,
dt_output=params.dt_output,
POPULATIONSEED=SIMULATIONSEED + i,
# daughter class kwargs
X=params.X,
networkSim=networkSim,
k_yXL=params.k_yXL[y],
synParams=params.synParams[y],
synDelayLoc=params.synDelayLoc[y],
synDelayScale=params.synDelayScale[y],
J_yX=params.J_yX[y],
tau_yX=params.tau_yX[y],
recordSingleContribFrac=params.recordSingleContribFrac,
)
# run population simulation and collect the data
pop.run()
pop.collect_data()
# object no longer needed
del pop
####### Postprocess the simulation output ################################
# reset seed, but output should be deterministic from now on
np.random.seed(SIMULATIONSEED)
if properrun:
# do some postprocessing on the collected data, i.e., superposition
# of population LFPs, CSDs etc
postproc = PostProcess(y=params.y,
dt_output=params.dt_output,
probes=probes,
savefolder=params.savefolder,
mapping_Yy=params.mapping_Yy,
savelist=params.savelist
)
# run through the procedure
postproc.run()
# create tar-archive with output
postproc.create_tar_archive()
# tic toc
print('Execution time: %.3f seconds' % (time() - tic))
##########################################################################
# Create set of plots from simulation output
##########################################################################
########## matplotlib settings ###########################################
plt.close('all')
if RANK == 0:
# create network raster plot
x, y = networkSim.get_xy((500, 1000), fraction=1)
fig, ax = plt.subplots(1, figsize=(5, 8))
fig.subplots_adjust(left=0.2)
networkSim.plot_raster(ax, (500, 1000), x, y, markersize=1, marker='o',
alpha=.5, legend=False, pop_names=True)
remove_axis_junk(ax)
ax.set_xlabel(r'$t$ (ms)', labelpad=0.1)
ax.set_ylabel('population', labelpad=0.1)
ax.set_title('network raster')
fig.savefig(os.path.join(params.figures_path, 'network_raster.pdf'),
dpi=300)
plt.close(fig)
# plot cell locations
fig, ax = plt.subplots(1, 1, figsize=(5, 8))
fig.subplots_adjust(left=0.2)
plot_population(ax, params.populationParams, params.electrodeParams,
params.layerBoundaries,
X=params.y,
markers=['*' if 'b' in y else '^' for y in params.y],
colors=['b' if 'b' in y else 'r' for y in params.y],
layers=['L1', 'L2/3', 'L4', 'L5', 'L6'],
isometricangle=np.pi / 24, aspect='equal')
ax.set_title('layers')
fig.savefig(os.path.join(params.figures_path, 'layers.pdf'), dpi=300)
plt.close(fig)
# plot cell locations
fig, ax = plt.subplots(1, 1, figsize=(5, 8))
fig.subplots_adjust(left=0.2)
plot_population(ax, params.populationParams, params.electrodeParams,
params.layerBoundaries,
X=params.y,
markers=['*' if 'b' in y else '^' for y in params.y],
colors=['b' if 'b' in y else 'r' for y in params.y],
layers=['L1', 'L2/3', 'L4', 'L5', 'L6'],
isometricangle=np.pi / 24, aspect='equal')
plot_soma_locations(ax, X=params.y,
populations_path=params.populations_path,
markers=['*' if 'b' in y else '^' for y in params.y],
colors=['b' if 'b' in y else 'r' for y in params.y],
isometricangle=np.pi / 24, )
ax.set_title('soma positions')
fig.savefig(os.path.join(params.figures_path, 'soma_locations.pdf'),
dpi=150)
plt.close(fig)
# plot morphologies in their respective locations
fig, ax = plt.subplots(1, 1, figsize=(5, 8))
fig.subplots_adjust(left=0.2)
plot_population(ax, params.populationParams, params.electrodeParams,
params.layerBoundaries,
X=params.y,
markers=['*' if 'b' in y else '^' for y in params.y],
colors=['b' if 'b' in y else 'r' for y in params.y],
layers=['L1', 'L2/3', 'L4', 'L5', 'L6'],
isometricangle=np.pi / 24, aspect='equal')
plot_morphologies(ax,
X=params.y,
markers=['*' if 'b' in y else '^' for y in params.y],
colors=['b' if 'b' in y else 'r' for y in params.y],
isometricangle=np.pi / 24,
populations_path=params.populations_path,
cellParams=params.yCellParams,
fraction=0.02)
ax.set_title('LFP generators')
fig.savefig(os.path.join(params.figures_path, 'populations.pdf'), dpi=300)
plt.close(fig)
# plot representative individual cell-model morphologies
fig, ax = plt.subplots(1, 1, figsize=(5, 8))
fig.subplots_adjust(left=0.2)
plot_population(ax, params.populationParams, params.electrodeParams,
params.layerBoundaries,
X=params.y,
markers=['*' if 'b' in y else '^' for y in params.y],
colors=['b' if 'b' in y else 'r' for y in params.y],
layers=['L1', 'L2/3', 'L4', 'L5', 'L6'],
isometricangle=np.pi / 24, aspect='equal')
plot_individual_morphologies(
ax,
X=params.y,
markers=[
'*' if 'b' in y else '^' for y in params.y],
colors=[
'b' if 'b' in y else 'r' for y in params.y],
isometricangle=np.pi / 24,
cellParams=params.yCellParams,
populationParams=params.populationParams)
ax.set_title('morphologies')
fig.savefig(os.path.join(params.figures_path, 'cell_models.pdf'), dpi=300)
plt.close(fig)
# plot compound LFP and CSD traces
fig = plt.figure(figsize=(13, 8))
fig.subplots_adjust(left=0.075, right=0.95, bottom=0.075, top=0.95,
hspace=0.2, wspace=0.2)
gs = gridspec.GridSpec(2, 2)
ax0 = fig.add_subplot(gs[:, 0])
ax1 = fig.add_subplot(gs[0, 1])
ax2 = fig.add_subplot(gs[1, 1])
ax0.set_title('network raster')
ax1.set_title('CSD')
ax2.set_title('LFP')
T = (500, 700)
x, y = networkSim.get_xy(T, fraction=1)
networkSim.plot_raster(ax0, T, x, y, markersize=1, marker='o',
alpha=.5, legend=False, pop_names=True)
remove_axis_junk(ax0)
ax0.set_xlabel(r'$t$ (ms)', labelpad=0.1)
ax0.set_ylabel('population', labelpad=0.1)
plot_signal_sum(ax1, z=params.electrodeParams['z'],
fname=os.path.join(params.savefolder,
'LaminarCurrentSourceDensity_sum.h5'),
unit='nA$\\mu$m$^{-3}$', T=T)
ax1.set_xticklabels([])
ax1.set_xlabel('')
plot_signal_sum(ax2, z=params.electrodeParams['z'],
fname=os.path.join(params.savefolder,
'RecExtElectrode_sum.h5'),
unit='mV', T=T)
ax2.set_xlabel('$t$ (ms)')
fig.savefig(os.path.join(params.figures_path, 'compound_signals.pdf'),
dpi=300)
plt.close(fig)
# plot some stats for current dipole moments of each population,
# temporal traces,
# and EEG predictions on scalp using 4-sphere volume conductor model
from LFPy import FourSphereVolumeConductor
T = [500, 1000]
P_Y_var = np.zeros((len(params.Y) + 1, 3)) # dipole moment variance
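# rows 0..len(Y)-1 hold the per-population variance of each dipole-moment
# component for t >= T[0]; the last row holds the variance of the summed signal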
for i, Y in enumerate(params.Y):
f = h5py.File(
os.path.join(
params.savefolder,
'populations',
'{}_population_CurrentDipoleMoment.h5'.format(Y)),
'r')
srate = f['srate'][()]
P_Y_var[i, :] = f['data'][:, int(T[0] * 1000 / srate):].var(axis=-1)
f_sum = h5py.File(os.path.join(params.savefolder,
'CurrentDipoleMoment_sum.h5'), 'r')
P_Y_var[-1, :] = f_sum['data'][:, int(T[0] * 1000 / srate):].var(axis=-1)
tvec = np.arange(f_sum['data'].shape[-1]) * 1000. / srate
fig = plt.figure(figsize=(5, 8))
fig.subplots_adjust(left=0.2, right=0.95, bottom=0.075, top=0.95,
hspace=0.4, wspace=0.2)
ax = fig.add_subplot(3, 2, 1)
ax.plot(P_Y_var, '-o')
ax.legend(['$P_x$', '$P_y$', '$P_z$'], fontsize=8, frameon=False)
ax.set_xticks(np.arange(len(params.Y) + 1))
ax.set_xticklabels(params.Y + ['SUM'], rotation='vertical')
ax.set_ylabel(r'$\sigma^2 (\mathrm{nA}^2 \mu\mathrm{m}^2)$', labelpad=0)
ax.set_title('signal variance')
# make some EEG predictions
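# four-sphere head model parameters (presumably in the units expected by
# LFPy.FourSphereVolumeConductor): radii of the brain, CSF, skull and scalp
# boundaries in um, conductivities sigmas in S/m, the EEG electrode position r
# on the scalp surface, and the dipole location rz below the cortical surface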
radii = [79000., 80000., 85000., 90000.]
sigmas = [0.3, 1.5, 0.015, 0.3]
r = np.array([[0., 0., 90000.]])
rz = np.array([0., 0., 78000.])
# draw spherical shells
ax = fig.add_subplot(3, 2, 2, aspect='equal')
phi = np.linspace(np.pi / 4, np.pi * 3 / 4, 61)
for R in radii:
x = R * np.cos(phi)
y = R * np.sin(phi)
ax.plot(x, y, lw=0.5)
ax.plot(0, rz[-1], 'k.', clip_on=False)
ax.plot(0, r[0, -1], 'k*', clip_on=False)
ax.axis('off')
ax.legend(['brain', 'CSF', 'skull', 'scalp', r'$\mathbf{P}$', 'EEG'],
fontsize=8, frameon=False)
ax.set_title('4-sphere head model')
sphere_model = FourSphereVolumeConductor(r, radii, sigmas)
# current dipole moment
p = f_sum['data'][:, int(T[0] * 1000 / srate):int(T[1] * 1000 / srate)]
# compute potential
potential = sphere_model.get_dipole_potential(p, rz)
# plot dipole moment
ax = fig.add_subplot(3, 1, 2)
ax.plot(tvec[(tvec >= T[0]) & (tvec < T[1])], p.T)
ax.set_ylabel(r'$\mathbf{P}(t)$ (nA$\mu$m)', labelpad=0)
ax.legend(['$P_x$', '$P_y$', '$P_z$'], fontsize=8, frameon=True)
ax.set_title('current dipole moment sum')
# plot surface potential directly on top
ax = fig.add_subplot(3, 1, 3, sharex=ax)
ax.plot(tvec[(tvec >= T[0]) & (tvec < T[1])],
potential.T * 1000) # mV->uV unit conversion
ax.set_ylabel(r'EEG ($\mu$V)', labelpad=0)
ax.set_xlabel(r'$t$ (ms)', labelpad=0)
ax.set_title('scalp potential')
fig.savefig(
os.path.join(
params.figures_path,
'current_dipole_moments.pdf'),
dpi=300)
plt.close(fig)
# add figures to output .tar archive
with tarfile.open(params.savefolder + '.tar', 'a:') as f:
for pdf in glob(os.path.join(params.figures_path, '*.pdf')):
arcname = os.path.join(os.path.split(
params.savefolder)[-1], 'figures', os.path.split(pdf)[-1])
f.add(name=pdf, arcname=arcname)
|
espenhgn/hybridLFPy
|
examples/example_microcircuit.py
|
Python
|
gpl-3.0
| 23,377
|
[
"NEURON"
] |
1b155f9448edd35868f24dfd687b6c46012dd9f3b9d75fea52a20c577188ea25
|
# -*- coding: utf-8 -*-
""" Tests for student account views. """
import re
from unittest import skipUnless
from urllib import urlencode
import json
import mock
import ddt
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core import mail
from django.contrib import messages
from django.contrib.messages.middleware import MessageMiddleware
from django.test import TestCase
from django.test.utils import override_settings
from django.http import HttpRequest
from course_modes.models import CourseMode
from openedx.core.djangoapps.user_api.accounts.api import activate_account, create_account
from openedx.core.djangoapps.user_api.accounts import EMAIL_MAX_LENGTH
from openedx.core.djangolib.js_utils import dump_js_escaped_json
from student.tests.factories import UserFactory
from student_account.views import account_settings_context
from third_party_auth.tests.testutil import simulate_running_pipeline, ThirdPartyAuthTestMixin
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from openedx.core.djangoapps.theming.test_util import with_edx_domain_context
@ddt.ddt
class StudentAccountUpdateTest(UrlResetMixin, TestCase):
""" Tests for the student account views that update the user's account information. """
USERNAME = u"heisenberg"
ALTERNATE_USERNAME = u"walt"
OLD_PASSWORD = u"ḅḷüëṡḳÿ"
NEW_PASSWORD = u"🄱🄸🄶🄱🄻🅄🄴"
OLD_EMAIL = u"walter@graymattertech.com"
NEW_EMAIL = u"walt@savewalterwhite.com"
INVALID_ATTEMPTS = 100
INVALID_EMAILS = [
None,
u"",
u"a",
"no_domain",
"no+domain",
"@",
"@domain.com",
"test@no_extension",
# Long email -- subtract the length of the @domain
# except for one character (so we exceed the max length limit)
u"{user}@example.com".format(
user=(u'e' * (EMAIL_MAX_LENGTH - 11))
)
]
INVALID_KEY = u"123abc"
def setUp(self):
super(StudentAccountUpdateTest, self).setUp("student_account.urls")
# Create/activate a new account
activation_key = create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL)
activate_account(activation_key)
# Login
result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
self.assertTrue(result)
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
def test_password_change(self):
# Request a password change while logged in, simulating
# use of the password reset link from the account page
response = self._change_password()
self.assertEqual(response.status_code, 200)
# Check that an email was sent
self.assertEqual(len(mail.outbox), 1)
# Retrieve the activation link from the email body
email_body = mail.outbox[0].body
result = re.search('(?P<url>https?://[^\s]+)', email_body)
self.assertIsNot(result, None)
activation_link = result.group('url')
# Visit the activation link
response = self.client.get(activation_link)
self.assertEqual(response.status_code, 200)
# Submit a new password and follow the redirect to the success page
response = self.client.post(
activation_link,
# These keys are from the form on the current password reset confirmation page.
{'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD},
follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Your password has been set.")
# Log the user out to clear session data
self.client.logout()
# Verify that the new password can be used to log in
result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
self.assertTrue(result)
# Try reusing the activation link to change the password again
response = self.client.post(
activation_link,
{'new_password1': self.OLD_PASSWORD, 'new_password2': self.OLD_PASSWORD},
follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "The password reset link was invalid, possibly because the link has already been used.")
self.client.logout()
# Verify that the old password cannot be used to log in
result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
self.assertFalse(result)
# Verify that the new password continues to be valid
result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
self.assertTrue(result)
@ddt.data(True, False)
def test_password_change_logged_out(self, send_email):
# Log the user out
self.client.logout()
# Request a password change while logged out, simulating
# use of the password reset link from the login page
if send_email:
response = self._change_password(email=self.OLD_EMAIL)
self.assertEqual(response.status_code, 200)
else:
# Don't send an email in the POST data, simulating
# its (potentially accidental) omission in the POST
# data sent from the login page
response = self._change_password()
self.assertEqual(response.status_code, 400)
def test_password_change_inactive_user(self):
# Log out the user created during test setup
self.client.logout()
# Create a second user, but do not activate it
create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
# Send the view the email address tied to the inactive user
response = self._change_password(email=self.NEW_EMAIL)
# Expect that the activation email is still sent,
# since the user may have lost the original activation email.
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
def test_password_change_no_user(self):
# Log out the user created during test setup
self.client.logout()
# Send the view an email address not tied to any user
response = self._change_password(email=self.NEW_EMAIL)
self.assertEqual(response.status_code, 400)
def test_password_change_rate_limited(self):
# Log out the user created during test setup, to prevent the view from
# selecting the logged-in user's email address over the email provided
# in the POST data
self.client.logout()
# Make many consecutive bad requests in an attempt to trigger the rate limiter
for attempt in xrange(self.INVALID_ATTEMPTS):
self._change_password(email=self.NEW_EMAIL)
response = self._change_password(email=self.NEW_EMAIL)
self.assertEqual(response.status_code, 403)
@ddt.data(
('post', 'password_change_request', []),
)
@ddt.unpack
def test_require_http_method(self, correct_method, url_name, args):
wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method}
url = reverse(url_name, args=args)
for method in wrong_methods:
response = getattr(self.client, method)(url)
self.assertEqual(response.status_code, 405)
def _change_password(self, email=None):
"""Request to change the user's password. """
data = {}
if email:
data['email'] = email
return self.client.post(path=reverse('password_change_request'), data=data)
@ddt.ddt
class StudentAccountLoginAndRegistrationTest(ThirdPartyAuthTestMixin, UrlResetMixin, ModuleStoreTestCase):
""" Tests for the student account views that update the user's account information. """
USERNAME = "bob"
EMAIL = "bob@example.com"
PASSWORD = "password"
@mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
super(StudentAccountLoginAndRegistrationTest, self).setUp('embargo')
# For these tests, two third party auth providers are enabled by default:
self.configure_google_provider(enabled=True)
self.configure_facebook_provider(enabled=True)
@ddt.data(
("signin_user", "login"),
("register_user", "register"),
)
@ddt.unpack
def test_login_and_registration_form(self, url_name, initial_mode):
response = self.client.get(reverse(url_name))
expected_data = '"initial_mode": "{mode}"'.format(mode=initial_mode)
self.assertContains(response, expected_data)
@ddt.data("signin_user", "register_user")
def test_login_and_registration_form_already_authenticated(self, url_name):
# Create/activate a new account and log in
activation_key = create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
activate_account(activation_key)
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result)
# Verify that we're redirected to the dashboard
response = self.client.get(reverse(url_name))
self.assertRedirects(response, reverse("dashboard"))
@ddt.data(
(False, "signin_user"),
(False, "register_user"),
(True, "signin_user"),
(True, "register_user"),
)
@ddt.unpack
def test_login_and_registration_form_signin_preserves_params(self, is_edx_domain, url_name):
params = [
('course_id', 'edX/DemoX/Demo_Course'),
('enrollment_action', 'enroll'),
]
# The response should have a "Sign In" button with the URL
# that preserves the querystring params
with with_edx_domain_context(is_edx_domain):
response = self.client.get(reverse(url_name), params)
expected_url = '/login?{}'.format(self._finish_auth_url_param(params + [('next', '/dashboard')]))
self.assertContains(response, expected_url)
# Add additional parameters:
params = [
('course_id', 'edX/DemoX/Demo_Course'),
('enrollment_action', 'enroll'),
('course_mode', CourseMode.DEFAULT_MODE_SLUG),
('email_opt_in', 'true'),
('next', '/custom/final/destination')
]
# Verify that this parameter is also preserved
with with_edx_domain_context(is_edx_domain):
response = self.client.get(reverse(url_name), params)
expected_url = '/login?{}'.format(self._finish_auth_url_param(params))
self.assertContains(response, expected_url)
@mock.patch.dict(settings.FEATURES, {"ENABLE_THIRD_PARTY_AUTH": False})
@ddt.data("signin_user", "register_user")
def test_third_party_auth_disabled(self, url_name):
response = self.client.get(reverse(url_name))
self._assert_third_party_auth_data(response, None, None, [])
@ddt.data(
("signin_user", None, None),
("register_user", None, None),
("signin_user", "google-oauth2", "Google"),
("register_user", "google-oauth2", "Google"),
("signin_user", "facebook", "Facebook"),
("register_user", "facebook", "Facebook"),
)
@ddt.unpack
def test_third_party_auth(self, url_name, current_backend, current_provider):
params = [
('course_id', 'course-v1:Org+Course+Run'),
('enrollment_action', 'enroll'),
('course_mode', CourseMode.DEFAULT_MODE_SLUG),
('email_opt_in', 'true'),
('next', '/custom/final/destination'),
]
# Simulate a running pipeline
if current_backend is not None:
pipeline_target = "student_account.views.third_party_auth.pipeline"
with simulate_running_pipeline(pipeline_target, current_backend):
response = self.client.get(reverse(url_name), params)
# Do NOT simulate a running pipeline
else:
response = self.client.get(reverse(url_name), params)
# This relies on the THIRD_PARTY_AUTH configuration in the test settings
expected_providers = [
{
"id": "oa2-facebook",
"name": "Facebook",
"iconClass": "fa-facebook",
"loginUrl": self._third_party_login_url("facebook", "login", params),
"registerUrl": self._third_party_login_url("facebook", "register", params)
},
{
"id": "oa2-google-oauth2",
"name": "Google",
"iconClass": "fa-google-plus",
"loginUrl": self._third_party_login_url("google-oauth2", "login", params),
"registerUrl": self._third_party_login_url("google-oauth2", "register", params)
}
]
self._assert_third_party_auth_data(response, current_backend, current_provider, expected_providers)
def test_hinted_login(self):
params = [("next", "/courses/something/?tpa_hint=oa2-google-oauth2")]
response = self.client.get(reverse('signin_user'), params)
self.assertContains(response, '"third_party_auth_hint": "oa2-google-oauth2"')
@override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
def test_microsite_uses_old_login_page(self):
# Retrieve the login page from a microsite domain
# and verify that we're served the old page.
resp = self.client.get(
reverse("signin_user"),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertContains(resp, "Log into your Test Microsite Account")
self.assertContains(resp, "login-form")
def test_microsite_uses_old_register_page(self):
# Retrieve the register page from a microsite domain
# and verify that we're served the old page.
resp = self.client.get(
reverse("register_user"),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertContains(resp, "Register for Test Microsite")
self.assertContains(resp, "register-form")
def test_login_registration_xframe_protected(self):
resp = self.client.get(
reverse("register_user"),
{},
HTTP_REFERER="http://localhost/iframe"
)
self.assertEqual(resp['X-Frame-Options'], 'DENY')
self.configure_lti_provider(name='Test', lti_hostname='localhost', lti_consumer_key='test_key', enabled=True)
resp = self.client.get(
reverse("register_user"),
HTTP_REFERER="http://localhost/iframe"
)
self.assertEqual(resp['X-Frame-Options'], 'ALLOW')
def _assert_third_party_auth_data(self, response, current_backend, current_provider, providers):
"""Verify that third party auth info is rendered correctly in a DOM data attribute. """
finish_auth_url = None
if current_backend:
finish_auth_url = reverse("social:complete", kwargs={"backend": current_backend}) + "?"
auth_info = {
"currentProvider": current_provider,
"providers": providers,
"secondaryProviders": [],
"finishAuthUrl": finish_auth_url,
"errorMessage": None,
}
auth_info = dump_js_escaped_json(auth_info)
expected_data = '"third_party_auth": {auth_info}'.format(
auth_info=auth_info
)
self.assertContains(response, expected_data)
def _third_party_login_url(self, backend_name, auth_entry, login_params):
"""Construct the login URL to start third party authentication. """
return u"{url}?auth_entry={auth_entry}&{param_str}".format(
url=reverse("social:begin", kwargs={"backend": backend_name}),
auth_entry=auth_entry,
param_str=self._finish_auth_url_param(login_params),
)
def _finish_auth_url_param(self, params):
"""
Make the next=... URL parameter that indicates where the user should go next.
>>> _finish_auth_url_param([('next', '/dashboard')])
'/account/finish_auth?next=%2Fdashboard'
"""
return urlencode({
'next': '/account/finish_auth?{}'.format(urlencode(params))
})
class AccountSettingsViewTest(ThirdPartyAuthTestMixin, TestCase):
""" Tests for the account settings view. """
USERNAME = 'student'
PASSWORD = 'password'
FIELDS = [
'country',
'gender',
'language',
'level_of_education',
'password',
'year_of_birth',
'preferred_language',
]
@mock.patch("django.conf.settings.MESSAGE_STORAGE", 'django.contrib.messages.storage.cookie.CookieStorage')
def setUp(self):
super(AccountSettingsViewTest, self).setUp()
self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.request = HttpRequest()
self.request.user = self.user
# For these tests, two third party auth providers are enabled by default:
self.configure_google_provider(enabled=True)
self.configure_facebook_provider(enabled=True)
# Python-social saves auth failure notifications in Django messages.
# See pipeline.get_duplicate_provider() for details.
self.request.COOKIES = {}
MessageMiddleware().process_request(self.request)
messages.error(self.request, 'Facebook is already in use.', extra_tags='Auth facebook')
def test_context(self):
context = account_settings_context(self.request)
user_accounts_api_url = reverse("accounts_api", kwargs={'username': self.user.username})
self.assertEqual(context['user_accounts_api_url'], user_accounts_api_url)
user_preferences_api_url = reverse('preferences_api', kwargs={'username': self.user.username})
self.assertEqual(context['user_preferences_api_url'], user_preferences_api_url)
for attribute in self.FIELDS:
self.assertIn(attribute, context['fields'])
self.assertEqual(
context['user_accounts_api_url'], reverse("accounts_api", kwargs={'username': self.user.username})
)
self.assertEqual(
context['user_preferences_api_url'], reverse('preferences_api', kwargs={'username': self.user.username})
)
self.assertEqual(context['duplicate_provider'], 'facebook')
self.assertEqual(context['auth']['providers'][0]['name'], 'Facebook')
self.assertEqual(context['auth']['providers'][1]['name'], 'Google')
def test_view(self):
view_path = reverse('account_settings')
response = self.client.get(path=view_path)
for attribute in self.FIELDS:
self.assertIn(attribute, response.content)
@override_settings(SITE_NAME=settings.MICROSITE_LOGISTRATION_HOSTNAME)
class MicrositeLogistrationTests(TestCase):
"""
Test to validate that microsites can display the logistration page
"""
def test_login_page(self):
"""
Make sure that we get the expected logistration page on our specialized
microsite
"""
resp = self.client.get(
reverse('signin_user'),
HTTP_HOST=settings.MICROSITE_LOGISTRATION_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertIn('<div id="login-and-registration-container"', resp.content)
def test_registration_page(self):
"""
Make sure that we get the expected logistration page on our specialized
microsite
"""
resp = self.client.get(
reverse('register_user'),
HTTP_HOST=settings.MICROSITE_LOGISTRATION_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertIn('<div id="login-and-registration-container"', resp.content)
@override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
def test_no_override(self):
"""
Make sure we get the old style login/registration if we don't override
"""
resp = self.client.get(
reverse('signin_user'),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('<div id="login-and-registration-container"', resp.content)
resp = self.client.get(
reverse('register_user'),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('<div id="login-and-registration-container"', resp.content)
|
analyseuc3m/ANALYSE-v1
|
lms/djangoapps/student_account/test/test_views.py
|
Python
|
agpl-3.0
| 20,778
|
[
"VisIt"
] |
6b9d05fb31ed3f67fc5e4d3d16810f0ff9e94a3a4258180533d098e91616010b
|
# utilities for data file managements for the tests
"""\
gromacs.tests.datafiles
=======================
In the test code, access a data file "fixtures.dat" in the ``data`` directory with::
from gromacs.tests.datafiles import datafile
def test_something():
filepath = datafile("fixtures.dat")
contents = open(filepath).read()
Basically, whenever you need the path to the file, wrap the filename in ``datafile()``.
"""
import os.path
from pkg_resources import resource_filename
def datafile(name):
return resource_filename(__name__, os.path.join("data", name))
|
Becksteinlab/GromacsWrapper
|
tests/datafiles.py
|
Python
|
gpl-3.0
| 585
|
[
"Gromacs"
] |
8e5540da87b3b4e47173c0b8e09a0b87b533ee6a2075d93f3426c0a171a1eb26
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import six
from owmeta_core.dataobject import DatatypeProperty, ObjectProperty
from .biology import BiologyType
from .cell import Cell
__all__ = ['Connection']
class SynapseType:
Chemical = 'send'
GapJunction = 'gapJunction'
class Termination:
Neuron = 'neuron'
Muscle = 'muscle'
class Connection(BiologyType):
class_context = BiologyType.class_context
post_cell = ObjectProperty(value_type=Cell)
''' The post-synaptic cell '''
pre_cell = ObjectProperty(value_type=Cell)
''' The pre-synaptic cell '''
number = DatatypeProperty()
''' The weight of the connection '''
synclass = DatatypeProperty()
''' The kind of Neurotransmitter (if any) sent between `pre_cell` and `post_cell` '''
syntype = DatatypeProperty()
''' The kind of synaptic connection. 'gapJunction' indicates a gap junction and 'send' a chemical synapse '''
termination = DatatypeProperty()
''' Where the connection terminates. Inferred from type of post_cell at initialization '''
key_properties = (pre_cell, post_cell, syntype)
# Arguments are given explicitly here to support positional arguments
def __init__(self,
pre_cell=None,
post_cell=None,
number=None,
syntype=None,
synclass=None,
termination=None,
**kwargs):
super(Connection, self).__init__(pre_cell=pre_cell,
post_cell=post_cell,
number=number,
syntype=syntype,
synclass=synclass,
**kwargs)
if isinstance(termination, six.string_types):
termination = termination.lower()
if termination in ('neuron', Termination.Neuron):
self.termination(Termination.Neuron)
elif termination in ('muscle', Termination.Muscle):
self.termination(Termination.Muscle)
if isinstance(syntype, six.string_types):
syntype = syntype.lower()
if syntype in ('send', SynapseType.Chemical):
self.syntype(SynapseType.Chemical)
elif syntype in ('gapjunction', SynapseType.GapJunction):
self.syntype(SynapseType.GapJunction)
def __str__(self):
nom = []
props = ('pre_cell', 'post_cell', 'syntype', 'termination', 'number', 'synclass',)
for p in props:
if getattr(self, p).has_defined_value():
nom.append((p, getattr(self, p).defined_values[0]))
if len(nom) == 0:
return super(Connection, self).__str__()
else:
return 'Connection(' + \
', '.join('{}={}'.format(n[0], n[1]) for n in nom) + \
')'
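# Illustrative usage sketch (not part of the original module); `pre` and `post`
# stand for hypothetical Cell instances:
#   conn = Connection(pre_cell=pre, post_cell=post, number=3, syntype='send')
# __init__ normalizes 'send' to SynapseType.Chemical and 'gapjunction' to
# SynapseType.GapJunction.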
|
openworm/PyOpenWorm
|
owmeta/connection.py
|
Python
|
mit
| 3,024
|
[
"NEURON"
] |
617f762343813c6939198e0244c80ab753671585ab9ce9cca55b560177b4018e
|
#! /usr/bin/env python
import sys
from matplotlib.pyplot import *
from numpy import *
def Usage():
print '='*80
print 'Usage: ./%s [photo_file] [kernel_size]' % sys.argv[0]
print 'Eg: ./%s myphoto.png 3' % sys.argv[0]
print '='*80
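# fspecial mirrors MATLAB's fspecial('gaussian', ...): it returns a
# kernel_size x kernel_size Gaussian kernel normalized so its entries sum to 1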
def fspecial(func_name,kernel_size=3,sigma=1):
if func_name=='gaussian':
m=n=(kernel_size-1.)/2.
y,x=ogrid[-m:m+1,-n:n+1]
h=exp( -(x*x + y*y) / (2.*sigma*sigma) )
h[ h < finfo(h.dtype).eps*h.max() ] = 0
sumh=h.sum()
if sumh!=0:
h/=sumh
return h
def RGB(rgb_mat,g_filter,flag=255):
def foo(A,B):
t=sum(A*B)
if t>flag: return flag
return t
return [foo(rgb_mat[:,:,i],g_filter) for i in range(3)]
# Return a Nx3 matrix of pixels
def loadImageData(imagefile):
# If you don't have matplotlib but have PIL,
# you can use this to load image data.
from PIL import Image
im=Image.open(imagefile)
m,n=im.size
data=im.getdata()
imgMat=zeros((m*n,3))
for i in xrange(m*n):
imgMat[i]=data[i]
return imgMat
def GaussianFilter(image_file,k=3):
# Read image data
im=imread(image_file)
m,n,a=im.shape
g_im=im.copy()
print 'Load Image Data Successful!'
# Initial
if im.max()>1:
flag=255
else:
flag=1
sigma=1
w=k/2
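# w is the kernel half-width; pixels within w of the image border are left
# unfiltered by the loops below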
g_filter=fspecial('gaussian',k,sigma)
print 'Gaussian kernel is set up.'
print 'The Gaussian Filter is processing...'
for i in xrange(w,m-w):
for j in xrange(w,n-w):
t=RGB(im[i-w:i+w+1,j-w:j+w+1],g_filter,flag)
g_im[i,j]=t
print 'Finished.'
print 'Show the photo.'
subplot(121)
title('Original')
imshow(im)
subplot(122)
title('Filtered')
imshow(g_im)
show()
if __name__=='__main__':
argc=len(sys.argv)
if argc<3:
Usage()
else:
image_file=sys.argv[1]
# Kernel size
k=int(sys.argv[2])
GaussianFilter(image_file,k)
|
Urinx/SomeCodes
|
Python/GaussianFilter/gaussian_filter.py
|
Python
|
gpl-2.0
| 1,746
|
[
"Gaussian"
] |
5c7d15ae4af7c8a2df2e747cb965172ac2c7ee84b53e72408c4310786204bb9b
|
"""
neurotools.analysis
===================
A collection of analysis functions that may be used by
neurotools.signals or other packages.
.. currentmodule:: neurotools.analysis
Classes
-------
.. autosummary::
TuningCurve
Functions
---------
.. autosummary::
:nosignatures:
ccf
crosscorrelate
make_kernel
simple_frequency_spectrum
"""
import numpy as np
from neurotools import check_dependency
HAVE_MATPLOTLIB = check_dependency('matplotlib')
if HAVE_MATPLOTLIB:
import matplotlib
matplotlib.use('Agg')
HAVE_PYLAB = check_dependency('pylab')
if HAVE_PYLAB:
import pylab
else:
PYLAB_ERROR = "The pylab package was not detected"
if not HAVE_MATPLOTLIB:
MATPLOTLIB_ERROR = "The matplotlib package was not detected"
def ccf(x, y, axis=None):
"""Fast cross correlation function based on fft.
Computes the cross-correlation function of two series.
Note that the computations are performed on anomalies (deviations from
average).
Returns the values of the cross-correlation at different lags.
Parameters
----------
x, y : 1D MaskedArrays
The two input arrays.
axis : integer, optional
Axis along which to compute (0 for rows, 1 for cols).
If `None`, the array is flattened first.
Examples
--------
>>> z = arange(5)
>>> ccf(z,z)
array([ 3.90798505e-16, -4.00000000e-01, -4.00000000e-01,
-1.00000000e-01, 4.00000000e-01, 1.00000000e+00,
4.00000000e-01, -1.00000000e-01, -4.00000000e-01,
-4.00000000e-01])
"""
assert x.ndim == y.ndim, "Inconsistent shape !"
# assert(x.shape == y.shape, "Inconsistent shape !")
if axis is None:
if x.ndim > 1:
x = x.ravel()
y = y.ravel()
npad = x.size + y.size
xanom = (x - x.mean(axis=None))
yanom = (y - y.mean(axis=None))
Fx = np.fft.fft(xanom, npad, )
Fy = np.fft.fft(yanom, npad, )
iFxy = np.fft.ifft(Fx.conj() * Fy).real
varxy = np.sqrt(np.inner(xanom, xanom) * np.inner(yanom, yanom))
else:
npad = x.shape[axis] + y.shape[axis]
if axis == 1:
if x.shape[0] != y.shape[0]:
raise ValueError("Arrays should have the same length!")
xanom = (x - x.mean(axis=1)[:, None])
yanom = (y - y.mean(axis=1)[:, None])
varxy = np.sqrt((xanom * xanom).sum(1) *
(yanom * yanom).sum(1))[:, None]
else:
if x.shape[1] != y.shape[1]:
raise ValueError("Arrays should have the same width!")
xanom = (x - x.mean(axis=0))
yanom = (y - y.mean(axis=0))
varxy = np.sqrt((xanom * xanom).sum(0) * (yanom * yanom).sum(0))
Fx = np.fft.fft(xanom, npad, axis=axis)
Fy = np.fft.fft(yanom, npad, axis=axis)
iFxy = np.fft.ifft(Fx.conj() * Fy, n=npad, axis=axis).real
# We just turn the lags into correct positions:
iFxy = np.concatenate((iFxy[len(iFxy) / 2:len(iFxy)],
iFxy[0:len(iFxy) / 2]))
return iFxy / varxy
from neurotools.plotting import get_display, set_labels
HAVE_PYLAB = check_dependency('pylab')
def crosscorrelate(sua1, sua2, lag=None, n_pred=1, predictor=None,
display=False, kwargs={}):
"""Cross-correlation between two series of discrete events (e.g. spikes).
Calculates the cross-correlation between
two vectors containing event times.
Returns ``(differences, pred, norm)``. See below for details.
Adapted from original script written by Martin P. Nawrot for the
FIND MATLAB toolbox [1]_.
Parameters
----------
sua1, sua2 : 1D row or column `ndarray` or `SpikeTrain`
Event times. If sua2 == sua1, the result is the autocorrelogram.
lag : float
Lag for which relative event timing is considered
with a max difference of +/- lag. A default lag is computed
from the inter-event interval of the longer of the two sua
arrays.
n_pred : int
Number of surrogate compilations for the predictor. This
influences the total length of the predictor output array
predictor : {None, 'shuffle'}
Determines the type of bootstrap predictor to be used.
'shuffle' shuffles interevent intervals of the longer input array
and calculates relative differences with the shorter input array.
`n_pred` determines the number of repeated shufflings, resulting
differences are pooled from all repeated shufflings.
display : boolean
If True the corresponding plots will be displayed. If False,
differences, pred and norm will be returned.
kwargs : dict
Arguments to be passed to np.histogram.
Returns
-------
differences : np array
Accumulated differences of events in `sua1` minus the events in
`sua2`. Thus positive values relate to events of `sua2` that
lead events of `sua1`. Units are the same as the input arrays.
pred : np array
Accumulated differences based on the prediction method.
The length of `pred` is ``n_pred * length(differences)``. Units are
the same as the input arrays.
norm : float
Normalization factor used to scale the bin heights in `differences` and
`pred`. ``differences/norm`` and ``pred/norm`` correspond to the linear
correlation coefficient.
Examples
--------
>> crosscorrelate(np_array1, np_array2)
>> crosscorrelate(spike_train1, spike_train2)
>> crosscorrelate(spike_train1, spike_train2, lag = 150.0)
>> crosscorrelate(spike_train1, spike_train2, display=True,
kwargs={'bins':100})
See also
--------
ccf
.. [1] Meier R, Egert U, Aertsen A, Nawrot MP, "FIND - a unified framework
for neural data analysis"; Neural Netw. 2008 Oct; 21(8):1085-93.
"""
assert predictor == 'shuffle' or predictor is None, "predictor must be \
either None or 'shuffle'. Other predictors are not yet implemented."
#Check whether sua1 and sua2 are SpikeTrains or arrays
sua = []
for x in (sua1, sua2):
#if isinstance(x, SpikeTrain):
if hasattr(x, 'spike_times'):
sua.append(x.spike_times)
elif x.ndim == 1:
sua.append(x)
elif x.ndim == 2 and (x.shape[0] == 1 or x.shape[1] == 1):
sua.append(x.ravel())
else:
raise TypeError("sua1 and sua2 must be either instances of the" \
"SpikeTrain class or column/row vectors")
sua1 = sua[0]
sua2 = sua[1]
if sua1.size < sua2.size:
if lag is None:
lag = np.ceil(10*np.mean(np.diff(sua1)))
reverse = False
else:
if lag is None:
lag = np.ceil(20*np.mean(np.diff(sua2)))
sua1, sua2 = sua2, sua1
reverse = True
#construct predictor
if predictor == 'shuffle':
isi = np.diff(sua2)
sua2_ = np.array([])
for ni in xrange(1,n_pred+1):
idx = np.random.permutation(isi.size-1)
sua2_ = np.append(sua2_, np.add(np.insert(
(np.cumsum(isi[idx])), 0, 0), sua2.min() + (
np.random.exponential(isi.mean()))))
#calculate cross differences in spike times
differences = np.array([])
pred = np.array([])
for k in xrange(0, sua1.size):
differences = np.append(differences, sua1[k] - sua2[np.nonzero(
(sua2 > sua1[k] - lag) & (sua2 < sua1[k] + lag))])
if predictor == 'shuffle':
for k in xrange(0, sua1.size):
pred = np.append(pred, sua1[k] - sua2_[np.nonzero(
(sua2_ > sua1[k] - lag) & (sua2_ < sua1[k] + lag))])
if reverse is True:
differences = -differences
pred = -pred
norm = np.sqrt(sua1.size * sua2.size)
# Plot the results if display=True
if display:
subplot = get_display(display)
if not subplot or not HAVE_PYLAB:
return differences, pred, norm
else:
# Plot the cross-correlation
try:
counts, bin_edges = np.histogram(differences, **kwargs)
edge_distances = np.diff(bin_edges)
bin_centers = bin_edges[1:] - edge_distances/2
counts = counts / norm
xlabel = "Time"
ylabel = "Cross-correlation coefficient"
#NOTE: the x axis corresponds to the upper edge of each bin
subplot.plot(bin_centers, counts, label='cross-correlation', color='b')
if predictor is None:
set_labels(subplot, xlabel, ylabel)
pylab.draw()
elif predictor == 'shuffle':
# Plot the predictor
norm_ = norm * n_pred
counts_, bin_edges_ = np.histogram(pred, **kwargs)
counts_ = counts_ / norm_
subplot.plot(bin_edges_[1:], counts_, label='predictor')
subplot.legend()
pylab.draw()
except ValueError:
print "There are no correlated events within the selected lag"\
" window of %s" % lag
else:
return differences, pred, norm
def _dict_max(D):
"""For a dict containing numerical values, return the key for the
highest value. If there is more than one item with the same highest
value, return one of them (arbitrary - depends on the order produced
by the iterator).
"""
max_val = max(D.values())
for k in D:
if D[k] == max_val:
return k
def make_kernel(form, sigma, time_stamp_resolution, direction=1):
"""Creates kernel functions for convolution.
Constructs a numeric linear convolution kernel of basic shape to be used
for data smoothing (linear low pass filtering) and firing rate estimation
from single trial or trial-averaged spike trains.
Exponential and alpha kernels may also be used to represent postsynaptic
currents / potentials in a linear (current-based) model.
Adapted from original script written by Martin P. Nawrot for the
FIND MATLAB toolbox [1]_ [2]_.
Parameters
----------
form : {'BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'}
Kernel form. Currently implemented forms are BOX (boxcar),
TRI (triangle), GAU (gaussian), EPA (epanechnikov), EXP (exponential),
ALP (alpha function). EXP and ALP are asymmetric kernel forms and
assume optional parameter `direction`.
sigma : float
Standard deviation of the distribution associated with kernel shape.
This parameter defines the time resolution of the kernel estimate
and makes different kernels comparable (cf. [1] for symmetric kernels).
This is used here as an alternative definition to the cut-off
frequency of the associated linear filter.
time_stamp_resolution : float
Temporal resolution of input and output in ms.
direction : {-1, 1}
Asymmetric kernels have two possible directions.
The values are -1 or 1, default is 1. The
definition here is that for direction = 1 the
kernel represents the impulse response function
of the linear filter. Default value is 1.
Returns
-------
kernel : array_like
Array of kernel. The length of this array is always an odd
number to represent symmetric kernels such that the center bin
coincides with the median of the numeric array, i.e for a
triangle, the maximum will be at the center bin with equal
number of bins to the right and to the left.
norm : float
For rate estimates. The kernel vector is normalized such that
the sum of all entries equals unity sum(kernel)=1. When
estimating rate functions from discrete spike data (0/1) the
additional parameter `norm` allows for the normalization to
rate in spikes per second.
For example:
``rate = norm * scipy.signal.lfilter(kernel, 1, spike_data)``
m_idx : int
Index of the numerically determined median (center of gravity)
of the kernel function.
Examples
--------
To obtain single trial rate function of trial one should use::
r = norm * scipy.signal.fftconvolve(sua, kernel)
To obtain trial-averaged spike train one should use::
r_avg = norm * scipy.signal.fftconvolve(sua, np.mean(X,1))
where `X` is an array of shape `(l,n)`, `n` is the number of trials and
`l` is the length of each trial.
See also
--------
SpikeTrain.instantaneous_rate
SpikeList.averaged_instantaneous_rate
.. [1] Meier R, Egert U, Aertsen A, Nawrot MP, "FIND - a unified framework
for neural data analysis"; Neural Netw. 2008 Oct; 21(8):1085-93.
.. [2] Nawrot M, Aertsen A, Rotter S, "Single-trial estimation of neuronal
firing rates - from single neuron spike trains to population activity";
J. Neurosci Meth 94: 81-92; 1999.
"""
assert form.upper() in ('BOX','TRI','GAU','EPA','EXP','ALP'), "form must \
be one of either 'BOX','TRI','GAU','EPA','EXP' or 'ALP'!"
assert direction in (1,-1), "direction must be either 1 or -1"
sigma = sigma / 1000. #convert to SI units
time_stamp_resolution = time_stamp_resolution / 1000. #convert to SI units
norm = 1./time_stamp_resolution
if form.upper() == 'BOX':
w = 2.0 * sigma * np.sqrt(3)
width = 2 * np.floor(w / 2.0 / time_stamp_resolution) + 1 # always odd number of bins
height = 1. / width
kernel = np.ones((1, width)) * height # area = 1
elif form.upper() == 'TRI':
w = 2 * sigma * np.sqrt(6)
halfwidth = np.floor(w / 2.0 / time_stamp_resolution)
trileft = np.arange(1, halfwidth + 2)
triright = np.arange(halfwidth, 0, -1) # odd number of bins
triangle = np.append(trileft, triright)
kernel = triangle / triangle.sum() # area = 1
elif form.upper() == 'EPA':
w = 2.0 * sigma * np.sqrt(5)
halfwidth = np.floor(w / 2.0 / time_stamp_resolution)
base = np.arange(-halfwidth, halfwidth + 1)
parabula = base**2
epanech = parabula.max() - parabula # inverse parabula
kernel = epanech / epanech.sum() # area = 1
elif form.upper() == 'GAU':
SI_sigma = sigma / 1000.0
w = 2.0 * sigma * 2.7 # > 99% of distribution weight
halfwidth = np.floor(w / 2.0 / time_stamp_resolution) # always odd
base = np.arange(-halfwidth, halfwidth + 1) / 1000.0 * (
time_stamp_resolution)
g = np.exp(-(base**2) / 2.0 / SI_sigma**2) / SI_sigma / np.sqrt(
2.0 * np.pi)
kernel = g / g.sum()
elif form.upper() == 'ALP':
SI_sigma = sigma / 1000.0
w = 5.0 * sigma
alpha = np.arange(1, (2.0 * np.floor(
(w / time_stamp_resolution / 2.0)) + 1) + 1) / 1000.0 * \
time_stamp_resolution
alpha = (2.0 / SI_sigma**2) * alpha * np.exp(-alpha * np.sqrt(2) \
/ SI_sigma)
kernel = alpha / alpha.sum() # normalization
if direction == -1:
kernel = np.flipud(kernel)
elif form.upper() == 'EXP':
SI_sigma = sigma / 1000.0
w = 5.0 * sigma
expo = np.arange(1, (2.0 * np.floor(w / time_stamp_resolution / (
2.0)) + 1) + 1) / 1000.0 * time_stamp_resolution
expo = np.exp(-expo / SI_sigma)
kernel = expo / expo.sum()
if direction == -1:
kernel = np.flipud(kernel)
kernel = kernel.ravel()
m_idx = np.nonzero(kernel.cumsum() >= 0.5)[0].min()
return kernel, norm, m_idx
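# Illustrative sketch (not part of the original module): estimating a firing
# rate from a hypothetical binary spike vector `spike_data` sampled at 1 ms
# resolution with a 20 ms Gaussian kernel, following the docstring above:
#   kernel, norm, m_idx = make_kernel('GAU', sigma=20.0, time_stamp_resolution=1.0)
#   rate = norm * scipy.signal.fftconvolve(spike_data, kernel)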
def simple_frequency_spectrum(x):
"""Simple frequency spectrum.
Very simple calculation of frequency spectrum with no detrending,
windowing, etc, just the first half (positive frequency components) of
abs(fft(x))
Parameters
----------
x : array_like
The input array, in the time-domain.
Returns
-------
spec : array_like
The frequency spectrum of `x`.
"""
spec = np.absolute(np.fft.fft(x))
spec = spec[:len(x) / 2] # take positive frequency components
spec /= len(x) # normalize
spec *= 2.0 # to get amplitudes of sine components, need to multiply by 2
spec[0] /= 2.0 # except for the dc component
return spec
class TuningCurve(object):
"""Class to facilitate working with tuning curves."""
def __init__(self, D=None):
"""
If `D` is a dict, it is used to give initial values to the tuning curve.
"""
self._tuning_curves = {}
self._counts = {}
if D is not None:
for k,v in D.items():
self._tuning_curves[k] = [v]
self._counts[k] = 1
self.n = 1
else:
self.n = 0
def add(self, D):
for k,v in D.items():
self._tuning_curves[k].append(v)
self._counts[k] += 1
self.n += 1
def __getitem__(self, i):
D = {}
for k, v in self._tuning_curves.items():
D[k] = v[i]
return D
def __repr__(self):
return "TuningCurve: %s" % self._tuning_curves
def stats(self):
"""Return the mean tuning curve with stderrs."""
mean = {}
stderr = {}
n = self.n
for k in self._tuning_curves.keys():
arr = np.array(self._tuning_curves[k])
mean[k] = arr.mean()
stderr[k] = arr.std()*n/(n-1)/np.sqrt(n)
return mean, stderr
def max(self):
"""Return the key of the max value and the max value."""
k = _dict_max(self._tuning_curves)
return k, self._tuning_curves[k]
|
tbekolay/neurotools
|
neurotools/analysis.py
|
Python
|
gpl-2.0
| 17,953
|
[
"Gaussian",
"NEURON"
] |
6f5e38bc491e153e0cd17dbac08a68e7902d8381edeea83cc640098e0f6048e5
|
# GNU Solfege - free ear training software
# Copyright (C) 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008 Tom Cato Amundsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
"""
Api used to export exercises to midi file
=========================================
soundcard.start_export(export_to_filename)
Change soundcard.synth to point to another object that collects
the music send to it.
soundcard.end_export()
Write the music collected since start_export to the file named when
start_export was called.
Then set soundcard.synth back to what is was earlier.
"""
import atexit
import subprocess
import sys
import os
import solfege
from solfege.soundcard.soundcardexceptions import SoundInitException
from solfege.soundcard.exporter import MidiExporter
from solfege import cfg
from solfege import osutils
synth = None
midiexporter = None
# _saved_synth is used to store the object pointed to by synth when
# we are exporting using midiexporter.
_saved_synth = None
if sys.platform == 'win32':
import winsound
_mediaplayer = None
def _kill_mediaplayer():
"""
We need to do this atexit to avoid some text about an ignored
exception in Popen.
"""
if _mediaplayer:
_mediaplayer.kill()
_mediaplayer.wait()
atexit.register(_kill_mediaplayer)
def play_mediafile(typeid, filename):
global _mediaplayer
if sys.platform == 'win32' and typeid == 'wav':
winsound.PlaySound(filename, winsound.SND_FILENAME | winsound.SND_ASYNC)
else:
args = [cfg.get_string("sound/%s_player" % typeid)]
# We only add %s_player_options if is is a non empty string,
# since args should not contain any empty strings.
if cfg.get_string("sound/%s_player_options"% typeid):
args.extend(
cfg.get_string("sound/%s_player_options"% typeid).split(" "))
found = False
for i, s in enumerate(args):
if '%s' in s:
args[i] = args[i] % os.path.abspath(filename)
found = True
if not found:
args.append(os.path.abspath(filename))
if _mediaplayer and _mediaplayer.poll() is None:
_mediaplayer.kill()
_mediaplayer = None
try:
if sys.platform == 'win32':
info = subprocess.STARTUPINFO()
info.dwFlags = 1
info.wShowWindow = 0
_mediaplayer = osutils.Popen(args=args, startupinfo=info)
else:
_mediaplayer = osutils.Popen(args=args)
except OSError, e:
raise osutils.BinaryForMediaPlayerException(typeid,
cfg.get_string("sound/%s_player" % typeid), e)
def initialise_winsynth(synthnum, verbose_init=0):
from solfege.soundcard import winsynth
global synth
solfege.mpd.track.set_patch_delay = cfg.get_int("app/set_patch_delay")
synth = winsynth.WinSynth(synthnum, verbose_init)
def initialise_alsa_sequencer(port, verbose_init=0):
"""
This function should only be called if the pyalsa module is available.
"""
global synth
from solfege.soundcard import alsa_sequencer
solfege.mpd.track.set_patch_delay = cfg.get_int("app/set_patch_delay")
synth = alsa_sequencer.AlsaSequencer(port, verbose_init)
def initialise_external_midiplayer(verbose_init=0):
global synth
import solfege.soundcard.midifilesynth
solfege.mpd.track.set_patch_delay = cfg.get_int("app/set_patch_delay")
synth = solfege.soundcard.midifilesynth.MidiFileSynth(verbose_init)
def initialise_devicefile(devicefile, devicenum=0, verbose_init=0):
global synth
if devicefile == '/dev/sequencer2' or devicefile == '/dev/music':
import solfege.soundcard.oss_sequencer2
synth = solfege.soundcard.oss_sequencer2.OSSSequencer2Synth(devicefile, devicenum,
verbose_init)
else:#if devicefile == '/dev/sequencer':
if devicefile != '/dev/sequencer':
print "warning: the device file is unknown. Assuming it is /dev/sequencer - compatible"
import solfege.soundcard.oss_sequencer
synth = solfege.soundcard.oss_sequencer.OSSSequencerSynth(devicefile, devicenum,
verbose_init)
solfege.mpd.track.set_patch_delay = cfg.get_int("app/set_patch_delay")
def initialise_using_fake_synth(verbose_init=None):
global synth
import solfege.soundcard.fakesynth
synth = solfege.soundcard.fakesynth.Synth(verbose_init)
def start_export(filename):
global midiexporter, _saved_synth, synth
if not midiexporter:
midiexporter = MidiExporter()
assert _saved_synth is None
_saved_synth = synth
synth = midiexporter
midiexporter.start_export(filename)
def end_export():
global midiexporter, _saved_synth, synth
midiexporter.end_export()
assert _saved_synth is not None
synth = _saved_synth
_saved_synth = None
instrument_sections = (
'piano',
'cromatic percussion',
'organ',
'guitar',
'bass',
'strings',
'ensemble',
'brass',
'reed',
'pipe',
'synth lead',
'synth pad',
'synth effects',
'ethnic',
'percussive',
'sound effects')
instrument_names = (
"acoustic grand", # 0
"bright acoustic", # 1
"electric grand", # 2
"honky-tonk", # 3
"electric piano 1", # 4
"electric piano 2", # 5
"harpsichord", # 6
"clav", # 7
"celesta", # 8
"glockenspiel", # 9
"music box", # 10
"vibraphone", # 11
"marimba", # 12
"xylophone", # 13
"tubular bells", # 14
"dulcimer", # 15
"drawbar organ", # 16
"percussive organ", # 17
"rock organ", # 18
"church organ", # 19
"reed organ", # 20
"accordion", # 21
"harmonica", # 22
"concertina", # 23
"acoustic guitar (nylon)", # 24
"acoustic guitar (steel)", # 25
"electric guitar (jazz)", # 26
"electric guitar (clean)", # 27
"electric guitar (muted)", # 28
"overdriven guitar", # 29
"distorted guitar", # 30
"guitar harmonics", # 31
"acoustic bass", # 32
"electric bass (finger)", # 33
"electric bass (pick)", # 34
"fretless bass", # 35
"slap bass 1", # 36
"slap bass 2", # 37
"synth bass 1", # 38
"synth bass 2", # 39
"violin", # 40
"viola", # 41
"cello", # 42
"contrabass", # 43
"tremolo strings", # 44
"pizzicato strings", # 45
"orchestral strings", # 46
"timpani", # 47
"string ensemble 1", # 48
"string ensemble 2", # 49
"synthstrings 1", # 50
"synthstrings 2", # 51
"choir aahs", # 52
"voice oohs", # 53
"synth voice", # 54
"orchestra hit", # 55
"trumpet", # 56
"trombone", # 57
"tuba", # 58
"muted trumpet", # 59
"french horn", # 60
"brass section", # 61
"synthbrass 1", # 62
"synthbrass 2", # 63
"soprano sax", # 64
"alto sax", # 65
"tenor sax", # 66
"baritone sax", # 67
"oboe", # 68
"english horn", # 69
"bassoon", # 70
"clarinet", # 71
"piccolo", # 72
"flute", # 73
"recorder", # 74
"pan flute", # 75
"blown bottle", # 76
"shakuhachi", # 77
"whistle", # 78
"ocarina", # 79
"lead 1 (square)", # 80
"lead 2 (sawtooth)", # 81
"lead 3 (calliope)", # 82
"lead 4 (chiff)", # 83
"lead 5 (charang)", # 84
"lead 6 (voice)", # 85
"lead 7 (fifths)", # 86
"lead 8 (bass+lead)", # 87
"pad 1 (new age)", # 88
"pad 2 (warm)", # 89
"pad 3 (polysynth)", # 90
"pad 4 (choir)", # 91
"pad 5 (bowed)", # 92
"pad 6 (metallic)", # 93
"pad 7 (halo)", # 94
"pad 8 (sweep)", # 95
"fx 1 (rain)", # 96
"fx 2 (soundtrack)", # 97
"fx 3 (crystal)", # 98
"fx 4 (atmosphere)", # 99
"fx 5 (brightness)", # 100
"fx 6 (goblins)", # 101
"fx 7 (echoes)", # 102
"fx 8 (sci-fi)", # 103
"sitar", # 104
"banjo", # 105
"shamisen", # 106
"koto", # 107
"kalimba", # 108
"bagpipe", # 109
"fiddle", # 110
"shanai", # 111
"tinkle bell", # 112
"agogo", # 113
"steel drums", # 114
"woodblock", # 115
"taiko drum", # 116
"melodic tom", # 117
"synth drum", # 118
"reverse cymbal", # 119
"guitar fret noise", # 120
"breath noise", # 121
"seashore", # 122
"bird tweet", # 123
"telephone ring", # 124
"helicopter", # 125
"applause", # 126
"gunshot") # 127
def find_midi_instrument_number(instr_name):
"""
Try to find the integer representing the instrument instr_name.
Do a substring search if we don't get an exact match.
Raise KeyError if we don't find the instrument.
"""
for i in range(len(instrument_names)):
if instr_name == instrument_names[i]:
return i
for i in range(len(instrument_names)):
if instr_name in instrument_names[i]:
return i
raise KeyError(instr_name)
# the names are taken directly from the OSS documentation (pdf file)
percussion_names = [
"Acoustic Bass Drum", # 35
"Bass Drum 1",
"Side Stick",
"Acoustic Snare",
"Hand Clap",
"Electric Snare",
"Low Floor Tom",
"Closed Hi Hat",
"High Floor Tom",
"Pedal Hi Hat",
"Low Tom",
"Open HiHat",
"Low-Mid Tom",
"Hi-Mid Tom",
"Crash Cymbal 1",
"High Tom",
"Ride Cymbal 1",
"Chinese Cymbal",
"Ride Bell",
"Tambourine",
"Splash Cymbal",
"Cowbell",
"Crash Cymbal 2",
"Vibraslap",
"Ride Cymbal 2",
"Hi Bongo",
"Low Bongo",
"Mute Hi Conga",
"Open High Conga",
"Low Conga",
"High Timbale",
"Low Timbale",
"High Agogo",
"Agogo Low",
"Cabasa",
"Maracas",
"Short Whistle",
"Long Whistle",
"Short Guiro",
"Long Guiro",
"Claves",
"Hi Wood Block",
"Low Wood Block",
"Mute Cuica",
"Open Cuica",
"Mute Triangle",
"Open Triangle"]
first_percussion_int_value = 35
def percussionname_to_int(name):
assert isinstance(name, basestring)
return percussion_names.index(name) + first_percussion_int_value
def int_to_percussionname(i):
assert isinstance(i, int)
return percussion_names[i - first_percussion_int_value]
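# For example, percussionname_to_int("Acoustic Bass Drum") == 35 and
# int_to_percussionname(35) == "Acoustic Bass Drum".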
|
gabrielelanaro/solfege
|
solfege/soundcard/__init__.py
|
Python
|
gpl-3.0
| 11,078
|
[
"CRYSTAL"
] |
4356f3bc13804a2bc89294ae09638ce6215f5e6e4057fb0689f2ef99fd77ef95
|
#
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2011 Red Hat, Inc.
# This file is part of python-fedora
#
# python-fedora is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# python-fedora is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with python-fedora; if not, see <http://www.gnu.org/licenses/>
#
'''
Cross-site Request Forgery Protection.
http://en.wikipedia.org/wiki/Cross-site_request_forgery
.. moduleauthor:: John (J5) Palmieri <johnp@redhat.com>
.. moduleauthor:: Luke Macken <lmacken@redhat.com>
.. versionadded:: 0.3.17
'''
import logging
from munch import Munch
from kitchen.text.converters import to_bytes
from webob import Request
try:
# webob > 1.0
from webob.headers import ResponseHeaders
except ImportError:
# webob < 1.0
from webob.headerdict import HeaderDict as ResponseHeaders
from paste.httpexceptions import HTTPFound
from paste.response import replace_header
from repoze.who.interfaces import IMetadataProvider
from zope.interface import implements
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from fedora.urlutils import update_qs
log = logging.getLogger(__name__)
class CSRFProtectionMiddleware(object):
'''
CSRF Protection WSGI Middleware.
A layer of WSGI middleware that is responsible for making sure
authenticated requests originated from the user inside of the app's domain
and not a malicious website.
This middleware works with the :mod:`repoze.who` middleware, and requires
that it is placed below :mod:`repoze.who` in the WSGI stack,
since it relies upon ``repoze.who.identity`` to exist in the environ before
it is called.
To utilize this middleware, you can just add it to your WSGI stack below
the :mod:`repoze.who` middleware. Here is an example of utilizing the
`CSRFProtectionMiddleware` within a TurboGears2 application.
In your ``project/config/middleware.py``, you would wrap your main
application with the `CSRFProtectionMiddleware`, like so:
.. code-block:: python
from fedora.wsgi.csrf import CSRFProtectionMiddleware
def make_app(global_conf, full_stack=True, **app_conf):
app = make_base_app(global_conf, wrap_app=CSRFProtectionMiddleware,
full_stack=full_stack, **app_conf)
You then need to add the CSRF token to every url that you need to be
authenticated for. When used with TurboGears2, an overridden version of
:func:`tg.url` is provided. You can use it directly by calling::
from fedora.tg2.utils import url
[...]
url = url('/authentication_needed')
An easier and more portable way to use that is from within TG2 to set this
up is to use :func:`fedora.tg2.utils.enable_csrf` when you setup your
application. This function will monkeypatch TurboGears2's :func:`tg.url`
so that it adds a csrf token to urls. This way, you can keep the same
code in your templates and controller methods whether or not you configure
the CSRF middleware to provide you with protection via
:func:`~fedora.tg2.utils.enable_csrf`.
'''
def __init__(self, application, csrf_token_id='_csrf_token',
clear_env='repoze.who.identity repoze.what.credentials',
token_env='CSRF_TOKEN', auth_state='CSRF_AUTH_STATE'):
'''
Initialize the CSRF Protection WSGI Middleware.
:csrf_token_id: The name of the CSRF token variable
:clear_env: Variables to clear out of the `environ` on invalid token
:token_env: The name of the token variable in the environ
:auth_state: The environ key that will be set when we are logging in
'''
log.info('Creating CSRFProtectionMiddleware')
self.application = application
self.csrf_token_id = csrf_token_id
self.clear_env = clear_env.split()
self.token_env = token_env
self.auth_state = auth_state
def _clean_environ(self, environ):
''' Delete the ``keys`` from the supplied ``environ`` '''
log.debug('clean_environ(%s)' % to_bytes(self.clear_env))
for key in self.clear_env:
if key in environ:
log.debug('Deleting %(key)s from environ' %
{'key': to_bytes(key)})
del(environ[key])
def __call__(self, environ, start_response):
'''
This method is called for each request. It looks for a user-supplied
CSRF token in the GET/POST parameters, and compares it to the token
attached to ``environ['repoze.who.identity']['_csrf_token']``. If it
does not match, or if a token is not provided, it will remove the
user from the ``environ``, based on the ``clear_env`` setting.
'''
request = Request(environ)
log.debug('CSRFProtectionMiddleware(%(r_path)s)' %
{'r_path': to_bytes(request.path)})
token = environ.get('repoze.who.identity', {}).get(self.csrf_token_id)
csrf_token = environ.get(self.token_env)
if token and csrf_token and token == csrf_token:
log.debug('User supplied CSRF token matches environ!')
else:
if not environ.get(self.auth_state):
log.debug('Clearing identity')
self._clean_environ(environ)
if 'repoze.who.identity' not in environ:
environ['repoze.who.identity'] = Munch()
if 'repoze.who.logins' not in environ:
# For compatibility with friendlyform
environ['repoze.who.logins'] = 0
if csrf_token:
log.warning('Invalid CSRF token. User supplied'
' (%(u_token)s) does not match what\'s in our'
' environ (%(e_token)s)' %
{'u_token': to_bytes(csrf_token),
'e_token': to_bytes(token)})
response = request.get_response(self.application)
if environ.get(self.auth_state):
log.debug('CSRF_AUTH_STATE; rewriting headers')
token = environ.get('repoze.who.identity', {})\
.get(self.csrf_token_id)
loc = update_qs(
response.location, {self.csrf_token_id: str(token)})
response.location = loc
log.debug('response.location = %(r_loc)s' %
{'r_loc': to_bytes(response.location)})
environ[self.auth_state] = None
return response(environ, start_response)
class CSRFMetadataProvider(object):
'''
Repoze.who CSRF Metadata Provider Plugin.
    This metadata provider is called automatically by repoze.who with an
    authenticated user's identity.  It will then take the SHA1 hash of the
    user's session cookie and set it as the CSRF token in
``environ['repoze.who.identity']['_csrf_token']``.
This plugin will also set ``CSRF_AUTH_STATE`` in the environ if the user
has just authenticated during this request.
To enable this plugin in a TurboGears2 application, you can
add the following to your ``project/config/app_cfg.py``
.. code-block:: python
from fedora.wsgi.csrf import CSRFMetadataProvider
base_config.sa_auth.mdproviders = [('csrfmd', CSRFMetadataProvider())]
Note: If you use the faswho plugin, this is turned on automatically.
'''
implements(IMetadataProvider)
def __init__(self, csrf_token_id='_csrf_token', session_cookie='tg-visit',
clear_env='repoze.who.identity repoze.what.credentials',
login_handler='/post_login', token_env='CSRF_TOKEN',
auth_session_id='CSRF_AUTH_SESSION_ID',
auth_state='CSRF_AUTH_STATE'):
'''
Create the CSRF Metadata Provider Plugin.
:kwarg csrf_token_id: The name of the CSRF token variable. The
identity will contain an entry with this as key and the
computed csrf_token as the value.
:kwarg session_cookie: The name of the session cookie
:kwarg login_handler: The path to the login handler, used to determine
if the user logged in during this request
:kwarg token_env: The name of the token variable in the environ.
The environ will contain the token from the request
:kwarg auth_session_id: The environ key containing an optional
session id
:kwarg auth_state: The environ key that indicates when we are
logging in
'''
self.csrf_token_id = csrf_token_id
self.session_cookie = session_cookie
self.clear_env = clear_env
self.login_handler = login_handler
self.token_env = token_env
self.auth_session_id = auth_session_id
self.auth_state = auth_state
def strip_script(self, environ, path):
# Strips the script portion of a url path so the middleware works even
# when mounted under a path other than root
if path.startswith('/') and 'SCRIPT_NAME' in environ:
prefix = environ.get('SCRIPT_NAME')
if prefix.endswith('/'):
prefix = prefix[:-1]
if path.startswith(prefix):
path = path[len(prefix):]
return path
def add_metadata(self, environ, identity):
request = Request(environ)
log.debug('CSRFMetadataProvider.add_metadata(%(r_path)s)'
% {'r_path': to_bytes(request.path)})
session_id = environ.get(self.auth_session_id)
if not session_id:
session_id = request.cookies.get(self.session_cookie)
log.debug('session_id = %(s_id)r' % {'s_id':
to_bytes(session_id)})
if session_id and session_id != 'Set-Cookie:':
environ[self.auth_session_id] = session_id
token = sha1(session_id).hexdigest()
identity.update({self.csrf_token_id: token})
log.debug('Identity updated with CSRF token')
path = self.strip_script(environ, request.path)
if path == self.login_handler:
log.debug('Setting CSRF_AUTH_STATE')
environ[self.auth_state] = True
environ[self.token_env] = token
else:
environ[self.token_env] = self.extract_csrf_token(request)
app = environ.get('repoze.who.application')
if app:
# This occurs during login in some application configurations
if isinstance(app, HTTPFound) and environ.get(self.auth_state):
log.debug('Got HTTPFound(302) from'
' repoze.who.application')
# What possessed people to make this a string or
# a function?
location = app.location
if hasattr(location, '__call__'):
location = location()
loc = update_qs(location, {self.csrf_token_id:
str(token)})
headers = app.headers.items()
replace_header(headers, 'location', loc)
app.headers = ResponseHeaders(headers)
log.debug('Altered headers: %(headers)s' % {
'headers': to_bytes(app.headers)})
else:
log.warning('Invalid session cookie %(s_id)r, not setting CSRF'
' token!' % {'s_id': to_bytes(session_id)})
def extract_csrf_token(self, request):
'''Extract and remove the CSRF token from a given
:class:`webob.Request`
'''
csrf_token = None
if self.csrf_token_id in request.GET:
log.debug("%(token)s in GET" % {'token':
to_bytes(self.csrf_token_id)})
csrf_token = request.GET[self.csrf_token_id]
del(request.GET[self.csrf_token_id])
request.query_string = '&'.join(['%s=%s' % (k, v) for k, v in
request.GET.items()])
if self.csrf_token_id in request.POST:
log.debug("%(token)s in POST" % {'token':
to_bytes(self.csrf_token_id)})
csrf_token = request.POST[self.csrf_token_id]
del(request.POST[self.csrf_token_id])
return csrf_token
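
# --- Editorial sketch, not part of the original module: it shows, with
# --- hypothetical values, how the two classes above fit together.  The
# --- metadata provider derives the token from the session cookie (mirroring
# --- add_metadata above), and the middleware appends that token to redirect
# --- locations via update_qs().
def _csrf_demo():
    session_cookie = b'0123456789abcdef'  # hypothetical 'tg-visit' cookie value
    token = sha1(session_cookie).hexdigest()  # what CSRFMetadataProvider stores
    # What CSRFProtectionMiddleware does to a post-login redirect location:
    return update_qs('https://apps.example.org/post_login',
                     {'_csrf_token': str(token)})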
|
vivekanand1101/python-fedora
|
fedora/wsgi/csrf.py
|
Python
|
gpl-2.0
| 13,006
|
[
"VisIt"
] |
52f5602a9b25664e242771e4ea9df850fceca9c26e9a1095dc760d94be63d6f0
|
"""Utility modules for the VTK-Python wrappers."""
__all__ = ['colors', 'misc', 'vtkConstants', 'vtkImageExportToArray',
'vtkImageImportFromArray', 'vtkMethodParser', 'vtkVariant',
'numpy_support']
|
collects/VTK
|
Wrapping/Python/vtk/util/__init__.py
|
Python
|
bsd-3-clause
| 221
|
[
"VTK"
] |
4b616e8a755f7c82ec43b2127b1bf359f9310cb06e609657ee4b49a0b256d0d3
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import os
import tarfile
import tempfile
import threading
from io import StringIO
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.FrameworkSystem.Client.Logger import gLogger
file_types = (io.IOBase,)
gLogger = gLogger.getSubLogger("FileTransmissionHelper")
class FileHelper(object):
__validDirections = ("toClient", "fromClient", "receive", "send")
__directionsMapping = {"toClient": "send", "fromClient": "receive"}
def __init__(self, oTransport=None, checkSum=True):
self.oTransport = oTransport
self.__checkMD5 = checkSum
self.__oMD5 = hashlib.md5()
self.bFinishedTransmission = False
self.bReceivedEOF = False
self.direction = False
self.packetSize = 1048576
self.__fileBytes = 0
self.__log = gLogger.getSubLogger("FileHelper")
def disableCheckSum(self):
self.__checkMD5 = False
def enableCheckSum(self):
self.__checkMD5 = True
def setTransport(self, oTransport):
self.oTransport = oTransport
def setDirection(self, direction):
if direction in FileHelper.__validDirections:
if direction in FileHelper.__directionsMapping:
self.direction = FileHelper.__directionsMapping[direction]
else:
self.direction = direction
def getHash(self):
return self.__oMD5.hexdigest()
def getTransferedBytes(self):
return self.__fileBytes
def sendData(self, sBuffer):
if isinstance(sBuffer, str):
sBuffer = sBuffer.encode(errors="surrogateescape")
if self.__checkMD5:
self.__oMD5.update(sBuffer)
retVal = self.oTransport.sendData(S_OK([True, sBuffer]))
if not retVal["OK"]:
return retVal
retVal = self.oTransport.receiveData()
return retVal
def sendEOF(self):
retVal = self.oTransport.sendData(S_OK([False, self.__oMD5.hexdigest()]))
if not retVal["OK"]:
return retVal
self.__finishedTransmission()
return S_OK()
def sendError(self, errorMsg):
retVal = self.oTransport.sendData(S_ERROR(errorMsg))
if not retVal["OK"]:
return retVal
self.__finishedTransmission()
return S_OK()
def receiveData(self, maxBufferSize=0):
retVal = self.oTransport.receiveData(maxBufferSize=maxBufferSize)
if "AbortTransfer" in retVal and retVal["AbortTransfer"]:
self.oTransport.sendData(S_OK())
self.__finishedTransmission()
self.bReceivedEOF = True
return S_OK("")
if not retVal["OK"]:
return retVal
stBuffer = retVal["Value"]
if stBuffer[0]:
if isinstance(stBuffer[1], str):
stBuffer[1] = stBuffer[1].encode(errors="surrogateescape")
if self.__checkMD5:
self.__oMD5.update(stBuffer[1])
self.oTransport.sendData(S_OK())
else:
self.bReceivedEOF = True
if self.__checkMD5 and not self.__oMD5.hexdigest() == stBuffer[1]:
self.bErrorInMD5 = True
self.__finishedTransmission()
return S_OK("")
return S_OK(stBuffer[1])
def receivedEOF(self):
return self.bReceivedEOF
def markAsTransferred(self):
if not self.bFinishedTransmission:
if self.direction == "receive":
self.oTransport.receiveData()
abortTrans = S_OK()
abortTrans["AbortTransfer"] = True
self.oTransport.sendData(abortTrans)
else:
abortTrans = S_OK([False, ""])
abortTrans["AbortTransfer"] = True
retVal = self.oTransport.sendData(abortTrans)
if not retVal["OK"]:
return retVal
self.oTransport.receiveData()
self.__finishedTransmission()
def __finishedTransmission(self):
self.bFinishedTransmission = True
def finishedTransmission(self):
return self.bFinishedTransmission
def errorInTransmission(self):
return self.bErrorInMD5
def networkToString(self, maxFileSize=0):
"""Receive the input from a DISET client and return it as a string"""
stringIO = StringIO()
result = self.networkToDataSink(stringIO, maxFileSize=maxFileSize)
if not result["OK"]:
return result
return S_OK(stringIO.getvalue())
def networkToFD(self, iFD, maxFileSize=0):
dataSink = os.fdopen(iFD, "w")
try:
return self.networkToDataSink(dataSink, maxFileSize=maxFileSize)
finally:
try:
dataSink.close()
except Exception:
pass
def networkToDataSink(self, dataSink, maxFileSize=0):
if "write" not in dir(dataSink):
return S_ERROR("%s data sink object does not have a write method" % str(dataSink))
self.__oMD5 = hashlib.md5()
self.bReceivedEOF = False
self.bErrorInMD5 = False
receivedBytes = 0
# try:
result = self.receiveData(maxBufferSize=maxFileSize)
if not result["OK"]:
return result
strBuffer = result["Value"]
if isinstance(strBuffer, str):
strBuffer = strBuffer.encode(errors="surrogateescape")
receivedBytes += len(strBuffer)
while not self.receivedEOF():
if maxFileSize > 0 and receivedBytes > maxFileSize:
self.sendError("Exceeded maximum file size")
return S_ERROR("Received file exceeded maximum size of %s bytes" % (maxFileSize))
dataSink.write(strBuffer)
result = self.receiveData(maxBufferSize=(maxFileSize - len(strBuffer)))
if not result["OK"]:
return result
strBuffer = result["Value"]
if isinstance(strBuffer, str):
strBuffer = strBuffer.encode(errors="surrogateescape")
receivedBytes += len(strBuffer)
if strBuffer:
dataSink.write(strBuffer)
# except Exception as e:
# return S_ERROR("Error while receiving file, %s" % str(e))
if self.errorInTransmission():
return S_ERROR("Error in the file CRC")
self.__fileBytes = receivedBytes
return S_OK()
def stringToNetwork(self, stringVal):
"""Send a given string to the DISET client over the network"""
stringIO = StringIO(stringVal)
iPacketSize = self.packetSize
ioffset = 0
strlen = len(stringVal)
try:
while (ioffset) < strlen:
if (ioffset + iPacketSize) < strlen:
result = self.sendData(stringVal[ioffset : ioffset + iPacketSize])
else:
result = self.sendData(stringVal[ioffset:strlen])
if not result["OK"]:
return result
if "AbortTransfer" in result and result["AbortTransfer"]:
self.__log.verbose("Transfer aborted")
return S_OK()
ioffset += iPacketSize
self.sendEOF()
except Exception as e:
return S_ERROR("Error while sending string: %s" % str(e))
try:
stringIO.close()
except Exception:
pass
return S_OK()
def FDToNetwork(self, iFD):
self.__oMD5 = hashlib.md5()
iPacketSize = self.packetSize
self.__fileBytes = 0
sentBytes = 0
try:
sBuffer = os.read(iFD, iPacketSize)
while len(sBuffer) > 0:
dRetVal = self.sendData(sBuffer)
if not dRetVal["OK"]:
return dRetVal
if "AbortTransfer" in dRetVal and dRetVal["AbortTransfer"]:
self.__log.verbose("Transfer aborted")
return S_OK()
sentBytes += len(sBuffer)
sBuffer = os.read(iFD, iPacketSize)
self.sendEOF()
except Exception as e:
gLogger.exception("Error while sending file")
return S_ERROR("Error while sending file: %s" % str(e))
self.__fileBytes = sentBytes
return S_OK()
def BufferToNetwork(self, stringToSend):
sIO = StringIO(stringToSend)
try:
return self.DataSourceToNetwork(sIO)
finally:
sIO.close()
def DataSourceToNetwork(self, dataSource):
if "read" not in dir(dataSource):
return S_ERROR("%s data source object does not have a read method" % str(dataSource))
self.__oMD5 = hashlib.md5()
iPacketSize = self.packetSize
self.__fileBytes = 0
sentBytes = 0
try:
sBuffer = dataSource.read(iPacketSize)
while len(sBuffer) > 0:
dRetVal = self.sendData(sBuffer)
if not dRetVal["OK"]:
return dRetVal
if "AbortTransfer" in dRetVal and dRetVal["AbortTransfer"]:
self.__log.verbose("Transfer aborted")
return S_OK()
sentBytes += len(sBuffer)
sBuffer = dataSource.read(iPacketSize)
self.sendEOF()
except Exception as e:
gLogger.exception("Error while sending file")
return S_ERROR("Error while sending file: %s" % str(e))
self.__fileBytes = sentBytes
return S_OK()
def getFileDescriptor(self, uFile, sFileMode):
closeAfter = True
if isinstance(uFile, str):
try:
self.oFile = open(uFile, sFileMode)
except IOError:
return S_ERROR("%s can't be opened" % uFile)
iFD = self.oFile.fileno()
elif isinstance(uFile, file_types):
iFD = uFile.fileno()
elif isinstance(uFile, int):
iFD = uFile
closeAfter = False
else:
return S_ERROR("%s is not a valid file." % uFile)
result = S_OK(iFD)
result["closeAfterUse"] = closeAfter
return result
def getDataSink(self, uFile):
closeAfter = True
if isinstance(uFile, str):
try:
oFile = open(uFile, "wb")
except IOError:
return S_ERROR("%s can't be opened" % uFile)
elif isinstance(uFile, file_types):
oFile = uFile
closeAfter = False
elif isinstance(uFile, int):
oFile = os.fdopen(uFile, "wb")
closeAfter = True
elif "write" in dir(uFile):
oFile = uFile
closeAfter = False
else:
return S_ERROR("%s is not a valid file." % uFile)
result = S_OK(oFile)
result["closeAfterUse"] = closeAfter
return result
def __createTar(self, fileList, wPipe, compress, autoClose=True):
if "write" in dir(wPipe):
filePipe = wPipe
else:
filePipe = os.fdopen(wPipe, "w")
tarMode = "w|"
if compress:
tarMode = "w|bz2"
with tarfile.open(name="Pipe", mode=tarMode, fileobj=filePipe) as tar:
for entry in fileList:
tar.add(os.path.realpath(entry), os.path.basename(entry), recursive=True)
if autoClose:
try:
filePipe.close()
except Exception:
pass
def bulkToNetwork(self, fileList, compress=True, onthefly=True):
if not onthefly:
try:
filePipe, filePath = tempfile.mkstemp()
except Exception as e:
return S_ERROR("Can't create temporary file to pregenerate the bulk: %s" % str(e))
self.__createTar(fileList, filePipe, compress)
try:
fo = open(filePath, "rb")
except Exception as e:
return S_ERROR("Can't read pregenerated bulk: %s" % str(e))
result = self.DataSourceToNetwork(fo)
try:
fo.close()
os.unlink(filePath)
except Exception:
pass
return result
else:
rPipe, wPipe = os.pipe()
thrd = threading.Thread(target=self.__createTar, args=(fileList, wPipe, compress))
thrd.start()
response = self.FDToNetwork(rPipe)
try:
os.close(rPipe)
except Exception:
pass
return response
def __extractTar(self, destDir, rPipe, compress):
filePipe = os.fdopen(rPipe, "r")
tarMode = "r|*"
if compress:
tarMode = "r|bz2"
with tarfile.open(mode=tarMode, fileobj=filePipe) as tar:
for tarInfo in tar:
tar.extract(tarInfo, destDir)
try:
filePipe.close()
except Exception:
pass
def __receiveToPipe(self, wPipe, retList, maxFileSize):
retList.append(self.networkToFD(wPipe, maxFileSize=maxFileSize))
try:
os.close(wPipe)
except Exception:
pass
def networkToBulk(self, destDir, compress=True, maxFileSize=0):
retList = []
rPipe, wPipe = os.pipe()
thrd = threading.Thread(target=self.__receiveToPipe, args=(wPipe, retList, maxFileSize))
thrd.start()
try:
self.__extractTar(destDir, rPipe, compress)
except Exception as e:
return S_ERROR("Error while extracting bulk: %s" % e)
thrd.join()
return retList[0]
def bulkListToNetwork(self, iFD, compress=True):
filePipe = os.fdopen(iFD, "r")
try:
tarMode = "r|"
if compress:
tarMode = "r|bz2"
entries = []
with tarfile.open(mode=tarMode, fileobj=filePipe) as tar:
for tarInfo in tar:
entries.append(tarInfo.name)
filePipe.close()
return S_OK(entries)
except tarfile.ReadError as v:
return S_ERROR("Error reading bulk: %s" % str(v))
except tarfile.CompressionError as v:
return S_ERROR("Error in bulk compression setting: %s" % str(v))
except Exception as v:
return S_ERROR("Error in listing bulk: %s" % str(v))
|
ic-hep/DIRAC
|
src/DIRAC/Core/DISET/private/FileHelper.py
|
Python
|
gpl-3.0
| 14,574
|
[
"DIRAC"
] |
e88a6d222de609f1a9507078f5ef5a9ff5ca8246022c6bec38c1f62162b317b0
|
###############################################################################
# Copyright 2016 - Climate Research Division
# Environment and Climate Change Canada
#
# This file is part of the "EC-CAS diags" package.
#
# "EC-CAS diags" is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "EC-CAS diags" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with "EC-CAS diags". If not, see <http://www.gnu.org/licenses/>.
###############################################################################
# Interface for reading / writing GEOS-CHEM data that is converted to netCDF
# from Tailong He.
from . import DataProduct
class GEOSCHEM_Data(DataProduct):
"""
GEOS-Chem tracer data, converted to netCDF by Tailong He (UofT).
"""
# Define all the possible variables we might have in this dataset.
# (original_name, standard_name, units)
field_list = (
# From original files
# geoschem-emissions-djf1415.nc
# geoschem_biogenic_prod_and_pressures.nc
('pressure', 'pressure_edges', 'hPa'),
('co_emiss', 'CO_nonbio_flux', 'molecules(CO) cm-2 s-1'),
('CO_ISO', 'CO_isoprene_flux', 'molecules(CO) cm-2 s-1'),
('CO_MET', 'CO_methanol_flux', 'molecules(CO) cm-2 s-1'),
('CO_MONO', 'CO_monoterpene_flux', 'molecules(CO) cm-2 s-1'),
('CO_ACET', 'CO_acetone_flux', 'molecules(CO) cm-2 s-1'),
('surf_area', 'cell_area', 'm2'),
('co_init', 'CO', 'mol mol(semidry_air)-1'),
# From updated file
# geoschem-monthly-mean-emissions-2015.nc
('p_center', 'pressure_edges', 'hPa'),
#('co_init', 'CO', 'mol mol(semidry_air)-1'),
#('surf_area', 'cell_area', 'm2'),
('an_emiss', 'CO_anthro_flux', 'molecules(CO) cm-2 s-1'),
('bb_emiss', 'CO_biomass_flux', 'molecules(CO) cm-2 s-1'),
('bf_emiss', 'CO_biofuel_flux', 'molecules(CO) cm-2 s-1'),
('isoprenes', 'CO_isoprene_flux', 'molecules(CO) cm-2 s-1'),
('methanols', 'CO_methanol_flux', 'molecules(CO) cm-2 s-1'),
('monos', 'CO_monoterpene_flux', 'molecules(CO) cm-2 s-1'),
('acetones', 'CO_acetone_flux', 'molecules(CO) cm-2 s-1'),
# Burning emissions, from Dylan.
# replaces an_emiss, bb_emiss, and bf_emiss.
('COanth', 'CO_combust_flux', 'kg(CO) cm-2 s-1'),
)
# Method to open a single file
@staticmethod
def open_file (filename):
from pygeode.formats import netcdf
from pygeode.axis import ZAxis, Height, TAxis
data = netcdf.open(filename)
# Annotate some of the axes with specific types, to help the data_scanner
# figure things out. Otherwise, get weird crashes.
data = data.replace_axes(date_time=TAxis, date_dim=TAxis, ground_level=Height, level=ZAxis, level_centers=ZAxis, level_edges=ZAxis)
# Use consistent name for level_centers across the files.
data = data.rename_axes(level='level_centers')
return data
# Method to decode an opened dataset (standardize variable names, and add any
# extra info needed (pressure values, cell area, etc.)
@classmethod
def decode (cls,dataset):
import numpy as np
from pygeode.axis import Hybrid, Lat, Lon
from pygeode.timeaxis import StandardTime
from .geoschem_feng_nc import GEOSCHEM_Data as GC
from ..common import compute_pressure, convert
# Hard-code the hybrid levels (needed for doing zonal mean plots on native
# model coordinates).
A_interface = np.array(GC.A_interface)
B_interface = np.array(GC.B_interface)
A = (A_interface[:-1] + A_interface[1:])/2
B = (B_interface[:-1] + B_interface[1:])/2
# Note: for compute_pressure need hybrid A and B w.r.t. Pascals, not hPa.
level = Hybrid(GC.eta, A=A*100, B=B, name='level_centers')
    # Need to make the z-axis the right type (since there are no metadata hints
# in the file to indicate the type)
dataset = dataset.replace_axes(level_centers=level)
if 'level_centers' in dataset:
zaxis = dataset.level_centers
else: zaxis = None
if zaxis is not None:
zaxis.atts['positive'] = 'up'
# Identify lat/lon axes
dataset = dataset.replace_axes(lat=Lat, lon=Lon)
# Fix time axis
    # Dates are stored as floating-point numbers?
if 'date_info' in dataset:
times = dataset.date_info.get().flatten()
# Convert to integers.
times = np.array(times, dtype='int32')
# Set to first of the month.
times += 1
# Create time axis.
year = times//10000
month = (times//100)%100
day = times%100
time = StandardTime(year=year,month=month,day=day,units='days',startdate=dict(year=2014,month=1,day=1))
dataset = dataset.replace_axes(date_dim=time, datetime=time)
# Remove "ground-level" dimension.
if dataset.hasaxis('ground_level'):
dataset = dataset.squeeze('ground_level')
# Apply fieldname conversions
data = DataProduct.decode.__func__(cls,dataset)
# Convert to a dictionary (for referencing by variable name)
data = dict((var.name,var) for var in dataset)
# Convert units of combustion flux to be consistent with bio fluxes.
if 'CO_combust_flux' in data:
data['CO_combust_flux'] = convert(data['CO_combust_flux'],'molecules(CO) cm-2 s-1')
# Collect non-bio fields together?
if all('CO_'+n+'_flux' in data for n in ('anthro','biomass','biofuel')):
data['CO_nonbio_flux'] = data['CO_anthro_flux'] + data['CO_biomass_flux'] + data['CO_biofuel_flux']
elif 'CO_combust_flux' in data:
data['CO_nonbio_flux'] = data['CO_combust_flux'].rename('CO_combust_flux')
# Generate a total CO flux (including biogenic components)
if all('CO_'+n+'_flux' in data for n in ('nonbio','methanol','acetone','isoprene','monoterpene')):
data['CO_flux'] = data['CO_nonbio_flux'] + data['CO_methanol_flux'] + data['CO_acetone_flux'] + data['CO_isoprene_flux'] + data['CO_monoterpene_flux']
# Generate a surface pressure field.
# NOTE: pressure is actually the pressure at the interfaces (from surface onward).
if 'pressure_edges' in data:
data['surface_pressure'] = data['pressure_edges'](i_level_centers=0).squeeze('level_centers')
# Re-compute pressure at the centers.
# The levels encoded for pressure_edges are actually the centers.
data['air_pressure'] = compute_pressure(data['pressure_edges'].level_centers,data['surface_pressure'])
# General cleanup stuff
# Make sure the variables have the appropriate names
for name, var in data.iteritems(): var.name = name
# Add extra fields that will be useful for the diagnostics.
data = cls._add_extra_fields(data)
return data
# Method to find all files in the given directory, which can be accessed
# through this interface.
@staticmethod
def find_files (dirname):
from glob import glob
return glob(dirname+"/geoschem-emissions-djf1415.nc") + glob(dirname+"/geoschem_biogenic_prod_and_pressures.nc") + glob(dirname+"/geoschem-monthly-mean-emissions-2015_fixed-area.nc")
# Add this interface to the table.
from . import table
table['geoschem-tailong-nc'] = GEOSCHEM_Data
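
# Editorial usage sketch, not part of the package: how a caller would pick this
# interface up from the registry defined above and decode the matching files.
# The directory name below is a hypothetical placeholder.
def _example_load (dirname='/path/to/geoschem/netcdf'):
  interface = table['geoschem-tailong-nc']
  return [interface.decode(interface.open_file(f))
          for f in interface.find_files(dirname)]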
|
neishm/EC-CAS-diags
|
eccas_diags/interfaces/geoschem_tailong_nc.py
|
Python
|
lgpl-3.0
| 7,502
|
[
"NetCDF"
] |
7fe5b0d64533dd70905262fc3c8eca5c7f93127097e8dd9523c3c3e6950d8f7d
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from bigdl.orca.test_zoo_utils import ZooTestCase
from bigdl.chronos.autots.deprecated.feature.utils import save, restore
from bigdl.chronos.autots.deprecated.feature.time_sequence import *
from numpy.testing import assert_array_almost_equal
import json
import tempfile
import shutil
from bigdl.chronos.autots.deprecated.preprocessing.utils import train_val_test_split
class TestTimeSequenceFeature(ZooTestCase):
def setup_method(self, method):
pass
def teardown_method(self, method):
pass
def test_get_feature_list(self):
dates = pd.date_range('1/1/2019', periods=8)
data = np.random.randn(8, 3)
df = pd.DataFrame({"datetime": dates, "values": data[:, 0],
"A": data[:, 1], "B": data[:, 2]})
feat = TimeSequenceFeatureTransformer(dt_col="datetime",
target_col="values",
extra_features_col=["A", "B"],
drop_missing=True)
feature_list = feat.get_feature_list()
assert set(feature_list) == {'IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)',
'DAY(datetime)',
'IS_WEEKEND(datetime)',
'WEEKDAY(datetime)',
'MONTH(datetime)',
'DAYOFYEAR(datetime)',
'WEEKOFYEAR(datetime)',
'MINUTE(datetime)',
'A',
'B'}
feat = TimeSequenceFeatureTransformer(dt_col="datetime",
target_col="values",
extra_features_col=["A", "B"],
drop_missing=True,
time_features=False)
feature_list = feat.get_feature_list()
assert set(feature_list) == {'A', 'B'}
def test_fit_transform(self):
sample_num = 8
past_seq_len = 2
dates = pd.date_range('1/1/2019', periods=sample_num)
data = np.random.randn(sample_num, 3)
df = pd.DataFrame({"datetime": dates, "values": data[:, 0],
"A": data[:, 1], "B": data[:, 2]})
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)',
'A']),
"past_seq_len": past_seq_len}
feat = TimeSequenceFeatureTransformer(future_seq_len=1, dt_col="datetime",
target_col="values", drop_missing=True)
x, y = feat.fit_transform(df, **config)
assert x.shape == (sample_num - past_seq_len,
past_seq_len,
len(json.loads(config["selected_features"])) + 1)
assert y.shape == (sample_num - past_seq_len, 1)
assert np.mean(np.concatenate((x[0, :, 0], y[:, 0]), axis=None)) < 1e-5
def test_fit_transform_df_list(self):
sample_num = 8
past_seq_len = 2
dates = pd.date_range('1/1/2019', periods=sample_num)
data = np.random.randn(sample_num, 3)
df = pd.DataFrame({"datetime": dates, "values": data[:, 0],
"A": data[:, 1], "B": data[:, 2]})
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)',
'A']),
"past_seq_len": past_seq_len}
feat = TimeSequenceFeatureTransformer(future_seq_len=1, dt_col="datetime",
target_col="values", drop_missing=True)
df_list = [df] * 3
x, y = feat.fit_transform(df_list, **config)
single_result_len = sample_num - past_seq_len
assert x.shape == (single_result_len * 3,
past_seq_len,
len(json.loads(config["selected_features"])) + 1)
assert y.shape == (single_result_len * 3, 1)
assert_array_almost_equal(x[:single_result_len],
x[single_result_len: 2 * single_result_len], decimal=2)
assert_array_almost_equal(x[:single_result_len], x[2 * single_result_len:], decimal=2)
assert_array_almost_equal(y[:single_result_len],
y[single_result_len: 2 * single_result_len],
decimal=2)
assert_array_almost_equal(y[:single_result_len], y[2 * single_result_len:], decimal=2)
assert np.mean(np.concatenate((x[0, :, 0], y[:single_result_len, 0]), axis=None)) < 1e-5
def test_fit_transform_input_datetime(self):
# if the type of input datetime is not datetime64, raise an error
dates = pd.date_range('1/1/2019', periods=8)
values = np.random.randn(8)
df = pd.DataFrame({"datetime": dates.strftime('%m/%d/%Y'), "values": values})
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)']),
"past_seq_len": 2}
feat = TimeSequenceFeatureTransformer(future_seq_len=1, dt_col="datetime",
target_col="values", drop_missing=True)
with pytest.raises(ValueError) as excinfo:
feat.fit_transform(df, **config)
assert 'np.datetime64' in str(excinfo.value)
# if there is NaT in datetime, raise an error
df.loc[1, "datetime"] = None
with pytest.raises(ValueError, match=r".* datetime .*"):
feat.fit_transform(df, **config)
def test_input_data_len(self):
sample_num = 100
past_seq_len = 20
dates = pd.date_range('1/1/2019', periods=sample_num)
values = np.random.randn(sample_num)
df = pd.DataFrame({"datetime": dates, "values": values})
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)']),
"past_seq_len": past_seq_len}
train_df, val_df, test_df = train_val_test_split(df,
val_ratio=0.1,
test_ratio=0.1,
look_back=10)
feat = TimeSequenceFeatureTransformer(future_seq_len=1, dt_col="datetime",
target_col="values", drop_missing=True)
with pytest.raises(ValueError, match=r".*past sequence length.*"):
feat.fit_transform(train_df[:20], **config)
feat.fit_transform(train_df, **config)
with pytest.raises(ValueError, match=r".*past sequence length.*"):
feat.transform(val_df, is_train=True)
with pytest.raises(ValueError, match=r".*past sequence length.*"):
feat.transform(test_df[:-1], is_train=False)
out_x, out_y = feat.transform(test_df, is_train=False)
assert len(out_x) == 1
assert out_y is None
def test_fit_transform_input_data(self):
# if there is NaN in data other than datetime, drop the training sample.
num_samples = 8
dates = pd.date_range('1/1/2019', periods=num_samples)
values = np.random.randn(num_samples)
df = pd.DataFrame({"datetime": dates, "values": values})
df.loc[2, "values"] = None
past_seq_len = 2
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)']),
"past_seq_len": past_seq_len}
feat = TimeSequenceFeatureTransformer(future_seq_len=1, dt_col="datetime",
target_col="values", drop_missing=True)
x, y = feat.fit_transform(df, **config)
# mask_x = [1, 0, 0, 1, 1, 1]
# mask_y = [0, 1, 1, 1, 1, 1]
# mask = [0, 0, 0, 1, 1, 1]
assert x.shape == (3, past_seq_len, len(json.loads(config["selected_features"])) + 1)
assert y.shape == (3, 1)
def test_transform_train_true(self):
num_samples = 16
dates = pd.date_range('1/1/2019', periods=num_samples)
values = np.random.randn(num_samples, 2)
df = pd.DataFrame({"datetime": dates, "values": values[:, 0], "feature_1": values[:, 1]})
train_sample_num = 10
train_df = df[:train_sample_num]
val_df = df[train_sample_num:]
past_seq_len = 2
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)',
"feature_1"]),
"past_seq_len": past_seq_len}
feat = TimeSequenceFeatureTransformer(future_seq_len=1, dt_col="datetime",
target_col="values",
extra_features_col="feature_1",
drop_missing=True)
feat.fit_transform(train_df, **config)
val_x, val_y = feat.transform(val_df, is_train=True)
assert val_x.shape == (val_df.shape[0] - past_seq_len,
past_seq_len,
len(json.loads(config["selected_features"])) + 1)
assert val_y.shape == (val_df.shape[0] - past_seq_len, 1)
def test_transform_train_true_df_list(self):
num_samples = 16
dates = pd.date_range('1/1/2019', periods=num_samples)
values = np.random.randn(num_samples, 2)
df = pd.DataFrame({"datetime": dates, "values": values[:, 0], "feature_1": values[:, 1]})
train_sample_num = 10
train_df = df[:train_sample_num]
val_df = df[train_sample_num:]
past_seq_len = 2
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)',
"feature_1"]),
"past_seq_len": past_seq_len}
feat = TimeSequenceFeatureTransformer(future_seq_len=1, dt_col="datetime",
target_col="values",
extra_features_col="feature_1",
drop_missing=True)
train_df_list = [train_df] * 3
feat.fit_transform(train_df_list, **config)
val_df_list = [val_df] * 3
val_x, val_y = feat.transform(val_df_list, is_train=True)
single_result_len = val_df.shape[0] - past_seq_len
assert val_x.shape == (single_result_len * 3,
past_seq_len,
len(json.loads(config["selected_features"])) + 1)
assert val_y.shape == (single_result_len * 3, 1)
def test_transform_train_false(self):
num_samples = 16
dates = pd.date_range('1/1/2019', periods=num_samples)
values = np.random.randn(num_samples, 2)
df = pd.DataFrame({"datetime": dates, "values": values[:, 0], "feature_1": values[:, 1]})
train_sample_num = 10
train_df = df[:train_sample_num]
test_df = df[train_sample_num:]
past_seq_len = 2
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)',
"feature_1"]),
"past_seq_len": past_seq_len}
feat = TimeSequenceFeatureTransformer(future_seq_len=1, dt_col="datetime",
target_col="values",
extra_features_col="feature_1",
drop_missing=True)
feat.fit_transform(train_df, **config)
test_x, _ = feat.transform(test_df, is_train=False)
assert test_x.shape == (test_df.shape[0] - past_seq_len + 1,
past_seq_len,
len(json.loads(config["selected_features"])) + 1)
def test_transform_train_false_df_list(self):
num_samples = 16
dates = pd.date_range('1/1/2019', periods=num_samples)
values = np.random.randn(num_samples, 2)
df = pd.DataFrame({"datetime": dates, "values": values[:, 0], "feature_1": values[:, 1]})
train_sample_num = 10
train_df = df[:train_sample_num]
test_df = df[train_sample_num:]
past_seq_len = 2
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)',
"feature_1"]),
"past_seq_len": past_seq_len}
feat = TimeSequenceFeatureTransformer(future_seq_len=1, dt_col="datetime",
target_col="values",
extra_features_col="feature_1",
drop_missing=True)
train_df_list = [train_df] * 3
feat.fit_transform(train_df_list, **config)
test_df_list = [test_df] * 3
test_x, _ = feat.transform(test_df_list, is_train=False)
assert test_x.shape == ((test_df.shape[0] - past_seq_len + 1) * 3,
past_seq_len,
len(json.loads(config["selected_features"])) + 1)
def test_save_restore(self):
dates = pd.date_range('1/1/2019', periods=8)
values = np.random.randn(8)
df = pd.DataFrame({"dt": dates, "v": values})
future_seq_len = 2
dt_col = "dt"
target_col = "v"
drop_missing = True
feat = TimeSequenceFeatureTransformer(future_seq_len=future_seq_len,
dt_col=dt_col,
target_col=target_col,
drop_missing=drop_missing)
feature_list = feat.get_feature_list()
config = {"selected_features": json.dumps(feature_list),
"past_seq_len": 2
}
train_x, train_y = feat.fit_transform(df, **config)
dirname = tempfile.mkdtemp(prefix="automl_test_feature")
try:
save(dirname, feature_transformers=feat)
new_ft = TimeSequenceFeatureTransformer()
restore(dirname, feature_transformers=new_ft, config=config)
assert new_ft.future_seq_len == future_seq_len
assert new_ft.dt_col == dt_col
assert new_ft.target_col[0] == target_col
assert new_ft.extra_features_col is None
assert new_ft.drop_missing == drop_missing
test_x, _ = new_ft.transform(df[:-future_seq_len], is_train=False)
assert_array_almost_equal(test_x, train_x, decimal=2)
finally:
shutil.rmtree(dirname)
def test_post_processing_train(self):
dates = pd.date_range('1/1/2019', periods=8)
values = np.random.randn(8)
dt_col = "datetime"
value_col = "values"
df = pd.DataFrame({dt_col: dates, value_col: values})
past_seq_len = 2
future_seq_len = 1
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)']),
"past_seq_len": past_seq_len}
feat = TimeSequenceFeatureTransformer(future_seq_len=future_seq_len, dt_col="datetime",
target_col="values", drop_missing=True)
train_x, train_y = feat.fit_transform(df, **config)
y_unscale, y_unscale_1 = feat.post_processing(df, train_y, is_train=True)
y_input = df[past_seq_len:][[value_col]].values
msg = "y_unscale is {}, y_unscale_1 is {}".format(y_unscale, y_unscale_1)
assert_array_almost_equal(y_unscale, y_unscale_1, decimal=2), msg
msg = "y_unscale is {}, y_input is {}".format(y_unscale, y_input)
assert_array_almost_equal(y_unscale, y_input, decimal=2), msg
def test_post_processing_train_df_list(self):
dates = pd.date_range('1/1/2019', periods=8)
values = np.random.randn(8)
dt_col = "datetime"
value_col = "values"
df = pd.DataFrame({dt_col: dates, value_col: values})
past_seq_len = 2
future_seq_len = 1
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)']),
"past_seq_len": past_seq_len}
feat = TimeSequenceFeatureTransformer(future_seq_len=future_seq_len, dt_col="datetime",
target_col="values", drop_missing=True)
df_list = [df] * 3
train_x, train_y = feat.fit_transform(df_list, **config)
y_unscale, y_unscale_1 = feat.post_processing(df_list, train_y, is_train=True)
y_input = df[past_seq_len:][[value_col]].values
target_y = np.concatenate([y_input] * 3)
msg = "y_unscale is {}, y_unscale_1 is {}".format(y_unscale, y_unscale_1)
assert_array_almost_equal(y_unscale, y_unscale_1, decimal=2), msg
msg = "y_unscale is {}, y_input is {}".format(y_unscale, target_y)
assert_array_almost_equal(y_unscale, target_y, decimal=2), msg
def test_post_processing_test_1(self):
dates = pd.date_range('1/1/2019', periods=8)
values = np.random.randn(8)
dt_col = "datetime"
value_col = "values"
df = pd.DataFrame({dt_col: dates, value_col: values})
past_seq_len = 2
future_seq_len = 1
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)']),
"past_seq_len": past_seq_len}
feat = TimeSequenceFeatureTransformer(future_seq_len=future_seq_len, dt_col="datetime",
target_col="values", drop_missing=True)
train_x, train_y = feat.fit_transform(df, **config)
dirname = tempfile.mkdtemp(prefix="automl_test_feature_")
try:
save(dirname, feature_transformers=feat)
new_ft = TimeSequenceFeatureTransformer()
restore(dirname, feature_transformers=new_ft, config=config)
test_df = df[:-future_seq_len]
new_ft.transform(test_df, is_train=False)
output_value_df = new_ft.post_processing(test_df, train_y, is_train=False)
# train_y is generated from df[past_seq_len:]
target_df = df[past_seq_len:].copy().reset_index(drop=True)
assert output_value_df[dt_col].equals(target_df[dt_col])
assert_array_almost_equal(output_value_df[value_col].values,
target_df[value_col].values, decimal=2)
finally:
shutil.rmtree(dirname)
def test_post_processing_test_df_list(self):
dates = pd.date_range('1/1/2019', periods=8)
values = np.random.randn(8)
dt_col = "datetime"
value_col = "values"
df = pd.DataFrame({dt_col: dates, value_col: values})
past_seq_len = 2
future_seq_len = 1
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)']),
"past_seq_len": past_seq_len}
feat = TimeSequenceFeatureTransformer(future_seq_len=future_seq_len, dt_col="datetime",
target_col="values", drop_missing=True)
df_list = [df] * 3
train_x, train_y = feat.fit_transform(df_list, **config)
dirname = tempfile.mkdtemp(prefix="automl_test_feature_")
try:
save(dirname, feature_transformers=feat)
new_ft = TimeSequenceFeatureTransformer()
restore(dirname, feature_transformers=new_ft, config=config)
test_df = df[:-future_seq_len]
test_df_list = [test_df] * 3
new_ft.transform(test_df_list, is_train=False)
output_value_df_list = new_ft.post_processing(test_df_list, train_y, is_train=False)
# train_y is generated from df[past_seq_len:]
target_df = df[past_seq_len:].copy().reset_index(drop=True)
assert output_value_df_list[0].equals(output_value_df_list[1])
assert output_value_df_list[0].equals(output_value_df_list[2])
assert output_value_df_list[0][dt_col].equals(target_df[dt_col])
assert_array_almost_equal(output_value_df_list[0][value_col].values,
target_df[value_col].values, decimal=2)
finally:
shutil.rmtree(dirname)
def test_post_processing_test_2(self):
sample_num = 8
dates = pd.date_range('1/1/2019', periods=sample_num)
values = np.random.randn(sample_num)
dt_col = "datetime"
value_col = "values"
df = pd.DataFrame({dt_col: dates, value_col: values})
past_seq_len = 2
future_seq_len = 2
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)']),
"past_seq_len": past_seq_len}
feat = TimeSequenceFeatureTransformer(future_seq_len=future_seq_len, dt_col="datetime",
target_col="values", drop_missing=True)
train_x, train_y = feat.fit_transform(df, **config)
dirname = tempfile.mkdtemp(prefix="automl_test_feature_")
try:
save(dirname, feature_transformers=feat)
new_ft = TimeSequenceFeatureTransformer()
restore(dirname, feature_transformers=new_ft, config=config)
test_df = df[:-future_seq_len]
new_ft.transform(test_df, is_train=False)
output_value_df = new_ft.post_processing(test_df, train_y, is_train=False)
assert output_value_df.shape == (sample_num - past_seq_len - future_seq_len + 1,
future_seq_len + 1)
columns = ["{}_{}".format(value_col, i) for i in range(future_seq_len)]
output_value = output_value_df[columns].values
target_df = df[past_seq_len:].copy().reset_index(drop=True)
target_value = feat._roll_test(target_df["values"], future_seq_len)
assert output_value_df[dt_col].equals(target_df[:-future_seq_len + 1][dt_col])
msg = "output_value is {}, target_value is {}".format(output_value, target_value)
assert_array_almost_equal(output_value, target_value, decimal=2), msg
finally:
shutil.rmtree(dirname)
def test_future_time_validation(self):
sample_num = 8
dates = pd.date_range('1/1/2100', periods=sample_num)
values = np.random.randn(sample_num)
dt_col = "datetime"
value_col = "values"
df = pd.DataFrame({dt_col: dates, value_col: values})
past_seq_len = 2
future_seq_len = 1
config = {"selected_features": json.dumps(['IS_AWAKE(datetime)',
'IS_BUSY_HOURS(datetime)',
'HOUR(datetime)']),
"past_seq_len": past_seq_len}
feat = TimeSequenceFeatureTransformer(future_seq_len=future_seq_len, dt_col="datetime",
target_col="values", drop_missing=True)
x, y = feat.fit_transform(df, **config)
assert x.shape == (sample_num - past_seq_len,
past_seq_len,
len(json.loads(config["selected_features"])) + 1)
assert y.shape == (sample_num - past_seq_len, 1)
if __name__ == "__main__":
pytest.main([__file__])
|
intel-analytics/BigDL
|
python/chronos/test/bigdl/chronos/autots/deprecated/feature/test_time_sequence_feature.py
|
Python
|
apache-2.0
| 26,161
|
[
"ORCA"
] |
26253dc5ed241b5aebae08ef3224389556131bf69d9f427d3d0a2ddc2b12ee3d
|
# Written by David McDougall, 2017
"""
BUILD COMMAND
./setup.py build_ext --inplace
"""
import numpy as np
import scipy.ndimage
import random
import copy
from genetics import Parameters
from encoders import *
from classifiers import *
from sdr import SDR, SynapseManager, Dendrite
class SpatialPoolerParameters(Parameters):
parameters = [
"permanence_inc",
"permanence_dec",
"permanence_thresh",
"sparsity",
"potential_pool",
"boosting_alpha",
# "init_dist",
]
def __init__(self,
permanence_inc = 0.04,
permanence_dec = 0.01,
permanence_thresh = 0.4,
potential_pool = 2048,
sparsity = 0.02,
boosting_alpha = 0.001,
# init_dist = (0.4/4, 0.4/3),
):
"""
This class contains the global parameters, which are invariant between
different cortical regions. The column dimensions and radii are stored
elsewhere.
Argument boosting_alpha is the small constant used by the moving
        exponential average which tracks each column's activation
frequency.
"""
# Get the parent class to save all these parameters as attributes of the same name.
kw_args = locals()
del kw_args['self']
super().__init__(**kw_args)
class SpatialPooler:
"""
This class handles the mini-column structures and the feed forward
proximal inputs to each cortical mini-column.
    This implementation is based on, but differs from, the one described in
    Numenta's Spatial Pooler white paper (Cui, Ahmad, Hawkins, 2017, "The HTM
    Spatial Pooler - a neocortical...") in two main ways: the boosting function
    and the local inhibition mechanism.
Logarithmic Boosting Function:
This uses a logarithmic boosting function. Its input is the activation
frequency which is in the range [0, 1] and its output is a boosting factor
    to multiply each column's excitement by.  Its equation is:
boost-factor = log( activation-frequency ) / log( target-frequency )
Some things to note:
1) The boost factor asymptotically approaches infinity as the activation
frequency approaches zero.
    2) The boost factor equals zero when the activation frequency is one.
3) The boost factor for columns which are at the target activation
frequency is one.
4) This mechanism has a single parameter: boosting_alpha which controls
the exponential moving average which tracks the activation frequency.
Fast Local Inhibition:
This activates the most excited columns globally, after normalizing all
columns by their local area mean and standard deviation. The local area is
a gaussian window and the standard deviation of it is proportional to the
deviation which is used to make the receptive fields of each column.
Columns inhibit each other in proportion to the number of inputs which they
share. In pseudo code:
1. mean_normalized = excitement - gaussian_blur( excitement, radius )
2. standard_deviation = sqrt( gaussian_blur( mean_normalized ^ 2, radius ))
3. normalized = mean_normalized / standard_deviation
4. activate = top_k( normalized, sparsity * number_of_columns )
"""
stability_st_period = 1000
stability_lt_period = 10 # Units: self.stability_st_period
def __init__(self, parameters, input_sdr, column_sdr,
radii=None,
stability_sample_size=0,
multisegment_experiment=None,
init_dist=None,):
"""
Argument parameters is an instance of SpatialPoolerParameters.
Argument input_sdr ...
Argument column_sdr ...
Argument radii is the standard deviation of the gaussian window which
defines the local neighborhood of a column. The radii
determine which inputs are likely to be in a columns potential
pool. If radii is None then topology is disabled. See
SynapseManager.normally_distributed_connections for details
about topology.
Argument stability_sample_size, set to 0 to disable stability
monitoring, default is off.
"""
assert(isinstance(parameters, SpatialPoolerParameters))
assert(isinstance(input_sdr, SDR))
assert(isinstance(column_sdr, SDR))
self.args = args = parameters
self.inputs = input_sdr
self.columns = column_sdr
self.topology = radii is not None
self.age = 0
self.stability_schedule = [0] if stability_sample_size > 0 else [-1]
self.stability_sample_size = stability_sample_size
self.stability_samples = []
self.multisegment = multisegment_experiment is not None
if self.multisegment:
            # EXPERIMENTAL: Multi-segment proximal dendrites.
self.segments_per_cell = int(round(multisegment_experiment))
self.proximal = SynapseManager( self.inputs,
SDR(self.columns.dimensions + (self.segments_per_cell,),
activation_frequency_alpha=args.boosting_alpha), # Used for boosting!
permanence_inc = args.permanence_inc,
permanence_dec = args.permanence_dec,
permanence_thresh = args.permanence_thresh,)
# Initialize to the target activation frequency/sparsity.
self.proximal.outputs.activation_frequency.fill(args.sparsity / self.segments_per_cell)
else:
self.proximal = SynapseManager( self.inputs,
self.columns,
permanence_inc = args.permanence_inc,
permanence_dec = args.permanence_dec,
permanence_thresh = args.permanence_thresh,)
if self.topology:
r = self.proximal.normally_distributed_connections(args.potential_pool, radii, init_dist=init_dist)
self.inhibition_radii = r
else:
self.proximal.uniformly_distributed_connections(args.potential_pool, init_dist=init_dist)
if args.boosting_alpha is not None:
# Make a dedicated SDR to track column activation frequencies for
# boosting.
self.boosting = SDR(self.columns,
activation_frequency_alpha = args.boosting_alpha,
# Note: average overlap is useful to know, but is not part of the boosting algorithm.
average_overlap_alpha = args.boosting_alpha,)
# Initialize to the target activation frequency/sparsity.
self.boosting.activation_frequency.fill(args.sparsity)
def compute(self, input_sdr=None):
"""
"""
args = self.args
if self.multisegment:
# EXPERIMENT: Multi segment proximal dendrites.
excitment = self.proximal.compute(input_sdr=input_sdr)
# Logarithmic Boosting Function.
if args.boosting_alpha is not None:
target_sparsity = args.sparsity / self.segments_per_cell
boost = np.log2(self.proximal.outputs.activation_frequency) / np.log2(target_sparsity)
boost = np.nan_to_num(boost).reshape(self.proximal.outputs.dimensions)
excitment = boost * excitment
# Break ties randomly
excitment = excitment + np.random.uniform(0, .5, size=self.proximal.outputs.dimensions)
self.segment_excitement = excitment
            # Replace the segment dimension with each column's most excited segment.
excitment = np.max(excitment, axis=-1)
raw_excitment = excitment.reshape(-1)
else:
raw_excitment = self.proximal.compute(input_sdr=input_sdr).reshape(-1)
# Logarithmic Boosting Function.
if args.boosting_alpha is not None:
boost = np.log2(self.boosting.activation_frequency) / np.log2(args.sparsity)
boost = np.nan_to_num(boost)
raw_excitment = boost * raw_excitment
# Fast Local Inhibition
if self.topology:
inhibition_radii = self.inhibition_radii
raw_excitment = raw_excitment.reshape(self.columns.dimensions)
avg_local_excitment = scipy.ndimage.filters.gaussian_filter(
# Truncate for speed
raw_excitment, inhibition_radii, mode='reflect', truncate=3.0)
local_excitment = raw_excitment - avg_local_excitment
stddev = np.sqrt(scipy.ndimage.filters.gaussian_filter(
local_excitment**2, inhibition_radii, mode='reflect', truncate=3.0))
raw_excitment = np.nan_to_num(local_excitment / stddev)
raw_excitment = raw_excitment.reshape(-1)
        # EXPERIMENTAL
self.raw_excitment = raw_excitment
# Activate the most excited columns.
#
# Note: excitements are not normally distributed, their local
# neighborhoods use gaussian windows, which are a different thing. Don't
# try to use a threshold, it won't work. Especially not: threshold =
# scipy.stats.norm.ppf(1 - sparsity).
k = self.columns.size * args.sparsity
k = max(1, int(round(k)))
self.columns.flat_index = np.argpartition(-raw_excitment, k-1)[:k]
return self.columns
def learn(self, input_sdr=None, column_sdr=None):
"""
Make the spatial pooler learn about its current inputs and active columns.
"""
if self.multisegment:
# Learn about regular activations
self.columns.assign(column_sdr)
segment_excitement = self.segment_excitement[self.columns.index]
seg_idx = np.argmax(segment_excitement, axis=-1)
# seg_idx = np.random.choice(self.segments_per_cell, size=len(self.columns))
self.proximal.learn_outputs(input_sdr=input_sdr,
output_sdr=self.columns.index + (seg_idx,))
else:
# Update proximal synapses and their permanences. Also assigns into our column SDR.
self.proximal.learn_outputs(input_sdr=input_sdr, output_sdr=column_sdr)
        # Update the exponential moving average of each column's activation frequency.
self.boosting.assign(self.columns)
# Book keeping.
self.stability(self.inputs, self.columns.index)
self.age += 1
def stabilize(self, prior_columns, percent):
"""
        This forces some of the prior columns to remain active in order to maintain
the given percent of column overlap between time steps. Always call
this between compute and learn!
"""
# num_active = (len(self.columns) + len(prior_columns)) / 2
num_active = len(self.columns)
overlap = self.columns.overlap(prior_columns)
stabile_columns = int(round(num_active * overlap))
target_columns = int(round(num_active * percent))
add_columns = target_columns - stabile_columns
if add_columns <= 0:
return
eligable_columns = np.setdiff1d(prior_columns.flat_index, self.columns.flat_index)
eligable_excite = self.raw_excitment[eligable_columns]
selected_col_nums = np.argpartition(-eligable_excite, add_columns-1)[:add_columns]
selected_columns = eligable_columns[selected_col_nums]
selected_index = np.unravel_index(selected_columns, self.columns.dimensions)
# Learn. Note: selected columns will learn twice. The previously
# active segments learn now, the current most excited segments in the
# method SP.learn().
        # Or learn not at all if there's a bug in my code...
# if self.multisegment:
# if hasattr(self, 'prior_segment_excitement'):
# segment_excitement = self.prior_segment_excitement[selected_index]
# seg_idx = np.argmax(segment_excitement, axis=-1)
# self.proximal.learn_outputs(input_sdr=input_sdr,
# output_sdr=selected_index + (seg_idx,))
# self.prev_segment_excitement = self.segment_excitement
# else:
# 1/0
self.columns.flat_index = np.concatenate([self.columns.flat_index, selected_columns])
def plot_boost_functions(self, beta = 15):
# Generate sample points
dc = np.linspace(0, 1, 10000)
from matplotlib import pyplot as plt
fig = plt.figure(1)
ax = plt.subplot(111)
log_boost = lambda f: np.log(f) / np.log(self.args.sparsity)
exp_boost = lambda f: np.exp(beta * (self.args.sparsity - f))
logs = [log_boost(f) for f in dc]
exps = [exp_boost(f) for f in dc]
plt.plot(dc, logs, 'r', dc, exps, 'b')
plt.title("Boosting Function Comparison \nLogarithmic in Red, Exponential in Blue (beta = %g)"%beta)
ax.set_xlabel("Activation Frequency")
ax.set_ylabel("Boost Factor")
plt.show()
def stability(self, input_sdr, output_sdr, diag=True):
"""
Measures the short and long term stability from compute's input stream.
        Do not call this directly!  Instead, enable it via
        SpatialPooler.__init__() and the results will be printed to STDOUT.
Argument input_sdr, output_sdr ...
Attribute stability_sample_size is how many samples to take during each
sample period.
        Attribute stability_samples is a list of samples, where each sample is a
list of pairs of (input_sdr, output_sdr). The index is how
many (short term) sample periods ago the sample was taken.
Attribute stability_schedule is a list of ages to take input/output
samples at, in descending order so that the soonest sample age
is at the end of the list. Append -1 to the schedule to
disable stability monitoring. The final age in the schedule is
special, on this age it calculates the stability and makes a
new schedule for the next period.
Class Attribute stability_st_period
st == short term, lt == long term
The stability period is how many compute cycles this SP will
wait before recomputing the stability samples and comparing with
the original results. This calculates two measures of stability:
short and long term. The long term period is written in terms
of the short term period.
Class Attribute stability_lt_period
Units: self.stability_st_period
Attribute st_stability, lt_stability are the most recent measurements of
short and long term stability, respectively. These are
initialized to None.
"""
if self.stability_schedule[-1] != self.age:
return
else:
self.stability_schedule.pop()
if self.stability_schedule:
# Not the final scheduled checkup. Take the given sample and return.
self.stability_samples[0].append((input_sdr, output_sdr))
return
# Else: calculate the stability and setup for the next period of
# stability sampling & monitoring.
        assert(False) # This method probably won't work since the switch to the SDR class...
def overlap(a, b):
a = set(zip(*a))
b = set(zip(*b))
overlap = len(a.intersection(b))
overlap_pct = overlap / min(len(a), len(b))
return overlap_pct
# Rerun the samples through the machine.
try:
st_samples = self.stability_samples[1]
except IndexError:
self.st_stability = None # This happens when age < 2 x st_period
else:
st_rerun = [self.compute(inp, learn=False) for inp, out in st_samples]
self.st_stability = np.mean([overlap(re, io[1]) for re, io in zip(st_rerun, st_samples)])
try:
lt_samples = self.stability_samples[self.stability_lt_period]
except IndexError:
self.lt_stability = None # This happens when age < st_period X (lt_period + 1)
else:
lt_rerun = [self.compute(inp, learn=False) for inp, out in lt_samples]
self.lt_stability = np.mean([overlap(re, io[1]) for re, io in zip(lt_rerun, lt_samples)])
# Make a new sampling schedule.
sample_period = range(self.age + 1, self.age + self.stability_st_period)
self.stability_schedule = random.sample(sample_period, self.stability_sample_size)
# Add the next stability calculation to the end of the schedule.
self.stability_schedule.append(sample_period.stop)
self.stability_schedule.sort(reverse=True)
# Roll the samples buffer.
self.stability_samples.insert(0, [])
self.stability_samples = self.stability_samples[:self.stability_lt_period + 1]
# Print output
if diag:
s = ""
if self.st_stability is not None:
s += "Stability (%d) %-5.03g"%(self.stability_st_period, self.st_stability,)
if self.lt_stability is not None:
s += " | (x%d) %-5.03g"%(self.stability_lt_period, self.lt_stability)
if s:
print(s)
def noise_perturbation(self, inp, flip_bits, diag=False):
"""
Measure the change in SDR overlap after moving some of the ON bits.
"""
tru = self.compute(inp, learn=False)
# Make sparse input dense.
if isinstance(inp, tuple) or inp.shape != self.args.input_dimensions:
dense = np.zeros(self.args.input_dimensions)
dense[inp] = True
inp = dense
# Move some of the on bits around.
on_bits = list(zip(*np.nonzero(inp)))
off_bits = list(zip(*np.nonzero(np.logical_not(inp))))
flip_bits = min(flip_bits, min(len(on_bits), len(off_bits)) )
flip_off = random.sample(on_bits, flip_bits)
flip_on = random.sample(off_bits, flip_bits)
noisy = np.array(inp, dtype=np.bool) # Force copy
noisy[list(zip(*flip_off))] = False
noisy[list(zip(*flip_on))] = True
# Calculate the overlap in SP output after adding noise.
near = self.compute(noisy, learn=False)
tru = set(zip(*tru))
near = set(zip(*near))
overlap = len(tru.intersection(near))
overlap_pct = overlap / len(tru)
if diag:
print("SP Noise Robustness (%d flipped) %g"%(flip_bits, overlap_pct))
return overlap_pct
def noise_robustness(self, inps, diag=False):
"""
Plot the noise robustness as a function.
Argument 'inps' is list of encoded inputs.
"""
if False:
# Range Num Samples Resolution
# [0, 10) 20 .5
# [10, 50) 40 1
# [50, 100] 11 5
noises = list(np.arange(20) / 2) + list(np.arange(10, 40)) + list(np.arange(11) * 5 + 50)
elif False:
# Exponential progression of noises, samples many orders of magnitude of noise.
num_samples = 50
x = np.exp(np.arange(num_samples))
noises = list(x * 100 / np.max(x))
else:
# Number of ON bits in encoded input-space +1
nz = int(round(np.mean([np.count_nonzero(s) for s in inps[:10]])))
noises = list(np.arange(nz + 1))
cutoff = len(noises) // 10 # First 'cutoff' many samples have full accuracy.
while len(noises) > 50 + cutoff: # Decimate to a sane number of sample points
noises = noises[:cutoff] + noises[cutoff::2]
pct_over = []
for n in noises:
z = 0
for inp in inps:
z += self.noise_perturbation(inp, n, diag=False)
pct_over.append(z/len(inps))
if diag:
from matplotlib import pyplot as plt
plt.figure(1)
plt.plot(noises, pct_over)
plt.title('todo')
plt.xlabel('todo')
plt.ylabel('todo')
plt.show()
return noises, pct_over
def statistics(self):
stats = 'SP '
stats += self.proximal.statistics()
if self.args.boosting_alpha is not None:
stats += 'Columns ' + self.boosting.statistics()
af = self.boosting.activation_frequency
boost_min = np.log2(np.min(af)) / np.log2(self.args.sparsity)
boost_mean = np.log2(np.mean(af)) / np.log2(self.args.sparsity)
boost_max = np.log2(np.max(af)) / np.log2(self.args.sparsity)
stats += '\tLogarithmic Boosting Multiplier min/mean/max {:-.04g}% / {:-.04g}% / {:-.04g}%\n'.format(
boost_min * 100,
boost_mean * 100,
boost_max * 100,)
# TODO: Stability, if enabled.
pass
# TODO: Noise robustness, if enabled.
pass
return stats
class TemporalMemoryParameters(Parameters):
parameters = [
'add_synapses', # How many new synapses to add to subthreshold learning segments.
'cells_per_column',
        'initial_segment_size', # How many synapses to start new segments with.
'segments_per_cell',
'synapses_per_segment',
'permanence_inc',
'permanence_dec',
'mispredict_dec',
'permanence_thresh',
'predictive_threshold', # Segment excitement threshold for predictions.
'learning_threshold', # Segment excitement threshold for learning.
]
def __init__(self,
cells_per_column = 1.022e+01,
learning_threshold = 7.215e+00,
mispredict_dec = 1.051e-03,
permanence_dec = 9.104e-03,
permanence_inc = 2.272e-02,
permanence_thresh = 2.708e-01,
predictive_threshold = 6.932e+00,
segments_per_cell = 1.404e+02,
synapses_per_segment = 1.190e+02,
add_synapses = 1,
initial_segment_size = 10,
):
# Get the parent class to save all these parameters as attributes of the same name.
super().__init__(**{k:v for k,v in locals().items() if k != 'self'})
class TemporalMemory:
"""
This implementation is based on the paper: Hawkins J. and Ahmad S. (2016)
    Why Neurons Have Thousands of Synapses, a Theory of Sequence Memory in
Neocortex. Frontiers in Neural Circuits 10:23 doi: 10.3389/fncir.2016.00023
"""
def __init__(self,
parameters,
column_sdr,
apical_sdr=None,
inhibition_sdr=None,
context_sdr=None,
):
"""
Argument parameters is an instance of TemporalMemoryParameters
Argument column_dimensions ...
"""
assert(isinstance(parameters, TemporalMemoryParameters))
self.args = args = parameters
assert(isinstance(column_sdr, SDR))
self.columns = column_sdr
self.cells_per_column = int(round(args.cells_per_column))
if self.cells_per_column < 1:
raise ValueError("Cannot create TemporalMemory with cells_per_column < 1.")
self.segments_per_cell = int(round(args.segments_per_cell))
self.active = SDR((self.columns.size, self.cells_per_column),
activation_frequency_alpha = 1/1000,
average_overlap_alpha = 1/1000,)
self.anomaly_alpha = 1/1000
self.mean_anomaly = 0
self.basal = Dendrite(
input_sdr = SDR(context_sdr if context_sdr is not None else self.active),
active_sdr = SDR(self.active),
segments_per_cell = args.segments_per_cell,
synapses_per_segment = args.synapses_per_segment,
initial_segment_size = args.initial_segment_size,
add_synapses = args.add_synapses,
learning_threshold = args.learning_threshold,
predictive_threshold = args.predictive_threshold,
permanence_inc = args.permanence_inc,
permanence_dec = args.permanence_dec,
permanence_thresh = args.permanence_thresh,
mispredict_dec = args.mispredict_dec,)
if apical_sdr is None:
self.apical = None
else:
assert(isinstance(apical_sdr, SDR))
self.apical = Dendrite(
input_sdr = apical_sdr,
active_sdr = self.active,
segments_per_cell = args.segments_per_cell,
synapses_per_segment = args.synapses_per_segment,
initial_segment_size = args.initial_segment_size,
add_synapses = args.add_synapses,
learning_threshold = args.learning_threshold,
predictive_threshold = args.predictive_threshold,
permanence_inc = args.permanence_inc,
permanence_dec = args.permanence_dec,
permanence_thresh = args.permanence_thresh,
mispredict_dec = args.mispredict_dec,)
if inhibition_sdr is None:
self.inhibition = None
else:
assert(isinstance(inhibition_sdr, SDR))
self.inhibition = Dendrite(
input_sdr = inhibition_sdr,
active_sdr = self.active,
segments_per_cell = args.segments_per_cell,
synapses_per_segment = args.synapses_per_segment,
initial_segment_size = args.initial_segment_size,
add_synapses = args.add_synapses,
learning_threshold = args.learning_threshold,
predictive_threshold = args.predictive_threshold,
permanence_inc = args.permanence_inc,
permanence_dec = args.permanence_dec,
permanence_thresh = args.permanence_thresh,
mispredict_dec = 0,) # Is not but should be an inhibited segment in an active cell.
self.reset()
def reset(self):
self.active.zero()
self.reset_state = True
def compute(self,
context_sdr=None,
column_sdr=None,
apical_sdr=None,
inhibition_sdr=None,):
"""
        Attribute anomaly, mean_anomaly are the fraction of neuron activations
        which were not predicted (i.e. the bursting fraction). Range [0, 1]
"""
########################################################################
# PHASE 1: Make predictions based on the previous timestep.
########################################################################
if context_sdr is None:
context_sdr = self.active
basal_predictions = self.basal.compute(input_sdr=context_sdr)
predictions = basal_predictions
if self.apical is not None:
apical_predictions = self.apical.compute(input_sdr=apical_sdr)
predictions = np.logical_or(predictions, apical_predictions)
# Inhibition cancels out predictions. The technical term is
# hyper-polarization. Practically speaking, this is needed so that
# inhibiting neurons can cause mini-columns to burst.
if self.inhibition is not None:
inhibited = self.inhibition.compute(input_sdr=inhibition_sdr)
predictions = np.logical_and(predictions, np.logical_not(inhibited))
########################################################################
# PHASE 2: Determine the currently active neurons.
########################################################################
self.columns.assign(column_sdr)
columns = self.columns.flat_index
# Activate all neurons which are in a predictive state and in an active
# column, unless they are inhibited by apical input.
active_dense = predictions[columns]
col_num, neur_idx = np.nonzero(active_dense)
# This gets the actual column index, undoes the effect of discarding the
# inactive columns before the nonzero operation.
col_idx = columns[col_num]
predicted_active = (col_idx, neur_idx)
# If a column activates but was not predicted by any neuron segment,
# then it bursts. The bursting columns are the unpredicted columns.
bursting_columns = np.setdiff1d(columns, col_idx)
# All neurons in bursting columns activate.
burst_col_idx = np.repeat(bursting_columns, self.cells_per_column)
burst_neur_idx = np.tile(np.arange(self.cells_per_column), len(bursting_columns))
burst_active = (burst_col_idx, burst_neur_idx)
# Apply inhibition to the bursting mini-columns.
if self.inhibition is not None:
uninhibited_mask = np.logical_not(inhibited[burst_active])
burst_active = np.compress(uninhibited_mask, burst_active, axis=1)
# TODO: Combined apical and basal predictions can cause L5 cells to
# spontaneously activate.
if False:
volunteers = np.logical_and(self.basal_predictions, self.apical_predictions)
volunteers = np.nonzero(volunteers.ravel())
unique1d(volunteers, predicted_active+burst_active)
self.active.index = tuple(np.concatenate([predicted_active, burst_active], axis=1))
# Only tell the dendrite about active cells which are allowed to learn.
bursting_learning = (
bursting_columns,
np.random.randint(0, self.cells_per_column, size=len(bursting_columns)))
# TODO: This will NOT work for CONTEXT, TM ONLY.
self.basal.input_sdr.assign(self.basal.active_sdr) # Only learn about the winner cells from last cycle.
self.basal.active_sdr.index = tuple(np.concatenate([predicted_active, bursting_learning], axis=1))
        # Anomaly metric.
self.anomaly = np.array(burst_active).shape[1] / len(self.active)
alpha = self.anomaly_alpha
self.mean_anomaly = (1-alpha)*self.mean_anomaly + alpha*self.anomaly
def learn(self):
"""
Learn about the previous to current timestep transition.
"""
if self.reset_state:
# Learning on the first timestep after a reset is not useful. The
# issue is that waking up after a reset is inherently unpredictable.
self.reset_state = False
return
# NOTE: All cells in a bursting mini-column will learn. This includes
# starting new segments if necessary. This is different from Numenta's
        # TM which chooses one cell to learn on a bursting column. If in fact
# all newly created segments work correctly, then I may in fact be
# destroying any chance of it learning a unique representation of the
# anomalous sequence by assigning all cells to represent it. I was
# thinking that maybe this would work anyways because the presynapses
        # are chosen randomly, but now it's evolved an initial segment size of 19!
# FIXED?
        # Use the SDRs which were given during the compute phase.
# inputs = previous winner cells, active = current winner cells
self.basal.learn(active_sdr=None)
if self.apical is not None:
self.apical.learn(active_sdr=self.active)
if self.inhibition is not None:
self.inhibition.learn(active_sdr=self.active)
def statistics(self):
stats = 'Temporal Memory\n'
stats += 'Predictive Segments ' + self.basal.statistics()
if self.apical is not None:
stats += 'Apical Segments ' + self.apical.statistics()
if self.inhibition is not None:
stats += 'Inhibition Segments ' + self.inhibition.statistics()
stats += "Mean anomaly %g\n"%self.mean_anomaly
stats += 'Activation statistics ' + self.active.statistics()
return stats
class CorticalRegionParameters(Parameters):
parameters = [
'inp_cols',
'inp_radii',
'out_cols',
'out_radii',
]
class CorticalRegion:
def __init__(self, cerebrum_parameters, region_parameters,
input_sdr,
context_sdr,
apical_sdr,
inhibition_sdr,):
"""
Argument cerebrum_parameters is an instance of CerebrumParameters.
Argument region_parameters is an instance of CorticalRegionParameters.
Argument input_sdr ... feed forward
Argument context_sdr ... all output layers, flat
Argument apical_sdr ... from BG D1 cells
Argument inhibition_sdr ... from BG D2 cells
"""
assert(isinstance(cerebrum_parameters, CerebrumParameters))
assert(isinstance(region_parameters, CorticalRegionParameters))
self.cerebrum_parameters = cerebrum_parameters
self.region_parameters = region_parameters
self.L6_sp = SpatialPooler( cerebrum_parameters.inp_sp,
input_sdr = input_sdr,
column_sdr = SDR(region_parameters.inp_cols),
radii = region_parameters.inp_radii,)
self.L6_tm = TemporalMemory(cerebrum_parameters.inp_tm,
column_sdr = self.L6_sp.columns,
context_sdr = context_sdr,)
self.L5_sp = SpatialPooler( cerebrum_parameters.out_sp,
input_sdr = self.L6_tm.active,
column_sdr = SDR(region_parameters.out_cols),
radii = region_parameters.out_radii,)
self.L5_tm = TemporalMemory(cerebrum_parameters.out_tm,
column_sdr = self.L5_sp.columns,
apical_sdr = apical_sdr,
inhibition_sdr = inhibition_sdr,)
self.L4_sp = SpatialPooler( cerebrum_parameters.inp_sp,
input_sdr = input_sdr,
column_sdr = SDR(region_parameters.inp_cols),
radii = region_parameters.inp_radii,)
self.L4_tm = TemporalMemory(cerebrum_parameters.inp_tm,
column_sdr = self.L4_sp.columns,
context_sdr = context_sdr,)
self.L23_sp = SpatialPooler( cerebrum_parameters.out_sp,
input_sdr = self.L4_tm.active,
column_sdr = SDR(region_parameters.out_cols),
radii = region_parameters.out_radii,)
self.L23_tm = TemporalMemory(cerebrum_parameters.out_tm,
column_sdr = self.L23_sp.columns)
def reset(self):
self.L6_tm.reset()
self.L5_tm.reset()
self.L4_tm.reset()
self.L23_tm.reset()
def compute(self):
self.L6_sp.compute()
self.L6_tm.compute()
self.L5_sp.compute()
self.L5_tm.compute()
self.L4_sp.compute()
self.L4_tm.compute()
self.L23_sp.compute()
self.L23_tm.compute()
def learn(self, bg):
self.L6_sp.learn(column_sdr=np.any(self.L6_tm.active.dense, axis=1))
self.L6_tm.learn()
self.L5_sp.learn(column_sdr=np.any(self.L5_tm.active.dense, axis=1))
self.L5_tm.apical.permanence_inc = bg.d1_inc
self.L5_tm.apical.permanence_dec = bg.d1_dec
self.L5_tm.inhibition.permanence_inc = bg.d2_inc
self.L5_tm.inhibition.permanence_dec = bg.d2_dec
self.L5_tm.learn()
self.L4_sp.learn(column_sdr=np.any(self.L4_tm.active.dense, axis=1))
self.L4_tm.learn()
self.L23_sp.learn(column_sdr=np.any(self.L23_tm.active.dense, axis=1))
self.L23_tm.learn()
def statistics(self):
stats = ''
stats += 'L6 Proximal ' + self.L6_sp.statistics() + '\n'
stats += 'L6 Basal ' + self.L6_tm.statistics() + '\n'
stats += 'L5 Proximal ' + self.L5_sp.statistics() + '\n'
stats += 'L5 Basal ' + self.L5_tm.statistics() + '\n'
stats += 'L4 Proximal ' + self.L4_sp.statistics() + '\n'
stats += 'L4 Basal ' + self.L4_tm.statistics() + '\n'
stats += 'L23 Proximal ' + self.L23_sp.statistics() + '\n'
stats += 'L23 Basal ' + self.L23_tm.statistics() + '\n'
return stats
class CerebrumParameters(Parameters):
parameters = [
'alpha',
'bg',
'inp_sp',
'inp_tm',
'out_sp',
'out_tm',
]
# TODO: Move motor controls into the cerebrum. This isn't important right now
# because I have working motor controls in the eye-experiment file.
class Cerebrum:
"""
"""
def __init__(self, cerebrum_parameters, region_parameters, input_sdrs):
self.cerebrum_parameters = cerebrum_parameters
self.region_parameters = tuple(region_parameters)
self.inputs = tuple(input_sdrs)
self.age = 0
assert(isinstance(cerebrum_parameters, CerebrumParameters))
assert(all(isinstance(rgn, CorticalRegionParameters) for rgn in self.region_parameters))
assert(len(region_parameters) == len(self.inputs))
assert(all(isinstance(inp, SDR) for inp in self.inputs))
# The size of the cortex needs to be known before it can be constructed.
context_size = 0
self.apical_sdrs = []
for rgn_args in self.region_parameters:
num_cols = np.product([int(round(dim)) for dim in rgn_args.out_cols])
cells_per = int(round(cerebrum_parameters.out_tm.cells_per_column))
context_size += num_cols * cells_per * 2
L5_dims = (num_cols * cells_per,)
self.apical_sdrs.append((SDR(L5_dims), SDR(L5_dims)))
self.L23_activity = SDR((context_size/2,))
self.L5_activity = SDR((context_size/2,))
self.context_sdr = SDR((context_size,))
# Construct the Basal Ganglia
self.basal_ganglia = BasalGanglia(cerebrum_parameters.bg,
input_sdr = self.context_sdr,
output_sdr = self.L5_activity,)
# Construct the cortex.
self.regions = []
for rgn_args, inp, apical in zip(self.region_parameters, input_sdrs, self.apical_sdrs):
rgn = CorticalRegion(cerebrum_parameters, rgn_args,
input_sdr = inp,
context_sdr = self.context_sdr,
apical_sdr = self.basal_ganglia.d1.active,
inhibition_sdr = self.basal_ganglia.d2.active,)
self.regions.append(rgn)
# Construct the motor controls.
pass
def reset(self):
self.basal_ganglia.reset()
for rgn in self.regions:
rgn.reset()
def compute(self, reward, learn=True):
"""
Runs a single cycle for a whole network of cortical regions.
Arguments inputs and regions are parallel lists.
Optional Argument apical_input ... dense integer array, shape=output-dimensions
Optional argument learn ... default is True.
"""
for rgn in self.regions:
rgn.compute()
self.L5_activity.assign_flat_concatenate(rgn.L5_tm.active for rgn in self.regions)
self.L23_activity.assign_flat_concatenate(rgn.L23_tm.active for rgn in self.regions)
self.context_sdr.assign_flat_concatenate([self.L5_activity, self.L23_activity])
if not learn:
reward = None
self.basal_ganglia.compute(reward)
if learn:
for rgn in self.regions:
rgn.learn(self.basal_ganglia)
# Motor controls.
pass
if learn:
self.age += 1
def statistics(self):
stats = ''
for idx, rgn in enumerate(self.regions):
stats += 'Region {}\n'.format(idx+1)
stats += rgn.statistics() + '\n'
# stats += self.basal_ganglia.statistics()
return stats
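# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (not part of the classes above) of the
# overlap metric that SpatialPooler.stability() and noise_perturbation() rely
# on: an activation is treated as a set of index tuples and the score is
# |A & B| / min(|A|, |B|) (the noise test divides by |A| instead). The index
# arrays below are made-up examples, not real SP output; numpy is already
# imported at the top of this module as np.
def _sdr_overlap_pct(index_a, index_b):
    # index_a / index_b are tuples of coordinate arrays, as from np.nonzero().
    a = set(zip(*index_a))
    b = set(zip(*index_b))
    if not a or not b:
        return 0.0
    return len(a & b) / min(len(a), len(b))
def _example_sdr_overlap():
    # Two hypothetical 2D activations sharing two of their three active sites.
    a = (np.array([0, 3, 7]), np.array([1, 2, 5]))
    b = (np.array([0, 3, 9]), np.array([1, 2, 0]))
    return _sdr_overlap_pct(a, b)  # -> 2/3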
|
ctrl-z-9000-times/HTM_experiments
|
htm.py
|
Python
|
mit
| 41,441
|
[
"Gaussian",
"NEURON"
] |
5f0a9472655939c73eae29d13ab4c7969d6b98254b3a64200e3a84d03c965b47
|
import collections
import scipy
import numpy as np
import pandas as pd
import warnings
from .plot_utils import ranged_colorbar, make_x_y_ranges, is_cmap_diverging
# matplotlib is technically optional, but required for plotting
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
HAS_MATPLOTLIB = False
else:
HAS_MATPLOTLIB = True
try:
import networkx as nx
except ImportError:
HAS_NETWORKX = False
else:
HAS_NETWORKX = True
# pandas 0.25 not available on py27; can drop this when we drop py27
_PD_VERSION = tuple(int(x) for x in pd.__version__.split('.')[:2])
def _colorbar(with_colorbar, cmap_f, norm, min_val, ax=None):
if with_colorbar is False:
return None
elif with_colorbar is True:
cbmin = np.floor(min_val) # [-1.0..0.0] => -1; [0.0..1.0] => 0
cbmax = 1.0
cb = ranged_colorbar(cmap_f, norm, cbmin, cbmax, ax=ax)
# leave open other inputs to be parsed later (like tuples)
return cb
# TODO: remove following: this is a monkeypatch for a bug in pandas
# see: https://github.com/pandas-dev/pandas/issues/29814
from pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex
def _patch_from_spmatrix(cls, data): # -no-cov-
length, ncol = data.shape
if ncol != 1:
raise ValueError("'data' must have a single column, not '{}'".format(ncol))
# our sparse index classes require that the positions be strictly
# increasing. So we need to sort loc, and arr accordingly.
arr = data.data
#idx, _ = data.nonzero()
idx = data.indices
loc = np.argsort(idx)
arr = arr.take(loc)
idx.sort()
zero = np.array(0, dtype=arr.dtype).item()
dtype = pd.SparseDtype(arr.dtype, zero)
index = IntIndex(length, idx)
return cls._simple_new(arr, index, dtype)
if _PD_VERSION >= (0, 25):
pd.core.arrays.SparseArray.from_spmatrix = classmethod(_patch_from_spmatrix)
# TODO: this is the end of what to remove when pandas is fixed
def _get_total_counter_range(counter):
numbers = [i for key in counter.keys() for i in key]
if len(numbers) == 0:
return (0, 0)
return (min(numbers), max(numbers)+1)
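# A small illustrative helper (not used by the library code): with a
# hypothetical counter whose keys are frozensets of index pairs, the total
# range runs from the smallest index seen to one past the largest.
def _example_total_counter_range():
    counter = collections.Counter({frozenset([2, 5]): 3, frozenset([4, 9]): 1})
    return _get_total_counter_range(counter)  # -> (2, 10)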
class ContactCount(object):
"""Return object when dealing with contacts (residue or atom).
This contains all the information about the contacts of a given type.
This information can be represented several ways. One is as a list of
contact pairs, each associated with the fraction of time the contact
occurs. Another is as a matrix, where the rows and columns label the
pair number, and the value is the fraction of time. This class provides
several methods to get different representations of this data for
further analysis.
In general, instances of this class shouldn't be created by a user using
``__init__``; instead, they will be returned by other methods. So users
will often need to use this object for analysis.
Parameters
----------
counter : :class:`collections.Counter`
the counter describing the count of how often the contact occurred;
key is a frozenset of a pair of numbers (identifying the
atoms/residues); value is the raw count of the number of times it
occurred
object_f : callable
method to obtain the object associated with the number used in
``counter``; typically :meth:`mdtraj.Topology.residue` or
:meth:`mdtraj.Topology.atom`.
n_x : int, tuple(start, end), optional
range of objects in the x direction (used in plotting)
        Default tries to plot the fewest symmetric points.
n_y : int, tuple(start, end), optional
range of objects in the y direction (used in plotting)
        Default tries to show the fewest symmetric points.
max_size : int, optional
maximum size of the count
(used to determine the shape of output matrices and dataframes)
"""
def __init__(self, counter, object_f, n_x=None, n_y=None, max_size=None):
self._counter = counter
self._object_f = object_f
self.total_range = _get_total_counter_range(counter)
self.n_x, self.n_y = make_x_y_ranges(n_x, n_y, counter)
if max_size is None:
self.max_size = max([self.total_range[-1],
self.n_x.max,
self.n_y.max])
else:
self.max_size = max_size
@property
def counter(self):
"""
:class:`collections.Counter` :
keys use index number; count is contact occurrences
"""
return self._counter
@property
def sparse_matrix(self):
"""
:class:`scipy.sparse.dok.dok_matrix` :
sparse matrix representation of contacts
Rows/columns correspond to indices and the values correspond to
the count
"""
max_size = self.max_size
mtx = scipy.sparse.dok_matrix((max_size, max_size))
for (k, v) in self._counter.items():
key = list(k)
mtx[key[0], key[1]] = v
mtx[key[1], key[0]] = v
return mtx
@property
def df(self):
"""
:class:`pandas.SparseDataFrame` :
DataFrame representation of the contact matrix
Rows/columns correspond to indices and the values correspond to
the count
"""
mtx = self.sparse_matrix
index = list(range(self.max_size))
columns = list(range(self.max_size))
if _PD_VERSION < (0, 25): # py27 only -no-cov-
mtx = mtx.tocoo()
return pd.SparseDataFrame(mtx, index=index, columns=columns)
df = pd.DataFrame.sparse.from_spmatrix(mtx, index=index,
columns=columns)
# note: I think we can always use float here for dtype; but in
# principle maybe we need to inspect and get the internal type?
# Problem is, pandas technically stores a different dtype for each
# column.
df = df.astype(pd.SparseDtype("float", np.nan))
return df
def to_networkx(self, weighted=True, as_index=False, graph=None):
"""Graph representation of contacts (requires networkx)
Parameters
----------
weighted : bool
whether to use the frequencies as edge weights in the graph,
default True
as_index : bool
if True, the nodes in the graph are integer indices; if False
(default), the nodes are mdtraj.topology objects (Atom/Residue)
graph : networkx.Graph or None
if provided, edges are added to an existing graph
Returns
-------
networkx.Graph :
graph representation of the contact matrix
"""
if not HAS_NETWORKX: # -no-cov-
raise RuntimeError("Error importing networkx")
graph = nx.Graph() if graph is None else graph
for pair, value in self.counter.items():
if not as_index:
pair = map(self._object_f, pair)
attr_dict = {'weight': value} if weighted else {}
graph.add_edge(*pair, **attr_dict)
return graph
def _check_number_of_pixels(self, figure):
"""
This checks to see if the number of pixels in the figure is high enough
        to accurately represent the contact map. It raises a RuntimeWarning
if this is not the case.
Parameters
----------
figure: :class:`matplotlib.Figure`
matplotlib figure to compare the amount of pixels from
"""
        # Get dpi, and total pixel width and pixel height
dpi = figure.get_dpi()
figwidth = figure.get_figwidth()
figheight = figure.get_figheight()
xpixels = dpi*figwidth
ypixels = dpi*figheight
# Check if every value has a pixel
if (xpixels/self.n_x.range_length < 1 or
ypixels/self.n_y.range_length < 1):
msg = ("The number of pixels in the figure is insufficient to show"
" all the contacts.\n Please save this as a vector image "
"(such as a PDF) to view the correct result.\n Another "
"option is to increase the 'dpi' (currently: "+str(dpi)+"),"
" or the 'figsize' (currently: " + str((figwidth,
figheight)) +
").\n Recommended minimum amount of pixels = "
+ str((self.n_x.range_length,
self.n_y.range_length))
+ " (width, height).")
warnings.warn(msg, RuntimeWarning)
def plot(self, cmap='seismic', diverging_cmap=None, with_colorbar=True,
**kwargs):
"""
Plot contact matrix (requires matplotlib)
Parameters
----------
cmap : str
color map name, default 'seismic'
diverging_cmap : bool
Whether the given color map is treated as diverging (if
``True``) or sequential (if False). If a color map is diverging
and all data is positive, only the upper half of the color map
is used. Default (None) will give correct results if ``cmap`` is
the string name of a known sequential or diverging matplotlib
color map and will treat as sequential if unknown.
with_colorbar: bool
Whether to include a color bar legend.
**kwargs
All additional keyword arguments to be passed to the
:func:`matplotlib.pyplot.subplots` call
Returns
-------
fig : :class:`matplotlib.Figure`
matplotlib figure object for this plot
ax : :class:`matplotlib.Axes`
matplotlib axes object for this plot
"""
if not HAS_MATPLOTLIB: # pragma: no cover
raise RuntimeError("Error importing matplotlib")
fig, ax = plt.subplots(**kwargs)
# Check the number of pixels of the figure
self._check_number_of_pixels(fig)
self.plot_axes(ax=ax, cmap=cmap, diverging_cmap=diverging_cmap,
with_colorbar=with_colorbar)
return (fig, ax)
def plot_axes(self, ax, cmap='seismic', diverging_cmap=None,
with_colorbar=True):
"""
Plot contact matrix on a matplotlib.axes
Parameters
----------
ax : matplotlib.axes
axes to plot the contact matrix on
cmap : str
color map name, default 'seismic'
diverging_cmap : bool
If True, color map interpolation is from -1.0 to 1.0; allowing
diverging color maps to be used for contact maps and contact
differences. If false, the range is from 0 to 1.0. Default value
of None selects a value based on the value of cmap, treating as
False for unknown color maps.
with_colorbar : bool
If a colorbar is added to the axes
"""
if diverging_cmap is None:
diverging_cmap = is_cmap_diverging(cmap)
vmin, vmax = (-1, 1) if diverging_cmap else (0, 1)
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
cmap_f = plt.get_cmap(cmap)
ax.axis([self.n_x.min, self.n_x.max, self.n_y.min, self.n_y.max])
ax.set_facecolor(cmap_f(norm(0.0)))
min_val = 0.0
for (pair, value) in self.counter.items():
if value < min_val:
min_val = value
pair_list = list(pair)
patch_0 = matplotlib.patches.Rectangle(
pair_list, 1, 1,
facecolor=cmap_f(norm(value)),
linewidth=0
)
patch_1 = matplotlib.patches.Rectangle(
(pair_list[1], pair_list[0]), 1, 1,
facecolor=cmap_f(norm(value)),
linewidth=0
)
ax.add_patch(patch_0)
ax.add_patch(patch_1)
_colorbar(with_colorbar, cmap_f, norm, min_val, ax=ax)
def most_common(self, obj=None):
"""
        Most common values (ordered) with objects as keys.
This uses the objects for the contact pair (typically MDTraj
``Atom`` or ``Residue`` objects), instead of numeric indices. This
is more readable and can be easily used for further manipulation.
Parameters
----------
obj : MDTraj Atom or Residue
if given, the return value only has entries including this
object (allowing one to, for example, get the most common
contacts with a specific residue)
Returns
-------
list :
the most common contacts in order. If the list is ``l``, then
each element ``l[e]`` is a tuple with two parts: ``l[e][0]`` is
the key, which is a pair of Atom or Residue objects, and
``l[e][1]`` is the count of how often that contact occurred.
See also
--------
most_common_idx : same thing, using index numbers as key
"""
if obj is None:
result = [
([self._object_f(idx) for idx in common[0]], common[1])
for common in self.most_common_idx()
]
else:
obj_idx = obj.index
result = [
([self._object_f(idx) for idx in common[0]], common[1])
for common in self.most_common_idx()
if obj_idx in common[0]
]
return result
def most_common_idx(self):
"""
Most common values (ordered) with indices as keys.
Returns
-------
list :
            the most common contacts in order. If the list is ``l``,
then each element ``l[e]`` consists of two parts: ``l[e][0]`` is
a pair of integers, representing the indices of the objects
associated with the contact, and ``l[e][1]`` is the count of how
often that contact occurred
See also
--------
most_common : same thing, using objects as key
"""
return self._counter.most_common()
def filter(self, idx):
"""New ContactCount filtered to idx.
        Returns a new ContactCount containing only the counter keys/values
        where both members of the key are in idx
"""
dct = {k: v for k, v in self._counter.items()
if all([i in idx for i in k])}
new_count = collections.Counter()
new_count.update(dct)
return ContactCount(new_count, self._object_f, self.n_x, self.n_y)
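# ---------------------------------------------------------------------------
# A minimal usage sketch under assumed inputs. ContactCount instances are
# normally returned by the contact-map objects rather than built by hand; the
# counter, the identity object_f, and the integer n_x/n_y values below are
# placeholders for illustration (the class docstring states integer ranges
# are accepted).
def _example_contact_count():
    counter = collections.Counter({frozenset([0, 3]): 5, frozenset([1, 3]): 2})
    cc = ContactCount(counter, object_f=lambda idx: idx, n_x=4, n_y=4)
    pairs = cc.most_common_idx()  # [(frozenset({0, 3}), 5), (frozenset({1, 3}), 2)]
    matrix = cc.sparse_matrix     # symmetric dok_matrix holding the counts
    return pairs, matrix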
|
dwhswenson/contact_map
|
contact_map/contact_count.py
|
Python
|
lgpl-2.1
| 14,734
|
[
"MDTraj"
] |
20411736f0656ac2ee1b3cfcf2ebd8e96f70df3408bcd8ceac64160868e74b84
|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
from moviepy.editor import VideoFileClip
from IPython.display import HTML
import math
import sys
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
# defining a blank mask to start with
mask = np.zeros_like(img)
# defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
# returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len,
maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines_custom(line_img, lines)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, alpha=0.8, beta=1., lamb=0.):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + λ
NOTE: initial_img and img must be the same shape!
lamb is lambda
"""
return cv2.addWeighted(initial_img, alpha, img, beta, lamb)
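# A tiny self-check sketch (illustrative only, not part of the pipeline):
# cv2.addWeighted computes initial_img*alpha + img*beta + lamb with uint8
# saturation, which is exactly what weighted_img() wraps.
def _example_weighted_img():
    base = np.full((2, 2, 3), 100, dtype=np.uint8)    # stand-in "initial_img"
    overlay = np.full((2, 2, 3), 50, dtype=np.uint8)  # stand-in "img" with lines
    blended = weighted_img(overlay, base, alpha=0.8, beta=1., lamb=0.)
    # Each pixel is 100*0.8 + 50*1.0 + 0 = 130 (well inside the uint8 range).
    return blended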
def draw_lines_custom(img, lines, color=[255, 0, 0], thickness=7):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function above
"""
# Initialise arrays
positive_slope_points = []
negative_slope_points = []
positive_slope_intercept = []
negative_slope_intercept = []
for line in lines:
for x1, y1, x2, y2 in line:
slope = (y1 - y2) / (x1 - x2)
# print("Points: ", [x1, y1, x2, y2])
length = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
# print("Length: ", length)
if not math.isnan(slope):
if length > 50:
if slope > 0:
positive_slope_points.append([x1, y1])
positive_slope_points.append([x2, y2])
positive_slope_intercept.append([slope, y1 - slope * x1])
elif slope < 0:
negative_slope_points.append([x1, y1])
negative_slope_points.append([x2, y2])
negative_slope_intercept.append([slope, y1 - slope * x1])
# Get intercept and coefficient of fitted lines
pos_coef, pos_intercept = find_line_fit(positive_slope_intercept)
neg_coef, neg_intercept = find_line_fit(negative_slope_intercept)
# Get intersection point
intersection_x_coord = intersection_x(pos_coef, pos_intercept, neg_coef, neg_intercept)
# Plot lines
draw_sep_lines(pos_coef, pos_intercept, intersection_x_coord, img, color, thickness)
draw_sep_lines(neg_coef, neg_intercept, intersection_x_coord, img, color, thickness)
def intersection_x(coef1, intercept1, coef2, intercept2):
"""Returns x-coordinate of intersection of two lines."""
x = (intercept2 - intercept1) / (coef1 - coef2)
return x
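# Illustrative-only example of intersection_x() with made-up lane-line fits:
# solving m1*x + b1 = m2*x + b2 gives x = (b2 - b1) / (m1 - m2).
def _example_intersection_x():
    # Hypothetical left lane y = -0.7x + 650 and right lane y = 0.6x + 20;
    # (20 - 650) / (-0.7 - 0.6) is roughly 484.6.
    return intersection_x(coef1=-0.7, intercept1=650, coef2=0.6, intercept2=20)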
def draw_sep_lines(coef, intercept, intersection_x, img, color, thickness):
imshape = img.shape
# Get starting and ending points of regression line, ints.
# print("Coef: ", coef, "Intercept: ", intercept,
# "intersection_x: ", intersection_x)
point_one = (int(intersection_x), int(intersection_x * coef + intercept))
point_two = 0
if coef > 0:
point_two = (imshape[1]-1, int(imshape[1] * coef + intercept))
elif coef < 0:
point_two = (0, int(0 * coef + intercept))
print("Point one: ", point_one, "Point two: ", point_two)
test = new_coordinates(point_one, point_two)
# test2 = (482,508)
# test2 = new_coordinates((400,400), (600,200))
# Draw line using cv2.line
cv2.line(img, test, point_two, color, thickness)
# cv2.line(img, (400,400), (600,200), [0,255,0], thickness)
# cv2.line(img, (400,400), test2, [204,255,204], thickness)
# cv2.line(img, point_one, point_two, color, thickness)
print("--------------------------------------------------------------------------------------------------------")
# def new_coordinates(point_one, point_two):
# print(("x1", point_one[0], "y1", point_one[1]), ("x2", point_two[0], "y2", point_two[1]))
# distance = math.sqrt((point_two[0] - point_one[0]) ** 2 + (point_two[1] - point_one[1]) ** 2)
# print("distance between point one and two", distance)
#
# slope = (point_two[1] - point_one[1]) / (point_two[0] - point_one[0])
# print("Slope", slope)
#
# angle = math.atan(slope)
# print("angle", angle)
#
# a = math.sin(angle) * (distance/30)
# print("a", a)
# b = math.cos(angle) * (distance/30)
# print("b", b)
#
# x_a = point_one[0] + a
# y_b = point_one[1] + b
#
# print("New points", (int(x_a), int(y_b)))
# new_distance = math.sqrt((int(x_a) - point_two[0]) ** 2 + (int(y_b) - point_two[1]) ** 2)
# print("new distance", new_distance)
# return int(x_a), int(y_b)
def new_coordinates(point_one, point_two):
"""
Based on "The intercept theorem", also known as "Thales' theorem"
https://en.wikipedia.org/wiki/Intercept_theorem
"""
dx = (point_two[0] - point_one[0])
dy = (point_two[1] - point_one[1])
x_a = point_one[0] + dx/20
y_b = point_one[1] + dy/20
print("New points", (int(x_a), int(y_b)))
return int(x_a), int(y_b)
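# Illustrative-only example of new_coordinates(): the returned point lies 1/20
# of the way from point_one towards point_two, the "intercept theorem" step
# used above to pull the drawn line slightly back from the intersection point.
def _example_new_coordinates():
    # dx = 200, dy = -200, so the result is (400 + 10, 400 - 10) = (410, 390).
    return new_coordinates((400, 400), (600, 200))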
def find_line_fit(slope_intercept):
"""slope_intercept is an array [[slope, intercept], [slope, intercept]...]."""
# Initialise arrays
kept_slopes = []
kept_intercepts = []
# print("Slope & intercept: ", slope_intercept)
if len(slope_intercept) == 1:
return slope_intercept[0][0], slope_intercept[0][1]
# Remove points with slope not within 1.5 standard deviations of the mean
slopes = [pair[0] for pair in slope_intercept]
mean_slope = np.mean(slopes)
slope_std = np.std(slopes)
for pair in slope_intercept:
slope = pair[0]
# print(slope - mean_slope, 1.5 * slope_std)
        if abs(slope - mean_slope) < 1.5 * slope_std:
kept_slopes.append(slope)
kept_intercepts.append(pair[1])
if not kept_slopes:
kept_slopes = slopes
kept_intercepts = [pair[1] for pair in slope_intercept]
# Take estimate of slope, intercept to be the mean of remaining values
slope = np.mean(kept_slopes)
intercept = np.mean(kept_intercepts)
# print("Slope: ", slope, "Intercept: ", intercept)
return slope, intercept
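# Illustrative-only example of find_line_fit(): slope/intercept pairs are
# averaged after discarding slopes far from the mean, so a single wild segment
# does not drag the fitted lane line away.
def _example_find_line_fit():
    fits = [[0.60, 20], [0.62, 18], [0.61, 19], [2.50, -300]]  # last is an outlier
    return find_line_fit(fits)  # roughly (0.61, 19.0); the 2.50 slope is rejected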
# Getting the image
try:
image = mpimg.imread('test_images/solidWhiteRight.jpg')
except FileNotFoundError as e:
print(e)
sys.exit(1)
# plt.imshow(image)
gray_image = grayscale(image)
kernel_size = 5
gaussian_blur_image = gaussian_blur(gray_image, kernel_size)
low_threshold = 50
high_threshold = 150
edges_image = canny(gaussian_blur_image, low_threshold, high_threshold)
# Masking the image
imshape = image.shape
vertices = np.array([[(50, imshape[0]), (400, 340), (560, 340), (imshape[1], imshape[0])]], dtype=np.int32)
masked_edges = region_of_interest(edges_image, vertices)
# Applying Hough transform to masked image
rho = 1
theta = np.pi/180
threshold = 10
min_line_length = 10
max_line_gap = 2
hough_lines_image = hough_lines(masked_edges, rho, theta, threshold, min_line_len=min_line_length, max_line_gap=max_line_gap)
combo_image = weighted_img(hough_lines_image, image)
# Display images
# images = [hough_lines_image, masked_edges, combo_image]
# for ima in images:
# plt.figure()
# plt.imshow(ima)
f = plt.figure()
f.add_subplot(2, 2, 1)
plt.imshow(image)
plt.title('Original image')
f.add_subplot(2, 2, 2)
plt.imshow(masked_edges, cmap='gray')
plt.title('Masked image')
f.add_subplot(2, 2, 3)
plt.imshow(hough_lines_image, cmap='Greys_r')
plt.title("Canny edges of Gaussian image")
f.add_subplot(2, 2, 4)
plt.imshow(combo_image)
plt.title("Hough transformed image of Canny edges")
plt.show()
|
akshaybabloo/Car-ND
|
Project_1/test2.py
|
Python
|
mit
| 10,295
|
[
"Gaussian"
] |
d51a208638308cc31746f530386b67959b279d74e3255d954500866f99faca85
|
import netCDF4 as netcdf
import numpy as np
f = netcdf.Dataset('data/md-solvent-langevin.nc', 'r')
dis = f.variables['distance']
chunksize = 50000
data = []
maxstep = dis.shape[0]
i = range(0, maxstep + chunksize, chunksize)
for k in xrange(len(i)-1):
print i[k], i[k+1]
data.append(dis[i[k]:i[k+1]])
d = np.hstack(data)
np.save('data/md-solvent-langevin-distance.npy', d)
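# A small generator sketch (illustration only; the hard-coded file and variable
# names above are left untouched) of the same chunked-read pattern, which keeps
# memory use bounded instead of loading the whole netCDF variable at once.
def iter_chunks(variable, chunksize=50000):
    """Yield successive slices of a netCDF variable."""
    maxstep = variable.shape[0]
    for start in range(0, maxstep, chunksize):
        yield variable[start:start + chunksize]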
|
nrego/westpa
|
lib/examples/wca-dimer_openmm/bruteforce/extract_distance.py
|
Python
|
gpl-3.0
| 388
|
[
"NetCDF"
] |
ae0767187e9141e7b018ee6fab75b053fb246f62e77947fe538b372b198a3ed9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import proteindf_bridge as bridge
from .taskobject import TaskObject
from .process import Process
import logging
logger = logging.getLogger(__name__)
class QcProtonate(TaskObject):
''' execute protonate
>>> tmp_pdb = bridge.Pdb('./data/sample/1AKG.pdb')
>>> atomgroup = tmp_pdb.get_atomgroup()
>>> p = QcProtonate(name='protonate_1akg', atomgroup=atomgroup)
>>> p.protonate()
USER MOD reduce.3.24.130724 H: found=0, std=0, add=98, rem=0, adj=4
...
>>> p.protonate_group()
'''
def __init__(self, name, backend='reduce'):
""" initialize protonate object
        :param str name: name of this protonation task
"""
# initialize base object
super(QcProtonate, self).__init__(name=name)
# backend
self._data['backend'] = str(backend)
self._AMBERHOME = os.environ.get('AMBERHOME', '')
self._check_AMBERHOME()
def _check_AMBERHOME(self):
if len(self._AMBERHOME) == 0:
logger.warning("environ parameter, AMBERHOME, looks like empty.")
####################################################################
# property
####################################################################
# backend ----------------------------------------------------------
def _get_backend(self):
return self._data.get("backend")
backend = property(_get_backend)
# model_name -------------------------------------------------------
def _get_model_name(self):
return self._data.get("model_name", "model_1")
model_name = property(_get_model_name)
####################################################################
# method
####################################################################
def run(self, output_path=""):
return_code = -1
self.cd_workdir()
if self.backend == 'reduce':
input_pdbfile = os.path.join(self.work_dir, 'original.pdb')
self.atomgroup2pdb(self.model, input_pdbfile,
model_name=self.model_name)
out_pdbfile = os.path.join(self.work_dir, 'protonated.pdb')
return_code = self._run_reduce(input_pdbfile, out_pdbfile)
if return_code == 0:
output_atomgroup = self._pdb2brd(out_pdbfile)
# pickup first model as result
self.output_model = output_atomgroup.get_group(self.model_name)
if len(output_path) > 0:
output_path = os.path.join(self.work_dir, output_path)
logger.info("output protonated file: {}".format(output_path))
protein = bridge.AtomGroup()
protein.set_group("model_1", self.output_model)
self.atomgroup2file(protein, output_path)
self.restore_cwd()
return return_code
def _pdb2brd(self, pdbfile):
assert(isinstance(pdbfile, str))
logger.info('pdb2brd: from {}'.format(pdbfile))
pdb = bridge.Pdb(pdbfile, mode='amber')
return pdb.get_atomgroup()
def _run_reduce(self, in_pdbfile, out_pdbfile):
assert(isinstance(in_pdbfile, str))
assert(isinstance(out_pdbfile, str))
p = Process()
reduce_cmd = os.path.join(self._AMBERHOME, 'bin', 'reduce')
cmd = "{} {}".format(reduce_cmd,
in_pdbfile)
p.cmd(cmd)
return_code = p.commit(out_pdbfile,
stdout_through=False,
stderr_through=False)
return return_code
def protonate_group(self):
d_atomgroup = self.output_model ^ self.model
d_path = os.path.join(self.work_dir, 'add_group.brd')
bridge.save_msgpack(d_atomgroup.get_raw_data(), d_path)
####################################################################
# Archive
####################################################################
def __setstate__(self, state):
super(QcProtonate, self).__setstate__(state)
if "backend" in state:
self._data["backend"] = state["backend"]
if "model_name" in state:
self._data["model_name"] = state["model_name"]
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
|
ProteinDF/QCLObot
|
qclobot/qcprotonate.py
|
Python
|
gpl-3.0
| 4,364
|
[
"Amber"
] |
9fbbbd027e7583cd991a7b4254b3a50cf4f93e2014d6fb319ffec1e4d2c0308f
|
#!/usr/bin/env python
from __future__ import division
from myplot.xsection import XSection
from myplot.xsection import VectorXSection
from myplot.axes3d import Axes3D
from psgi.parameterized import state_parser
from matplotlib.widgets import Slider
from traits.api import HasTraits, Range, Instance, on_trait_change, Array
from traitsui.api import View, Item, Group
from mayavi.core.api import PipelineBase
from mayavi.core.ui.api import MayaviScene, SceneEditor,MlabSceneModel
import myplot.topo
import mayavi.mlab
import modest
import numpy as np
import pickle
import misc
import transform as trans
import sys
sys.path.append('.')
import basis
Nl = 50
Nw = 50
def slip_vec(x,coeff,strike,dip,seg):
s1 = basis.slip(x,coeff[:,0],segment=seg)
s2 = basis.slip(x,coeff[:,1],segment=seg)
vec = np.array([s1,s2,0*s2]).transpose()
argz = np.pi/2.0 - np.pi*strike/180
argx = np.pi/180.0*dip
T = trans.point_rotation_x(argx)
T += trans.point_rotation_z(argz)
vec = T(vec)
return vec
def slip_mag(x,coeff,seg):
rightlateral = basis.slip(x,coeff[:,0],segment=seg)
thrust = basis.slip(x,coeff[:,1],segment=seg)
return np.sqrt(rightlateral**2 + thrust**2)
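# A numpy-only sketch of the rotation slip_vec() performs through the project's
# `trans` helpers: an in-plane vector [strike-slip, dip-slip, 0] is rotated
# about x by the dip angle and then about z by (pi/2 - strike). This assumes
# the composed transform applies the x rotation first, matching the order the
# transforms are added above; treat it as an illustration, not a replacement.
def _example_rotate_slip(strike_slip, dip_slip, strike_deg, dip_deg):
    vec = np.array([strike_slip, dip_slip, 0.0])
    argx = np.pi/180.0*dip_deg
    argz = np.pi/2.0 - np.pi*strike_deg/180
    rot_x = np.array([[1.0, 0.0, 0.0],
                      [0.0, np.cos(argx), -np.sin(argx)],
                      [0.0, np.sin(argx), np.cos(argx)]])
    rot_z = np.array([[np.cos(argz), -np.sin(argz), 0.0],
                      [np.sin(argz), np.cos(argz), 0.0],
                      [0.0, 0.0, 1.0]])
    return rot_z.dot(rot_x.dot(vec))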
def view(state,param):
param = {i:np.array(v) for i,v in param.iteritems()}
    # convert lat/lon to xyz
f = open('basemap.pkl','r')
bm = pickle.load(f)
f.close()
fluidity_transforms = []
x,y = bm(*basis.FLUIDITY_ANCHOR[:2])
length = basis.FLUIDITY_LENGTH
width = basis.FLUIDITY_WIDTH
thickness = basis.FLUIDITY_THICKNESS
t = trans.point_stretch([basis.FLUIDITY_LENGTH,
basis.FLUIDITY_THICKNESS,
1.0])
t += trans.point_rotation_x(np.pi/2.0)
t += trans.point_translation([0.0,-width/2.0,0.0])
t += trans.point_rotation_z(np.pi/2.0 - basis.FLUIDITY_STRIKE*np.pi/180)
t += trans.point_translation([x,y,0.0])
fluidity_transforms += [t]
t = trans.point_stretch([basis.FLUIDITY_WIDTH,
basis.FLUIDITY_THICKNESS,
1.0])
t += trans.point_rotation_x(np.pi/2.0)
t += trans.point_rotation_z(-np.pi/2.0)
t += trans.point_translation([basis.FLUIDITY_LENGTH/2.0,
0.0,
0.0])
t += trans.point_rotation_z(np.pi/2.0 - basis.FLUIDITY_STRIKE*np.pi/180)
t += trans.point_translation([x,y,0.0])
fluidity_transforms += [t]
fault_transforms = basis.FAULT_TRANSFORMS
xs1 = XSection(basis.fluidity,
f_args=(state['fluidity'][-1],),
base_square_y=(-1,0),
transforms = fluidity_transforms,
clim = param['fluidity_clim'])
xs2 = XSection(basis.fluidity,
f_args=(state['fluidity'][-1],),
base_square_y=(-1,0),
transforms = fault_transforms)
class InteractiveSlip(HasTraits):
#time_index = Range(0,len(state['slip']),0.5)
#print(state)
time = Range(round(min(state['time']),2),round(max(state['time']),2))
scene = Instance(MlabSceneModel,())
view = View(Item('scene',editor=SceneEditor(scene_class=MayaviScene),
height=250,width=300,show_label=False),
Group('time'),resizable=True)
def __init__(self):
#myplot.topo.draw_topography(bm,opacity=0.2)
time_index = np.argmin(abs(state['time'][...] - self.time))
slip = np.array(state[str(param['slip_type'])][time_index])
self.xs = ()
self.vxs = ()
for i,t in enumerate(fault_transforms):
self.xs += XSection(slip_mag,
f_args=(slip,i),
base_square_y=(-1,0),
transforms = [t],clim=param['slip_clim']),
self.vxs += VectorXSection(slip_vec,
f_args=(slip,basis.FAULT_STRIKE[i],basis.FAULT_DIP[i],i),
base_square_y=(-1,0),
transforms = [t]),
HasTraits.__init__(self)
@on_trait_change('time,scene.activated')
def update_plot(self):
time_index = np.argmin(abs(state['time'][...] - self.time))
slip = np.array(state[str(param['slip_type'])][time_index])
for i,t in enumerate(fault_transforms):
self.xs[i].set_f_args((slip,i))
self.vxs[i].set_f_args((slip,basis.FAULT_STRIKE[i],basis.FAULT_DIP[i],i))
if self.xs[i]._plots is None:
self.xs[i].draw()
else:
self.xs[i].redraw()
if self.vxs[i]._plots is None:
self.vxs[i].draw()
else:
self.vxs[i].redraw()
#myplot.topo.draw_topography(bm,opacity=0.2)
mayavi.mlab.figure(1)
xs1.draw()
xs2.draw(color=(0.2,0.2,0.2),opacity=0.5)
myplot.topo.draw_topography(bm,opacity=0.2)
#mayavi.mlab.figure(2)
xs2 = InteractiveSlip()
xs2.configure_traits()
|
treverhines/PSGI
|
psgi/plot_state.py
|
Python
|
mit
| 4,860
|
[
"Mayavi"
] |
c34cc13438e811efd3bb30928c55cd3f352fbc0845b9ab67fb3fda906d12c204
|
from __future__ import print_function
import os
import pytest
from os.path import join
import unittest
import subprocess
@pytest.mark.js
class TestBokehJS(unittest.TestCase):
def test_bokehjs(self):
os.chdir('bokehjs')
proc = subprocess.Popen([join('node_modules', '.bin', 'gulp'), "test"],
stdout=subprocess.PIPE)
result = proc.wait()
msg = proc.stdout.read().decode('utf-8', errors='ignore')
print(msg)
if result != 0:
assert False
if __name__ == "__main__":
unittest.main()
|
saifrahmed/bokeh
|
tests/test_bokehjs.py
|
Python
|
bsd-3-clause
| 582
|
[
"GULP"
] |
8e5d25aa1b033590126bb5deed2c7958e5780bc6a4fadcfad682b6069325b243
|
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create an API definition by interpreting a discovery document.
This module interprets a discovery document to create a tree of classes which
represent the API structure in a way that is useful for generating a library.
For each discovery element (e.g. schemas, resources, methods, ...) there is
a class to represent it which is directly usable in the templates. The
instances of those classes are annotated with extra variables for use
in the template which are language specific.
The current way to make use of this class is to create a programming language
specific subclass of Api, which adds annotations and template variables
appropriate for that language.
TODO(user): Refactor this so that the API can be loaded first, then annotated.
"""
__author__ = 'aiuto@google.com (Tony Aiuto)'
import copy
import logging
from googleapis.codegen import data_types
from googleapis.codegen import template_objects
from googleapis.codegen import utilities
from googleapis.codegen.anyjson import simplejson
_ADDITIONAL_PROPERTIES = 'additionalProperties'
class ApiException(Exception):
"""The base class for all API parsing exceptions."""
def __init__(self, reason, def_dict=None):
"""Create an exception.
Args:
reason: (str) The human readable explanation of this exception.
def_dict: (dict) The discovery dictionary we failed on.
"""
super(ApiException, self).__init__()
self._reason = reason
self._def_dict = def_dict
self._raw_def_dict = copy.deepcopy(def_dict)
def __str__(self):
if self._def_dict:
return '%s: %s' % (self._reason, self._def_dict)
return self._reason
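# A small illustrative sketch (not used by the generator) of the two
# template-value normalizations Api.__init__() below applies to the discovery
# document's name and version: the class name is CamelCased via the same
# utilities.CamelCase() helper, and dots in the version are replaced so it can
# appear in identifiers (e.g. version '1.3' becomes versionNoDots '1_3').
def _ExampleTemplateNames(name, version):
  return utilities.CamelCase(name), version.replace('.', '_')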
class Api(template_objects.CodeObject):
"""An API definition.
This class holds a discovery centric definition of an API. It contains
members such as "resources" and "schemas" which relate directly to discovery
concepts. It defines several properties that can be used in code generation
templates:
name: The API name.
version: The API version.
versionNoDots: The API version with all '.' characters replaced with '_'
authScopes: The list of the OAuth scopes used by this API.
dataWrapper: True if the API definition contains the 'dataWrapper' feature.
methods: The list of top level API methods.
models: The list of API data models, both from the schema section of
discovery and from anonymous objects defined in method definitions.
parameters: The list of global method parameters (applicable to all methods)
resources: The list of API resources
"""
def __init__(self, discovery_doc, language=None):
super(Api, self).__init__(discovery_doc, self)
name = self.values['name']
self._validator.ValidateApiName(name)
self._validator.ValidateApiVersion(self.values['version'])
self._class_name = utilities.CamelCase(name)
self._language = language
self._template_dir = None
self._surface_features = {}
self._schemas = {}
self.void_type = data_types.Void(self)
self.SetTemplateValue('className', self._class_name)
self.SetTemplateValue('versionNoDots',
self.values['version'].replace('.', '_'))
self.SetTemplateValue('dataWrapper',
'dataWrapper' in discovery_doc.get('features', []))
self._BuildSchemaDefinitions()
self._BuildResourceDefinitions()
self.SetTemplateValue('resources', self._resources)
# Make data models part of the api dictionary
self.SetTemplateValue('models', self.ModelClasses())
# Replace methods dict with Methods
self._methods = []
for name, method_dict in self.values.get('methods', {}).iteritems():
self._methods.append(Method(self, name, method_dict))
self.SetTemplateValue('methods', self._methods)
# Global parameters
self._parameters = []
for name, param_dict in self.values.get('parameters', {}).iteritems():
self._parameters.append(Parameter(self, name, param_dict, self))
self.SetTemplateValue('parameters', self._parameters)
# Auth scopes
self._authscopes = []
if (self.values.get('auth') and
self.values['auth'].get('oauth2') and
self.values['auth']['oauth2'].get('scopes')):
for value, auth_dict in sorted(
self.values['auth']['oauth2']['scopes'].iteritems()):
self._authscopes.append(AuthScope(self, value, auth_dict))
self.SetTemplateValue('authscopes', self._authscopes)
@property
def all_schemas(self):
"""The dictonary of all the schema objects found in the API."""
return self._schemas
def _BuildResourceDefinitions(self):
"""Loop over the resources in the discovery doc and build definitions."""
self._resources = []
for name, def_dict in self.values.get('resources', {}).iteritems():
resource = Resource(self, name, def_dict)
self._resources.append(resource)
def _BuildSchemaDefinitions(self):
"""Loop over the schemas in the discovery doc and build definitions."""
schemas = self.values.get('schemas')
if schemas:
for name, def_dict in schemas.iteritems():
# Upgrade the string format schema to a dict.
if isinstance(def_dict, unicode):
def_dict = simplejson.loads(def_dict)
self._schemas[name] = self.DataTypeFromJson(def_dict, name)
def ModelClasses(self):
"""Return all the top level model classes."""
ret = []
for schema in self._schemas.values():
if schema not in ret:
if (not isinstance(schema, data_types.SchemaReference)
and not schema.values.get('builtIn')):
ret.append(schema)
ret.sort(lambda x, y: cmp(x.class_name, y.class_name))
return ret
def TopLevelModelClasses(self):
"""Return the models which are not children of another model."""
return [m for m in self.ModelClasses() if not m.parent]
def DataTypeFromJson(self, type_dict, default_name, parent=None,
wire_name=None):
"""Returns a schema object represented by a JSON Schema dictionary.
If the response dict references an existing schema, return the ref to
that. If it describes a value in-line, then create the schema dynamically.
If the type_dict is None, a blank schema will be created.
Args:
type_dict: A dict of the form expected of a request or response member
of a method description. See the Discovery specification for more.
default_name: The unique name to give the schema if we have to create it.
parent: The schema where I was referenced. If we cannot determine that
this is a top level schema, set the parent to this.
wire_name: The name which will identify objects of this type in data on
the wire.
Returns:
A Schema object.
"""
if not type_dict:
type_dict = {}
schema_name = type_dict.get('$ref', default_name)
schema = self.SchemaByName(schema_name)
if schema:
Trace('DataTypeFromJson: %s => %s' % (schema_name,
schema.values['className']))
return schema
# new or not initialized, create a fresh one
schema = Schema.Create(self, schema_name, type_dict, wire_name, parent)
# Only put it in our by-name list if it is a real object
if (not isinstance(schema, data_types.SchemaReference) and
not schema.values.get('builtIn')):
Trace('DataTypeFromJson: add %s to cache' % schema.values['className'])
self._schemas[schema.values['className']] = schema
return schema
def SchemaByName(self, schema_name):
"""Find a schema by name.
Args:
schema_name: (str) name of a schema defined by this API.
Returns:
Schema object or None if not found.
"""
return self._schemas.get(schema_name, None)
def VisitAll(self, func):
"""Visit all nodes of an API tree and apply a function to each.
Walks a tree and calls a function on each element of it. This should be
called after the API is fully loaded.
Args:
func: (function) Method to call on each object.
"""
Trace('Applying function to all nodes')
for resource in self.values['resources']:
self._VisitResource(resource, func)
# Top level methods
for method in self.values['methods']:
self._VisitMethod(method, func)
for parameter in self.values['parameters']:
func(parameter)
for schema in self._schemas.values():
self._VisitSchema(schema, func)
def _VisitMethod(self, method, func):
"""Visit a method, calling a function on every child.
Args:
method: (Method) The Method to visit.
func: (function) Method to call on each object.
"""
func(method)
for parameter in method.parameters:
func(parameter)
def _VisitResource(self, resource, func):
"""Visit a resource tree, calling a function on every child.
Calls down recursively to sub resources.
Args:
resource: (Resource) The Resource to visit.
func: (function) Method to call on each object.
"""
func(resource)
for method in resource.values['methods']:
self._VisitMethod(method, func)
for r in resource.values['resources']:
self._VisitResource(r, func)
def _VisitSchema(self, schema, func):
"""Visit a schema tree, calling a function on every child.
Args:
schema: (Schema) The Schema to visit.
func: (function) Method to call on each object.
"""
func(schema)
for prop in schema.values.get('properties', []):
func(prop)
def ToClassName(self, s, element_type=None): # pylint: disable-msg=W0613
"""Convert a name to a suitable member name in the target language.
This default implementation camel cases the string, which is appropriate
for Java and C++. Subclasses may override as appropriate.
Args:
s: (str) A rosy name of data element.
element_type: (str) The kind of object we are making a class name for.
E.g. resource, method, schema.
Returns:
A name suitable for use as a class in the generator's target language.
"""
return utilities.CamelCase(s)
@property
def class_name(self):
return self.values['className']
class Schema(data_types.DataType):
"""The definition of a schema."""
def __init__(self, api, default_name, def_dict, parent=None):
"""Construct a Schema object from a discovery dictionary.
Schemas represent data models in the API.
Args:
api: (Api) the Api instance owning the Schema
default_name: (str) the default name of the Schema. If there is an 'id'
member in the definition, that is used for the name instead.
def_dict: (dict) a discovery dictionary
parent: (Schema) The containing schema. To be used to establish unique
names for anonymous sub-schemas.
"""
super(Schema, self).__init__(def_dict, api, parent=parent)
name = def_dict.get('id', default_name)
Trace('Schema(%s)' % name)
# Protect against malicious discovery
template_objects.CodeObject.ValidateName(name)
self.SetTemplateValue('wireName', name)
class_name = api.ToClassName(name, element_type='schema')
self.SetTemplateValue('className', class_name)
self.SetTemplateValue('builtIn', False)
self.SetTemplateValue('properties', [])
@staticmethod
def Create(api, default_name, def_dict, wire_name, parent=None):
"""Construct a Schema or DataType from a discovery dictionary.
Schemas contain either object declarations, simple type declarations, or
references to other Schemas. Object declarations conceptually map to real
classes. Simple types will map to a target language built-in type.
References should effectively be replaced by the referenced Schema.
Args:
api: (Api) the Api instance owning the Schema
default_name: (str) the default name of the Schema. If there is an 'id'
member in the definition, that is used for the name instead.
def_dict: (dict) a discovery dictionary
wire_name: The name which will identify objects of this type in data on
the wire.
parent: (Schema) The containing schema. To be used to establish nesting
for anonymous sub-schemas.
Returns:
A Schema or DataType.
Raises:
ApiException: If the definition dict is not correct.
"""
schema_id = def_dict.get('id')
if schema_id:
name = schema_id
else:
name = default_name
class_name = api.ToClassName(name, element_type='schema')
# Schema objects come in several patterns.
#
# 1. Simple objects
# { type: object, properties: { "foo": {schema} ... }}
#
# 2. Maps of objects
# { type: object, additionalProperties: { "foo": {inner_schema} ... }}
#
# What we want is a data type which is Map<string, {inner_schema}>
# The schema we create here is essentially a built in type which we
# don't want to generate a class for.
#
# 3. Arrays of objects
# { type: array, items: { inner_schema }}
#
# Same kind of issue as the map, but with List<{inner_schema}>
#
# 4. Primitive data types, described by type and format.
# { type: string, format: int32 }
#
# 5. Refs to another schema.
# { $ref: name }
if 'type' in def_dict:
# The 'type' field of the schema can either be 'array', 'object', or a
# base json type.
json_type = def_dict['type']
if json_type == 'object':
# Look for full object definition. You can have properties or
# additionalProperties, but it does not do anything useful to have
# both.
# Replace properties dict with Property's
props = def_dict.get('properties')
if props:
# This is case 1 from above
properties = []
schema = Schema(api, name, def_dict, parent=parent)
if wire_name:
schema.SetTemplateValue('wireName', wire_name)
for prop_name, prop_dict in props.iteritems():
Trace(' adding prop: %s to %s' % (prop_name, name))
properties.append(Property(api, schema, prop_name, prop_dict))
Trace('Marking %s fully defined' % schema.values['className'])
schema.SetTemplateValue('properties', properties)
return schema
# Look for case 2
additional_props = def_dict.get(_ADDITIONAL_PROPERTIES)
if additional_props:
Trace('Have only additionalProps for %s, dict=%s' % (
name, str(additional_props)))
# TODO(user): Remove this hack at the next large breaking change
# The "Items" added to the end is unneeded and ugly. This is for
# temporary backwards compatibility. And in case 3 too.
if additional_props.get('type') == 'array':
name = '%sItems' % name
# Note: since this is an interim, non-class type just to hold the map,
# make the parent schema the parent passed in, not myself.
base_type = api.DataTypeFromJson(additional_props, name,
parent=parent, wire_name=wire_name)
map_type = data_types.MapDataType(base_type, parent=parent)
Trace(' %s is MapOf<string, %s>' % (
class_name, base_type.class_name))
return map_type
raise ApiException('object without properties in: %s' % def_dict)
elif json_type == 'array':
# Case 3: Look for array definition
items = def_dict.get('items')
if not items:
raise ApiException('array without items in: %s' % def_dict)
tentative_class_name = class_name
if schema_id:
Trace('Top level schema %s is an array' % class_name)
tentative_class_name += 'Items'
base_type = api.DataTypeFromJson(items, tentative_class_name,
parent=parent, wire_name=wire_name)
Trace(' %s is ArrayOf<%s>' % (class_name, base_type.class_name))
array_type = data_types.ArrayDataType(base_type, parent=parent)
# If I am not a top level schema, mark me as not generatable
if not schema_id:
array_type.SetTemplateValue('builtIn', True)
else:
Trace('Top level schema %s is an array' % class_name)
array_type.SetTemplateValue('className', schema_id)
return array_type
else:
# Case 4: This must be a basic type. Create a DataType for it.
format_type = def_dict.get('format')
if format_type:
Trace(' Found Type: %s with Format: %s' % (json_type, format_type))
base_type = data_types.BuiltInDataType(def_dict, api, parent=parent)
return base_type
referenced_schema = def_dict.get('$ref')
if referenced_schema:
# Case 5: Reference to another Schema.
#
# There are 4 ways you can see '$ref' in discovery.
# 1. In a property of a schema, pointing back to one previously defined
# 2. In a property of a schema, pointing forward
# 3. In a method request or response pointing to a defined schema
# 4. In a method request or response or property of a schema pointing to
# something undefined.
#
# This code is not reached in case 1. The way the generator loads
# schemas (see _BuildSchemaDefinitions) is to loop over them and add
# them to a dict of schemas. A backwards reference would be in the table
# so the DataTypeFromJson call in the Property constructor will resolve
# to the defined schema.
#
# For case 2. Just creating this placeholder here is fine. When the
# actual schema is hit in the loop in _BuildSchemaDefinitions, we will
# replace the entry and DataTypeFromJson will resolve to the new definition.
#
# For case 3, we should not reach this code, because DataTypeFromJson
# would have returned the defined schema.
#
# For case 4, we punt on the whole API.
return data_types.SchemaReference(referenced_schema, api)
raise ApiException('Cannot decode JSON Schema for: %s' % def_dict)
@property
def class_name(self):
return self.values['className']
class Resource(template_objects.CodeObject):
"""The definition of a resource."""
def __init__(self, api, name, def_dict):
super(Resource, self).__init__(def_dict, api)
self.ValidateName(name)
self._raw_def_dict = copy.deepcopy(def_dict)
class_name = api.ToClassName(name, element_type='resource')
self.SetTemplateValue('className', class_name)
self.SetTemplateValue('wireName', name)
# Replace methods dict with Methods
self._methods = []
for name, method_dict in self.values.get('methods', {}).iteritems():
self._methods.append(Method(api, name, method_dict))
self.SetTemplateValue('methods', self._methods)
# Get sub resources
self._resources = []
for name, r_def_dict in self.values.get('resources', {}).iteritems():
self._resources.append(Resource(api, name, r_def_dict))
self.SetTemplateValue('resources', self._resources)
@property
def methods(self):
return self._methods
class AuthScope(template_objects.CodeObject):
"""The definition of an auth scope."""
def __init__(self, api, value, def_dict):
"""Construct an auth scope.
Args:
api: (Api) The Api which owns this Property
value: (string) The unique identifier of this scope, often a URL
def_dict: (dict) The discovery dictionary for this auth scope.
"""
super(AuthScope, self).__init__(def_dict, api)
# Strip the common prefix to get a unique identifying name
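# e.g. (illustrative) 'https://www.googleapis.com/auth/buzz.readonly'
# becomes the scope name 'BUZZ_READONLY'.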
prefix_len = len('https://www.googleapis.com/auth/')
self.SetTemplateValue('name', value[prefix_len:].upper().replace('.', '_'))
self.SetTemplateValue('value', value)
class Method(template_objects.CodeObject):
"""The definition of a method."""
def __init__(self, api, name, def_dict):
"""Construct a method.
Args:
api: (Api) The Api which owns this Method.
name: (string) The discovery name of the method.
def_dict: (dict) The discovery dictionary for this method.
Raises:
ApiException: If the httpMethod type is not one we know how to
handle.
"""
super(Method, self).__init__(def_dict, api)
self.ValidateName(name)
class_name = api.ToClassName(name, element_type='method')
self.SetTemplateValue('wireName', name)
self.SetTemplateValue('className', class_name)
http_method = def_dict['httpMethod'].upper()
self.SetTemplateValue('httpMethod', http_method)
self.SetTemplateValue('rpcMethod',
def_dict.get('rpcMethod') or def_dict['id'])
rest_path = def_dict.get('path') or def_dict.get('restPath')
self.SetTemplateValue('restPath', rest_path)
# Figure out the input and output types and schemas for this method.
expected_request = self.values.get('request')
if expected_request:
# TODO(user): RequestBody is only used if the schema is anonymous.
# When we go to nested models, this could be a nested class off the
# Method, making it unique without the silly name. Same for ResponseBody.
request_schema = api.DataTypeFromJson(expected_request,
'%sRequestContent' % name,
parent=self)
self.SetTemplateValue('requestType', request_schema)
expected_response = self.values.get('response')
if expected_response:
response_schema = api.DataTypeFromJson(expected_response,
'%sResponse' % name,
parent=self)
self.SetTemplateValue('responseType', response_schema)
else:
self.SetTemplateValue('responseType', api.void_type)
# Make sure we can handle this method type and do any fixups.
if http_method in ['DELETE', 'PATCH', 'POST', 'PUT']:
pass
elif http_method == 'GET':
self.SetTemplateValue('requestType', None)
else:
raise ApiException('Unknown HTTP method: %s' % http_method, def_dict)
# Replace parameters dict with Parameters. We try to order them by their
# position in the request path so that the generated code can track the
# more human readable definition, rather than the order of the parameters
# in the discovery doc.
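# e.g. (illustrative) with parameterOrder=['userId', 'postId'], those two
# parameters are emitted first in that order, and all remaining parameters
# follow in the order they are declared.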
order = self.values.get('parameterOrder', [])
req_parameters = []
opt_parameters = []
for name, def_dict in self.values.get('parameters', {}).items():
# Standard params are part of the generic request class
if name not in ['alt']:
param = Parameter(api, name, def_dict, self)
# We want to push all parameters that aren't declared inside
# parameterOrder after those that are.
if param.values['wireName'] in order:
req_parameters.append(param)
else:
# optional parameters are appended in the order they're declared.
opt_parameters.append(param)
# pylint: disable-msg=C6402
req_parameters.sort(lambda x, y: cmp(order.index(x.values['wireName']),
order.index(y.values['wireName'])))
req_parameters.extend(opt_parameters)
self.SetTemplateValue('parameters', req_parameters)
@property
def parameters(self):
return self.values['parameters']
@property
def optional_parameters(self):
return [p for p in self.values['parameters'] if not p.required]
@property
def required_parameters(self):
return [p for p in self.values['parameters'] if p.required]
#
# Expose some properties with the naming convention we use in templates
#
def optionalParameters(self): # pylint: disable-msg=C6409
return self.optional_parameters
def requiredParameters(self): # pylint: disable-msg=C6409
return self.required_parameters
class Parameter(template_objects.CodeObject):
"""The definition of a method parameter."""
def __init__(self, api, name, def_dict, method):
super(Parameter, self).__init__(def_dict, api, parent=method)
self.requires_imports = []
self.ValidateName(name)
self.schema = api
self.SetTemplateValue('wireName', name)
# TODO(user): Deal with dots in names better. What we should do is:
# For x.y, x.z create a little class X, with members y and z. Then
# have the constructor method take an X.
self._repeated = self.values.get('repeated', False)
self._required = self.values.get('required', False)
self._data_type = data_types.BuiltInDataType(def_dict, api, parent=self)
if self._repeated:
self._data_type = data_types.ArrayDataType(self._data_type, parent=self)
if self.values.get('enum'):
enum = Enum(api,
name,
self._data_type,
self.values.get('enum'),
self.values.get('enumDescriptions'))
self.SetTemplateValue('enumType', enum)
# NOTE: If we want all languages to use templates, then we should enable
# the next line. For now, rf_generator does the equivalent.
# self.SetTemplateValue('codeType', code_type)
@property
def repeated(self):
return self._repeated
@property
def required(self):
return self._required
@property
def code_type(self):
return self._data_type.code_type
@property
def data_type(self):
return self._data_type
class Property(template_objects.CodeObject):
"""The definition of a schema property.
Example property in the discovery schema:
"id": {"type": "string"}
"""
def __init__(self, api, schema, name, def_dict):
"""Construct a Property.
A Property requires several elements in its template value dictionary which
are all computed here:
wireName: the string which labels this Property in the wire protocol
dataType: the DataType of this property
Args:
api: (Api) The Api which owns this Property
schema: (Schema) the schema this Property is part of
name: (string) the name for this Property
def_dict: (dict) the JSON schema dictionary
Raises:
ApiException: If we have an array type without object definitions.
"""
super(Property, self).__init__(def_dict, api)
self.ValidateName(name)
self.requires_imports = []
self.schema = schema
self.SetTemplateValue('wireName', name)
# If the schema value for this property defines a new object directly,
# rather than referring to another schema, we will have to create a class
# name for it. We create a unique name by prepending the schema we are
# in to the object name.
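# e.g. (illustrative) an inline object property named 'author' on schema
# 'Activity' gets the tentative class name 'ActivityAuthor'.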
tentative_class_name = '%s%s' % (schema.class_name,
utilities.CamelCase(name))
if '$ref' in self.values:
element_type = 'object'
self.SetTemplateValue('type', 'object')
else:
element_type = self.values.get('type', 'string')
self.format_type = self.values.get('format')
self.object_type = None
self.requires_imports = []
if element_type == 'array':
self._data_type = api.DataTypeFromJson(def_dict, tentative_class_name,
parent=schema, wire_name=name)
elif element_type == 'object':
self._data_type = api.DataTypeFromJson(def_dict, tentative_class_name,
parent=schema, wire_name=name)
else:
self._data_type = data_types.BuiltInDataType(def_dict, api,
parent=schema)
@property
def code_type(self):
if self._language_model:
self._data_type.SetLanguageModel(self._language_model)
return self._data_type.code_type
@property
def codeType(self): # pylint: disable-msg=C6409
return self.code_type
@property
def data_type(self):
return self._data_type
class Enum(template_objects.CodeObject):
"""The definition of an Enum.
Example enum in discovery.
"enum": [
"@comments",
"@consumption",
"@liked",
"@public",
"@self"
],
"enumDescriptions": [
"Limit to activities commented on by the user.",
"Limit to activities to be consumed by the user.",
"Limit to activities liked by the user.",
"Limit to public activities posted by the user.",
"Limit to activities posted by the user."
]
"""
def __init__(self, api, name, code_type, values, descriptions):
"""Create an enum.
Args:
api: (Api) The Api which owns this Property
name: (str) The name for this enum.
code_type: (str) The underlying (language specific) type of the values.
values: ([str]) List of possible values.
descriptions: ([str]) List of value descriptions
"""
super(Enum, self).__init__({}, api)
self.ValidateName(name)
self.SetTemplateValue('wireName', name)
self.SetTemplateValue('codeType', code_type)
self.SetTemplateValue('className', api.ToClassName(name))
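# e.g. (illustrative) '@consumption' -> 'CONSUMPTION' and a hypothetical
# value 'top-rated' -> 'TOP_RATED' after the transformation below.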
names = [s.lstrip('@').upper().replace('-', '_') for s in values]
clean_descriptions = []
for desc in descriptions:
clean_desc = self.ValidateAndSanitizeComment(self.StripHTML(desc))
clean_descriptions.append(clean_desc)
self.SetTemplateValue('pairs', zip(names, values, clean_descriptions))
def Trace(s):
"""Logic tracer for debuging."""
logging.debug('>>> %s', s)
|
mashery/io-wraps
|
google-apis-client-generator/src/googleapis/codegen/api.py
|
Python
|
mit
| 30,151
|
[
"VisIt"
] |
7766f8d5dafca12b80fa5fda3cca220b7dbb35311aedd9eca1c7d17f523a4ec0
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for misc.GritNode'''
from __future__ import print_function
import contextlib
import os
import sys
import tempfile
import unittest
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from six import StringIO
from grit import grd_reader
import grit.exception
from grit import util
from grit.format import rc
from grit.format import rc_header
from grit.node import misc
@contextlib.contextmanager
def _MakeTempPredeterminedIdsFile(content):
"""Write the |content| string to a temporary file.
The temporary file must be deleted by the caller.
Example:
with _MakeTempPredeterminedIdsFile('foo') as path:
...
os.remove(path)
Args:
content: The string to write.
Yields:
The name of the temporary file.
"""
with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
f.write(content)
f.flush()
f.close()
yield f.name
class GritNodeUnittest(unittest.TestCase):
def testUniqueNameAttribute(self):
try:
restree = grd_reader.Parse(
util.PathFromRoot('grit/testdata/duplicate-name-input.xml'))
self.fail('Expected parsing exception because of duplicate names.')
except grit.exception.Parsing:
pass # Expected case
def testReadFirstIdsFromFile(self):
test_resource_ids = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'resource_ids')
base_dir = os.path.dirname(test_resource_ids)
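# The dict passed below maps placeholder variables that the resource_ids
# file may reference onto concrete directories (illustrative description of
# the substitution performed by _ReadFirstIdsFromFile).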
src_dir, id_dict = misc._ReadFirstIdsFromFile(
test_resource_ids,
{
'FOO': os.path.join(base_dir, 'bar'),
'SHARED_INTERMEDIATE_DIR': os.path.join(base_dir,
'out/Release/obj/gen'),
})
self.assertEqual({}, id_dict.get('bar/file.grd', None))
self.assertEqual({},
id_dict.get('out/Release/obj/gen/devtools/devtools.grd', None))
src_dir, id_dict = misc._ReadFirstIdsFromFile(
test_resource_ids,
{
'SHARED_INTERMEDIATE_DIR': '/outside/src_dir',
})
self.assertEqual({}, id_dict.get('devtools.grd', None))
# Verifies that GetInputFiles() returns the correct list of files
# corresponding to ChromeScaledImage nodes when assets are missing.
def testGetInputFilesChromeScaledImage(self):
chrome_html_path = util.PathFromRoot('grit/testdata/chrome_html.html')
xml = '''<?xml version="1.0" encoding="utf-8"?>
<grit latest_public_release="0" current_release="1">
<outputs>
<output filename="default.pak" type="data_package" context="default_100_percent" />
<output filename="special.pak" type="data_package" context="special_100_percent" fallback_to_default_layout="false" />
</outputs>
<release seq="1">
<structures fallback_to_low_resolution="true">
<structure type="chrome_scaled_image" name="IDR_A" file="a.png" />
<structure type="chrome_scaled_image" name="IDR_B" file="b.png" />
<structure type="chrome_html" name="HTML_FILE1" file="%s" flattenhtml="true" />
</structures>
</release>
</grit>''' % chrome_html_path
grd = grd_reader.Parse(StringIO(xml),
util.PathFromRoot('grit/testdata'))
expected = ['chrome_html.html', 'default_100_percent/a.png',
'default_100_percent/b.png', 'included_sample.html',
'special_100_percent/a.png']
actual = [os.path.relpath(path, util.PathFromRoot('grit/testdata')) for
path in grd.GetInputFiles()]
# Convert path separator for Windows paths.
actual = [path.replace('\\', '/') for path in actual]
self.assertEquals(expected, actual)
# Verifies that GetInputFiles() returns the correct list of files
# when files include other files.
def testGetInputFilesFromIncludes(self):
chrome_html_path = util.PathFromRoot('grit/testdata/chrome_html.html')
xml = '''<?xml version="1.0" encoding="utf-8"?>
<grit latest_public_release="0" current_release="1">
<outputs>
<output filename="default.pak" type="data_package" context="default_100_percent" />
<output filename="special.pak" type="data_package" context="special_100_percent" fallback_to_default_layout="false" />
</outputs>
<release seq="1">
<includes>
<include name="IDR_TESTDATA_CHROME_HTML" file="%s" flattenhtml="true"
allowexternalscript="true" type="BINDATA" />
</includes>
</release>
</grit>''' % chrome_html_path
grd = grd_reader.Parse(StringIO(xml), util.PathFromRoot('grit/testdata'))
expected = ['chrome_html.html', 'included_sample.html']
actual = [os.path.relpath(path, util.PathFromRoot('grit/testdata')) for
path in grd.GetInputFiles()]
# Convert path separator for Windows paths.
actual = [path.replace('\\', '/') for path in actual]
self.assertEquals(expected, actual)
def testNonDefaultEntry(self):
grd = util.ParseGrdForUnittest('''
<messages>
<message name="IDS_A" desc="foo">bar</message>
<if expr="lang == 'fr'">
<message name="IDS_B" desc="foo">bar</message>
</if>
</messages>''')
grd.SetOutputLanguage('fr')
output = ''.join(rc_header.Format(grd, 'fr', '.'))
self.assertIn('#define IDS_A 2378\n#define IDS_B 2379', output)
def testExplicitFirstIdOverlaps(self):
# second first_id will overlap preexisting range
self.assertRaises(grit.exception.IdRangeOverlap,
util.ParseGrdForUnittest, '''
<includes first_id="300" comment="bingo">
<include type="gif" name="ID_LOGO" file="images/logo.gif" />
<include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
</includes>
<messages first_id="301">
<message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
</message>
<message name="IDS_SMURFGEBURF">Frubegfrums</message>
</messages>''')
def testImplicitOverlapsPreexisting(self):
# second message in <messages> will overlap preexisting range
self.assertRaises(grit.exception.IdRangeOverlap,
util.ParseGrdForUnittest, '''
<includes first_id="301" comment="bingo">
<include type="gif" name="ID_LOGO" file="images/logo.gif" />
<include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
</includes>
<messages first_id="300">
<message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
</message>
<message name="IDS_SMURFGEBURF">Frubegfrums</message>
</messages>''')
def testPredeterminedIds(self):
with _MakeTempPredeterminedIdsFile('IDS_A 101\nIDS_B 102') as ids_file:
grd = util.ParseGrdForUnittest('''
<includes first_id="300" comment="bingo">
<include type="gif" name="IDS_B" file="images/logo.gif" />
</includes>
<messages first_id="10000">
<message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
</message>
<message name="IDS_A">
Bongo!
</message>
</messages>''', predetermined_ids_file=ids_file)
output = rc_header.FormatDefines(grd)
self.assertEqual(('#define IDS_B 102\n'
'#define IDS_GREETING 10000\n'
'#define IDS_A 101\n'), ''.join(output))
os.remove(ids_file)
def testPredeterminedIdsOverlap(self):
with _MakeTempPredeterminedIdsFile('ID_LOGO 10000') as ids_file:
self.assertRaises(grit.exception.IdRangeOverlap,
util.ParseGrdForUnittest, '''
<includes first_id="300" comment="bingo">
<include type="gif" name="ID_LOGO" file="images/logo.gif" />
</includes>
<messages first_id="10000">
<message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
</message>
<message name="IDS_BONGO">
Bongo!
</message>
</messages>''', predetermined_ids_file=ids_file)
os.remove(ids_file)
class IfNodeUnittest(unittest.TestCase):
def testIffyness(self):
grd = grd_reader.Parse(StringIO('''
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<if expr="'bingo' in defs">
<message name="IDS_BINGO">
Bingo!
</message>
</if>
<if expr="'hello' in defs">
<message name="IDS_HELLO">
Hello!
</message>
</if>
<if expr="lang == 'fr' or 'FORCE_FRENCH' in defs">
<message name="IDS_HELLO" internal_comment="French version">
Good morning
</message>
</if>
<if expr="is_win">
<message name="IDS_ISWIN">is_win</message>
</if>
</messages>
</release>
</grit>'''), dir='.')
messages_node = grd.children[0].children[0]
bingo_message = messages_node.children[0].children[0]
hello_message = messages_node.children[1].children[0]
french_message = messages_node.children[2].children[0]
is_win_message = messages_node.children[3].children[0]
self.assertTrue(bingo_message.name == 'message')
self.assertTrue(hello_message.name == 'message')
self.assertTrue(french_message.name == 'message')
grd.SetOutputLanguage('fr')
grd.SetDefines({'hello': '1'})
active = set(grd.ActiveDescendants())
self.failUnless(bingo_message not in active)
self.failUnless(hello_message in active)
self.failUnless(french_message in active)
grd.SetOutputLanguage('en')
grd.SetDefines({'bingo': 1})
active = set(grd.ActiveDescendants())
self.failUnless(bingo_message in active)
self.failUnless(hello_message not in active)
self.failUnless(french_message not in active)
grd.SetOutputLanguage('en')
grd.SetDefines({'FORCE_FRENCH': '1', 'bingo': '1'})
active = set(grd.ActiveDescendants())
self.failUnless(bingo_message in active)
self.failUnless(hello_message not in active)
self.failUnless(french_message in active)
grd.SetOutputLanguage('en')
grd.SetDefines({})
self.failUnless(grd.target_platform == sys.platform)
grd.SetTargetPlatform('darwin')
active = set(grd.ActiveDescendants())
self.failUnless(is_win_message not in active)
grd.SetTargetPlatform('win32')
active = set(grd.ActiveDescendants())
self.failUnless(is_win_message in active)
def testElsiness(self):
grd = util.ParseGrdForUnittest('''
<messages>
<if expr="True">
<then> <message name="IDS_YES1"></message> </then>
<else> <message name="IDS_NO1"></message> </else>
</if>
<if expr="True">
<then> <message name="IDS_YES2"></message> </then>
<else> </else>
</if>
<if expr="True">
<then> </then>
<else> <message name="IDS_NO2"></message> </else>
</if>
<if expr="True">
<then> </then>
<else> </else>
</if>
<if expr="False">
<then> <message name="IDS_NO3"></message> </then>
<else> <message name="IDS_YES3"></message> </else>
</if>
<if expr="False">
<then> <message name="IDS_NO4"></message> </then>
<else> </else>
</if>
<if expr="False">
<then> </then>
<else> <message name="IDS_YES4"></message> </else>
</if>
<if expr="False">
<then> </then>
<else> </else>
</if>
</messages>''')
included = [msg.attrs['name'] for msg in grd.ActiveDescendants()
if msg.name == 'message']
self.assertEqual(['IDS_YES1', 'IDS_YES2', 'IDS_YES3', 'IDS_YES4'], included)
def testIffynessWithOutputNodes(self):
grd = grd_reader.Parse(StringIO('''
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<outputs>
<output filename="uncond1.rc" type="rc_data" />
<if expr="lang == 'fr' or 'hello' in defs">
<output filename="only_fr.adm" type="adm" />
<output filename="only_fr.plist" type="plist" />
</if>
<if expr="lang == 'ru'">
<output filename="doc.html" type="document" />
</if>
<output filename="uncond2.adm" type="adm" />
<output filename="iftest.h" type="rc_header">
<emit emit_type='prepend'></emit>
</output>
</outputs>
</grit>'''), dir='.')
outputs_node = grd.children[0]
uncond1_output = outputs_node.children[0]
only_fr_adm_output = outputs_node.children[1].children[0]
only_fr_plist_output = outputs_node.children[1].children[1]
doc_output = outputs_node.children[2].children[0]
uncond2_output = outputs_node.children[3]
self.assertTrue(uncond1_output.name == 'output')
self.assertTrue(only_fr_adm_output.name == 'output')
self.assertTrue(only_fr_plist_output.name == 'output')
self.assertTrue(doc_output.name == 'output')
self.assertTrue(uncond2_output.name == 'output')
grd.SetOutputLanguage('ru')
grd.SetDefines({'hello': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(
outputs,
['uncond1.rc', 'only_fr.adm', 'only_fr.plist', 'doc.html',
'uncond2.adm', 'iftest.h'])
grd.SetOutputLanguage('ru')
grd.SetDefines({'bingo': '2'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(
outputs,
['uncond1.rc', 'doc.html', 'uncond2.adm', 'iftest.h'])
grd.SetOutputLanguage('fr')
grd.SetDefines({'hello': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(
outputs,
['uncond1.rc', 'only_fr.adm', 'only_fr.plist', 'uncond2.adm',
'iftest.h'])
grd.SetOutputLanguage('en')
grd.SetDefines({'bingo': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertEquals(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h'])
grd.SetOutputLanguage('fr')
grd.SetDefines({'bingo': '1'})
outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
self.assertNotEquals(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h'])
def testChildrenAccepted(self):
grd_reader.Parse(StringIO(r'''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<includes>
<if expr="'bingo' in defs">
<include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
</if>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
</if>
</if>
</includes>
<structures>
<if expr="'bingo' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\test\data\klonk.rc" encoding="utf-16" />
</if>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\test\data\klonk.rc" encoding="utf-16" />
</if>
</if>
</structures>
<messages>
<if expr="'bingo' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</if>
</messages>
</release>
<translations>
<if expr="'bingo' in defs">
<file lang="nl" path="nl_translations.xtb" />
</if>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<file lang="nl" path="nl_translations.xtb" />
</if>
</if>
</translations>
</grit>'''), dir='.')
def testIfBadChildrenNesting(self):
# includes
xml = StringIO(r'''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<includes>
<if expr="'bingo' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\test\data\klonk.rc" encoding="utf-16" />
</if>
</includes>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
# messages
xml = StringIO(r'''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<if expr="'bingo' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\test\data\klonk.rc" encoding="utf-16" />
</if>
</messages>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
# structures
xml = StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<structures>
<if expr="'bingo' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</structures>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
# translations
xml = StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<translations>
<if expr="'bingo' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</translations>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
# same with nesting
xml = StringIO(r'''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<includes>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\test\data\klonk.rc" encoding="utf-16" />
</if>
</if>
</includes>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
xml = StringIO(r'''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<structure type="dialog" name="IDD_ABOUTBOX" file="grit\test\data\klonk.rc" encoding="utf-16" />
</if>
</if>
</messages>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
xml = StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<structures>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</if>
</structures>
</release>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
xml = StringIO('''<?xml version="1.0"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<translations>
<if expr="'bingo' in defs">
<if expr="'hello' in defs">
<message name="IDS_BINGO">Bingo!</message>
</if>
</if>
</translations>
</grit>''')
self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
class ReleaseNodeUnittest(unittest.TestCase):
def testPseudoControl(self):
grd = grd_reader.Parse(StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="1" source_lang_id="en-US" current_release="2" base_dir=".">
<release seq="1" allow_pseudo="false">
<messages>
<message name="IDS_HELLO">
Hello
</message>
</messages>
<structures>
<structure type="dialog" name="IDD_ABOUTBOX" encoding="utf-16" file="klonk.rc" />
</structures>
</release>
<release seq="2">
<messages>
<message name="IDS_BINGO">
Bingo
</message>
</messages>
<structures>
<structure type="menu" name="IDC_KLONKMENU" encoding="utf-16" file="klonk.rc" />
</structures>
</release>
</grit>'''), util.PathFromRoot('grit/testdata'))
grd.SetOutputLanguage('en')
grd.RunGatherers()
hello = grd.GetNodeById('IDS_HELLO')
aboutbox = grd.GetNodeById('IDD_ABOUTBOX')
bingo = grd.GetNodeById('IDS_BINGO')
menu = grd.GetNodeById('IDC_KLONKMENU')
for node in [hello, aboutbox]:
self.failUnless(not node.PseudoIsAllowed())
for node in [bingo, menu]:
self.failUnless(node.PseudoIsAllowed())
# TODO(benrg): There was a test here that formatting hello and aboutbox with
# a pseudo language should fail, but they do not fail and the test was
# broken and failed to catch it. Fix this.
# Should not raise an exception since pseudo is allowed
rc.FormatMessage(bingo, 'xyz-pseudo')
rc.FormatStructure(menu, 'xyz-pseudo', '.')
if __name__ == '__main__':
unittest.main()
|
endlessm/chromium-browser
|
tools/grit/grit/node/misc_unittest.py
|
Python
|
bsd-3-clause
| 22,766
|
[
"xTB"
] |
c9e9f4fb798a7ad09d2fee240709e2785deb0d410e798cec2cf9ce12c1e2bc83
|
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit as u
import mdtraj.reporters
cutoff = 0.95 * u.nanometers
output_frequency = 25000
n_steps = 500000000
temperature = 298.
pressure = 1.0 * u.atmospheres
platform_name = "CUDA"
pdb_filename = "./1d3z_equil.pdb"
dcd_filename = "./1d3z.dcd"
log_filename = "./1d3z.log"
traj = mdtraj.load(pdb_filename)
top, bonds = traj.top.to_dataframe()
atom_indices = top.index[top.chainID == 0].values
pdb = app.PDBFile(pdb_filename)
topology = pdb.topology
positions = pdb.positions
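# Build the system with PME electrostatics, the cutoff defined above and
# bonds to hydrogen constrained; a Monte Carlo barostat keeps the pressure
# at 1 atm for NPT sampling.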
ff = app.ForceField('amber99sbnmr.xml', 'tip3p-fb.xml')
platform = mm.Platform.getPlatformByName(platform_name)
system = ff.createSystem(topology, nonbondedMethod=app.PME, nonbondedCutoff=cutoff, constraints=app.HBonds)
integrator = mm.LangevinIntegrator(temperature, 1.0 / u.picoseconds, 2.0 * u.femtoseconds)
system.addForce(mm.MonteCarloBarostat(pressure, temperature, 25))
simulation = app.Simulation(topology, system, integrator, platform=platform)
simulation.context.setPositions(positions)
simulation.context.setVelocitiesToTemperature(temperature)
print("Using platform %s" % simulation.context.getPlatform().getName())
simulation.reporters.append(mdtraj.reporters.DCDReporter(dcd_filename, output_frequency, atomSubset=atom_indices))
simulation.reporters.append(app.StateDataReporter(open(log_filename, 'w'), 5000, step=True, time=True, speed=True))
simulation.step(n_steps)
|
choderalab/open-forcefield-group
|
nmr/code/simulate_ubiquitin.py
|
Python
|
gpl-2.0
| 1,442
|
[
"MDTraj",
"OpenMM"
] |
be049a729fd327bdf96f8f418ad563298c18c1579b15d1697fb3551fb391f321
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import espressomd
import unittest as ut
import unittest_decorators as utx
from tests_common import abspath
@utx.skipIfMissingFeatures(["MEMBRANE_COLLISION", "OIF_LOCAL_FORCES",
"OIF_GLOBAL_FORCES"])
class OifVolumeConservation(ut.TestCase):
"""Loads a soft elastic sphere via object_in_fluid, stretches it and checks
restoration of original volume due to elastic forces."""
def test(self):
import object_in_fluid as oif
system = espressomd.System(box_l=(10, 10, 10))
self.assertEqual(system.max_oif_objects, 0)
system.time_step = 0.4
system.cell_system.skin = 0.5
system.thermostat.set_langevin(kT=0, gamma=0.7, seed=42)
# creating the template for OIF object
cell_type = oif.OifCellType(
nodes_file=abspath("data/sphere393nodes.dat"),
triangles_file=abspath("data/sphere393triangles.dat"),
system=system, ks=1.0, kb=1.0, kal=1.0, kag=0.1, kv=0.1,
check_orientation=False, resize=(3.0, 3.0, 3.0))
# creating the OIF object
cell0 = oif.OifCell(
cell_type=cell_type, particle_type=0, origin=[5.0, 5.0, 5.0])
self.assertEqual(system.max_oif_objects, 1)
# cell0.output_vtk_pos_folded(file_name="cell0_0.vtk")
# fluid
diameter_init = cell0.diameter()
print("initial diameter = " + str(diameter_init))
# OIF object is being stretched by factor 1.5
system.part[:].pos = (system.part[:].pos - 5) * 1.5 + 5
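# (i.e. particle positions are scaled by 1.5 about the box centre at (5, 5, 5))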
diameter_stretched = cell0.diameter()
print("stretched diameter = " + str(diameter_stretched))
# main integration loop
# OIF object is let to relax into relaxed shape of the sphere
for _ in range(3):
system.integrator.run(steps=90)
diameter_final = cell0.diameter()
print("final diameter = " + str(diameter_final))
self.assertAlmostEqual(
diameter_final / diameter_init - 1, 0, delta=0.005)
if __name__ == "__main__":
ut.main()
|
psci2195/espresso-ffans
|
testsuite/python/oif_volume_conservation.py
|
Python
|
gpl-3.0
| 2,797
|
[
"ESPResSo",
"VTK"
] |
4c6a5d00aecd5372d1fcc7cc7b98d7874d5812a6104c4a05020c87f0f8825823
|
"""
A VTK RenderWindowInteractor widget for wxPython.
Find wxPython info at http://wxPython.org
Created by Prabhu Ramachandran, April 2002
Based on wxVTKRenderWindow.py
Fixes and updates by Charl P. Botha 2003-2008
Updated to new wx namespace and some cleaning up by Andrea Gavana,
December 2006
"""
"""
Please see the example at the end of this file.
----------------------------------------
Creation:
wxVTKRenderWindowInteractor(parent, ID, stereo=0, [wx keywords]):
You should create a wx.App(False) or some other wx.App subclass
before creating the window.
Behaviour:
Uses __getattr__ to make the wxVTKRenderWindowInteractor behave just
like a vtkGenericRenderWindowInteractor.
----------------------------------------
"""
# import usual libraries
import math, os, sys
import wx
import vtk
# a few configuration items, see what works best on your system
# Use GLCanvas as base class instead of wx.Window.
# This is sometimes necessary under wxGTK or the image is blank.
# (in wxWindows 2.3.1 and earlier, the GLCanvas had scroll bars)
baseClass = wx.Window
if wx.Platform == "__WXGTK__":
import wx.glcanvas
baseClass = wx.glcanvas.GLCanvas
# Keep capturing mouse after mouse is dragged out of window
# (in wxGTK 2.3.2 there is a bug that keeps this from working,
# but it is only relevant in wxGTK if there are multiple windows)
_useCapture = (wx.Platform == "__WXMSW__")
# end of configuration items
class EventTimer(wx.Timer):
"""Simple wx.Timer class.
"""
def __init__(self, iren):
"""Default class constructor.
@param iren: current render window
"""
wx.Timer.__init__(self)
self.iren = iren
def Notify(self):
""" The timer has expired.
"""
self.iren.TimerEvent()
class wxVTKRenderWindowInteractor(baseClass):
"""
A wxRenderWindow for wxPython.
Use GetRenderWindow() to get the vtkRenderWindow.
Create with the keyword stereo=1 in order to
generate a stereo-capable window.
"""
# class variable that can also be used to request instances that use
# stereo; this is overridden by the stereo=1/0 parameter. If you set
# it to True, the NEXT instantiated object will attempt to allocate a
# stereo visual. E.g.:
# wxVTKRenderWindowInteractor.USE_STEREO = True
# myRWI = wxVTKRenderWindowInteractor(parent, -1)
USE_STEREO = False
def __init__(self, parent, ID, *args, **kw):
"""Default class constructor.
@param parent: parent window
@param ID: window id
@param **kw: wxPython keywords (position, size, style) plus the
'stereo' keyword
"""
# private attributes
self.__RenderWhenDisabled = 0
# First do special handling of some keywords:
# stereo, position, size, width, height, style
try:
stereo = bool(kw['stereo'])
del kw['stereo']
except KeyError:
stereo = False
try:
position = kw['position']
del kw['position']
except KeyError:
position = wx.DefaultPosition
try:
size = kw['size']
del kw['size']
except KeyError:
try:
size = parent.GetSize()
except AttributeError:
size = wx.DefaultSize
# wx.WANTS_CHARS says to give us e.g. TAB
# wx.NO_FULL_REPAINT_ON_RESIZE cuts down resize flicker under GTK
style = wx.WANTS_CHARS | wx.NO_FULL_REPAINT_ON_RESIZE
try:
style = style | kw['style']
del kw['style']
except KeyError:
pass
# the enclosing frame must be shown under GTK or the windows
# don't connect together properly
if wx.Platform != '__WXMSW__':
l = []
p = parent
while p: # make a list of all parents
l.append(p)
p = p.GetParent()
l.reverse() # sort list into descending order
for p in l:
p.Show(1)
if baseClass.__name__ == 'GLCanvas':
# code added by cpbotha to enable stereo and double
# buffering correctly where the user requests this; remember
# that the glXContext in this case is NOT allocated by VTK,
# but by WX, hence all of this.
# Initialize GLCanvas with correct attriblist
attribList = [wx.glcanvas.WX_GL_RGBA,
wx.glcanvas.WX_GL_MIN_RED, 1,
wx.glcanvas.WX_GL_MIN_GREEN, 1,
wx.glcanvas.WX_GL_MIN_BLUE, 1,
wx.glcanvas.WX_GL_DEPTH_SIZE, 16,
wx.glcanvas.WX_GL_DOUBLEBUFFER]
if stereo:
attribList.append(wx.glcanvas.WX_GL_STEREO)
try:
baseClass.__init__(self, parent, ID, pos=position, size=size,
style=style,
attribList=attribList)
except wx.PyAssertionError:
# visual couldn't be allocated, so we go back to default
baseClass.__init__(self, parent, ID, pos=position, size=size,
style=style)
if stereo:
# and make sure everyone knows that the stereo
# visual wasn't set.
stereo = 0
else:
baseClass.__init__(self, parent, ID, pos=position, size=size,
style=style)
# create the RenderWindow and initialize it
self._Iren = vtk.vtkGenericRenderWindowInteractor()
self._Iren.SetRenderWindow( vtk.vtkRenderWindow() )
self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)
self._Iren.GetRenderWindow().AddObserver('CursorChangedEvent',
self.CursorChangedEvent)
try:
self._Iren.GetRenderWindow().SetSize(size.width, size.height)
except AttributeError:
self._Iren.GetRenderWindow().SetSize(size[0], size[1])
if stereo:
self._Iren.GetRenderWindow().StereoCapableWindowOn()
self._Iren.GetRenderWindow().SetStereoTypeToCrystalEyes()
self.__handle = None
self.BindEvents()
# with this, we can make sure that the reparenting logic in
# Render() isn't called before the first OnPaint() has
# successfully been run (and set up the VTK/WX display links)
self.__has_painted = False
# set when we have captured the mouse.
self._own_mouse = False
# used to store WHICH mouse button led to mouse capture
self._mouse_capture_button = 0
# A mapping for cursor changes.
self._cursor_map = {0: wx.CURSOR_ARROW, # VTK_CURSOR_DEFAULT
1: wx.CURSOR_ARROW, # VTK_CURSOR_ARROW
2: wx.CURSOR_SIZENESW, # VTK_CURSOR_SIZENE
3: wx.CURSOR_SIZENWSE, # VTK_CURSOR_SIZENWSE
4: wx.CURSOR_SIZENESW, # VTK_CURSOR_SIZESW
5: wx.CURSOR_SIZENWSE, # VTK_CURSOR_SIZESE
6: wx.CURSOR_SIZENS, # VTK_CURSOR_SIZENS
7: wx.CURSOR_SIZEWE, # VTK_CURSOR_SIZEWE
8: wx.CURSOR_SIZING, # VTK_CURSOR_SIZEALL
9: wx.CURSOR_HAND, # VTK_CURSOR_HAND
10: wx.CURSOR_CROSS, # VTK_CURSOR_CROSSHAIR
}
def BindEvents(self):
"""Binds all the necessary events for navigation, sizing,
drawing.
"""
# refresh window by doing a Render
self.Bind(wx.EVT_PAINT, self.OnPaint)
# turn off background erase to reduce flicker
self.Bind(wx.EVT_ERASE_BACKGROUND, lambda e: None)
# Bind the events to the event converters
self.Bind(wx.EVT_RIGHT_DOWN, self.OnButtonDown)
self.Bind(wx.EVT_LEFT_DOWN, self.OnButtonDown)
self.Bind(wx.EVT_MIDDLE_DOWN, self.OnButtonDown)
self.Bind(wx.EVT_RIGHT_UP, self.OnButtonUp)
self.Bind(wx.EVT_LEFT_UP, self.OnButtonUp)
self.Bind(wx.EVT_MIDDLE_UP, self.OnButtonUp)
self.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
# If we use EVT_KEY_DOWN instead of EVT_CHAR, capital versions
# of all characters are always returned. EVT_CHAR also performs
# other necessary keyboard-dependent translations.
self.Bind(wx.EVT_CHAR, self.OnKeyDown)
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.Bind(wx.EVT_SIZE, self.OnSize)
# the wx 2.8.7.1 documentation states that you HAVE to handle
# this event if you make use of CaptureMouse, which we do.
if _useCapture and hasattr(wx, 'EVT_MOUSE_CAPTURE_LOST'):
self.Bind(wx.EVT_MOUSE_CAPTURE_LOST,
self.OnMouseCaptureLost)
def __getattr__(self, attr):
"""Makes the object behave like a
vtkGenericRenderWindowInteractor.
"""
if attr == '__vtk__':
return lambda t=self._Iren: t
elif hasattr(self._Iren, attr):
return getattr(self._Iren, attr)
else:
raise AttributeError(self.__class__.__name__ +
" has no attribute named " + attr)
def CreateTimer(self, obj, evt):
""" Creates a timer.
"""
self._timer = EventTimer(self)
self._timer.Start(10, True)
def DestroyTimer(self, obj, evt):
"""The timer is a one shot timer so will expire automatically.
"""
return 1
def _CursorChangedEvent(self, obj, evt):
"""Change the wx cursor if the renderwindow's cursor was
changed.
"""
cur = self._cursor_map[obj.GetCurrentCursor()]
c = wx.StockCursor(cur)
self.SetCursor(c)
def CursorChangedEvent(self, obj, evt):
"""Called when the CursorChangedEvent fires on the render
window."""
# This indirection is needed since when the event fires, the
# current cursor is not yet set so we defer this by which time
# the current cursor should have been set.
wx.CallAfter(self._CursorChangedEvent, obj, evt)
def HideCursor(self):
"""Hides the cursor."""
c = wx.StockCursor(wx.CURSOR_BLANK)
self.SetCursor(c)
def ShowCursor(self):
"""Shows the cursor."""
rw = self._Iren.GetRenderWindow()
cur = self._cursor_map[rw.GetCurrentCursor()]
c = wx.StockCursor(cur)
self.SetCursor(c)
def GetDisplayId(self):
"""Function to get X11 Display ID from WX and return it in a format
that can be used by VTK Python.
We query the X11 Display with a new call that was added in wxPython
2.6.0.1. The call returns a SWIG object which we can query for the
address and subsequently turn into an old-style SWIG-mangled string
representation to pass to VTK.
"""
d = None
try:
d = wx.GetXDisplay()
except AttributeError:
# wx.GetXDisplay was added by Robin Dunn in wxPython 2.6.0.1
# if it's not available, we can't pass it. In general,
# things will still work; on some setups, it'll break.
pass
else:
# wx returns None on platforms where wx.GetXDisplay is not relevant
if d:
d = hex(d)
# On wxPython-2.6.3.2 and above there is no leading '0x'.
if not d.startswith('0x'):
d = '0x' + d
# VTK wants it as: _xxxxxxxx_p_void (SWIG pointer)
d = '_%s_%s\0' % (d[2:], 'p_void')
return d
def OnMouseCaptureLost(self, event):
"""This is signalled when we lose mouse capture due to an
external event, such as when a dialog box is shown. See the
wx documentation.
"""
# the documentation seems to imply that by this time we've
# already lost capture. I have to assume that we don't need
# to call ReleaseMouse ourselves.
if _useCapture and self._own_mouse:
self._own_mouse = False
def OnPaint(self,event):
"""Handles the wx.EVT_PAINT event for
wxVTKRenderWindowInteractor.
"""
# wx should continue event processing after this handler.
# We call this BEFORE Render(), so that if Render() raises
# an exception, wx doesn't re-call OnPaint repeatedly.
event.Skip()
dc = wx.PaintDC(self)
# make sure the RenderWindow is sized correctly
self._Iren.GetRenderWindow().SetSize(self.GetSize())
# Tell the RenderWindow to render inside the wx.Window.
if not self.__handle:
# on relevant platforms, set the X11 Display ID
d = self.GetDisplayId()
if d and self.__has_painted:
self._Iren.GetRenderWindow().SetDisplayId(d)
# store the handle
self.__handle = self.GetHandle()
# and give it to VTK
self._Iren.GetRenderWindow().SetWindowInfo(str(self.__handle))
# now that we've painted once, the Render() reparenting logic
# is safe
self.__has_painted = True
self.Render()
def OnSize(self,event):
"""Handles the wx.EVT_SIZE event for
wxVTKRenderWindowInteractor.
"""
# event processing should continue (we call this before the
# Render(), in case it raises an exception)
event.Skip()
try:
width, height = event.GetSize()
except:
width = event.GetSize().width
height = event.GetSize().height
self._Iren.SetSize(width, height)
self._Iren.ConfigureEvent()
# this will check for __handle
self.Render()
def OnMotion(self,event):
"""Handles the wx.EVT_MOTION event for
wxVTKRenderWindowInteractor.
"""
# event processing should continue
# we call this early in case any of the VTK code raises an
# exception.
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.MouseMoveEvent()
def OnEnter(self,event):
"""Handles the wx.EVT_ENTER_WINDOW event for
wxVTKRenderWindowInteractor.
"""
# event processing should continue
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.EnterEvent()
def OnLeave(self,event):
"""Handles the wx.EVT_LEAVE_WINDOW event for
wxVTKRenderWindowInteractor.
"""
# event processing should continue
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.LeaveEvent()
def OnButtonDown(self,event):
"""Handles the wx.EVT_LEFT/RIGHT/MIDDLE_DOWN events for
wxVTKRenderWindowInteractor.
"""
# allow wx event processing to continue
# on wxPython 2.6.0.1, omitting this will cause problems with
# the initial focus, resulting in the wxVTKRWI ignoring keypresses
# until we focus elsewhere and then refocus the wxVTKRWI frame
# we do it this early in case any of the following VTK code
# raises an exception.
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, chr(0), 0, None)
button = 0
if event.RightDown():
self._Iren.RightButtonPressEvent()
button = 'Right'
elif event.LeftDown():
self._Iren.LeftButtonPressEvent()
button = 'Left'
elif event.MiddleDown():
self._Iren.MiddleButtonPressEvent()
button = 'Middle'
# save the button and capture mouse until the button is released
# we only capture the mouse if it hasn't already been captured
if _useCapture and not self._own_mouse:
self._own_mouse = True
self._mouse_capture_button = button
self.CaptureMouse()
def OnButtonUp(self,event):
"""Handles the wx.EVT_LEFT/RIGHT/MIDDLE_UP events for
wxVTKRenderWindowInteractor.
"""
# event processing should continue
event.Skip()
button = 0
if event.RightUp():
button = 'Right'
elif event.LeftUp():
button = 'Left'
elif event.MiddleUp():
button = 'Middle'
# if the same button is released that captured the mouse, and
# we have the mouse, release it.
# (we need to get rid of this as soon as possible; if we don't
# and one of the event handlers raises an exception, mouse
# is never released.)
if _useCapture and self._own_mouse and \
button==self._mouse_capture_button:
self.ReleaseMouse()
self._own_mouse = False
ctrl, shift = event.ControlDown(), event.ShiftDown()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, chr(0), 0, None)
if button == 'Right':
self._Iren.RightButtonReleaseEvent()
elif button == 'Left':
self._Iren.LeftButtonReleaseEvent()
elif button == 'Middle':
self._Iren.MiddleButtonReleaseEvent()
def OnMouseWheel(self,event):
"""Handles the wx.EVT_MOUSEWHEEL event for
wxVTKRenderWindowInteractor.
"""
# event processing should continue
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, chr(0), 0, None)
if event.GetWheelRotation() > 0:
self._Iren.MouseWheelForwardEvent()
else:
self._Iren.MouseWheelBackwardEvent()
def OnKeyDown(self,event):
"""Handles the wx.EVT_KEY_DOWN event for
wxVTKRenderWindowInteractor.
"""
# event processing should continue
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
keycode, keysym = event.GetKeyCode(), None
key = chr(0)
if keycode < 256:
key = chr(keycode)
# wxPython 2.6.0.1 does not return a valid event.Get{X,Y}()
# for this event, so we use the cached position.
(x,y)= self._Iren.GetEventPosition()
self._Iren.SetEventInformation(x, y,
ctrl, shift, key, 0,
keysym)
self._Iren.KeyPressEvent()
self._Iren.CharEvent()
def OnKeyUp(self,event):
"""Handles the wx.EVT_KEY_UP event for
wxVTKRenderWindowInteractor.
"""
# event processing should continue
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
keycode, keysym = event.GetKeyCode(), None
key = chr(0)
if keycode < 256:
key = chr(keycode)
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, key, 0,
keysym)
self._Iren.KeyReleaseEvent()
def GetRenderWindow(self):
"""Returns the render window (vtkRenderWindow).
"""
return self._Iren.GetRenderWindow()
def Render(self):
"""Actually renders the VTK scene on screen.
"""
RenderAllowed = 1
if not self.__RenderWhenDisabled:
# the user doesn't want us to render when the toplevel frame
# is disabled - first find the top level parent
topParent = wx.GetTopLevelParent(self)
if topParent:
# if it exists, check whether it's enabled
# if it's not enabled, RenderAllowed will be false
RenderAllowed = topParent.IsEnabled()
if RenderAllowed:
if self.__handle and self.__handle == self.GetHandle():
self._Iren.GetRenderWindow().Render()
elif self.GetHandle() and self.__has_painted:
# this means the user has reparented us; let's adapt to the
# new situation by doing the WindowRemap dance
self._Iren.GetRenderWindow().SetNextWindowInfo(
str(self.GetHandle()))
# make sure the DisplayId is also set correctly
d = self.GetDisplayId()
if d:
self._Iren.GetRenderWindow().SetDisplayId(d)
# do the actual remap with the new parent information
self._Iren.GetRenderWindow().WindowRemap()
# store the new situation
self.__handle = self.GetHandle()
self._Iren.GetRenderWindow().Render()
def SetRenderWhenDisabled(self, newValue):
"""Change value of __RenderWhenDisabled ivar.
If __RenderWhenDisabled is false (the default), this widget will not
call Render() on the RenderWindow if the top level frame (i.e. the
containing frame) has been disabled.
This prevents recursive rendering during wx.SafeYield() calls.
wx.SafeYield() can be called during the ProgressMethod() callback of
a VTK object to have progress bars and other GUI elements updated -
it does this by disabling all windows (disallowing user-input to
prevent re-entrancy of code) and then handling all outstanding
GUI events.
However, this often triggers an OnPaint() method for wxVTKRWIs,
resulting in a Render(), resulting in Update() being called whilst
still in progress.
"""
self.__RenderWhenDisabled = bool(newValue)
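# A minimal usage sketch (hypothetical filter and progress-bar names, not part
# of this module): leaving RenderWhenDisabled at its default of False is what
# makes a wx.SafeYield()-driven VTK progress callback safe, e.g.:
#
#   def onProgress(algorithm, evt):
#       progressBar.SetValue(int(algorithm.GetProgress() * 100))
#       wx.SafeYield()  # disables top-level frames while pumping GUI events
#   someVTKFilter.AddObserver("ProgressEvent", onProgress)
#
# With the frame disabled, any OnPaint()-triggered Render() is skipped instead
# of re-entering the still-running pipeline update.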
#--------------------------------------------------------------------
def wxVTKRenderWindowInteractorConeExample():
"""Like it says, just a simple example
"""
# every wx app needs an app
app = wx.App(False)
# create the top-level frame, sizer and wxVTKRWI
frame = wx.Frame(None, -1, "wxVTKRenderWindowInteractor", size=(400,400))
widget = wxVTKRenderWindowInteractor(frame, -1)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(widget, 1, wx.EXPAND)
frame.SetSizer(sizer)
frame.Layout()
# It would be more correct (API-wise) to call widget.Initialize() and
# widget.Start() here, but Initialize() calls RenderWindow.Render().
# That Render() call will get through before we can setup the
# RenderWindow() to render via the wxWidgets-created context; this
# causes flashing on some platforms and downright breaks things on
# other platforms. Instead, we call widget.Enable(). This means
# that the RWI::Initialized ivar is not set, but in THIS SPECIFIC CASE,
# that doesn't matter.
widget.Enable(1)
widget.AddObserver("ExitEvent", lambda o,e,f=frame: f.Close())
ren = vtk.vtkRenderer()
widget.GetRenderWindow().AddRenderer(ren)
cone = vtk.vtkConeSource()
cone.SetResolution(8)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren.AddActor(coneActor)
# show the window
frame.Show()
app.MainLoop()
if __name__ == "__main__":
wxVTKRenderWindowInteractorConeExample()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Wrapping/Python/vtk/wx/wxVTKRenderWindowInteractor.py
|
Python
|
bsd-3-clause
| 25,228
|
[
"VTK"
] |
1a915e4094e5ec8a11a79f9613b90c1b6f6b2f413ab7a116c5a82896c08ac7f4
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.evolve.Consts Pyevolve has defaults for all genetic operators, settings, etc.;
# these defaults help the user with the API and minimize the source code needed for simple tasks.
# In the module :mod:`Consts` you will find those default settings. You are encouraged to inspect the constants,
# but not to change them directly on the module; there are methods for this.
#
#
# -----------------------------------------------------------------
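# As a hedged illustration (assuming the pyevolve-style GSimpleGA API used
# elsewhere in this package; the import path below is hypothetical), the
# intended way to deviate from these defaults is through the engine's setter
# methods rather than by editing this module:
#
#   from pts.evolve.engine import GSimpleGA   # hypothetical import path
#   ga = GSimpleGA(genome)
#   ga.setGenerations(200)      # overrides CDefGAGenerations
#   ga.setMutationRate(0.05)    # overrides CDefGAMutationRate
#   ga.setPopulationSize(120)   # overrides CDefGAPopulationSize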
# Required python version 2.5+
CDefPythonRequire = (2, 5)
# Types of sort
# - raw: uses the "score" attribute
# - scaled: uses the "fitness" attribute
sortType = {
"raw": 0,
"scaled": 1
}
# Optimization type
# - Minimize or Maximize the Evaluator Function
#minimaxType = {"minimize": 0,
# "maximize": 1
# }
CDefESCKey = 27
CDefImportList = {"visual.graph": "you must install VPython !",
"csv": "csv module not found !",
"urllib": "urllib module not found !",
"sqlite3": "sqlite3 module not found, are you using Jython or IronPython ?",
"xmlrpclib": "xmlrpclib module not found !",
"MySQLdb": "MySQLdb module not found, you must install mysql-python !",
"pydot": "Pydot module not found, you must install Pydot to plot graphs !"}
####################
# Defaults section #
####################
# - Tournament selector
CDefTournamentPoolSize = 2
# - Scale methods defaults
CDefScaleLinearMultiplier = 1.2
CDefScaleSigmaTruncMultiplier = 2.0
CDefScalePowerLawFactor = 1.0005
CDefScaleBoltzMinTemp = 1.0
CDefScaleBoltzFactor = 0.05
# 40 temp. = 500 generations
CDefScaleBoltzStart = 40.0
# - Population Defaults
CDefPopSortType = sortType["scaled"]
CDefPopMinimax = "maximize" #minimaxType["maximize"]
from .scaling import LinearScaling
CDefPopScale = LinearScaling
# - GA Engine defaults
CDefGAGenerations = 100
CDefGAMutationRate = 0.02
CDefGACrossoverRate = 0.9
CDefGAPopulationSize = 80
from .selectors import GRankSelector
CDefGASelector = GRankSelector
CDefGAElitismReplacement = 1
# - This is general used by integer/real ranges defaults
CDefRangeMin = 0
CDefRangeMax = 100
# - G1DBinaryString defaults
from .mutators import G1DBinaryStringMutatorFlip
CDefG1DBinaryStringMutator = G1DBinaryStringMutatorFlip
from .crossovers import G1DBinaryStringXSinglePoint
CDefG1DBinaryStringCrossover = G1DBinaryStringXSinglePoint
from .initializators import G1DBinaryStringInitializator
CDefG1DBinaryStringInit = G1DBinaryStringInitializator
CDefG1DBinaryStringUniformProb = 0.5
# - G2DBinaryString defaults
from .mutators import G2DBinaryStringMutatorFlip
CDefG2DBinaryStringMutator = G2DBinaryStringMutatorFlip
from .crossovers import G2DBinaryStringXUniform
CDefG2DBinaryStringCrossover = G2DBinaryStringXUniform
from .initializators import G2DBinaryStringInitializator
CDefG2DBinaryStringInit = G2DBinaryStringInitializator
CDefG2DBinaryStringUniformProb = 0.5
# - GTree defaults
from .initializators import GTreeInitializatorInteger
CDefGTreeInit = GTreeInitializatorInteger
from .mutators import GTreeMutatorIntegerRange
CDefGGTreeMutator = GTreeMutatorIntegerRange
from .crossovers import GTreeCrossoverSinglePointStrict
CDefGTreeCrossover = GTreeCrossoverSinglePointStrict
# - GTreeGP defaults
from .initializators import GTreeGPInitializator
CDefGTreeGPInit = GTreeGPInitializator
from .mutators import GTreeGPMutatorSubtree
CDefGGTreeGPMutator = GTreeGPMutatorSubtree
from .crossovers import GTreeGPCrossoverSinglePoint
CDefGTreeGPCrossover = GTreeGPCrossoverSinglePoint
# - G1DList defaults
CDefG1DListMutIntMU = 2
CDefG1DListMutIntSIGMA = 10
CDefG1DListMutRealMU = 0
CDefG1DListMutRealSIGMA = 1
from .mutators import G1DListMutatorSwap
CDefG1DListMutator = G1DListMutatorSwap
from .crossovers import G1DListCrossoverSinglePoint
CDefG1DListCrossover = G1DListCrossoverSinglePoint
from .initializators import G1DListInitializatorInteger
CDefG1DListInit = G1DListInitializatorInteger
CDefG1DListCrossUniformProb = 0.5
# SBX Crossover defaults
# Crossover distribution index for SBX
# Larger Etac = similar to parents
# Smaller Etac = far away from parents
CDefG1DListSBXEtac = 10
CDefG1DListSBXEPS = 1.0e-14
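# For reference, a sketch of the standard SBX recombination that these two
# constants parameterize (not necessarily this package's exact implementation):
# for a uniform random u in [0, 1),
#
#   beta = (2 * u) ** (1.0 / (etac + 1)) if u <= 0.5 \
#          else (1.0 / (2.0 * (1.0 - u))) ** (1.0 / (etac + 1))
#   child1 = 0.5 * ((1 + beta) * p1 + (1 - beta) * p2)
#   child2 = 0.5 * ((1 - beta) * p1 + (1 + beta) * p2)
#
# so a larger etac keeps beta near 1 and the children near their parents,
# while EPS guards the computation when the parents are (nearly) identical.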
# - G2DList defaults
CDefG2DListMutIntMU = 2
CDefG2DListMutIntSIGMA = 10
CDefG2DListMutRealMU = 0
CDefG2DListMutRealSIGMA = 1
from .mutators import G2DListMutatorSwap
CDefG2DListMutator = G2DListMutatorSwap
from .crossovers import G2DListCrossoverUniform
CDefG2DListCrossover = G2DListCrossoverUniform
from .initializators import G2DListInitializatorInteger
CDefG2DListInit = G2DListInitializatorInteger
CDefG2DListCrossUniformProb = 0.5
# Gaussian Gradient
CDefGaussianGradientMU = 1.0
CDefGaussianGradientSIGMA = (1.0 / 3.0) # approx. +/- 3-sigma is +/- 10%
# - DB Adapters SQLite defaults
CDefSQLiteName = "SQLite database"
CDefSQLiteDBName = "pyevolve.db"
CDefSQLiteDBTable = "statistics"
CDefSQLiteDBTablePop = "population"
CDefSQLiteStatsGenFreq = 1
CDefSQLiteStatsCommitFreq = 300
# - DB Adapters MySQL defaults
CDefMySQLName = "MySQL database"
CDefMySQLDBName = "pyevolve"
CDefMySQLDBTable = "statistics"
CDefMySQLDBTablePop = "population"
CDefMySQLDBHost = "localhost"
CDefMySQLDBPort = 3306
CDefMySQLStatsGenFreq = 1
#CDefMySQLStatsCommitFreq = 300
CDefMySQLStatsCommitFreq = 1
# - DB Adapters URL Post defaults
CDefURLPostName = "URL"
CDefURLPostStatsGenFreq = 100
# - NEW: DB Adapters for populations file
CDefPopulationsName = "populations file"
CDefPopulationsFileName = "populations.dat"
CDefPopulationsStatsGenFreq = 1
# - DB Adapters CSV File defaults
CDefCSVName = "statistics file"
CDefCSVFileName = "pyevolve.csv"
CDefCSVFileStatsGenFreq = 1
# - DB Adapter XML RPC
CDefXMLRPCStatsGenFreq = 20
# Util Consts
CDefBroadcastAddress = "255.255.255.255"
nodeType = {"TERMINAL": 0, "NONTERMINAL": 1}
from .tree import GTreeGP
CDefGPGenomes = [GTreeGP]
# Migration Consts
CDefGenMigrationRate = 20
CDefMigrationNIndividuals = 3
CDefGenMigrationReplacement = 3
CDefNetworkIndividual = 1
CDefNetworkInfo = 2
# -----------------------------------------------------------------
|
SKIRT/PTS
|
evolve/core/constants.py
|
Python
|
agpl-3.0
| 6,458
|
[
"Gaussian"
] |
6882717a180d4c184065e3f701a7a2950dcc8e0b3b87f0f65e2d9bf00aba1fe3
|
"""Tool for sorting imports alphabetically, and automatically separated into sections."""
import argparse
import functools
import json
import os
import sys
from io import TextIOWrapper
from pathlib import Path
from typing import Any, Dict, Iterable, Iterator, List, Optional, Sequence, Set
from warnings import warn
from . import __version__, api, sections
from .exceptions import FileSkipped, UnsupportedEncoding
from .format import create_terminal_printer
from .logo import ASCII_ART
from .profiles import profiles
from .settings import VALID_PY_TARGETS, Config, WrapModes
try:
from .setuptools_commands import ISortCommand # noqa: F401
except ImportError:
pass
DEPRECATED_SINGLE_DASH_ARGS = {
"-ac",
"-af",
"-ca",
"-cs",
"-df",
"-ds",
"-dt",
"-fas",
"-fass",
"-ff",
"-fgw",
"-fss",
"-lai",
"-lbt",
"-le",
"-ls",
"-nis",
"-nlb",
"-ot",
"-rr",
"-sd",
"-sg",
"-sl",
"-sp",
"-tc",
"-wl",
"-ws",
}
QUICK_GUIDE = f"""
{ASCII_ART}
Nothing to do: no files or paths have been passed in!
Try one of the following:
`isort .` - sort all Python files, starting from the current directory, recursively.
`isort . --interactive` - Do the same, but ask before making any changes.
`isort . --check --diff` - Check to see if imports are correctly sorted within this project.
`isort --help` - In-depth information about isort's available command-line options.
Visit https://pycqa.github.io/isort/ for complete information about how to use isort.
"""
class SortAttempt:
def __init__(self, incorrectly_sorted: bool, skipped: bool, supported_encoding: bool) -> None:
self.incorrectly_sorted = incorrectly_sorted
self.skipped = skipped
self.supported_encoding = supported_encoding
def sort_imports(
file_name: str,
config: Config,
check: bool = False,
ask_to_apply: bool = False,
write_to_stdout: bool = False,
**kwargs: Any,
) -> Optional[SortAttempt]:
try:
incorrectly_sorted: bool = False
skipped: bool = False
if check:
try:
incorrectly_sorted = not api.check_file(file_name, config=config, **kwargs)
except FileSkipped:
skipped = True
return SortAttempt(incorrectly_sorted, skipped, True)
else:
try:
incorrectly_sorted = not api.sort_file(
file_name,
config=config,
ask_to_apply=ask_to_apply,
write_to_stdout=write_to_stdout,
**kwargs,
)
except FileSkipped:
skipped = True
return SortAttempt(incorrectly_sorted, skipped, True)
except (OSError, ValueError) as error:
warn(f"Unable to parse file {file_name} due to {error}")
return None
except UnsupportedEncoding:
if config.verbose:
warn(f"Encoding not supported for {file_name}")
return SortAttempt(incorrectly_sorted, skipped, False)
except Exception:
printer = create_terminal_printer(color=config.color_output)
printer.error(
f"Unrecoverable exception thrown when parsing {file_name}! "
"This should NEVER happen.\n"
"If encountered, please open an issue: https://github.com/PyCQA/isort/issues/new"
)
raise
def iter_source_code(
paths: Iterable[str], config: Config, skipped: List[str], broken: List[str]
) -> Iterator[str]:
"""Iterate over all Python source files defined in paths."""
visited_dirs: Set[Path] = set()
for path in paths:
if os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path, topdown=True, followlinks=True):
base_path = Path(dirpath)
for dirname in list(dirnames):
full_path = base_path / dirname
resolved_path = full_path.resolve()
if config.is_skipped(full_path):
skipped.append(dirname)
dirnames.remove(dirname)
else:
if resolved_path in visited_dirs: # pragma: no cover
if not config.quiet:
warn(f"Likely recursive symlink detected to {resolved_path}")
dirnames.remove(dirname)
visited_dirs.add(resolved_path)
for filename in filenames:
filepath = os.path.join(dirpath, filename)
if config.is_supported_filetype(filepath):
if config.is_skipped(Path(filepath)):
skipped.append(filename)
else:
yield filepath
elif not os.path.exists(path):
broken.append(path)
else:
yield path
def _build_arg_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description="Sort Python import definitions alphabetically "
"within logical sections. Run with no arguments to see a quick "
"start guide, otherwise, one or more files/directories/stdin must be provided. "
"Use `-` as the first argument to represent stdin. Use --interactive to use the pre 5.0.0 "
"interactive behavior."
" "
"If you've used isort 4 but are new to isort 5, see the upgrading guide:"
"https://pycqa.github.io/isort/docs/upgrade_guides/5.0.0/."
)
inline_args_group = parser.add_mutually_exclusive_group()
parser.add_argument(
"--src",
"--src-path",
dest="src_paths",
action="append",
help="Add an explicitly defined source path "
"(modules within src paths have their imports automatically categorized as first_party).",
)
parser.add_argument(
"-a",
"--add-import",
dest="add_imports",
action="append",
help="Adds the specified import line to all files, "
"automatically determining correct placement.",
)
parser.add_argument(
"--append",
"--append-only",
dest="append_only",
action="store_true",
help="Only adds the imports specified in --add-imports if the file"
" contains existing imports.",
)
parser.add_argument(
"--ac",
"--atomic",
dest="atomic",
action="store_true",
help="Ensures the output doesn't save if the resulting file contains syntax errors.",
)
parser.add_argument(
"--af",
"--force-adds",
dest="force_adds",
action="store_true",
help="Forces import adds even if the original file is empty.",
)
parser.add_argument(
"-b",
"--builtin",
dest="known_standard_library",
action="append",
help="Force isort to recognize a module as part of Python's standard library.",
)
parser.add_argument(
"--extra-builtin",
dest="extra_standard_library",
action="append",
help="Extra modules to be included in the list of ones in Python's standard library.",
)
parser.add_argument(
"-c",
"--check-only",
"--check",
action="store_true",
dest="check",
help="Checks the file for unsorted / unformatted imports and prints them to the "
"command line without modifying the file.",
)
parser.add_argument(
"--ca",
"--combine-as",
dest="combine_as_imports",
action="store_true",
help="Combines as imports on the same line.",
)
parser.add_argument(
"--cs",
"--combine-star",
dest="combine_star",
action="store_true",
help="Ensures that if a star import is present, "
"nothing else is imported from that namespace.",
)
parser.add_argument(
"-d",
"--stdout",
help="Force resulting output to stdout, instead of in-place.",
dest="write_to_stdout",
action="store_true",
)
parser.add_argument(
"--df",
"--diff",
dest="show_diff",
action="store_true",
help="Prints a diff of all the changes isort would make to a file, instead of "
"changing it in place",
)
parser.add_argument(
"--ds",
"--no-sections",
help="Put all imports into the same section bucket",
dest="no_sections",
action="store_true",
)
parser.add_argument(
"-e",
"--balanced",
dest="balanced_wrapping",
action="store_true",
help="Balances wrapping to produce the most consistent line length possible",
)
parser.add_argument(
"-f",
"--future",
dest="known_future_library",
action="append",
help="Force isort to recognize a module as part of Python's internal future compatibility "
"libraries. WARNING: this overrides the behavior of __future__ handling and therefore"
" can result in code that can't execute. If you're looking to add dependencies such "
"as six a better option is to create a another section below --future using custom "
"sections. See: https://github.com/PyCQA/isort#custom-sections-and-ordering and the "
"discussion here: https://github.com/PyCQA/isort/issues/1463.",
)
parser.add_argument(
"--fas",
"--force-alphabetical-sort",
action="store_true",
dest="force_alphabetical_sort",
help="Force all imports to be sorted as a single section",
)
parser.add_argument(
"--fass",
"--force-alphabetical-sort-within-sections",
action="store_true",
dest="force_alphabetical_sort_within_sections",
help="Force all imports to be sorted alphabetically within a section",
)
parser.add_argument(
"--ff",
"--from-first",
dest="from_first",
help="Switches the typical ordering preference, "
"showing from imports first then straight ones.",
)
parser.add_argument(
"--fgw",
"--force-grid-wrap",
nargs="?",
const=2,
type=int,
dest="force_grid_wrap",
help="Force number of from imports (defaults to 2 when passed as CLI flag without value)"
"to be grid wrapped regardless of line "
"length. If 0 is passed in (the global default) only line length is considered.",
)
parser.add_argument(
"--fss",
"--force-sort-within-sections",
action="store_true",
dest="force_sort_within_sections",
help="Don't sort straight-style imports (like import sys) before from-style imports "
"(like from itertools import groupby). Instead, sort the imports by module, "
"independent of import style.",
)
parser.add_argument(
"-i",
"--indent",
help='String to place for indents defaults to " " (4 spaces).',
dest="indent",
type=str,
)
parser.add_argument(
"-j", "--jobs", help="Number of files to process in parallel.", dest="jobs", type=int
)
parser.add_argument("--lai", "--lines-after-imports", dest="lines_after_imports", type=int)
parser.add_argument("--lbt", "--lines-between-types", dest="lines_between_types", type=int)
parser.add_argument(
"--le",
"--line-ending",
dest="line_ending",
help="Forces line endings to the specified value. "
"If not set, values will be guessed per-file.",
)
parser.add_argument(
"--ls",
"--length-sort",
help="Sort imports by their string length.",
dest="length_sort",
action="store_true",
)
parser.add_argument(
"--lss",
"--length-sort-straight",
help="Sort straight imports by their string length. Similar to `length_sort` "
"but applies only to straight imports and doesn't affect from imports.",
dest="length_sort_straight",
action="store_true",
)
parser.add_argument(
"-m",
"--multi-line",
dest="multi_line_output",
choices=list(WrapModes.__members__.keys())
+ [str(mode.value) for mode in WrapModes.__members__.values()],
type=str,
help="Multi line output (0-grid, 1-vertical, 2-hanging, 3-vert-hanging, 4-vert-grid, "
"5-vert-grid-grouped, 6-vert-grid-grouped-no-comma, 7-noqa, "
"8-vertical-hanging-indent-bracket, 9-vertical-prefix-from-module-import, "
"10-hanging-indent-with-parentheses).",
)
parser.add_argument(
"-n",
"--ensure-newline-before-comments",
dest="ensure_newline_before_comments",
action="store_true",
help="Inserts a blank line before a comment following an import.",
)
inline_args_group.add_argument(
"--nis",
"--no-inline-sort",
dest="no_inline_sort",
action="store_true",
help="Leaves `from` imports with multiple imports 'as-is' "
"(e.g. `from foo import a, c ,b`).",
)
parser.add_argument(
"--nlb",
"--no-lines-before",
help="Sections which should not be split with previous by empty lines",
dest="no_lines_before",
action="append",
)
parser.add_argument(
"-o",
"--thirdparty",
dest="known_third_party",
action="append",
help="Force isort to recognize a module as being part of a third party library.",
)
parser.add_argument(
"--ot",
"--order-by-type",
dest="order_by_type",
action="store_true",
help="Order imports by type, which is determined by case, in addition to alphabetically.\n"
"\n**NOTE**: type here refers to the implied type from the import name capitalization.\n"
' isort does not do type introspection for the imports. These "types" are simply: '
"CONSTANT_VARIABLE, CamelCaseClass, variable_or_function. If your project follows PEP8"
" or a related coding standard and has many imports this is a good default, otherwise you "
"likely will want to turn it off. From the CLI the `--dont-order-by-type` option will turn "
"this off.",
)
parser.add_argument(
"--dt",
"--dont-order-by-type",
dest="dont_order_by_type",
action="store_true",
help="Don't order imports by type, which is determined by case, in addition to "
"alphabetically.\n\n"
"**NOTE**: type here refers to the implied type from the import name capitalization.\n"
' isort does not do type introspection for the imports. These "types" are simply: '
"CONSTANT_VARIABLE, CamelCaseClass, variable_or_function. If your project follows PEP8"
" or a related coding standard and has many imports this is a good default. You can turn "
"this on from the CLI using `--order-by-type`.",
)
parser.add_argument(
"-p",
"--project",
dest="known_first_party",
action="append",
help="Force isort to recognize a module as being part of the current python project.",
)
parser.add_argument(
"--known-local-folder",
dest="known_local_folder",
action="append",
help="Force isort to recognize a module as being a local folder. "
"Generally, this is reserved for relative imports (from . import module).",
)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
dest="quiet",
help="Shows extra quiet output, only errors are outputted.",
)
parser.add_argument(
"--rm",
"--remove-import",
dest="remove_imports",
action="append",
help="Removes the specified import from all files.",
)
parser.add_argument(
"--rr",
"--reverse-relative",
dest="reverse_relative",
action="store_true",
help="Reverse order of relative imports.",
)
parser.add_argument(
"-s",
"--skip",
help="Files that sort imports should skip over. If you want to skip multiple "
"files you should specify twice: --skip file1 --skip file2.",
dest="skip",
action="append",
)
parser.add_argument(
"--sd",
"--section-default",
dest="default_section",
help="Sets the default section for import options: " + str(sections.DEFAULT),
)
parser.add_argument(
"--sg",
"--skip-glob",
help="Files that sort imports should skip over.",
dest="skip_glob",
action="append",
)
parser.add_argument(
"--gitignore",
"--skip-gitignore",
action="store_true",
dest="skip_gitignore",
help="Treat project as a git repository and ignore files listed in .gitignore",
)
inline_args_group.add_argument(
"--sl",
"--force-single-line-imports",
dest="force_single_line",
action="store_true",
help="Forces all from imports to appear on their own line",
)
parser.add_argument(
"--nsl",
"--single-line-exclusions",
help="One or more modules to exclude from the single line rule.",
dest="single_line_exclusions",
action="append",
)
parser.add_argument(
"--sp",
"--settings-path",
"--settings-file",
"--settings",
dest="settings_path",
help="Explicitly set the settings path or file instead of auto determining "
"based on file location.",
)
parser.add_argument(
"-t",
"--top",
help="Force specific imports to the top of their appropriate section.",
dest="force_to_top",
action="append",
)
parser.add_argument(
"--tc",
"--trailing-comma",
dest="include_trailing_comma",
action="store_true",
help="Includes a trailing comma on multi line imports that include parentheses.",
)
parser.add_argument(
"--up",
"--use-parentheses",
dest="use_parentheses",
action="store_true",
help="Use parentheses for line continuation on length limit instead of slashes."
" **NOTE**: This is separate from wrap modes, and only affects how individual lines that "
" are too long get continued, not sections of multiple imports.",
)
parser.add_argument(
"-V",
"--version",
action="store_true",
dest="show_version",
help="Displays the currently installed version of isort.",
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
dest="verbose",
help="Shows verbose output, such as when files are skipped or when a check is successful.",
)
parser.add_argument(
"--virtual-env",
dest="virtual_env",
help="Virtual environment to use for determining whether a package is third-party",
)
parser.add_argument(
"--conda-env",
dest="conda_env",
help="Conda environment to use for determining whether a package is third-party",
)
parser.add_argument(
"--vn",
"--version-number",
action="version",
version=__version__,
help="Returns just the current version number without the logo",
)
parser.add_argument(
"-l",
"-w",
"--line-length",
"--line-width",
help="The max length of an import line (used for wrapping long imports).",
dest="line_length",
type=int,
)
parser.add_argument(
"--wl",
"--wrap-length",
dest="wrap_length",
type=int,
help="Specifies how long lines that are wrapped should be, if not set line_length is used."
"\nNOTE: wrap_length must be LOWER than or equal to line_length.",
)
parser.add_argument(
"--ws",
"--ignore-whitespace",
action="store_true",
dest="ignore_whitespace",
help="Tells isort to ignore whitespace differences when --check-only is being used.",
)
parser.add_argument(
"--case-sensitive",
dest="case_sensitive",
action="store_true",
help="Tells isort to include casing when sorting module names",
)
parser.add_argument(
"--filter-files",
dest="filter_files",
action="store_true",
help="Tells isort to filter files even when they are explicitly passed in as "
"part of the CLI command.",
)
parser.add_argument(
"files", nargs="*", help="One or more Python source files that need their imports sorted."
)
parser.add_argument(
"--py",
"--python-version",
action="store",
dest="py_version",
choices=tuple(VALID_PY_TARGETS) + ("auto",),
help="Tells isort to set the known standard library based on the specified Python "
"version. Default is to assume any Python 3 version could be the target, and use a union "
"of all stdlib modules across versions. If auto is specified, the version of the "
"interpreter used to run isort "
f"(currently: {sys.version_info.major}{sys.version_info.minor}) will be used.",
)
parser.add_argument(
"--profile",
dest="profile",
type=str,
help="Base profile type to use for configuration. "
f"Profiles include: {', '.join(profiles.keys())}. As well as any shared profiles.",
)
parser.add_argument(
"--interactive",
dest="ask_to_apply",
action="store_true",
help="Tells isort to apply changes interactively.",
)
parser.add_argument(
"--old-finders",
"--magic-placement",
dest="old_finders",
action="store_true",
help="Use the old deprecated finder logic that relies on environment introspection magic.",
)
parser.add_argument(
"--show-config",
dest="show_config",
action="store_true",
help="See isort's determined config, as well as sources of config options.",
)
parser.add_argument(
"--show-files",
dest="show_files",
action="store_true",
help="See the files isort will be ran against with the current config options.",
)
parser.add_argument(
"--honor-noqa",
dest="honor_noqa",
action="store_true",
help="Tells isort to honor noqa comments to enforce skipping those comments.",
)
parser.add_argument(
"--remove-redundant-aliases",
dest="remove_redundant_aliases",
action="store_true",
help=(
"Tells isort to remove redundant aliases from imports, such as `import os as os`."
" This defaults to `False` simply because some projects use these seemingly useless "
" aliases to signify intent and change behaviour."
),
)
parser.add_argument(
"--color",
dest="color_output",
action="store_true",
help="Tells isort to use color in terminal output.",
)
parser.add_argument(
"--float-to-top",
dest="float_to_top",
action="store_true",
help="Causes all non-indented imports to float to the top of the file having its imports "
"sorted (immediately below the top of file comment).\n"
"This can be an excellent shortcut for collecting imports every once in a while "
"when you place them in the middle of a file to avoid context switching.\n\n"
"*NOTE*: It currently doesn't work with cimports and introduces some extra over-head "
"and a performance penalty.",
)
parser.add_argument(
"--treat-comment-as-code",
dest="treat_comments_as_code",
action="append",
help="Tells isort to treat the specified single line comment(s) as if they are code.",
)
parser.add_argument(
"--treat-all-comment-as-code",
dest="treat_all_comments_as_code",
action="store_true",
help="Tells isort to treat all single line comments as if they are code.",
)
parser.add_argument(
"--formatter",
dest="formatter",
type=str,
help="Specifies the name of a formatting plugin to use when producing output.",
)
parser.add_argument(
"--ext",
"--extension",
"--supported-extension",
dest="supported_extensions",
action="append",
help="Specifies what extensions isort can be ran against.",
)
parser.add_argument(
"--blocked-extension",
dest="blocked_extensions",
action="append",
help="Specifies what extensions isort can never be ran against.",
)
parser.add_argument(
"--dedup-headings",
dest="dedup_headings",
action="store_true",
help="Tells isort to only show an identical custom import heading comment once, even if"
" there are multiple sections with the comment set.",
)
# deprecated options
parser.add_argument(
"--recursive",
dest="deprecated_flags",
action="append_const",
const="--recursive",
help=argparse.SUPPRESS,
)
parser.add_argument(
"-rc", dest="deprecated_flags", action="append_const", const="-rc", help=argparse.SUPPRESS
)
parser.add_argument(
"--dont-skip",
dest="deprecated_flags",
action="append_const",
const="--dont-skip",
help=argparse.SUPPRESS,
)
parser.add_argument(
"-ns", dest="deprecated_flags", action="append_const", const="-ns", help=argparse.SUPPRESS
)
parser.add_argument(
"--apply",
dest="deprecated_flags",
action="append_const",
const="--apply",
help=argparse.SUPPRESS,
)
parser.add_argument(
"-k",
"--keep-direct-and-as",
dest="deprecated_flags",
action="append_const",
const="--keep-direct-and-as",
help=argparse.SUPPRESS,
)
parser.add_argument(
"--only-sections",
"--os",
dest="only_sections",
action="store_true",
help="Causes imports to be sorted only based on their sections like STDLIB,THIRDPARTY etc. "
"Imports are unaltered and keep their relative positions within the different sections.",
)
parser.add_argument(
"--only-modified",
"--om",
dest="only_modified",
action="store_true",
help="Suppresses verbose output for non-modified files.",
)
return parser
def parse_args(argv: Optional[Sequence[str]] = None) -> Dict[str, Any]:
argv = sys.argv[1:] if argv is None else list(argv)
remapped_deprecated_args = []
for index, arg in enumerate(argv):
if arg in DEPRECATED_SINGLE_DASH_ARGS:
remapped_deprecated_args.append(arg)
argv[index] = f"-{arg}"
parser = _build_arg_parser()
arguments = {key: value for key, value in vars(parser.parse_args(argv)).items() if value}
if remapped_deprecated_args:
arguments["remapped_deprecated_args"] = remapped_deprecated_args
if "dont_order_by_type" in arguments:
arguments["order_by_type"] = False
del arguments["dont_order_by_type"]
multi_line_output = arguments.get("multi_line_output", None)
if multi_line_output:
if multi_line_output.isdigit():
arguments["multi_line_output"] = WrapModes(int(multi_line_output))
else:
arguments["multi_line_output"] = WrapModes[multi_line_output]
return arguments
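# For illustration only (a hypothetical invocation, not part of isort's public
# API surface): the loop above rewrites deprecated single-dash flags before
# argparse sees them, so
#
#   parse_args(["-tc", "-wl", "80", "example.py"])
#
# yields roughly
#
#   {"include_trailing_comma": True, "wrap_length": 80,
#    "files": ["example.py"], "remapped_deprecated_args": ["-tc", "-wl"]}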
def _preconvert(item):
"""Preconverts objects from native types into JSONifyiable types"""
if isinstance(item, (set, frozenset)):
return list(item)
elif isinstance(item, WrapModes):
return item.name
elif isinstance(item, Path):
return str(item)
elif callable(item) and hasattr(item, "__name__"):
return item.__name__
else:
raise TypeError("Unserializable object {} of type {}".format(item, type(item)))
def main(argv: Optional[Sequence[str]] = None, stdin: Optional[TextIOWrapper] = None) -> None:
arguments = parse_args(argv)
if arguments.get("show_version"):
print(ASCII_ART)
return
show_config: bool = arguments.pop("show_config", False)
show_files: bool = arguments.pop("show_files", False)
if show_config and show_files:
sys.exit("Error: either specify show-config or show-files not both.")
if "settings_path" in arguments:
if os.path.isfile(arguments["settings_path"]):
arguments["settings_file"] = os.path.abspath(arguments["settings_path"])
arguments["settings_path"] = os.path.dirname(arguments["settings_file"])
else:
arguments["settings_path"] = os.path.abspath(arguments["settings_path"])
if "virtual_env" in arguments:
venv = arguments["virtual_env"]
arguments["virtual_env"] = os.path.abspath(venv)
if not os.path.isdir(arguments["virtual_env"]):
warn(f"virtual_env dir does not exist: {arguments['virtual_env']}")
file_names = arguments.pop("files", [])
if not file_names and not show_config:
print(QUICK_GUIDE)
if arguments:
sys.exit("Error: arguments passed in without any paths or content.")
else:
return
if "settings_path" not in arguments:
arguments["settings_path"] = (
os.path.abspath(file_names[0] if file_names else ".") or os.getcwd()
)
if not os.path.isdir(arguments["settings_path"]):
arguments["settings_path"] = os.path.dirname(arguments["settings_path"])
config_dict = arguments.copy()
ask_to_apply = config_dict.pop("ask_to_apply", False)
jobs = config_dict.pop("jobs", ())
check = config_dict.pop("check", False)
show_diff = config_dict.pop("show_diff", False)
write_to_stdout = config_dict.pop("write_to_stdout", False)
deprecated_flags = config_dict.pop("deprecated_flags", False)
remapped_deprecated_args = config_dict.pop("remapped_deprecated_args", False)
wrong_sorted_files = False
all_attempt_broken = False
no_valid_encodings = False
if "src_paths" in config_dict:
config_dict["src_paths"] = {
Path(src_path).resolve() for src_path in config_dict.get("src_paths", ())
}
config = Config(**config_dict)
if show_config:
print(json.dumps(config.__dict__, indent=4, separators=(",", ": "), default=_preconvert))
return
elif file_names == ["-"]:
if show_files:
sys.exit("Error: can't show files for streaming input.")
if check:
incorrectly_sorted = not api.check_stream(
input_stream=sys.stdin if stdin is None else stdin,
config=config,
show_diff=show_diff,
)
wrong_sorted_files = incorrectly_sorted
else:
api.sort_stream(
input_stream=sys.stdin if stdin is None else stdin,
output_stream=sys.stdout,
config=config,
show_diff=show_diff,
)
else:
skipped: List[str] = []
broken: List[str] = []
if config.filter_files:
filtered_files = []
for file_name in file_names:
if config.is_skipped(Path(file_name)):
skipped.append(file_name)
else:
filtered_files.append(file_name)
file_names = filtered_files
file_names = iter_source_code(file_names, config, skipped, broken)
if show_files:
for file_name in file_names:
print(file_name)
return
num_skipped = 0
num_broken = 0
num_invalid_encoding = 0
if config.verbose:
print(ASCII_ART)
if jobs:
import multiprocessing
executor = multiprocessing.Pool(jobs)
attempt_iterator = executor.imap(
functools.partial(
sort_imports,
config=config,
check=check,
ask_to_apply=ask_to_apply,
write_to_stdout=write_to_stdout,
),
file_names,
)
else:
# https://github.com/python/typeshed/pull/2814
attempt_iterator = (
sort_imports( # type: ignore
file_name,
config=config,
check=check,
ask_to_apply=ask_to_apply,
show_diff=show_diff,
write_to_stdout=write_to_stdout,
)
for file_name in file_names
)
# Files passed in that are missing are considered an error and should be reported
is_no_attempt = True
any_encoding_valid = False
for sort_attempt in attempt_iterator:
if not sort_attempt:
continue # pragma: no cover - shouldn't happen, satisfies type constraint
incorrectly_sorted = sort_attempt.incorrectly_sorted
if arguments.get("check", False) and incorrectly_sorted:
wrong_sorted_files = True
if sort_attempt.skipped:
num_skipped += (
1 # pragma: no cover - shouldn't happen, due to skip in iter_source_code
)
if not sort_attempt.supported_encoding:
num_invalid_encoding += 1
else:
any_encoding_valid = True
is_no_attempt = False
num_skipped += len(skipped)
if num_skipped and not arguments.get("quiet", False):
if config.verbose:
for was_skipped in skipped:
warn(
f"{was_skipped} was skipped as it's listed in 'skip' setting"
" or matches a glob in 'skip_glob' setting"
)
print(f"Skipped {num_skipped} files")
num_broken += len(broken)
if num_broken and not arguments.get("quite", False):
if config.verbose:
for was_broken in broken:
warn(f"{was_broken} was broken path, make sure it exists correctly")
print(f"Broken {num_broken} paths")
if num_broken > 0 and is_no_attempt:
all_attempt_broken = True
if num_invalid_encoding > 0 and not any_encoding_valid:
no_valid_encodings = True
if not config.quiet and (remapped_deprecated_args or deprecated_flags):
if remapped_deprecated_args:
warn(
"W0502: The following deprecated single dash CLI flags were used and translated: "
f"{', '.join(remapped_deprecated_args)}!"
)
if deprecated_flags:
warn(
"W0501: The following deprecated CLI flags were used and ignored: "
f"{', '.join(deprecated_flags)}!"
)
warn(
"W0500: Please see the 5.0.0 Upgrade guide: "
"https://pycqa.github.io/isort/docs/upgrade_guides/5.0.0/"
)
if wrong_sorted_files:
sys.exit(1)
if all_attempt_broken:
sys.exit(1)
if no_valid_encodings:
printer = create_terminal_printer(color=config.color_output)
printer.error("No valid encodings.")
sys.exit(1)
if __name__ == "__main__":
main()
|
TeamSPoon/logicmoo_workspace
|
packs_web/butterfly/lib/python3.7/site-packages/isort/main.py
|
Python
|
mit
| 35,851
|
[
"VisIt"
] |
6ad016c8065a1f9370ced22b9804c60a99ca73b1f5134444955dd52fcfd13d14
|
input_name = '../examples/linear_elasticity/linear_elastic.py'
output_name = 'test_linear_elastic.vtk'
from tests_basic import TestInput
class Test( TestInput ):
pass
|
RexFuzzle/sfepy
|
tests/test_input_linear_elastic.py
|
Python
|
bsd-3-clause
| 172
|
[
"VTK"
] |
2dfc47f626a4ed09e216b977950fd72793e2ad623ef0833ae27fd7cef3d0b66a
|
#!/bin/env python
""" create rst files for documentation of DIRAC """
import os
import shutil
import socket
import sys
def mkdir(folder):
"""create a folder, ignore if it exists"""
try:
folder = os.path.join(os.getcwd(), folder)
os.mkdir(folder)
except OSError as e:
print "MakeDoc: Exception %s when creating folder" % repr(e), folder
BASEPATH = "docs/source/CodeDocumentation"
DIRACPATH = os.environ.get("DIRAC", "") + "/DIRAC"
ORIGDIR = os.getcwd()
BASEPATH = os.path.join(DIRACPATH, BASEPATH)
# files that call parseCommandLine or similar issues
BAD_FILES = ("lfc_dfc_copy",
"lfc_dfc_db_copy",
"JobWrapperTemplate",
"PlotCache", # PlotCache creates a thread on import, which keeps sphinx from exiting
"PlottingHandler",
"setup.py", # configuration for style check
# "DataStoreClient", # instantiates itself
# "ReportsClient", ## causes gDataCache to start
# "ComponentInstaller", # tries to connect to a DB
# "ProxyDB", # tries to connect to security log server
# "SystemAdministratorHandler", # tries to connect to monitoring
# "GlobusComputingElement", # tries to connect to a DB
# "HTCondorCEComputingElememt", # tries to connect to a DB
# "TaskManager", #Tries to connect to security logging
)
FORCE_ADD_PRIVATE = ["FCConditionParser"]
def mkRest(filename, modulename, fullmodulename, subpackages=None, modules=None):
"""make a rst file for filename"""
if modulename == "scripts":
return
#modulefinal = fullmodulename.split(".")[-2]+" Scripts"
else:
modulefinal = modulename
lines = []
lines.append("%s" % modulefinal)
lines.append("=" * len(modulefinal))
lines.append(".. module:: %s " % fullmodulename)
lines.append("")
if subpackages or modules:
lines.append(".. toctree::")
lines.append(" :maxdepth: 1")
lines.append("")
subpackages = [s for s in subpackages if not s.endswith(("scripts", ))]
if subpackages:
print "MakeDoc: ", modulename, " subpackages ", subpackages
lines.append("SubPackages")
lines.append("...........")
lines.append("")
lines.append(".. toctree::")
lines.append(" :maxdepth: 1")
lines.append("")
for package in sorted(subpackages):
lines.append(" %s/%s_Module.rst" % (package, package.split("/")[-1]))
#lines.append(" %s " % (package, ) )
lines.append("")
# remove CLI etc. because we drop them earlier
modules = [m for m in modules if not m.endswith("CLI") and "-" not in m]
if modules:
lines.append("Modules")
lines.append(".......")
lines.append("")
lines.append(".. toctree::")
lines.append(" :maxdepth: 1")
lines.append("")
for module in sorted(modules):
lines.append(" %s.rst" % (module.split("/")[-1],))
#lines.append(" %s " % (package, ) )
lines.append("")
with open(filename, 'w') as rst:
rst.write("\n".join(lines))
def mkDummyRest(classname, fullclassname):
""" create a dummy rst file for files that behave badly """
filename = classname + ".rst"
lines = []
lines.append("%s" % classname)
lines.append("=" * len(classname))
lines.append("")
lines.append(" This is an empty file, because we cannot parse this file correctly or it causes problems")
lines.append(" , please look at the source code directly")
with open(filename, 'w') as rst:
rst.write("\n".join(lines))
def mkModuleRest(classname, fullclassname, buildtype="full"):
""" create rst file for class"""
filename = classname + ".rst"
lines = []
lines.append("%s" % classname)
lines.append("=" * len(classname))
# if "-" not in classname:
# lines.append(".. autosummary::" )
# lines.append(" :toctree: %sGen" % classname )
# lines.append("")
# lines.append(" %s " % fullclassname )
# lines.append("")
lines.append(".. automodule:: %s" % fullclassname)
if buildtype == "full":
lines.append(" :members:")
lines.append(" :inherited-members:")
lines.append(" :undoc-members:")
lines.append(" :show-inheritance:")
if classname in FORCE_ADD_PRIVATE:
lines.append(" :special-members:")
lines.append(" :private-members:")
else:
lines.append(" :special-members: __init__")
if classname.startswith("_"):
lines.append(" :private-members:")
with open(filename, 'w') as rst:
rst.write("\n".join(lines))
def getsubpackages(abspath, direc):
"""return list of subpackages with full path"""
packages = []
for dire in direc:
if dire.lower() == "test" or dire.lower() == "tests" or "/test" in dire.lower():
print "MakeDoc: skipping this directory", dire
continue
if os.path.exists(os.path.join(DIRACPATH, abspath, dire, "__init__.py")):
#packages.append( os.path.join( "DOC", abspath, dire) )
packages.append(os.path.join(dire))
return packages
def getmodules(_abspath, _direc, files):
"""return list of subpackages with full path"""
packages = []
for filename in files:
if "test" in filename.lower():
print "MakeDoc: Skipping this file", filename
continue
if filename != "__init__.py":
packages.append(filename.split(".py")[0])
return packages
def createDoc(buildtype="full"):
"""create the rst files for all the things we want them for"""
print "MakeDoc: DIRACPATH", DIRACPATH
print "MakeDoc: BASEPATH", BASEPATH
print "Host", socket.gethostname()
# we need to replace existing rst files so we can decide how much code-doc to create
if os.path.exists(BASEPATH):
shutil.rmtree(BASEPATH)
mkdir(BASEPATH)
os.chdir(BASEPATH)
print "MakeDoc: Now creating rst files"
for root, direc, files in os.walk(DIRACPATH):
configTemplate = [os.path.join(root, _) for _ in files if _ == 'ConfigTemplate.cfg']
files = [_ for _ in files if _.endswith(".py")]
if "__init__.py" not in files:
continue
if any(root.lower().endswith(f.lower()) for f in ("/docs", )):
continue
elif any(f.lower() in root.lower() for f in ("/test", "scripts",
)):
print "MakeDoc: Skipping this folder:", root
continue
modulename = root.split("/")[-1]
abspath = root.split(DIRACPATH)[1].strip("/")
fullmodulename = ".".join(abspath.split("/"))
packages = getsubpackages(abspath, direc)
if abspath:
mkdir(abspath)
os.chdir(abspath)
if modulename == "DIRAC":
createCodeDocIndex(
subpackages=packages,
modules=getmodules(
abspath,
direc,
files),
buildtype=buildtype)
elif buildtype == "limited":
os.chdir(BASEPATH)
return 0
else:
mkRest(
modulename + "_Module.rst",
modulename,
fullmodulename,
subpackages=packages,
modules=getmodules(
abspath,
direc,
files))
for filename in files:
# Skip things that call parseCommandLine or similar issues
fullclassname = ".".join(abspath.split("/") + [filename])
if any(f in filename for f in BAD_FILES):
mkDummyRest(filename.split(".py")[0], fullclassname.split(".py")[0])
continue
elif not filename.endswith(".py") or \
filename.endswith("CLI.py") or \
filename.lower().startswith("test") or \
filename == "__init__.py" or \
"-" in filename: # not valid python identifier, e.g. dirac-pilot
continue
if not fullclassname.startswith("DIRAC."):
fullclassname = "DIRAC." + fullclassname
# Remove some FrameworkServices because things go weird
mkModuleRest(filename.split(".py")[0], fullclassname.split(".py")[0], buildtype)
if configTemplate:
shutil.copy(configTemplate[0], os.path.join(BASEPATH, abspath))
os.chdir(BASEPATH)
shutil.copy(os.path.join(DIRACPATH, 'dirac.cfg'), BASEPATH)
return 0
def createCodeDocIndex(subpackages, modules, buildtype="full"):
"""create the main index file"""
filename = "index.rst"
lines = []
lines.append(".. _code_documentation:")
lines.append("")
lines.append("Code Documentation (|release|)")
lines.append("------------------------------")
# for limited builds we only create the most basic code documentation so
# we let users know there is more elsewhere
if buildtype == "limited":
lines.append("")
lines.append(".. warning::")
lines.append(
" This a limited build of the code documentation, for the full code documentation please look at the website")
lines.append("")
else:
if subpackages or modules:
lines.append(".. toctree::")
lines.append(" :maxdepth: 1")
lines.append("")
if subpackages:
systemPackages = sorted([pck for pck in subpackages if pck.endswith("System")])
otherPackages = sorted([pck for pck in subpackages if not pck.endswith("System")])
lines.append("=======")
lines.append("Systems")
lines.append("=======")
lines.append("")
lines.append(".. toctree::")
lines.append(" :maxdepth: 1")
lines.append("")
for package in systemPackages:
lines.append(" %s/%s_Module.rst" % (package, package.split("/")[-1]))
lines.append("")
lines.append("=====")
lines.append("Other")
lines.append("=====")
lines.append("")
lines.append(".. toctree::")
lines.append(" :maxdepth: 1")
lines.append("")
for package in otherPackages:
lines.append(" %s/%s_Module.rst" % (package, package.split("/")[-1]))
if modules:
for module in sorted(modules):
lines.append(" %s.rst" % (module.split("/")[-1],))
#lines.append(" %s " % (package, ) )
with open(filename, 'w') as rst:
rst.write("\n".join(lines))
def checkBuildTypeAndRun():
""" check for input argument and then create the doc rst files """
buildtypes = ("full", "limited")
buildtype = "full" if len(sys.argv) <= 1 else sys.argv[1]
if buildtype not in buildtypes:
print "MakeDoc: Unknown build type: %s use %s " % (buildtype, " ".join(buildtypes))
return 1
print "MakeDoc: buildtype:", buildtype
exit(createDoc(buildtype))
if __name__ == "__main__":
# get the options
exit(checkBuildTypeAndRun())
|
arrabito/DIRAC
|
docs/Tools/MakeDoc.py
|
Python
|
gpl-3.0
| 10,459
|
[
"DIRAC"
] |
34149a2dc283a4a2ec84f1bac589e6445c47418ddf15b41208079e37a8a5e371
|
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/self-organizing-maps.py
# Extended example on self-organizing maps
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace. We will
# also need the random module:
from numpy import *
import random
import peach as p
# A self-organizing map has the ability to automatically recognize and classify
# patterns. This tutorial shows graphically how this happens. We have a set of
# points in the cartesian plane, each coordinate obtained from a central point
# plus a random (gaussian, average 0, small variance) shift in some direction.
# We use this set to build the network.
# First, we create the training set:
train_size = 300
centers = [ array([ 1.0, 0.0 ], dtype=float),
array([ 1.0, 1.0 ], dtype=float),
array([ 0.0, 1.0 ], dtype=float),
array([-1.0, 1.0 ], dtype=float),
array([-1.0, 0.0 ], dtype=float) ]
xs = [ ]
for i in range(train_size):
x1 = random.gauss(0.0, 0.1)
x2 = random.gauss(0.0, 0.1)
xs.append(centers[i%5] + array([ x1, x2 ], dtype=float))
# Since we are working on the plane, each example and each neuron will have two
# coordinates. We will use five neurons (since we have five centers). The
# self-organizing map is created by the line below. Our goal is to show how the
# weights converge to the mass center of the point clouds, so we initialize the
# weights to show it:
nn = p.SOM((5, 2))
for i in range(5):
nn.weights[i, 0] = 0.3 * cos(i*pi/4)
nn.weights[i, 1] = 0.3 * sin(i*pi/4)
# We use these lists to track the variation of each neuron:
wlog = [ [ nn.weights[0] ],
[ nn.weights[1] ],
[ nn.weights[2] ],
[ nn.weights[3] ],
[ nn.weights[4] ] ]
# Here we feed and update the network. We could use the ``train`` method, but
# we want to track the weights:
for x in xs:
y = nn(x)
nn.learn(x)
wlog[y].append(array(nn.weights[y]))
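# (For intuition only, assuming the classical Kohonen rule rather than the
# exact implementation inside peach's SOM class: the call nn.learn(x) moves
# the winning weight vector a step towards the presented example,
#
#   w_winner = w_winner + lr * (x - w_winner)
#
# possibly also dragging neighbouring units along and decaying lr over time,
# which is why each weight trace plotted below drifts into its cluster's
# centre.)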
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of the synaptic weights. The plot is saved in
# the file ``self-organizing-maps.png``.
try:
from matplotlib import *
from matplotlib.pylab import *
figure(1).set_size_inches(8, 4)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.hold(True)
for x in xs:
plot( [x[0]], [x[1]], 'ko')
for w in wlog:
w = array(w[1:])
plot( w[:, 0], w[:, 1], '-x')
savefig("self-organizing-maps.png")
except ImportError:
print "After %d iterations:" % (train_size,)
print nn.weights
|
PaulGrimal/peach
|
tutorial/neural-networks/self-organizing-maps.py
|
Python
|
lgpl-2.1
| 2,858
|
[
"Gaussian",
"NEURON"
] |
f3b54befe61de6c59a12f7295065e8b8cfa8b6eaa1c0da55024d4e5723030722
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.clustering import GaussianMixture
# $example off$
from pyspark.sql import SparkSession
"""
A simple example demonstrating Gaussian Mixture Model (GMM).
Run with:
bin/spark-submit examples/src/main/python/ml/gaussian_mixture_example.py
"""
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("PythonGuassianMixtureExample")\
.getOrCreate()
# $example on$
# loads data
dataset = spark.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")
gmm = GaussianMixture().setK(2)
model = gmm.fit(dataset)
print("Gaussians: ")
model.gaussiansDF.show()
# $example off$
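# Not part of the original example: a hedged sketch of how one could further
# inspect the fitted model, using the GaussianMixtureModel attributes exposed
# by pyspark.ml (mixture weights and per-row cluster assignments):
#   print(model.weights)
#   model.transform(dataset).select("features", "prediction").show()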
spark.stop()
|
mrchristine/spark-examples-dbc
|
src/main/python/ml/gaussian_mixture_example.py
|
Python
|
apache-2.0
| 1,522
|
[
"Gaussian"
] |
d2661a7e43ca8904cad4361a58d58366c67dede0368cd9ec97fa2101e73f278d
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'EphysProp.nlex_id'
db.add_column(u'neuroelectro_ephysprop', 'nlex_id',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'EphysProp.nlex_id'
db.delete_column(u'neuroelectro_ephysprop', 'nlex_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'neuroelectro.api': {
'Meta': {'object_name': 'API'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'neuroelectro.article': {
'Meta': {'object_name': 'Article'},
'abstract': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'}),
'author_list_str': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.Author']", 'null': 'True', 'symmetrical': 'False'}),
'full_text_link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Journal']", 'null': 'True'}),
'pmid': ('django.db.models.fields.IntegerField', [], {}),
'pub_year': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'substances': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.Substance']", 'null': 'True', 'symmetrical': 'False'}),
'suggester': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'terms': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.MeshTerm']", 'null': 'True', 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'neuroelectro.articlefulltext': {
'Meta': {'object_name': 'ArticleFullText'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']"}),
'full_text_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'neuroelectro.articlefulltextstat': {
'Meta': {'object_name': 'ArticleFullTextStat'},
'article_full_text': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.ArticleFullText']"}),
'data_table_ephys_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata_human_assigned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'metadata_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'methods_tag_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'neuron_article_map_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'num_unique_ephys_concept_maps': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.articlemetadatamap': {
'Meta': {'object_name': 'ArticleMetaDataMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.MetaData']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'})
},
u'neuroelectro.articlesummary': {
'Meta': {'object_name': 'ArticleSummary'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']"}),
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_neurons': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.author': {
'Meta': {'object_name': 'Author'},
'first': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'last': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'middle': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
u'neuroelectro.brainregion': {
'Meta': {'object_name': 'BrainRegion'},
'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'allenid': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isallen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'treedepth': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.contvalue': {
'Meta': {'object_name': 'ContValue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_range': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'mean': ('django.db.models.fields.FloatField', [], {}),
'min_range': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'n': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'stderr': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'stdev': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'neuroelectro.datasource': {
'Meta': {'object_name': 'DataSource'},
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.DataTable']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_submission': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.UserSubmission']", 'null': 'True'}),
'user_upload': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.UserUpload']", 'null': 'True'})
},
u'neuroelectro.datatable': {
'Meta': {'object_name': 'DataTable'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'needs_expert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'table_html': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'table_text': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'})
},
u'neuroelectro.ephysconceptmap': {
'Meta': {'object_name': 'EphysConceptMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.EphysProp']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.DataSource']"}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'neuroelectro.ephysprop': {
'Meta': {'object_name': 'EphysProp'},
'definition': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.EphysPropSyn']", 'symmetrical': 'False'}),
'units': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Unit']", 'null': 'True'})
},
u'neuroelectro.ephyspropsummary': {
'Meta': {'object_name': 'EphysPropSummary'},
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.EphysProp']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_neurons': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'value_mean_articles': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_mean_neurons': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_sd_articles': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_sd_neurons': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'neuroelectro.ephyspropsyn': {
'Meta': {'object_name': 'EphysPropSyn'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'neuroelectro.insituexpt': {
'Meta': {'object_name': 'InSituExpt'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageseriesid': ('django.db.models.fields.IntegerField', [], {}),
'plane': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'regionexprs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.RegionExpr']", 'null': 'True', 'symmetrical': 'False'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'neuroelectro.institution': {
'Meta': {'object_name': 'Institution'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
u'neuroelectro.journal': {
'Meta': {'object_name': 'Journal'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Publisher']", 'null': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
u'neuroelectro.mailinglistentry': {
'Meta': {'object_name': 'MailingListEntry'},
'comments': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
},
u'neuroelectro.meshterm': {
'Meta': {'object_name': 'MeshTerm'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
u'neuroelectro.metadata': {
'Meta': {'object_name': 'MetaData'},
'cont_value': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.ContValue']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
u'neuroelectro.neuron': {
'Meta': {'object_name': 'Neuron'},
'added_by': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.BrainRegion']", 'null': 'True', 'symmetrical': 'False'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.NeuronSyn']", 'null': 'True', 'symmetrical': 'False'})
},
u'neuroelectro.neuronarticlemap': {
'Meta': {'object_name': 'NeuronArticleMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Neuron']"}),
'num_mentions': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.neuronconceptmap': {
'Meta': {'object_name': 'NeuronConceptMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Neuron']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.DataSource']"}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'neuroelectro.neuronephysdatamap': {
'Meta': {'object_name': 'NeuronEphysDataMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ephys_concept_map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.EphysConceptMap']"}),
'err': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'metadata': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.MetaData']", 'symmetrical': 'False'}),
'n': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'neuron_concept_map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.NeuronConceptMap']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.DataSource']"}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'val': ('django.db.models.fields.FloatField', [], {}),
'val_norm': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'neuroelectro.neuronephyssummary': {
'Meta': {'object_name': 'NeuronEphysSummary'},
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.EphysProp']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Neuron']"}),
'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'value_mean': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_sd': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'neuroelectro.neuronsummary': {
'Meta': {'object_name': 'NeuronSummary'},
'cluster_xval': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'cluster_yval': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Neuron']"}),
'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_ephysprops': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.neuronsyn': {
'Meta': {'object_name': 'NeuronSyn'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'neuroelectro.protein': {
'Meta': {'object_name': 'Protein'},
'allenid': ('django.db.models.fields.IntegerField', [], {}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True'}),
'entrezid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'gene': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_situ_expts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.InSituExpt']", 'null': 'True', 'symmetrical': 'False'}),
'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
},
u'neuroelectro.proteinsyn': {
'Meta': {'object_name': 'ProteinSyn'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'neuroelectro.publisher': {
'Meta': {'object_name': 'Publisher'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'neuroelectro.regionexpr': {
'Meta': {'object_name': 'RegionExpr'},
'expr_density': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'expr_energy': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'expr_energy_cv': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': u"orm['neuroelectro.BrainRegion']"})
},
u'neuroelectro.species': {
'Meta': {'object_name': 'Species'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'neuroelectro.substance': {
'Meta': {'object_name': 'Substance'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
u'neuroelectro.unit': {
'Meta': {'object_name': 'Unit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'prefix': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
u'neuroelectro.user': {
'Meta': {'object_name': 'User', '_ormbases': [u'auth.User']},
'assigned_neurons': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.Neuron']", 'null': 'True', 'symmetrical': 'False'}),
'institution': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Institution']", 'null': 'True'}),
'is_curator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lab_head': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'lab_website_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
u'neuroelectro.usersubmission': {
'Meta': {'object_name': 'UserSubmission'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']", 'null': 'True'}),
'data': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']"})
},
u'neuroelectro.userupload': {
'Meta': {'object_name': 'UserUpload'},
'data': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.FilePathField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']"})
}
}
complete_apps = ['neuroelectro']
|
neuroelectro/neuroelectro_org
|
neuroelectro/south_migrations/0068_auto__add_field_ephysprop_nlex_id.py
|
Python
|
gpl-2.0
| 31,214
|
[
"NEURON"
] |
42b21d10c48bab87d0f460a8a02dccb9e0533d56656f15f8ac49638b71eba1e4
|
# Principal Component Analysis Code:
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_II import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 123:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
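# Hedged usage sketch (added note, not part of the original script; variable
# names below are illustrative):
#   eigvec, eigval, mu, M, Mcov = pca(Fmat_original)
#   perc = cumsum(eigval) / sum(eigval)   # cumulative variance explained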
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Fixed']*35 + ['Movable']*35 + ['Fixed']*35 + ['Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
    # Reduced eigenvector matrix according to the highest eigenvalues (considering the first 20, based on the figure above)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
    # Normalizes the data set with respect to its variance (not an integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
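# Hedged note (added): result() projects the centered data B onto the first
# num_PC principal components; e.g. the illustrative call below returns an
# array of shape (n_samples, 2):
#   Proj2 = result(eigvec_total, eigval_total, mean_data_total, B, C, 2)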
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
    #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted for by each of the corresponding eigenvectors. The x axis shows the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Scaled_Features/best_kNN_PCA/2_categories/test11_cross_validate_categories_mov_fixed_1200ms_scaled_method_ii.py
|
Python
|
mit
| 5,012
|
[
"Mayavi"
] |
6e08c9b493863857a94f3ba35134a60f6a5ed09c7a3f638e975c711bc4e7feba
|
## Copyright (c) 2020 The WebM project authors. All Rights Reserved.
##
## Use of this source code is governed by a BSD-style license
## that can be found in the LICENSE file in the root of the source
## tree. An additional intellectual property rights grant can be found
## in the file PATENTS. All contributing project authors may
## be found in the AUTHORS file in the root of the source tree.
##
# coding: utf-8
import numpy as np
import numpy.linalg as LA
from scipy.ndimage.filters import gaussian_filter
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import inv
from MotionEST import MotionEST
"""Anandan Model"""
class Anandan(MotionEST):
"""
constructor:
cur_f: current frame
ref_f: reference frame
blk_sz: block size
  beta: smoothness constraint weight
k1,k2,k3: confidence coefficients
max_iter: maximum number of iterations
"""
def __init__(self, cur_f, ref_f, blk_sz, beta, k1, k2, k3, max_iter=100):
super(Anandan, self).__init__(cur_f, ref_f, blk_sz)
self.levels = int(np.log2(blk_sz))
self.intensity_hierarchy()
self.c_maxs = []
self.c_mins = []
self.e_maxs = []
self.e_mins = []
for l in xrange(self.levels + 1):
c_max, c_min, e_max, e_min = self.get_curvature(self.cur_Is[l])
self.c_maxs.append(c_max)
self.c_mins.append(c_min)
self.e_maxs.append(e_max)
self.e_mins.append(e_min)
self.beta = beta
self.k1, self.k2, self.k3 = k1, k2, k3
self.max_iter = max_iter
"""
build intensity hierarchy
"""
def intensity_hierarchy(self):
level = 0
self.cur_Is = []
self.ref_Is = []
    #build each level's intensity by using Gaussian filters
while level <= self.levels:
cur_I = gaussian_filter(self.cur_yuv[:, :, 0], sigma=(2**level) * 0.56)
ref_I = gaussian_filter(self.ref_yuv[:, :, 0], sigma=(2**level) * 0.56)
self.ref_Is.append(ref_I)
self.cur_Is.append(cur_I)
level += 1
"""
get curvature of each block
"""
def get_curvature(self, I):
c_max = np.zeros((self.num_row, self.num_col))
c_min = np.zeros((self.num_row, self.num_col))
e_max = np.zeros((self.num_row, self.num_col, 2))
e_min = np.zeros((self.num_row, self.num_col, 2))
for r in xrange(self.num_row):
for c in xrange(self.num_col):
h11, h12, h21, h22 = 0, 0, 0, 0
for i in xrange(r * self.blk_sz, r * self.blk_sz + self.blk_sz):
for j in xrange(c * self.blk_sz, c * self.blk_sz + self.blk_sz):
if 0 <= i < self.height - 1 and 0 <= j < self.width - 1:
Ix = I[i][j + 1] - I[i][j]
Iy = I[i + 1][j] - I[i][j]
h11 += Iy * Iy
h12 += Ix * Iy
h21 += Ix * Iy
h22 += Ix * Ix
U, S, _ = LA.svd(np.array([[h11, h12], [h21, h22]]))
c_max[r, c], c_min[r, c] = S[0], S[1]
e_max[r, c] = U[:, 0]
e_min[r, c] = U[:, 1]
return c_max, c_min, e_max, e_min
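  # Hedged note (added comment): the 2x2 matrix [[h11, h12], [h21, h22]]
  # accumulated above is the sum of outer products of the intensity gradients
  # over the block; its singular values c_max/c_min feed the confidence
  # weights used in smooth() below.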
"""
get ssd of motion vector:
cur_I: current intensity
ref_I: reference intensity
center: current position
mv: motion vector
"""
def get_ssd(self, cur_I, ref_I, center, mv):
ssd = 0
for r in xrange(int(center[0]), int(center[0]) + self.blk_sz):
for c in xrange(int(center[1]), int(center[1]) + self.blk_sz):
if 0 <= r < self.height and 0 <= c < self.width:
tr, tc = r + int(mv[0]), c + int(mv[1])
if 0 <= tr < self.height and 0 <= tc < self.width:
ssd += (ref_I[tr, tc] - cur_I[r, c])**2
else:
ssd += cur_I[r, c]**2
return ssd
"""
get region match of level l
l: current level
  last_mvs: matching results of the last level
  radius: movement radius
"""
def region_match(self, l, last_mvs, radius):
mvs = np.zeros((self.num_row, self.num_col, 2))
min_ssds = np.zeros((self.num_row, self.num_col))
for r in xrange(self.num_row):
for c in xrange(self.num_col):
center = np.array([r * self.blk_sz, c * self.blk_sz])
#use overlap hierarchy policy
init_mvs = []
if last_mvs is None:
init_mvs = [np.array([0, 0])]
else:
for i, j in {(r, c), (r, c + 1), (r + 1, c), (r + 1, c + 1)}:
if 0 <= i < last_mvs.shape[0] and 0 <= j < last_mvs.shape[1]:
init_mvs.append(last_mvs[i, j])
        #use last matching results as the start position at the current level
min_ssd = None
min_mv = None
for init_mv in init_mvs:
for i in xrange(-2, 3):
for j in xrange(-2, 3):
mv = init_mv + np.array([i, j]) * radius
ssd = self.get_ssd(self.cur_Is[l], self.ref_Is[l], center, mv)
if min_ssd is None or ssd < min_ssd:
min_ssd = ssd
min_mv = mv
min_ssds[r, c] = min_ssd
mvs[r, c] = min_mv
return mvs, min_ssds
"""
smooth motion field based on neighbor constraint
uvs: current estimation
mvs: matching results
min_ssds: minimum ssd of matching results
l: current level
"""
def smooth(self, uvs, mvs, min_ssds, l):
sm_uvs = np.zeros((self.num_row, self.num_col, 2))
c_max = self.c_maxs[l]
c_min = self.c_mins[l]
e_max = self.e_maxs[l]
e_min = self.e_mins[l]
for r in xrange(self.num_row):
for c in xrange(self.num_col):
w_max = c_max[r, c] / (
self.k1 + self.k2 * min_ssds[r, c] + self.k3 * c_max[r, c])
w_min = c_min[r, c] / (
self.k1 + self.k2 * min_ssds[r, c] + self.k3 * c_min[r, c])
w = w_max * w_min / (w_max + w_min + 1e-6)
if w < 0:
w = 0
avg_uv = np.array([0.0, 0.0])
for i, j in {(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)}:
if 0 <= i < self.num_row and 0 <= j < self.num_col:
avg_uv += 0.25 * uvs[i, j]
sm_uvs[r, c] = (w * w * mvs[r, c] + self.beta * avg_uv) / (
self.beta + w * w)
return sm_uvs
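  # Hedged note (added comment): the update above is the fixed-point iteration
  #   u_new = (w^2 * mv + beta * avg_uv) / (beta + w^2)
  # where w is the curvature/SSD confidence and avg_uv is the 0.25-weighted
  # sum of the available neighbouring estimates.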
"""
motion field estimation
"""
def motion_field_estimation(self):
last_mvs = None
for l in xrange(self.levels, -1, -1):
mvs, min_ssds = self.region_match(l, last_mvs, 2**l)
uvs = np.zeros(mvs.shape)
for _ in xrange(self.max_iter):
uvs = self.smooth(uvs, mvs, min_ssds, l)
last_mvs = uvs
for r in xrange(self.num_row):
for c in xrange(self.num_col):
self.mf[r, c] = uvs[r, c]
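# Hedged usage sketch (added; frame arrays and parameter values are hypothetical):
#   est = Anandan(cur_frame, ref_frame, blk_sz=8, beta=1.0, k1=1.0, k2=1e-3, k3=1.0)
#   est.motion_field_estimation()
#   motion_field = est.mf   # per-block motion vectors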
|
youtube/cobalt
|
third_party/libvpx/tools/3D-Reconstruction/MotionEST/Anandan.py
|
Python
|
bsd-3-clause
| 6,520
|
[
"Gaussian"
] |
1bd75a0b3e78d65046b0060d0601e44eeed90c3cc1af91b16599909702c26de0
|
import pdb
import itertools as it
def scinot(string):
"""Takes a string in MCNP scientific notation (without 'E' character), and returns a string of standard scientific notation."""
"""If there is no '+' or '-' character in string, returns it as it is."""
"""If the argument is not string, returns the argument"""
if type(string) != str:
return string
else:
retstr = string[0]
for char in string[1:]:
if ((char == '-')|(char == '+')):
retstr += 'E' + char
else:
retstr += char
return retstr
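# Hedged examples (added for illustration; the inputs are made up):
#   scinot('1.2345-6')  -> '1.2345E-6'
#   scinot('2.50+03')   -> '2.50E+03'
#   scinot('17')        -> '17'   # no sign character, returned unchanged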
def readit(filename):
data = open(filename, 'r+')
line = data.readline()
i=1
event_log = {}
while line != '':
if 'event' in line:
line = data.readline()
line = data.readline()
line = data.readline()
line = data.readline()
event = []
while (line.find('event') == -1) & (line.find('summary') == -1):
event.append(line)
line = data.readline()
if event != []:
event_log.update({'particle'+str(i) : event})
i=i+1
else: line = data.readline()
return event_log
def structure(filename):
particle_data = readit(filename)
event_log = {}
for elog in particle_data:
event_log[elog] = []
for line in particle_data[elog]:
dp = line.split()
event = {}
event.update({'int': scinot(dp[0])})
event.update({'cell': scinot(dp[1])})
event.update({'x': scinot(dp[2])})
event.update({'y': scinot(dp[3])})
event.update({'z': scinot(dp[4])})
event.update({'u': scinot(dp[5])})
event.update({'v': scinot(dp[6])})
event.update({'w': scinot(dp[7])})
event.update({'erg': scinot(dp[8])})
event.update({'wgt': scinot(dp[9])})
event_log[elog].append(event)
return(event_log)
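# Hedged usage sketch ('ptrac_dump.txt' is a hypothetical MCNP event-log file):
#   histories = structure('ptrac_dump.txt')   # {'particle1': [event dicts], ...}
#   vtk_builder(histories)                    # writes one .vtk file per history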
def vtk_file(events, event_title):
file_name = event_title + ".vtk"
vtk_file = open(file_name,"w+")
num_events = 0
for event in events:
if event["cell"] != events[events.index(event)-1]["cell"]:
num_events += 1
vtk_file.write("# vtk DataFile Version 3.0 \nvtk output\nASCII\nDATASET POLYDATA\nPOINTS " + str(num_events) + " float\n")
for event in events:
if event["cell"] != events[events.index(event)-1]["cell"]:
vtk_file.write(event["x"] + " " + event["y"] + " " + event["z"] + "\n")
num_lines = num_events - 1
vtk_file.write("LINES " + str(num_lines) + " " + str(3*num_lines) + "\n")
for i in range(num_events-1):
vtk_file.write("2 " + str(i) + " " + str(i+1) + "\n")
vtk_file.write("CELL_DATA 1\n")
vtk_file.write("POINT_DATA " + str(num_events) + "\n")
vtk_file.write("scalars pointvar float\nLOOKUP_TABLE default\n")
vtk_file.write("1.2 1.3 1.4 1.5")
def vtk_builder(readable):
for event_titles in readable:
vtk_file(readable[event_titles], event_titles)
|
haupt235/waldo
|
modules.py
|
Python
|
mit
| 2,660
|
[
"VTK"
] |
e5427902e17f0b3a259ae077c12324694e09ccd3e168602b37187520de3b0300
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Brian Coca <bcoca@ansible.com>
# (c) 2017, Adam Miller <admiller@redhat.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'
}
DOCUMENTATION = '''
module: sysvinit
author:
- "Ansible Core Team"
version_added: "2.6"
short_description: Manage SysV services.
description:
- Controls services on target hosts that use the SysV init system.
options:
name:
required: true
description:
- Name of the service.
aliases: ['service']
state:
choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
Not all init scripts support C(restarted) nor C(reloaded) natively, so these will both trigger a stop and start as needed.
enabled:
type: bool
description:
- Whether the service should start on boot. B(At least one of state and enabled are required.)
sleep:
default: 1
description:
- If the service is being C(restarted) or C(reloaded) then sleep this many seconds between the stop and start command.
This helps to workaround badly behaving services.
pattern:
description:
- A substring to look for as would be found in the output of the I(ps) command as a stand-in for a status result.
- If the string is found, the service will be assumed to be running.
- "This option is mainly for use with init scripts that don't support the 'status' option."
runlevels:
description:
- The runlevels this script should be enabled/disabled from.
- Use this to override the defaults set by the package or init script itself.
arguments:
description:
- Additional arguments provided on the command line that some init scripts accept.
aliases: [ 'args' ]
daemonize:
type: bool
description:
- Have the module daemonize as the service itself might not do so properly.
- This is useful with badly written init scripts or daemons, which
commonly manifests as the task hanging as it is still holding the
tty or the service dying when the task is over as the connection
closes the session.
default: no
notes:
- One option other than name is required.
requirements:
- That the service managed has a corresponding init script.
'''
EXAMPLES = '''
- name: make sure apache2 is started
sysvinit:
name: apache2
state: started
enabled: yes
- name: make sure apache2 is started on runlevels 3 and 5
sysvinit:
name: apache2
state: started
enabled: yes
runlevels:
- 3
- 5
'''
RETURN = r'''
results:
description: results from actions taken
returned: always
type: complex
sample: {
"attempts": 1,
"changed": true,
"name": "apache2",
"status": {
"enabled": {
"changed": true,
"rc": 0,
"stderr": "",
"stdout": ""
},
"stopped": {
"changed": true,
"rc": 0,
"stderr": "",
"stdout": "Stopping web server: apache2.\n"
}
}
}
'''
import re
from time import sleep
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.service import sysv_is_enabled, get_sysv_script, sysv_exists, fail_if_missing, get_ps, daemonize
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='str', aliases=['service']),
state=dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'),
enabled=dict(type='bool'),
sleep=dict(type='int', default=1),
pattern=dict(type='str'),
arguments=dict(type='str', aliases=['args']),
runlevels=dict(type='list'),
daemonize=dict(type='bool', default=False),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled']],
)
name = module.params['name']
action = module.params['state']
enabled = module.params['enabled']
runlevels = module.params['runlevels']
pattern = module.params['pattern']
sleep_for = module.params['sleep']
rc = 0
out = err = ''
result = {
'name': name,
'changed': False,
'status': {}
}
# ensure service exists, get script name
fail_if_missing(module, sysv_exists(name), name)
script = get_sysv_script(name)
# locate binaries for service management
paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin']
binaries = ['chkconfig', 'update-rc.d', 'insserv', 'service']
# Keeps track of the service status for various runlevels because we can
# operate on multiple runlevels at once
runlevel_status = {}
location = {}
for binary in binaries:
location[binary] = module.get_bin_path(binary, opt_dirs=paths)
# figure out enable status
if runlevels:
for rl in runlevels:
runlevel_status.setdefault(rl, {})
runlevel_status[rl]["enabled"] = sysv_is_enabled(name, runlevel=rl)
else:
runlevel_status["enabled"] = sysv_is_enabled(name)
# figure out started status, everyone does it different!
is_started = False
worked = False
# user knows other methods fail and supplied pattern
if pattern:
worked = is_started = get_ps(module, pattern)
else:
if location.get('service'):
            # standard tool that has been 'de-standardized' by reimplementation in other OS/distros
cmd = '%s %s status' % (location['service'], name)
elif script:
# maybe script implements status (not LSB)
cmd = '%s status' % script
else:
module.fail_json(msg="Unable to determine service status")
(rc, out, err) = module.run_command(cmd)
if not rc == -1:
# special case
if name == 'iptables' and "ACCEPT" in out:
worked = True
is_started = True
# check output messages, messy but sadly more reliable than rc
if not worked and out.count('\n') <= 1:
cleanout = out.lower().replace(name.lower(), '')
for stopped in ['stop', 'is dead ', 'dead but ', 'could not access pid file', 'inactive']:
if stopped in cleanout:
worked = True
break
if not worked:
for started_status in ['run', 'start', 'active']:
if started_status in cleanout and "not " not in cleanout:
is_started = True
worked = True
break
# hope rc is not lying to us, use often used 'bad' returns
if not worked and rc in [1, 2, 3, 4, 69]:
worked = True
if not worked:
# hail mary
if rc == 0:
is_started = True
worked = True
# ps for luck, can only assure positive match
elif get_ps(module, name):
is_started = True
worked = True
module.warn("Used ps output to match service name and determine it is up, this is very unreliable")
if not worked:
module.warn("Unable to determine if service is up, assuming it is down")
###########################################################################
# BEGIN: Enable/Disable
result['status'].setdefault('enabled', {})
result['status']['enabled']['changed'] = False
result['status']['enabled']['rc'] = None
result['status']['enabled']['stdout'] = None
result['status']['enabled']['stderr'] = None
if runlevels:
result['status']['enabled']['runlevels'] = runlevels
for rl in runlevels:
if enabled != runlevel_status[rl]["enabled"]:
result['changed'] = True
result['status']['enabled']['changed'] = True
if not module.check_mode and result['changed']:
# Perform enable/disable here
if enabled:
if location.get('update-rc.d'):
(rc, out, err) = module.run_command("%s %s enable %s" % (location['update-rc.d'], name, ' '.join(runlevels)))
elif location.get('chkconfig'):
(rc, out, err) = module.run_command("%s --level %s %s on" % (location['chkconfig'], ''.join(runlevels), name))
else:
if location.get('update-rc.d'):
(rc, out, err) = module.run_command("%s %s disable %s" % (location['update-rc.d'], name, ' '.join(runlevels)))
elif location.get('chkconfig'):
(rc, out, err) = module.run_command("%s --level %s %s off" % (location['chkconfig'], ''.join(runlevels), name))
else:
if enabled is not None and enabled != runlevel_status["enabled"]:
result['changed'] = True
result['status']['enabled']['changed'] = True
if not module.check_mode and result['changed']:
# Perform enable/disable here
if enabled:
if location.get('update-rc.d'):
(rc, out, err) = module.run_command("%s %s defaults" % (location['update-rc.d'], name))
elif location.get('chkconfig'):
(rc, out, err) = module.run_command("%s %s on" % (location['chkconfig'], name))
else:
if location.get('update-rc.d'):
(rc, out, err) = module.run_command("%s %s disable" % (location['update-rc.d'], name))
elif location.get('chkconfig'):
(rc, out, err) = module.run_command("%s %s off" % (location['chkconfig'], name))
    # Assigned above, might be useful if something goes sideways
if not module.check_mode and result['status']['enabled']['changed']:
result['status']['enabled']['rc'] = rc
result['status']['enabled']['stdout'] = out
result['status']['enabled']['stderr'] = err
rc, out, err = None, None, None
if "illegal runlevel specified" in result['status']['enabled']['stderr']:
module.fail_json(msg="Illegal runlevel specified for enable operation on service %s" % name, **result)
# END: Enable/Disable
###########################################################################
###########################################################################
# BEGIN: state
result['status'].setdefault(module.params['state'], {})
result['status'][module.params['state']]['changed'] = False
result['status'][module.params['state']]['rc'] = None
result['status'][module.params['state']]['stdout'] = None
result['status'][module.params['state']]['stderr'] = None
if action:
action = re.sub(r'p?ed$', '', action.lower())
def runme(doit):
args = module.params['arguments']
cmd = "%s %s %s" % (script, doit, "" if args is None else args)
# how to run
if module.params['daemonize']:
(rc, out, err) = daemonize(cmd)
else:
(rc, out, err) = module.run_command(cmd)
# FIXME: ERRORS
if rc != 0:
module.fail_json(msg="Failed to %s service: %s" % (action, name), rc=rc, stdout=out, stderr=err)
return (rc, out, err)
if action == 'restart':
result['changed'] = True
result['status'][module.params['state']]['changed'] = True
if not module.check_mode:
# cannot rely on existing 'restart' in init script
for dothis in ['stop', 'start']:
(rc, out, err) = runme(dothis)
if sleep_for:
sleep(sleep_for)
elif is_started != (action == 'start'):
result['changed'] = True
result['status'][module.params['state']]['changed'] = True
if not module.check_mode:
rc, out, err = runme(action)
elif is_started == (action == 'stop'):
result['changed'] = True
result['status'][module.params['state']]['changed'] = True
if not module.check_mode:
rc, out, err = runme(action)
if not module.check_mode and result['status'][module.params['state']]['changed']:
result['status'][module.params['state']]['rc'] = rc
result['status'][module.params['state']]['stdout'] = out
result['status'][module.params['state']]['stderr'] = err
rc, out, err = None, None, None
# END: state
###########################################################################
module.exit_json(**result)
if __name__ == '__main__':
main()
|
roadmapper/ansible
|
lib/ansible/modules/system/sysvinit.py
|
Python
|
gpl-3.0
| 13,491
|
[
"Brian"
] |
ad907beda49bec1e69751e5c3321bd8cf457c28e570063f8f7298b1a7db13824
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from itertools import count
import re, os, cStringIO, time, cgi, urlparse
from xml.dom import minidom as dom
from xml.sax.handler import ErrorHandler, feature_validation
from xml.dom.pulldom import SAX2DOM
from xml.sax import make_parser
from xml.sax.xmlreader import InputSource
from twisted.python import htmlizer
from twisted.python.filepath import FilePath
from twisted.web import domhelpers
import process, latex, indexer, numberer, htmlbook
# relative links to html files
def fixLinks(document, ext):
"""
Rewrite links to XHTML lore input documents so they point to lore XHTML
output documents.
Any node with an C{href} attribute which does not contain a value starting
with C{http}, C{https}, C{ftp}, or C{mailto} and which does not have a
C{class} attribute of C{absolute} or which contains C{listing} and which
    does point to a URL ending with C{html} will have that attribute value
rewritten so that the filename extension is C{ext} instead of C{html}.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@type ext: C{str}
@param ext: The extension to use when selecting an output file name. This
replaces the extension of the input file name.
@return: C{None}
"""
supported_schemes=['http', 'https', 'ftp', 'mailto']
for node in domhelpers.findElementsWithAttribute(document, 'href'):
href = node.getAttribute("href")
if urlparse.urlparse(href)[0] in supported_schemes:
continue
if node.getAttribute("class") == "absolute":
continue
if node.getAttribute("class").find('listing') != -1:
continue
# This is a relative link, so it should be munged.
if href.endswith('html') or href[:href.rfind('#')].endswith('html'):
fname, fext = os.path.splitext(href)
if '#' in fext:
fext = ext+'#'+fext.split('#', 1)[1]
else:
fext = ext
node.setAttribute("href", fname + fext)
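# Illustrative effect (added note): with ext='.xhtml', a relative link such as
# href="intro.html#setup" is rewritten to href="intro.xhtml#setup"; nodes with
# class="absolute" or a listing class, and http/https/ftp/mailto URLs, are
# left untouched.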
def addMtime(document, fullpath):
"""
Set the last modified time of the given document.
@type document: A DOM Node or Document
@param document: The output template which defines the presentation of the
last modified time.
@type fullpath: C{str}
@param fullpath: The file name from which to take the last modified time.
@return: C{None}
"""
for node in domhelpers.findElementsWithAttribute(document, "class","mtime"):
txt = dom.Text()
txt.data = time.ctime(os.path.getmtime(fullpath))
node.appendChild(txt)
def _getAPI(node):
"""
Retrieve the fully qualified Python name represented by the given node.
The name is represented by one or two aspects of the node: the value of the
node's first child forms the end of the name. If the node has a C{base}
attribute, that attribute's value is prepended to the node's value, with
C{.} separating the two parts.
@rtype: C{str}
@return: The fully qualified Python name.
"""
base = ""
if node.hasAttribute("base"):
base = node.getAttribute("base") + "."
return base+node.childNodes[0].nodeValue
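# Illustrative example (added note): for a node such as
#   <code class="API" base="twisted.web">domhelpers</code>
# _getAPI returns "twisted.web.domhelpers"; without a base attribute it
# returns only the node's text.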
def fixAPI(document, url):
"""
Replace API references with links to API documentation.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@type url: C{str}
@param url: A string which will be interpolated with the fully qualified
Python name of any API reference encountered in the input document, the
result of which will be used as a link to API documentation for that name
in the output document.
@return: C{None}
"""
# API references
for node in domhelpers.findElementsWithAttribute(document, "class", "API"):
fullname = _getAPI(node)
anchor = dom.Element('a')
anchor.setAttribute('href', url % (fullname,))
anchor.setAttribute('title', fullname)
while node.childNodes:
child = node.childNodes[0]
node.removeChild(child)
anchor.appendChild(child)
node.appendChild(anchor)
if node.hasAttribute('base'):
node.removeAttribute('base')
def fontifyPython(document):
"""
Syntax color any node in the given document which contains a Python source
listing.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@return: C{None}
"""
def matcher(node):
return (node.nodeName == 'pre' and node.hasAttribute('class') and
node.getAttribute('class') == 'python')
for node in domhelpers.findElements(document, matcher):
fontifyPythonNode(node)
def fontifyPythonNode(node):
"""
Syntax color the given node containing Python source code.
The node must have a parent.
@return: C{None}
"""
oldio = cStringIO.StringIO()
latex.getLatexText(node, oldio.write,
entities={'lt': '<', 'gt': '>', 'amp': '&'})
oldio = cStringIO.StringIO(oldio.getvalue().strip()+'\n')
howManyLines = len(oldio.getvalue().splitlines())
newio = cStringIO.StringIO()
htmlizer.filter(oldio, newio, writer=htmlizer.SmallerHTMLWriter)
lineLabels = _makeLineNumbers(howManyLines)
newel = dom.parseString(newio.getvalue()).documentElement
newel.setAttribute("class", "python")
node.parentNode.replaceChild(newel, node)
newel.insertBefore(lineLabels, newel.firstChild)
def addPyListings(document, dir):
"""
Insert Python source listings into the given document from files in the
given directory based on C{py-listing} nodes.
Any node in C{document} with a C{class} attribute set to C{py-listing} will
have source lines taken from the file named in that node's C{href}
attribute (searched for in C{dir}) inserted in place of that node.
If a node has a C{skipLines} attribute, its value will be parsed as an
integer and that many lines will be skipped at the beginning of the source
file.
@type document: A DOM Node or Document
@param document: The document within which to make listing replacements.
@type dir: C{str}
@param dir: The directory in which to find source files containing the
referenced Python listings.
@return: C{None}
"""
for node in domhelpers.findElementsWithAttribute(document, "class",
"py-listing"):
filename = node.getAttribute("href")
outfile = cStringIO.StringIO()
lines = map(str.rstrip, open(os.path.join(dir, filename)).readlines())
skip = node.getAttribute('skipLines') or 0
lines = lines[int(skip):]
howManyLines = len(lines)
data = '\n'.join(lines)
data = cStringIO.StringIO(_removeLeadingTrailingBlankLines(data))
htmlizer.filter(data, outfile, writer=htmlizer.SmallerHTMLWriter)
sourceNode = dom.parseString(outfile.getvalue()).documentElement
sourceNode.insertBefore(_makeLineNumbers(howManyLines), sourceNode.firstChild)
_replaceWithListing(node, sourceNode.toxml(), filename, "py-listing")
def _makeLineNumbers(howMany):
"""
Return an element which will render line numbers for a source listing.
@param howMany: The number of lines in the source listing.
@type howMany: C{int}
@return: An L{dom.Element} which can be added to the document before
the source listing to add line numbers to it.
"""
# Figure out how many digits wide the widest line number label will be.
width = len(str(howMany))
# Render all the line labels with appropriate padding
labels = ['%*d' % (width, i) for i in range(1, howMany + 1)]
# Create a p element with the right style containing the labels
p = dom.Element('p')
p.setAttribute('class', 'py-linenumber')
t = dom.Text()
t.data = '\n'.join(labels) + '\n'
p.appendChild(t)
return p
def _replaceWithListing(node, val, filename, class_):
captionTitle = domhelpers.getNodeText(node)
if captionTitle == os.path.basename(filename):
captionTitle = 'Source listing'
text = ('<div class="%s">%s<div class="caption">%s - '
'<a href="%s"><span class="filename">%s</span></a></div></div>' %
(class_, val, captionTitle, filename, filename))
newnode = dom.parseString(text).documentElement
node.parentNode.replaceChild(newnode, node)
def _removeLeadingBlankLines(lines):
"""
    Removes leading blank lines from C{lines} and returns a list containing the
    remaining lines.
    @param lines: Input lines.
    @type lines: C{list}
    @rtype: C{list}
    @return: List of lines.
"""
ret = []
for line in lines:
if ret or line.strip():
ret.append(line)
return ret
def _removeLeadingTrailingBlankLines(inputString):
"""
Splits input string C{inputString} into lines, strips leading and trailing
blank lines, and returns a string with all lines joined, each line
separated by a newline character.
@param inputString: The input string.
@type inputString: L{str}
@rtype: L{str}
@return: String containing normalized lines.
"""
lines = _removeLeadingBlankLines(inputString.split('\n'))
lines.reverse()
lines = _removeLeadingBlankLines(lines)
lines.reverse()
return '\n'.join(lines) + '\n'
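# Illustrative example (added note):
#   _removeLeadingTrailingBlankLines('\n\nfoo\nbar\n\n') returns 'foo\nbar\n';
#   interior blank lines are preserved.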
def addHTMLListings(document, dir):
"""
Insert HTML source listings into the given document from files in the given
directory based on C{html-listing} nodes.
Any node in C{document} with a C{class} attribute set to C{html-listing}
will have source lines taken from the file named in that node's C{href}
attribute (searched for in C{dir}) inserted in place of that node.
@type document: A DOM Node or Document
@param document: The document within which to make listing replacements.
@type dir: C{str}
@param dir: The directory in which to find source files containing the
referenced HTML listings.
@return: C{None}
"""
for node in domhelpers.findElementsWithAttribute(document, "class",
"html-listing"):
filename = node.getAttribute("href")
val = ('<pre class="htmlsource">\n%s</pre>' %
cgi.escape(open(os.path.join(dir, filename)).read()))
_replaceWithListing(node, val, filename, "html-listing")
def addPlainListings(document, dir):
"""
Insert text listings into the given document from files in the given
directory based on C{listing} nodes.
Any node in C{document} with a C{class} attribute set to C{listing} will
have source lines taken from the file named in that node's C{href}
attribute (searched for in C{dir}) inserted in place of that node.
@type document: A DOM Node or Document
@param document: The document within which to make listing replacements.
@type dir: C{str}
@param dir: The directory in which to find source files containing the
referenced text listings.
@return: C{None}
"""
for node in domhelpers.findElementsWithAttribute(document, "class",
"listing"):
filename = node.getAttribute("href")
val = ('<pre>\n%s</pre>' %
cgi.escape(open(os.path.join(dir, filename)).read()))
_replaceWithListing(node, val, filename, "listing")
def getHeaders(document):
"""
Return all H2 and H3 nodes in the given document.
@type document: A DOM Node or Document
@rtype: C{list}
"""
return domhelpers.findElements(
document,
lambda n, m=re.compile('h[23]$').match: m(n.nodeName))
def generateToC(document):
"""
Create a table of contents for the given document.
@type document: A DOM Node or Document
@rtype: A DOM Node
@return: a Node containing a table of contents based on the headers of the
given document.
"""
subHeaders = None
headers = []
for element in getHeaders(document):
if element.tagName == 'h2':
subHeaders = []
headers.append((element, subHeaders))
elif subHeaders is None:
raise ValueError(
"No H3 element is allowed until after an H2 element")
else:
subHeaders.append(element)
auto = count().next
def addItem(headerElement, parent):
anchor = dom.Element('a')
name = 'auto%d' % (auto(),)
anchor.setAttribute('href', '#' + name)
text = dom.Text()
text.data = domhelpers.getNodeText(headerElement)
anchor.appendChild(text)
headerNameItem = dom.Element('li')
headerNameItem.appendChild(anchor)
parent.appendChild(headerNameItem)
anchor = dom.Element('a')
anchor.setAttribute('name', name)
headerElement.appendChild(anchor)
toc = dom.Element('ol')
for headerElement, subHeaders in headers:
addItem(headerElement, toc)
if subHeaders:
subtoc = dom.Element('ul')
toc.appendChild(subtoc)
for subHeaderElement in subHeaders:
addItem(subHeaderElement, subtoc)
return toc
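# Rough shape of the result above (an illustrative sketch, not from the original
# source): for headers <h2>One</h2>, <h3>A</h3>, <h2>Two</h2> the generated node
# is approximately
#   <ol><li><a href="#auto0">One</a></li>
#       <ul><li><a href="#auto1">A</a></li></ul>
#       <li><a href="#auto2">Two</a></li></ol>
# and each header element gains a matching <a name="autoN"> anchor.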
def putInToC(document, toc):
"""
Insert the given table of contents into the given document.
The node with C{class} attribute set to C{toc} has its children replaced
with C{toc}.
@type document: A DOM Node or Document
@type toc: A DOM Node
"""
tocOrig = domhelpers.findElementsWithAttribute(document, 'class', 'toc')
if tocOrig:
        tocOrig = tocOrig[0]
tocOrig.childNodes = [toc]
def removeH1(document):
"""
Replace all C{h1} nodes in the given document with empty C{span} nodes.
C{h1} nodes mark up document sections and the output template is given an
opportunity to present this information in a different way.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@return: C{None}
"""
h1 = domhelpers.findNodesNamed(document, 'h1')
empty = dom.Element('span')
for node in h1:
node.parentNode.replaceChild(empty, node)
def footnotes(document):
"""
Find footnotes in the given document, move them to the end of the body, and
generate links to them.
A footnote is any node with a C{class} attribute set to C{footnote}.
Footnote links are generated as superscript. Footnotes are collected in a
C{ol} node at the end of the document.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@return: C{None}
"""
footnotes = domhelpers.findElementsWithAttribute(document, "class",
"footnote")
if not footnotes:
return
footnoteElement = dom.Element('ol')
id = 1
for footnote in footnotes:
href = dom.parseString('<a href="#footnote-%(id)d">'
'<super>%(id)d</super></a>'
% vars()).documentElement
text = ' '.join(domhelpers.getNodeText(footnote).split())
href.setAttribute('title', text)
target = dom.Element('a')
target.setAttribute('name', 'footnote-%d' % (id,))
target.childNodes = [footnote]
footnoteContent = dom.Element('li')
footnoteContent.childNodes = [target]
footnoteElement.childNodes.append(footnoteContent)
footnote.parentNode.replaceChild(href, footnote)
id += 1
body = domhelpers.findNodesNamed(document, "body")[0]
header = dom.parseString('<h2>Footnotes</h2>').documentElement
body.childNodes.append(header)
body.childNodes.append(footnoteElement)
def notes(document):
"""
Find notes in the given document and mark them up as such.
A note is any node with a C{class} attribute set to C{note}.
(I think this is a very stupid feature. When I found it I actually
exclaimed out loud. -exarkun)
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@return: C{None}
"""
notes = domhelpers.findElementsWithAttribute(document, "class", "note")
notePrefix = dom.parseString('<strong>Note: </strong>').documentElement
for note in notes:
note.childNodes.insert(0, notePrefix)
def findNodeJustBefore(target, nodes):
"""
Find the last Element which is a sibling of C{target} and is in C{nodes}.
@param target: A node the previous sibling of which to return.
@param nodes: A list of nodes which might be the right node.
@return: The previous sibling of C{target}.
"""
while target is not None:
node = target.previousSibling
while node is not None:
if node in nodes:
return node
node = node.previousSibling
target = target.parentNode
raise RuntimeError("Oops")
def getFirstAncestorWithSectionHeader(entry):
"""
Visit the ancestors of C{entry} until one with at least one C{h2} child
node is found, then return all of that node's C{h2} child nodes.
@type entry: A DOM Node
@param entry: The node from which to begin traversal. This node itself is
excluded from consideration.
@rtype: C{list} of DOM Nodes
@return: All C{h2} nodes of the ultimately selected parent node.
"""
for a in domhelpers.getParents(entry)[1:]:
headers = domhelpers.findNodesNamed(a, "h2")
if len(headers) > 0:
return headers
return []
def getSectionNumber(header):
"""
Retrieve the section number of the given node.
This is probably intended to interact in a rather specific way with
L{numberDocument}.
@type header: A DOM Node or L{None}
@param header: The section from which to extract a number. The section
number is the value of this node's first child.
@return: C{None} or a C{str} giving the section number.
"""
if not header:
return None
return domhelpers.gatherTextNodes(header.childNodes[0])
def getSectionReference(entry):
"""
Find the section number which contains the given node.
This function looks at the given node's ancestry until it finds a node
which defines a section, then returns that section's number.
@type entry: A DOM Node
@param entry: The node for which to determine the section.
@rtype: C{str}
@return: The section number, as returned by C{getSectionNumber} of the
first ancestor of C{entry} which defines a section, as determined by
L{getFirstAncestorWithSectionHeader}.
"""
headers = getFirstAncestorWithSectionHeader(entry)
myHeader = findNodeJustBefore(entry, headers)
return getSectionNumber(myHeader)
def index(document, filename, chapterReference):
"""
Extract index entries from the given document and store them for later use
and insert named anchors so that the index can link back to those entries.
Any node with a C{class} attribute set to C{index} is considered an index
entry.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@type filename: C{str}
@param filename: A link to the output for the given document which will be
included in the index to link to any index entry found here.
@type chapterReference: ???
@param chapterReference: ???
@return: C{None}
"""
entries = domhelpers.findElementsWithAttribute(document, "class", "index")
if not entries:
return
    i = 0
for entry in entries:
i += 1
anchor = 'index%02d' % i
if chapterReference:
ref = getSectionReference(entry) or chapterReference
else:
ref = 'link'
indexer.addEntry(filename, anchor, entry.getAttribute('value'), ref)
# does nodeName even affect anything?
entry.nodeName = entry.tagName = entry.endTagName = 'a'
for attrName in entry.attributes.keys():
entry.removeAttribute(attrName)
entry.setAttribute('name', anchor)
def setIndexLink(template, indexFilename):
"""
Insert a link to an index document.
Any node with a C{class} attribute set to C{index-link} will have its tag
name changed to C{a} and its C{href} attribute set to C{indexFilename}.
@type template: A DOM Node or Document
@param template: The output template which defines the presentation of the
version information.
@type indexFilename: C{str}
@param indexFilename: The address of the index document to which to link.
        If C{None}, this function will remove all index-link nodes instead.
@return: C{None}
"""
indexLinks = domhelpers.findElementsWithAttribute(template,
"class",
"index-link")
for link in indexLinks:
if indexFilename is None:
link.parentNode.removeChild(link)
else:
link.nodeName = link.tagName = link.endTagName = 'a'
for attrName in link.attributes.keys():
link.removeAttribute(attrName)
link.setAttribute('href', indexFilename)
def numberDocument(document, chapterNumber):
"""
Number the sections of the given document.
A dot-separated chapter, section number is added to the beginning of each
section, as defined by C{h2} nodes.
This is probably intended to interact in a rather specific way with
L{getSectionNumber}.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@type chapterNumber: C{int}
@param chapterNumber: The chapter number of this content in an overall
document.
@return: C{None}
"""
i = 1
for node in domhelpers.findNodesNamed(document, "h2"):
label = dom.Text()
label.data = "%s.%d " % (chapterNumber, i)
node.insertBefore(label, node.firstChild)
i += 1
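# Illustrative note (not from the original source): with chapterNumber=2 the
# first two h2 headers become "2.1 <heading text>" and "2.2 <heading text>",
# which is the prefix getSectionNumber() later reads back from the header.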
def fixRelativeLinks(document, linkrel):
"""
    Replace relative links in C{src} and C{href} attributes with links relative
    to C{linkrel}.
@type document: A DOM Node or Document
@param document: The output template.
@type linkrel: C{str}
    @param linkrel: A prefix to apply to all relative links in C{src} or
C{href} attributes in the input document when generating the output
document.
"""
for attr in 'src', 'href':
for node in domhelpers.findElementsWithAttribute(document, attr):
href = node.getAttribute(attr)
if not href.startswith('http') and not href.startswith('/'):
node.setAttribute(attr, linkrel+node.getAttribute(attr))
def setTitle(template, title, chapterNumber):
"""
Add title and chapter number information to the template document.
The title is added to the end of the first C{title} tag and the end of the
first tag with a C{class} attribute set to C{title}. If specified, the
chapter is inserted before the title.
@type template: A DOM Node or Document
@param template: The output template which defines the presentation of the
version information.
@type title: C{list} of DOM Nodes
@param title: Nodes from the input document defining its title.
@type chapterNumber: C{int}
@param chapterNumber: The chapter number of this content in an overall
document. If not applicable, any C{False} value will result in this
information being omitted.
@return: C{None}
"""
if numberer.getNumberSections() and chapterNumber:
titleNode = dom.Text()
# This is necessary in order for cloning below to work. See Python
        # issue 4851.
titleNode.ownerDocument = template.ownerDocument
titleNode.data = '%s. ' % (chapterNumber,)
title.insert(0, titleNode)
for nodeList in (domhelpers.findNodesNamed(template, "title"),
domhelpers.findElementsWithAttribute(template, "class",
'title')):
if nodeList:
for titleNode in title:
nodeList[0].appendChild(titleNode.cloneNode(True))
def setAuthors(template, authors):
"""
Add author information to the template document.
Names and contact information for authors are added to each node with a
C{class} attribute set to C{authors} and to the template head as C{link}
nodes.
@type template: A DOM Node or Document
@param template: The output template which defines the presentation of the
version information.
@type authors: C{list} of two-tuples of C{str}
@param authors: List of names and contact information for the authors of
the input document.
@return: C{None}
"""
for node in domhelpers.findElementsWithAttribute(template,
"class", 'authors'):
        # First, similarly to setTitle, insert text into a <div
# class="authors">
container = dom.Element('span')
for name, href in authors:
anchor = dom.Element('a')
anchor.setAttribute('href', href)
anchorText = dom.Text()
anchorText.data = name
anchor.appendChild(anchorText)
if (name, href) == authors[-1]:
if len(authors) == 1:
container.appendChild(anchor)
else:
andText = dom.Text()
andText.data = 'and '
container.appendChild(andText)
container.appendChild(anchor)
else:
container.appendChild(anchor)
commaText = dom.Text()
commaText.data = ', '
container.appendChild(commaText)
node.appendChild(container)
# Second, add appropriate <link rel="author" ...> tags to the <head>.
head = domhelpers.findNodesNamed(template, 'head')[0]
authors = [dom.parseString('<link rel="author" href="%s" title="%s"/>'
% (href, name)).childNodes[0]
for name, href in authors]
head.childNodes.extend(authors)
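# Illustrative note (assumption, not from the original source): for
# authors = [('Alice', 'a.html'), ('Bob', 'b.html')] the loop above renders
# "Alice, and Bob" (each name as an <a href=...> anchor) inside every
# class="authors" node, and the <head> gains matching <link rel="author"> tags.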
def setVersion(template, version):
"""
Add a version indicator to the given template.
@type template: A DOM Node or Document
@param template: The output template which defines the presentation of the
version information.
@type version: C{str}
@param version: The version string to add to the template.
@return: C{None}
"""
for node in domhelpers.findElementsWithAttribute(template, "class",
"version"):
text = dom.Text()
text.data = version
node.appendChild(text)
def getOutputFileName(originalFileName, outputExtension, index=None):
"""
Return a filename which is the same as C{originalFileName} except for the
extension, which is replaced with C{outputExtension}.
For example, if C{originalFileName} is C{'/foo/bar.baz'} and
C{outputExtension} is C{'quux'}, the return value will be
C{'/foo/bar.quux'}.
@type originalFileName: C{str}
    @type outputExtension: C{str}
@param index: ignored, never passed.
@rtype: C{str}
"""
return os.path.splitext(originalFileName)[0]+outputExtension
def munge(document, template, linkrel, dir, fullpath, ext, url, config, outfileGenerator=getOutputFileName):
"""
Mutate C{template} until it resembles C{document}.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@type template: A DOM Node or Document
@param template: The template document which defines the desired
presentation format of the content.
@type linkrel: C{str}
    @param linkrel: A prefix to apply to all relative links in C{src} or
C{href} attributes in the input document when generating the output
document.
@type dir: C{str}
@param dir: The directory in which to search for source listing files.
@type fullpath: C{str}
@param fullpath: The file name which contained the input document.
@type ext: C{str}
@param ext: The extension to use when selecting an output file name. This
replaces the extension of the input file name.
@type url: C{str}
@param url: A string which will be interpolated with the fully qualified
Python name of any API reference encountered in the input document, the
result of which will be used as a link to API documentation for that name
in the output document.
@type config: C{dict}
@param config: Further specification of the desired form of the output.
Valid keys in this dictionary::
noapi: If present and set to a True value, links to API documentation
will not be generated.
version: A string which will be included in the output to indicate the
version of this documentation.
@type outfileGenerator: Callable of C{str}, C{str} returning C{str}
@param outfileGenerator: Output filename factory. This is invoked with the
        input filename and C{ext} and the output document is serialized to the
file with the name returned.
@return: C{None}
"""
fixRelativeLinks(template, linkrel)
addMtime(template, fullpath)
removeH1(document)
if not config.get('noapi', False):
fixAPI(document, url)
fontifyPython(document)
fixLinks(document, ext)
addPyListings(document, dir)
addHTMLListings(document, dir)
addPlainListings(document, dir)
putInToC(template, generateToC(document))
footnotes(document)
notes(document)
setIndexLink(template, indexer.getIndexFilename())
setVersion(template, config.get('version', ''))
# Insert the document into the template
chapterNumber = htmlbook.getNumber(fullpath)
title = domhelpers.findNodesNamed(document, 'title')[0].childNodes
setTitle(template, title, chapterNumber)
if numberer.getNumberSections() and chapterNumber:
numberDocument(document, chapterNumber)
index(document, outfileGenerator(os.path.split(fullpath)[1], ext),
htmlbook.getReference(fullpath))
authors = domhelpers.findNodesNamed(document, 'link')
authors = [(node.getAttribute('title') or '',
node.getAttribute('href') or '')
for node in authors
if node.getAttribute('rel') == 'author']
setAuthors(template, authors)
body = domhelpers.findNodesNamed(document, "body")[0]
tmplbody = domhelpers.findElementsWithAttribute(template, "class",
"body")[0]
tmplbody.childNodes = body.childNodes
tmplbody.setAttribute("class", "content")
class _LocationReportingErrorHandler(ErrorHandler):
"""
Define a SAX error handler which can report the location of fatal
errors.
Unlike the errors reported during parsing by other APIs in the xml
    package, this one tries to report mismatched tag errors by including the
location of both the relevant opening and closing tags.
"""
def __init__(self, contentHandler):
self.contentHandler = contentHandler
def fatalError(self, err):
# Unfortunately, the underlying expat error code is only exposed as
# a string. I surely do hope no one ever goes and localizes expat.
if err.getMessage() == 'mismatched tag':
expect, begLine, begCol = self.contentHandler._locationStack[-1]
endLine, endCol = err.getLineNumber(), err.getColumnNumber()
raise process.ProcessingFailure(
"mismatched close tag at line %d, column %d; expected </%s> "
"(from line %d, column %d)" % (
endLine, endCol, expect, begLine, begCol))
raise process.ProcessingFailure(
'%s at line %d, column %d' % (err.getMessage(),
err.getLineNumber(),
err.getColumnNumber()))
class _TagTrackingContentHandler(SAX2DOM):
"""
Define a SAX content handler which keeps track of the start location of
all open tags. This information is used by the above defined error
handler to report useful locations when a fatal error is encountered.
"""
def __init__(self):
SAX2DOM.__init__(self)
self._locationStack = []
def setDocumentLocator(self, locator):
self._docLocator = locator
SAX2DOM.setDocumentLocator(self, locator)
def startElement(self, name, attrs):
self._locationStack.append((name, self._docLocator.getLineNumber(), self._docLocator.getColumnNumber()))
SAX2DOM.startElement(self, name, attrs)
def endElement(self, name):
self._locationStack.pop()
SAX2DOM.endElement(self, name)
class _LocalEntityResolver(object):
"""
Implement DTD loading (from a local source) for the limited number of
DTDs which are allowed for Lore input documents.
@ivar filename: The name of the file containing the lore input
document.
@ivar knownDTDs: A mapping from DTD system identifiers to L{FilePath}
instances pointing to the corresponding DTD.
"""
s = FilePath(__file__).sibling
knownDTDs = {
None: s("xhtml1-strict.dtd"),
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd": s("xhtml1-strict.dtd"),
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd": s("xhtml1-transitional.dtd"),
"xhtml-lat1.ent": s("xhtml-lat1.ent"),
"xhtml-symbol.ent": s("xhtml-symbol.ent"),
"xhtml-special.ent": s("xhtml-special.ent"),
}
del s
def __init__(self, filename):
self.filename = filename
def resolveEntity(self, publicId, systemId):
source = InputSource()
source.setSystemId(systemId)
try:
dtdPath = self.knownDTDs[systemId]
except KeyError:
raise process.ProcessingFailure(
"Invalid DTD system identifier (%r) in %s. Only "
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd "
"is allowed." % (systemId, self.filename))
source.setByteStream(dtdPath.open())
return source
def parseFileAndReport(filename, _open=file):
"""
Parse and return the contents of the given lore XHTML document.
@type filename: C{str}
@param filename: The name of a file containing a lore XHTML document to
load.
@raise process.ProcessingFailure: When the contents of the specified file
cannot be parsed.
@rtype: A DOM Document
@return: The document contained in C{filename}.
"""
content = _TagTrackingContentHandler()
error = _LocationReportingErrorHandler(content)
parser = make_parser()
parser.setContentHandler(content)
parser.setErrorHandler(error)
# In order to call a method on the expat parser which will be used by this
# parser, we need the expat parser to be created. This doesn't happen
# until reset is called, normally by the parser's parse method. That's too
# late for us, since it will then go on to parse the document without
# letting us do any extra set up. So, force the expat parser to be created
# here, and then disable reset so that the parser created is the one
# actually used to parse our document. Resetting is only needed if more
# than one document is going to be parsed, and that isn't the case here.
parser.reset()
parser.reset = lambda: None
# This is necessary to make the xhtml1 transitional declaration optional.
    # It causes _LocalEntityResolver.resolveEntity(None, None) to be called.
    # _LocalEntityResolver handles that case by giving out the xhtml1
# transitional dtd. Unfortunately, there is no public API for manipulating
# the expat parser when using xml.sax. Using the private _parser attribute
# may break. It's also possible that make_parser will return a parser
# which doesn't use expat, but uses some other parser. Oh well. :(
# -exarkun
parser._parser.UseForeignDTD(True)
parser.setEntityResolver(_LocalEntityResolver(filename))
    # This is probably a no-op because expat is not a validating parser. Who
# knows though, maybe you figured out a way to not use expat.
parser.setFeature(feature_validation, False)
fObj = _open(filename)
try:
try:
parser.parse(fObj)
except IOError, e:
raise process.ProcessingFailure(
e.strerror + ", filename was '" + filename + "'")
finally:
fObj.close()
return content.document
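# Illustrative usage (assumption, the filename is hypothetical): doFile() below
# essentially does
#   doc = parseFileAndReport('howto.xhtml')
# and receives a DOM Document, with malformed input surfaced as a
# process.ProcessingFailure carrying line/column information.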
def makeSureDirectoryExists(filename):
filename = os.path.abspath(filename)
dirname = os.path.dirname(filename)
if (not os.path.exists(dirname)):
os.makedirs(dirname)
def doFile(filename, linkrel, ext, url, templ, options={}, outfileGenerator=getOutputFileName):
"""
Process the input document at C{filename} and write an output document.
@type filename: C{str}
@param filename: The path to the input file which will be processed.
@type linkrel: C{str}
    @param linkrel: A prefix to apply to all relative links in C{src} or
C{href} attributes in the input document when generating the output
document.
@type ext: C{str}
@param ext: The extension to use when selecting an output file name. This
replaces the extension of the input file name.
@type url: C{str}
@param url: A string which will be interpolated with the fully qualified
Python name of any API reference encountered in the input document, the
result of which will be used as a link to API documentation for that name
in the output document.
@type templ: A DOM Node or Document
@param templ: The template on which the output document will be based.
This is mutated and then serialized to the output file.
@type options: C{dict}
@param options: Further specification of the desired form of the output.
Valid keys in this dictionary::
noapi: If present and set to a True value, links to API documentation
will not be generated.
version: A string which will be included in the output to indicate the
version of this documentation.
@type outfileGenerator: Callable of C{str}, C{str} returning C{str}
@param outfileGenerator: Output filename factory. This is invoked with the
        input filename and C{ext} and the output document is serialized to the
file with the name returned.
@return: C{None}
"""
doc = parseFileAndReport(filename)
clonedNode = templ.cloneNode(1)
munge(doc, clonedNode, linkrel, os.path.dirname(filename), filename, ext,
url, options, outfileGenerator)
newFilename = outfileGenerator(filename, ext)
_writeDocument(newFilename, clonedNode)
def _writeDocument(newFilename, clonedNode):
"""
Serialize the given node to XML into the named file.
@param newFilename: The name of the file to which the XML will be
written. If this is in a directory which does not exist, the
directory will be created.
@param clonedNode: The root DOM node which will be serialized.
@return: C{None}
"""
makeSureDirectoryExists(newFilename)
f = open(newFilename, 'w')
f.write(clonedNode.toxml('utf-8'))
f.close()
|
skycucumber/Messaging-Gateway
|
webapp/venv/lib/python2.7/site-packages/twisted/lore/tree.py
|
Python
|
gpl-2.0
| 40,135
|
[
"VisIt"
] |
2c62f97f2d81b8091be481353148ba5802c98a0af16e0648765dd229f28116e6
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
General JK contraction function for
* arbitrary integrals
* 4 different molecules
* multiple density matrices
* arbitrary basis subset for the 4 indices
'''
import numpy
from pyscf import lib
from pyscf import gto
from pyscf.lib import logger
from pyscf.scf import _vhf
def get_jk(mols, dms, scripts=['ijkl,ji->kl'], intor='int2e_sph',
aosym='s1', comp=None, hermi=0, shls_slice=None,
verbose=logger.WARN, vhfopt=None):
'''Compute J/K matrices for the given density matrix
Args:
mols : an instance of :class:`Mole` or a list of `Mole` objects
dms : ndarray or list of ndarrays
A density matrix or a list of density matrices
Kwargs:
hermi : int
Whether the returned J (K) matrix is hermitian
| 0 : no hermitian or symmetric
| 1 : hermitian
| 2 : anti-hermitian
intor : str
2-electron integral name. See :func:`getints` for the complete
list of available 2-electron integral names
aosym : int or str
Permutation symmetry for the AO integrals
            | 4 or '4' or 's4': 4-fold symmetry
            | '2ij' or 's2ij' : symmetry between i, j in (ij|kl)
            | '2kl' or 's2kl' : symmetry between k, l in (ij|kl)
            | 1 or '1' or 's1': no symmetry (default)
| 'a4ij' : 4-fold symmetry with anti-symmetry between i, j in (ij|kl)
| 'a4kl' : 4-fold symmetry with anti-symmetry between k, l in (ij|kl)
| 'a2ij' : anti-symmetry between i, j in (ij|kl)
| 'a2kl' : anti-symmetry between k, l in (ij|kl)
comp : int
Components of the integrals, e.g. cint2e_ip_sph has 3 components.
scripts : string or a list of strings
Contraction description (following numpy.einsum convention) based on
letters [ijkl]. Each script will be one-to-one applied to each
entry of dms. So it must have the same number of elements as the
dms, len(scripts) == len(dms).
shls_slice : 8-element list
(ish_start, ish_end, jsh_start, jsh_end, ksh_start, ksh_end, lsh_start, lsh_end)
Returns:
Depending on the number of density matrices, the function returns one
J/K matrix or a list of J/K matrices (the same number of entries as the
input dms).
        Each J/K matrix may be a 2D array, or a 3D array if the AO integral
has multiple components.
Examples:
>>> from pyscf import gto
>>> mol = gto.M(atom='H 0 -.5 0; H 0 .5 0', basis='cc-pvdz')
>>> nao = mol.nao_nr()
>>> dm = numpy.random.random((nao,nao))
>>> # Default, Coulomb matrix
>>> vj = get_jk(mol, dm)
>>> # Coulomb matrix with 8-fold permutation symmetry for AO integrals
>>> vj = get_jk(mol, dm, 'ijkl,ji->kl', aosym='s8')
>>> # Exchange matrix with 8-fold permutation symmetry for AO integrals
>>> vk = get_jk(mol, dm, 'ijkl,jk->il', aosym='s8')
>>> # Compute coulomb and exchange matrices together
>>> vj, vk = get_jk(mol, (dm,dm), ('ijkl,ji->kl','ijkl,li->kj'), aosym='s8')
>>> # Analytical gradients for coulomb matrix
>>> j1 = get_jk(mol, dm, 'ijkl,lk->ij', intor='int2e_ip1_sph', aosym='s2kl', comp=3)
>>> # contraction across two molecules
>>> mol1 = gto.M(atom='He 2 0 0', basis='6-31g')
>>> nao1 = mol1.nao_nr()
>>> dm1 = numpy.random.random((nao1,nao1))
>>> # Coulomb interaction between two molecules, note 4-fold symmetry can be applied
>>> jcross = get_jk((mol1,mol1,mol,mol), dm, scripts='ijkl,lk->ij', aosym='s4')
>>> ecoul = numpy.einsum('ij,ij', jcross, dm1)
>>> # Exchange interaction between two molecules, no symmetry can be used
>>> kcross = get_jk((mol1,mol,mol,mol1), dm, scripts='ijkl,jk->il')
>>> ex = numpy.einsum('ij,ji', kcross, dm1)
>>> # Analytical gradients for coulomb matrix between two molecules
>>> jcros1 = get_jk((mol1,mol1,mol,mol), dm, scripts='ijkl,lk->ij', intor='int2e_ip1_sph', comp=3)
>>> # Analytical gradients for coulomb interaction between 1s density and the other molecule
>>> jpart1 = get_jk((mol1,mol1,mol,mol), dm, scripts='ijkl,lk->ij', intor='int2e_ip1_sph', comp=3,
... shls_slice=(0,1,0,1,0,mol.nbas,0,mol.nbas))
'''
if isinstance(mols, (tuple, list)):
intor, comp = gto.moleintor._get_intor_and_comp(mols[0]._add_suffix(intor), comp)
assert(len(mols) == 4)
assert(mols[0].cart == mols[1].cart == mols[2].cart == mols[3].cart)
if shls_slice is None:
shls_slice = numpy.array([(0, mol.nbas) for mol in mols])
else:
shls_slice = numpy.asarray(shls_slice).reshape(4,2)
# concatenate unique mols and build corresponding shls_slice
mol_ids = [id(mol) for mol in mols]
atm, bas, env = mols[0]._atm, mols[0]._bas, mols[0]._env
bas_start = numpy.zeros(4, dtype=int)
for m in range(1,4):
first = mol_ids.index(mol_ids[m])
if first == m: # the unique mol, not repeated in mols
bas_start[m] = bas.shape[0]
atm, bas, env = gto.conc_env(atm, bas, env, mols[m]._atm,
mols[m]._bas, mols[m]._env)
else:
bas_start[m] = bas_start[first]
shls_slice[m] += bas_start[m]
shls_slice = shls_slice.flatten()
else:
intor, comp = gto.moleintor._get_intor_and_comp(mols._add_suffix(intor), comp)
atm, bas, env = mols._atm, mols._bas, mols._env
if shls_slice is None:
shls_slice = (0, mols.nbas) * 4
single_script = isinstance(scripts, str)
if single_script:
scripts = [scripts]
# Check if letters other than ijkl were provided.
if set(''.join(scripts[:4])).difference('ijkl,->as12'):
# Translate these letters to ijkl if possible
scripts = [script.translate({ord(script[0]): 'i',
ord(script[1]): 'j',
ord(script[2]): 'k',
ord(script[3]): 'l'})
for script in scripts]
if set(''.join(scripts[:4])).difference('ijkl,->as12'):
raise RuntimeError('Scripts unsupported %s' % scripts)
if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
dms = [dms]
assert(len(scripts) == len(dms))
#format scripts
descript = []
for script in scripts:
dmsym, vsym = script.lower().split(',')[1].split('->')
if vsym[:2] in ('a2', 's2', 's1'):
descript.append(dmsym + '->' + vsym)
elif hermi == 0:
descript.append(dmsym + '->s1' + vsym)
else:
descript.append(dmsym + '->s2' + vsym)
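    # Clarifying note (added, not in the original source): with hermi != 0 a
    # script such as 'ijkl,ji->kl' is rendered as the descriptor 'ji->s2kl', so
    # the C driver fills only one triangle of the result; hermi_triu() below
    # then restores the full (anti-)hermitian matrix.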
vs = _vhf.direct_bindm(intor, aosym, descript, dms, comp, atm, bas, env,
vhfopt=vhfopt, shls_slice=shls_slice)
if hermi != 0:
for v in vs:
if v.ndim == 3:
for vi in v:
lib.hermi_triu(vi, hermi, inplace=True)
else:
lib.hermi_triu(v, hermi, inplace=True)
if single_script:
vs = vs[0]
return vs
jk_build = get_jk
if __name__ == '__main__':
mol = gto.M(atom='H 0 -.5 0; H 0 .5 0', basis='cc-pvdz')
nao = mol.nao_nr()
dm = numpy.random.random((nao,nao))
eri0 = mol.intor('int2e_sph').reshape((nao,)*4)
vj = get_jk(mol, dm, 'ijkl,ji->kl')
print(numpy.allclose(vj, numpy.einsum('ijkl,ji->kl', eri0, dm)))
vj = get_jk(mol, dm, 'ijkl,ji->kl', aosym='s8')
print(numpy.allclose(vj, numpy.einsum('ijkl,ji->kl', eri0, dm)))
vk = get_jk(mol, dm, 'ijkl,jk->il', aosym='s8')
print(numpy.allclose(vk, numpy.einsum('ijkl,jk->il', eri0, dm)))
vj, vk = get_jk(mol, (dm,dm), ('ijkl,ji->kl','ijkl,li->kj'))
eri1 = mol.intor('int2e_ip1_sph', comp=3).reshape([3]+[nao]*4)
j1 = get_jk(mol, dm, 'ijkl,lk->ij', intor='int2e_ip1_sph', aosym='s2kl', comp=3)
print(numpy.allclose(j1, numpy.einsum('xijkl,lk->xij', eri1, dm)))
mol1 = gto.M(atom='He 2 0 0', basis='6-31g')
nao1 = mol1.nao_nr()
dm1 = numpy.random.random((nao1,nao1))
eri0 = gto.conc_mol(mol, mol1).intor('int2e_sph').reshape([nao+nao1]*4)
jcross = get_jk((mol1,mol1,mol,mol), dm, scripts='ijkl,lk->ij', aosym='s4')
ecoul = numpy.einsum('ij,ij', jcross, dm1)
print(numpy.allclose(jcross, numpy.einsum('ijkl,lk->ij', eri0[nao:,nao:,:nao,:nao], dm)))
print(ecoul-numpy.einsum('ijkl,lk,ij', eri0[nao:,nao:,:nao,:nao], dm, dm1))
kcross = get_jk((mol1,mol,mol,mol1), dm, scripts='ijkl,jk->il')
ex = numpy.einsum('ij,ji', kcross, dm1)
print(numpy.allclose(kcross, numpy.einsum('ijkl,jk->il', eri0[nao:,:nao,:nao,nao:], dm)))
print(ex-numpy.einsum('ijkl,jk,li', eri0[nao:,:nao,:nao,nao:], dm, dm1))
kcross = get_jk((mol1,mol,mol,mol1), dm1, scripts='ijkl,li->kj')
ex = numpy.einsum('ij,ji', kcross, dm)
print(numpy.allclose(kcross, numpy.einsum('ijkl,li->kj', eri0[nao:,:nao,:nao,nao:], dm1)))
print(ex-numpy.einsum('ijkl,jk,li', eri0[nao:,:nao,:nao,nao:], dm, dm1))
j1part = get_jk((mol1,mol1,mol,mol), dm1[:1,:1], scripts='ijkl,ji->kl', intor='int2e',
shls_slice=(0,1,0,1,0,mol.nbas,0,mol.nbas))
print(numpy.allclose(j1part, numpy.einsum('ijkl,ji->kl', eri0[nao:nao+1,nao:nao+1,:nao,:nao], dm1[:1,:1])))
k1part = get_jk((mol1,mol,mol,mol1), dm1[:,:1], scripts='ijkl,li->kj', intor='int2e',
shls_slice=(0,1,0,1,0,mol.nbas,0,mol1.nbas))
print(numpy.allclose(k1part, numpy.einsum('ijkl,li->kj', eri0[nao:nao+1,:1,:nao,nao:], dm1[:,:1])))
j1part = get_jk(mol, dm[:1,1:2], scripts='ijkl,ji->kl', intor='int2e',
shls_slice=(1,2,0,1,0,mol.nbas,0,mol.nbas))
print(numpy.allclose(j1part, numpy.einsum('ijkl,ji->kl', eri0[1:2,:1,:nao,:nao], dm[:1,1:2])))
k1part = get_jk(mol, dm[:,1:2], scripts='ijkl,li->kj', intor='int2e',
shls_slice=(1,2,0,1,0,mol.nbas,0,mol.nbas))
print(numpy.allclose(k1part, numpy.einsum('ijkl,li->kj', eri0[:1,1:2,:nao,:nao], dm[:,1:2])))
eri1 = gto.conc_mol(mol, mol1).intor('int2e_ip1_sph',comp=3).reshape([3]+[nao+nao1]*4)
j1cross = get_jk((mol1,mol1,mol,mol), dm, scripts='ijkl,lk->ij', intor='int2e_ip1_sph', comp=3)
print(numpy.allclose(j1cross, numpy.einsum('xijkl,lk->xij', eri1[:,nao:,nao:,:nao,:nao], dm)))
j1part = get_jk((mol1,mol1,mol,mol), dm, scripts='ijkl,lk->ij', intor='int2e_ip1_sph', comp=3,
shls_slice=(0,1,0,1,0,mol.nbas,0,mol.nbas))
print(numpy.allclose(j1part, numpy.einsum('xijkl,lk->xij', eri1[:,nao:nao+1,nao:nao+1,:nao,:nao], dm)))
|
sunqm/pyscf
|
pyscf/scf/jk.py
|
Python
|
apache-2.0
| 11,425
|
[
"PySCF"
] |
3a42b6ed1a3698deb533a9d1cdad2fda53be33413fbbb96c55f35ec0dcfdcc6f
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2022, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
#import os
#from unittest import TestCase
#from exatomic import Universe
#from exatomic.gaussian import Output, Input
#from exatomic.gaussian.inputs import _handle_args
#class TestInput(TestCase):
# """Tests the input file generation functionality for Gaussian."""
# pass
# def setUp(self):
# fl = Output(os.sep.join(__file__.split(os.sep)[:-1]
# + ['gaussian-uo2.out']))
# self.uni = Universe(atom=fl.atom)
# self.keys = ['link0', 'route', 'basis', 'ecp']
# self.lisopt = [('key1', 'value1'), ('key2', 'value2')]
# self.dicopt = {'key1': 'value1', 'key2': 'value2'}
# self.tupopt = (('key1', 'value1'), ('key2', 'value2'))
# self.stropt = 'value'
#
# def test_from_universe(self):
# """Test the from_universe class method for input generation."""
# fl = Input.from_universe(self.uni, link0=self.lisopt,
# route=self.dicopt, basis=self.tupopt)
# self.assertEqual(fl[0][0], '%')
# self.assertEqual(fl[2][0], '#')
# self.assertEqual(len(fl.find('****')), 2)
# self.assertEqual(len(fl), 18)
#
# def test__handle_args(self):
# """Test the argument handler helper function."""
# for key in self.keys:
# lval = _handle_args(key, self.lisopt)
# self.assertEqual(lval, _handle_args(key, self.tupopt))
# self.assertEqual(self.stropt, _handle_args(key, self.stropt))
|
exa-analytics/exatomic
|
exatomic/gaussian/tests/test_inputs.py
|
Python
|
apache-2.0
| 1,613
|
[
"Gaussian"
] |
606dfb0575f6b143ca50ba5f3bbdf8278ec3a258fa6cdf1e99b1d7292bced958
|
from . import picard, bamUtil, samtools,bwa
import os
opj = os.path.join
class FilterBamByRG_To_FastQ(samtools.FilterBamByRG,picard.REVERTSAM,bamUtil.Bam2FastQ):
name = "Extract ReadGroup from BAM and Convert to FastQ"
inputs = ['bam']
outputs = ['1.fastq.gz','2.fastq.gz','unpaired.fastq.gz']
time_req = 12*60
mem_req = 7*1024
cpu_req=2
def cmd(self,i,s,p):
return r"""
set -o pipefail &&
{s[samtools_path]} view -h -u -r {p[rgid]} {i[bam][0]}
|
{self.bin}
INPUT=/dev/stdin
OUTPUT=/dev/stdout
VALIDATION_STRINGENCY=SILENT
MAX_RECORDS_IN_RAM=4000000
COMPRESSION_LEVEL=0
|
{s[bamUtil_path]} bam2FastQ
--in -.ubam
--firstOut $OUT.1.fastq.gz
--secondOut $OUT.2.fastq.gz
--unpairedOut $OUT.unpaired.fastq.gz
"""
class AlignAndClean(bwa.MEM,picard.AddOrReplaceReadGroups,picard.CollectMultipleMetrics):
name = "BWA Alignment and Cleaning"
mem_req = 10*1024
cpu_req = 4
time_req = 12*60
inputs = ['fastq.gz']
outputs = ['bam']
def cmd(self,i,s,p):
"""
Expects tags: chunk, library, sample_name, platform, platform_unit, pair
"""
return r"""
set -o pipefail &&
{s[bwa_path]} mem
-M
-t {self.cpu_req}
-R "@RG\tID:{p[platform_unit]}\tLB:{p[library]}\tSM:{p[sample_name]}\tPL:{p[platform]}\tPU:{p[platform_unit]}"
{s[bwa_reference_fasta_path]}
{i[fastq.gz][0]}
{i[fastq.gz][1]}
|
{self.picard_bin} -jar {AddOrReplaceReadGroups}
INPUT=/dev/stdin
OUTPUT=/dev/stdout
RGID={p[platform_unit]}
RGLB={p[library]}
RGSM={p[sample_name]}
RGPL={p[platform]}
RGPU={p[platform_unit]}
COMPRESSION_LEVEL=0
|
{self.picard_bin} -jar {CleanSam}
I=/dev/stdin
O=/dev/stdout
VALIDATION_STRINGENCY=SILENT
COMPRESSION_LEVEL=0
|
{self.picard_bin} -jar {SortSam}
I=/dev/stdin
O=$OUT.bam
SORT_ORDER=coordinate
CREATE_INDEX=True
""", dict (
AddOrReplaceReadGroups=opj(s['Picard_dir'],'AddOrReplaceReadGroups.jar'),
CleanSam=opj(s['Picard_dir'],'CleanSam.jar'),
SortSam=opj(s['Picard_dir'],'SortSam.jar')
)
|
LPM-HMS/GenomeKey
|
obsolete/genomekey/tools/pipes.py
|
Python
|
mit
| 2,573
|
[
"BWA"
] |
0f196095218b56a5778b639cddca845a4db5273bb86b7a9973367e08528e6882
|
# -*- coding: utf-8 -*-
# These tests don't work at the moment, due to the security_groups multi select not working
# in selenium (the group is selected then immediately reset)
import fauxfactory
import pytest
from riggerlib import recursive_update
from textwrap import dedent
from cfme import test_requirements
from cfme.automate.explorer.domain import DomainCollection
from cfme.cloud.instance import Instance
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.utils import testgen
from cfme.utils.conf import credentials
from cfme.utils.rest import assert_response
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.update import update
from cfme.utils.version import current_version
from cfme.utils.wait import wait_for, RefreshTimer
pytestmark = [pytest.mark.meta(server_roles="+automate"),
test_requirements.provision, pytest.mark.tier(2)]
pytest_generate_tests = testgen.generate(
[CloudProvider], required_fields=[['provisioning', 'image']], scope="function")
@pytest.yield_fixture(scope="function")
def testing_instance(request, setup_provider, provider, provisioning, vm_name, tag):
""" Fixture to prepare instance parameters for provisioning
"""
image = provisioning['image']['name']
note = ('Testing provisioning from image {} to vm {} on provider {}'.format(
image, vm_name, provider.key))
instance = Instance.factory(vm_name, provider, image)
inst_args = dict()
# Base instance info
inst_args['request'] = {
'email': 'image_provisioner@example.com',
'first_name': 'Image',
'last_name': 'Provisioner',
'notes': note,
}
# TODO Move this into helpers on the provider classes
recursive_update(inst_args, {'catalog': {'vm_name': vm_name}})
# Check whether auto-selection of environment is passed
auto = False # By default provisioning will be manual
try:
parameter = request.param
if parameter == 'tag':
inst_args['purpose'] = {
'apply_tags': ('{} *'.format(tag.category.display_name), tag.display_name)
}
else:
auto = parameter
except AttributeError:
# in case nothing was passed just skip
pass
# All providers other than Azure
if not provider.one_of(AzureProvider):
recursive_update(inst_args, {
'properties': {
'instance_type': provisioning['instance_type'],
'guest_keypair': provisioning['guest_keypair']},
'environment': {
'availability_zone': None if auto else provisioning['availability_zone'],
'security_groups': None if auto else provisioning['security_group'],
'automatic_placement': auto
}
})
# Openstack specific
if provider.one_of(OpenStackProvider):
recursive_update(inst_args, {
'environment': {
'cloud_network': None if auto else provisioning['cloud_network']
}
})
# GCE specific
if provider.one_of(GCEProvider):
recursive_update(inst_args, {
'environment': {
'cloud_network': None if auto else provisioning['cloud_network']
},
'properties': {
'boot_disk_size': provisioning['boot_disk_size'],
'is_preemptible': True if current_version() >= "5.7" else None}
})
# Azure specific
if provider.one_of(AzureProvider):
# Azure uses different provisioning keys for some reason
try:
template = provider.data.templates.small_template
vm_user = credentials[template.creds].username
vm_password = credentials[template.creds].password
except AttributeError:
pytest.skip('Could not find small_template or credentials for {}'.format(provider.name))
recursive_update(inst_args, {
'environment': {
'automatic_placement': auto,
'cloud_network': None if auto else provisioning['virtual_net'],
'cloud_subnet': None if auto else provisioning['subnet_range'],
'security_groups': None if auto else [provisioning['network_nsg']],
'resource_groups': None if auto else provisioning['resource_group']
},
'properties': {
'instance_type': provisioning['vm_size'].lower()},
'customize': {
'admin_username': vm_user,
'root_password': vm_password}})
yield instance, inst_args, image
try:
if instance.does_vm_exist_on_provider():
instance.delete_from_provider()
except Exception as ex:
logger.warning('Exception while deleting instance fixture, continuing: {}'
.format(ex.message))
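# Rough shape of the arguments yielded above for a non-Azure provider (an
# illustrative sketch, not taken from the original source):
#   inst_args = {'request': {...requester details...},
#                'catalog': {'vm_name': 'prov-abc12'},
#                'properties': {'instance_type': ..., 'guest_keypair': ...},
#                'environment': {'availability_zone': ..., 'security_groups': ...,
#                                'automatic_placement': False}}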
@pytest.fixture(scope="function")
def vm_name(request):
return random_vm_name('prov')
@pytest.fixture(scope='function')
def provisioned_instance(provider, testing_instance, appliance):
""" Checks provisioning status for instance """
instance, inst_args, image = testing_instance
instance.create(**inst_args)
logger.info('Waiting for cfme provision request for vm %s', instance.name)
request_description = 'Provision from [{}] to [{}]'.format(image, instance.name)
provision_request = appliance.collections.requests.instantiate(request_description)
try:
provision_request.wait_for_request(method='ui')
except Exception as e:
logger.info(
"Provision failed {}: {}".format(e, provision_request.request_state))
raise e
assert provision_request.is_succeeded(method='ui'), (
"Provisioning failed with the message {}".format(
provision_request.row.last_message.text))
instance.wait_to_appear(timeout=800)
provider.refresh_provider_relationships()
logger.info("Refreshing provider relationships and power states")
refresh_timer = RefreshTimer(time_for_refresh=300)
wait_for(provider.is_refreshed,
[refresh_timer],
message="is_refreshed",
num_sec=1000,
delay=60,
handle_exception=True)
return instance
@pytest.mark.parametrize('testing_instance', [True, False], ids=["Auto", "Manual"], indirect=True)
def test_provision_from_template(provider, provisioned_instance):
""" Tests instance provision from template
Metadata:
test_flag: provision
"""
assert provisioned_instance.does_vm_exist_on_provider(), "Instance wasn't provisioned"
@pytest.mark.uncollectif(lambda provider: not provider.one_of(GCEProvider) or
current_version() < "5.7")
def test_gce_preemtible_provision(provider, testing_instance, soft_assert):
instance, inst_args, image = testing_instance
instance.create(**inst_args)
instance.wait_to_appear(timeout=800)
provider.refresh_provider_relationships()
logger.info("Refreshing provider relationships and power states")
refresh_timer = RefreshTimer(time_for_refresh=300)
wait_for(provider.is_refreshed,
[refresh_timer],
message="is_refreshed",
num_sec=1000,
delay=60,
handle_exception=True)
soft_assert('Yes' in instance.get_detail(
properties=("Properties", "Preemptible")), "GCE Instance isn't Preemptible")
soft_assert(instance.does_vm_exist_on_provider(), "Instance wasn't provisioned")
def test_provision_from_template_using_rest(
appliance, request, setup_provider, provider, vm_name, provisioning):
""" Tests provisioning from a template using the REST API.
Metadata:
test_flag: provision, rest
"""
if 'flavors' not in appliance.rest_api.collections.all_names:
pytest.skip("This appliance does not have `flavors` collection.")
image_guid = appliance.rest_api.collections.templates.find_by(
name=provisioning['image']['name'])[0].guid
if ':' in provisioning['instance_type'] and provider.one_of(EC2Provider, GCEProvider):
instance_type = provisioning['instance_type'].split(':')[0].strip()
elif provider.type == 'azure':
instance_type = provisioning['instance_type'].lower()
else:
instance_type = provisioning['instance_type']
flavors = appliance.rest_api.collections.flavors.find_by(name=instance_type)
assert flavors
# TODO: Multi search when it works
for flavor in flavors:
if flavor.ems.name == provider.name:
flavor_id = flavor.id
break
else:
pytest.fail(
"Cannot find flavour {} for provider {}".format(instance_type, provider.name))
provision_data = {
"version": "1.1",
"template_fields": {
"guid": image_guid,
},
"vm_fields": {
"vm_name": vm_name,
"instance_type": flavor_id,
"request_type": "template",
},
"requester": {
"user_name": "admin",
"owner_first_name": "Administrator",
"owner_last_name": "Administratorovich",
"owner_email": "admin@example.com",
"auto_approve": True,
},
"tags": {
},
"additional_values": {
},
"ems_custom_attributes": {
},
"miq_custom_attributes": {
}
}
if not isinstance(provider, AzureProvider):
provision_data['vm_fields']['availability_zone'] = provisioning['availability_zone']
provision_data['vm_fields']['security_groups'] = [provisioning['security_group']]
provision_data['vm_fields']['guest_keypair'] = provisioning['guest_keypair']
if isinstance(provider, GCEProvider):
provision_data['vm_fields']['cloud_network'] = provisioning['cloud_network']
provision_data['vm_fields']['boot_disk_size'] = provisioning['boot_disk_size']
provision_data['vm_fields']['zone'] = provisioning['availability_zone']
provision_data['vm_fields']['region'] = 'us-central1'
elif isinstance(provider, AzureProvider):
try:
template = provider.data.templates.small_template
vm_user = credentials[template.creds].username
vm_password = credentials[template.creds].password
except AttributeError:
pytest.skip('Could not find small_template or credentials for {}'.format(provider.name))
# mapping: product/dialogs/miq_dialogs/miq_provision_azure_dialogs_template.yaml
provision_data['vm_fields']['root_username'] = vm_user
provision_data['vm_fields']['root_password'] = vm_password
request.addfinalizer(
lambda: provider.mgmt.delete_vm(vm_name) if provider.mgmt.does_vm_exist(vm_name) else None)
request = appliance.rest_api.collections.provision_requests.action.create(**provision_data)[0]
assert_response(appliance)
def _finished():
request.reload()
if request.status.lower() in {"error"}:
pytest.fail("Error when provisioning: `{}`".format(request.message))
return request.request_state.lower() in {"finished", "provisioned"}
wait_for(_finished, num_sec=3000, delay=10, message="REST provisioning finishes")
wait_for(
lambda: provider.mgmt.does_vm_exist(vm_name),
num_sec=1000, delay=5, message="VM {} becomes visible".format(vm_name))
@pytest.mark.uncollectif(lambda provider: not provider.one_of(EC2Provider, OpenStackProvider))
def test_manual_placement_using_rest(
appliance, request, setup_provider, provider, vm_name, provisioning):
""" Tests provisioning cloud instance with manual placement using the REST API.
Metadata:
test_flag: provision, rest
"""
image_guid = appliance.rest_api.collections.templates.get(
name=provisioning['image']['name']).guid
provider_rest = appliance.rest_api.collections.providers.get(name=provider.name)
security_group_name = provisioning['security_group'].split(':')[0].strip()
if ':' in provisioning['instance_type'] and provider.one_of(EC2Provider):
instance_type = provisioning['instance_type'].split(':')[0].strip()
else:
instance_type = provisioning['instance_type']
flavors = appliance.rest_api.collections.flavors.find_by(name=instance_type)
assert flavors
flavor = None
for flavor in flavors:
if flavor.ems_id == provider_rest.id:
break
else:
pytest.fail("Cannot find flavour.")
provider_data = appliance.rest_api.get(provider_rest._href +
'?attributes=cloud_networks,cloud_subnets,security_groups,cloud_tenants')
# find out cloud network
assert provider_data['cloud_networks']
cloud_network_name = provisioning.get('cloud_network')
cloud_network = None
for cloud_network in provider_data['cloud_networks']:
# If name of cloud network is available, find match.
# Otherwise just "enabled" is enough.
if cloud_network_name and cloud_network_name != cloud_network['name']:
continue
if cloud_network['enabled']:
break
else:
pytest.fail("Cannot find cloud network.")
# find out security group
assert provider_data['security_groups']
security_group = None
for group in provider_data['security_groups']:
if (group.get('cloud_network_id') == cloud_network['id'] and
group['name'] == security_group_name):
security_group = group
break
# OpenStack doesn't seem to have the "cloud_network_id" attribute.
# At least try to find the group where the group name matches.
elif not security_group and group['name'] == security_group_name:
security_group = group
if not security_group:
pytest.fail("Cannot find security group.")
# find out cloud subnet
assert provider_data['cloud_subnets']
cloud_subnet = None
for cloud_subnet in provider_data['cloud_subnets']:
if (cloud_subnet.get('cloud_network_id') == cloud_network['id'] and
cloud_subnet['status'] in ('available', 'active')):
break
else:
pytest.fail("Cannot find cloud subnet.")
def _find_availability_zone_id():
subnet_data = appliance.rest_api.get(provider_rest._href + '?attributes=cloud_subnets')
for subnet in subnet_data['cloud_subnets']:
if subnet['id'] == cloud_subnet['id'] and 'availability_zone_id' in subnet:
return subnet['availability_zone_id']
return False
# find out availability zone
availability_zone_id = None
if provisioning.get('availability_zone'):
availability_zone_entities = appliance.rest_api.collections.availability_zones.find_by(
name=provisioning['availability_zone'])
if availability_zone_entities and availability_zone_entities[0].ems_id == flavor.ems_id:
availability_zone_id = availability_zone_entities[0].id
if not availability_zone_id and 'availability_zone_id' in cloud_subnet:
availability_zone_id = cloud_subnet['availability_zone_id']
if not availability_zone_id:
availability_zone_id, _ = wait_for(
_find_availability_zone_id, num_sec=100, delay=5, message="availability_zone present")
# find out cloud tenant
cloud_tenant_id = None
tenant_name = provisioning.get('cloud_tenant')
if tenant_name:
for tenant in provider_data.get('cloud_tenants', []):
if (tenant['name'] == tenant_name and
tenant['enabled'] and
tenant['ems_id'] == flavor.ems_id):
cloud_tenant_id = tenant['id']
provision_data = {
"version": "1.1",
"template_fields": {
"guid": image_guid
},
"vm_fields": {
"vm_name": vm_name,
"instance_type": flavor.id,
"request_type": "template",
"placement_auto": False,
"cloud_network": cloud_network['id'],
"cloud_subnet": cloud_subnet['id'],
"placement_availability_zone": availability_zone_id,
"security_groups": security_group['id'],
"monitoring": "basic"
},
"requester": {
"user_name": "admin",
"owner_first_name": "Administrator",
"owner_last_name": "Administratorovich",
"owner_email": "admin@example.com",
"auto_approve": True,
},
"tags": {
},
"additional_values": {
},
"ems_custom_attributes": {
},
"miq_custom_attributes": {
}
}
if cloud_tenant_id:
provision_data['vm_fields']['cloud_tenant'] = cloud_tenant_id
request.addfinalizer(
lambda: provider.mgmt.delete_vm(vm_name) if provider.mgmt.does_vm_exist(vm_name) else None)
request = appliance.rest_api.collections.provision_requests.action.create(**provision_data)[0]
assert_response(appliance)
def _finished():
request.reload()
if 'error' in request.status.lower():
pytest.fail("Error when provisioning: `{}`".format(request.message))
return request.request_state.lower() in ('finished', 'provisioned')
wait_for(_finished, num_sec=3000, delay=10, message="REST provisioning finishes")
wait_for(
lambda: provider.mgmt.does_vm_exist(vm_name),
num_sec=1000, delay=5, message="VM {} becomes visible".format(vm_name))
VOLUME_METHOD = ("""
prov = $evm.root["miq_provision"]
prov.set_option(
:clone_options,
{{ :block_device_mapping => [{}] }})
""")
ONE_FIELD = """{{:volume_id => "{}", :device_name => "{}"}}"""
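# Illustrative note (not part of the original tests): ONE_FIELD.format("vol-1",
# "/dev/sdb") renders the Ruby hash {:volume_id => "vol-1", :device_name =>
# "/dev/sdb"}, and VOLUME_METHOD wraps a comma-separated list of such hashes
# into the automate method body used by the volume-attachment tests below.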
@pytest.fixture(scope="module")
def domain(request, appliance):
domain = DomainCollection(appliance).create(name=fauxfactory.gen_alphanumeric(), enabled=True)
request.addfinalizer(domain.delete_if_exists)
return domain
@pytest.fixture(scope="module")
def original_request_class(appliance):
return DomainCollection(appliance).instantiate(name='ManageIQ')\
.namespaces.instantiate(name='Cloud')\
.namespaces.instantiate(name='VM')\
.namespaces.instantiate(name='Provisioning')\
.namespaces.instantiate(name='StateMachines')\
.classes.instantiate(name='Methods')
@pytest.fixture(scope="module")
def modified_request_class(request, domain, original_request_class):
original_request_class.copy_to(domain)
klass = domain\
.namespaces.instantiate(name='Cloud')\
.namespaces.instantiate(name='VM')\
.namespaces.instantiate(name='Provisioning')\
.namespaces.instantiate(name='StateMachines')\
.classes.instantiate(name='Methods')
request.addfinalizer(klass.delete_if_exists)
return klass
@pytest.fixture(scope="module")
def copy_domains(original_request_class, domain):
methods = ['openstack_PreProvision', 'openstack_CustomizeRequest']
for method in methods:
original_request_class.methods.instantiate(name=method).copy_to(domain)
# Not collected for EC2 in generate_tests above
@pytest.mark.parametrize("disks", [1, 2])
@pytest.mark.uncollectif(lambda provider: not provider.one_of(OpenStackProvider))
def test_provision_from_template_with_attached_disks(request, testing_instance, provider, disks,
soft_assert, domain, modified_request_class,
copy_domains, provisioning):
""" Tests provisioning from a template and attaching disks
Metadata:
test_flag: provision
"""
instance, inst_args, image = testing_instance
    # Modify availability_zone for Azure provider
    if provider.one_of(AzureProvider):
        recursive_update(inst_args, {'environment': {'availability_zone': provisioning['av_set']}})
device_name = "/dev/sd{}"
device_mapping = []
with provider.mgmt.with_volumes(1, n=disks) as volumes:
for i, volume in enumerate(volumes):
device_mapping.append((volume, device_name.format(chr(ord("b") + i))))
# Set up automate
method = modified_request_class.methods.instantiate(name="openstack_PreProvision")
with update(method):
disk_mapping = []
for mapping in device_mapping:
disk_mapping.append(ONE_FIELD.format(*mapping))
method.script = VOLUME_METHOD.format(", ".join(disk_mapping))
def _finish_method():
with update(method):
method.script = """prov = $evm.root["miq_provision"]"""
request.addfinalizer(_finish_method)
instance.create(**inst_args)
for volume_id in volumes:
soft_assert(vm_name in provider.mgmt.volume_attachments(volume_id))
for volume, device in device_mapping:
soft_assert(provider.mgmt.volume_attachments(volume)[vm_name] == device)
instance.delete_from_provider() # To make it possible to delete the volume
wait_for(lambda: not instance.does_vm_exist_on_provider(), num_sec=180, delay=5)
# Not collected for EC2 in generate_tests above
@pytest.mark.uncollectif(lambda provider: not provider.one_of(OpenStackProvider))
def test_provision_with_boot_volume(request, testing_instance, provider, soft_assert, copy_domains,
modified_request_class):
""" Tests provisioning from a template and attaching one booting volume.
Metadata:
test_flag: provision, volumes
"""
instance, inst_args, image = testing_instance
with provider.mgmt.with_volume(1, imageRef=provider.mgmt.get_template_id(image)) as volume:
# Set up automate
method = modified_request_class.methods.instantiate(name="openstack_CustomizeRequest")
with update(method):
method.script = dedent('''\
$evm.root["miq_provision"].set_option(
:clone_options, {{
:image_ref => nil,
:block_device_mapping_v2 => [{{
:boot_index => 0,
:uuid => "{}",
:device_name => "vda",
:source_type => "volume",
:destination_type => "volume",
:delete_on_termination => false
}}]
}}
)
'''.format(volume))
@request.addfinalizer
def _finish_method():
with update(method):
method.script = """prov = $evm.root["miq_provision"]"""
instance.create(**inst_args)
soft_assert(vm_name in provider.mgmt.volume_attachments(volume))
soft_assert(provider.mgmt.volume_attachments(volume)[vm_name] == "vda")
instance.delete_from_provider() # To make it possible to delete the volume
wait_for(lambda: not instance.does_vm_exist_on_provider(), num_sec=180, delay=5)
# Not collected for EC2 in generate_tests above
@pytest.mark.uncollectif(lambda provider: not provider.one_of(OpenStackProvider))
def test_provision_with_additional_volume(request, testing_instance, provider, small_template,
soft_assert, copy_domains, domain,
modified_request_class):
""" Tests provisioning with setting specific image from AE and then also making it create and
attach an additional 3G volume.
Metadata:
test_flag: provision, volumes
"""
instance, inst_args, image = testing_instance
# Set up automate
method = modified_request_class.methods.instantiate(name="openstack_CustomizeRequest")
try:
image_id = provider.mgmt.get_template_id(small_template.name)
except KeyError:
pytest.skip("No small_template in provider adta!")
with update(method):
method.script = dedent('''\
$evm.root["miq_provision"].set_option(
:clone_options, {{
:image_ref => nil,
:block_device_mapping_v2 => [{{
:boot_index => 0,
:uuid => "{}",
:device_name => "vda",
:source_type => "image",
:destination_type => "volume",
:volume_size => 3,
:delete_on_termination => false
}}]
}}
)
'''.format(image_id))
def _finish_method():
with update(method):
method.script = """prov = $evm.root["miq_provision"]"""
request.addfinalizer(_finish_method)
instance.create(**inst_args)
prov_instance = provider.mgmt._find_instance_by_name(vm_name)
try:
assert hasattr(prov_instance, 'os-extended-volumes:volumes_attached')
volumes_attached = getattr(prov_instance, 'os-extended-volumes:volumes_attached')
assert len(volumes_attached) == 1
volume_id = volumes_attached[0]["id"]
assert provider.mgmt.volume_exists(volume_id)
volume = provider.mgmt.get_volume(volume_id)
assert volume.size == 3
finally:
instance.delete_from_provider()
wait_for(lambda: not instance.does_vm_exist_on_provider(), num_sec=180, delay=5)
if "volume_id" in locals(): # To handle the case of 1st or 2nd assert
if provider.mgmt.volume_exists(volume_id):
provider.mgmt.delete_volume(volume_id)
@pytest.mark.parametrize('testing_instance', ['tag'], indirect=True)
def test_cloud_provision_with_tag(provisioned_instance, tag):
""" Tests tagging instance using provisioning dialogs.
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, pick a tag.
* Submit the provisioning request and wait for it to finish.
* Visit instance page, it should display the selected tags
Metadata:
test_flag: provision
"""
assert provisioned_instance.does_vm_exist_on_provider(), "Instance wasn't provisioned"
tags = provisioned_instance.get_tags()
assert any(
instance_tag.category.display_name == tag.category.display_name and
instance_tag.display_name == tag.display_name for instance_tag in tags), (
"{}: {} not in ({})".format(tag.category.display_name, tag.display_name, str(tags)))
|
okolisny/integration_tests
|
cfme/tests/cloud/test_provisioning.py
|
Python
|
gpl-2.0
| 26,691
|
[
"VisIt"
] |
507e4b1a296f63d2f890f38c955c9f329e5b86fd120e359a3caad9f753450358
|
# ##########################################
# Version 1.0
# Author: Brian Torres-Gil
#
# About this script:
# Triggered when a WildFire syslog indicates a file has been analyzed by WildFire.
# This script retrieves the WildFire data relating to that syslog from the WildFire
# cloud service API.
#
# Script's actions and warning messages are logged in $SPLUNK_HOME/var/log/splunk/python.log
############################################
############################################
# How to Use this script
# The script must be provided 3 things to retrieve a WildFire log from the cloud:
# 1. An API Key. This is found at https://wildfire.paloaltonetworks.com
# under 'My Account'.
# 2. The file digest (MD5, SHA-1, or SHA256) of the file that produced the alert. This is in the syslog.
# 3. The ID of the report. This is in the syslog.
###########################################
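# Example CLI invocation (hypothetical values; see get_cli_args() below for the
# exact positional arguments):
#   python retrieveWildFireReport.py <api_key> <sha256_file_digest>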
###########################################
# if you DO want to go through a proxy, e.g., HTTP_PROXY = {'squid': '2.2.2.2'}
HTTP_PROXY = {}
#########################################################
# Do NOT modify anything below this line unless you are
# certain of the ramifications of the changes
#########################################################
import sys
import os
import traceback
import argparse
libpath = os.path.dirname(os.path.abspath(__file__))
sys.path[:0] = [os.path.join(libpath, 'lib')]
import common
import environment
import pan.wfapi
logger = common.logging.getLogger().getChild('retrieveWildFireReport')
# logger.setLevel(common.logging.INFO)
if environment.run_by_splunk():
try:
import splunk.Intersplunk # so you can interact with Splunk
import splunk.entity as entity # for splunk config info
except Exception as e:
# Handle exception to produce logs to python.log
logger.error("Error during import")
logger.error(traceback.format_exc())
raise e
def get_cli_args():
"""Used if this script is run from the CLI
This function is not used if script run from Splunk searchbar
"""
# Setup the argument parser
parser = argparse.ArgumentParser(description="Download a WildFire Report using the WildFire API")
# parser.add_argument('-v', '--verbose', action='store_true', help="Verbose")
parser.add_argument('apikey', help="API Key from https://wildfire.paloaltonetworks.com")
parser.add_argument('file_digest', help="Hash of the file for the report")
options = parser.parse_args()
return options
def retrieveWildFireData(apikey, file_digest):
wfapi = pan.wfapi.PanWFapi(api_key=apikey)
wfapi.report(file_digest)
return wfapi.response_body
def main_cli():
# Get command line arguments
options = get_cli_args()
# debug = options.verbose
# logger = common.logging.getLogger()
# common.logging.basicConfig(level=common.logging.INFO)
# if debug:
# logger.setLevel(common.logging.DEBUG)
# logger.info("Verbose logging enabled")
# Grab WildFire data
data = retrieveWildFireData(options.apikey, options.file_digest)
# Parse XML for fields
print(data)
sys.exit(0)
def main_splunk():
# Get arguments passed to command on Splunk searchbar
args, kwargs = splunk.Intersplunk.getKeywordsAndOptions()
debug = common.check_debug(kwargs)
# Setup the logger. $SPLUNK_HOME/var/log/splunk/python.log
logger = common.logging.getLogger()
if debug:
logger.setLevel(common.logging.DEBUG)
# Results contains the data from the search results and settings contains
# the sessionKey that we can use to talk to splunk
logger.debug("Getting search results and settings from Splunk")
results, unused1, settings = splunk.Intersplunk.getOrganizedResults()
# Get the sessionKey
sessionKey = settings['sessionKey']
# If there are logs to act on, get the Panorama user and password from Splunk using the sessionKey
if len(results) == 0:
logger.debug("WildFire Report Retrieval: No search results. Nothing to do.")
splunk.Intersplunk.outputResults(results)
sys.exit(0)
logger.debug("Getting WildFire APIKey from encrypted store")
wf_apikey = common.get_wildfire_apikey(sessionKey)
# Get a wildfire report for each row
logger.debug("Getting WildFire reports for %s search results" % len(results))
for idx, result in enumerate(results):
# Check to see if the result has the necessary fields
if 'file_digest' in result:
logger.debug("Getting WildFire report for result # %s with file_digest: %s" % (idx, result['file_digest']))
try:
# Get the report
wfReportXml = retrieveWildFireData(wf_apikey, result['file_digest']).strip()
result['wildfire_report'] = wfReportXml
            except Exception:
logger.warn("Error retrieving WildFire report for file_digest: %s" % result['file_digest'])
# Log the result row in case of an exception
logger.info("Log with error: %s" % result)
stack = traceback.format_exc()
# log the stack information
logger.warn(stack)
else:
logger.debug("Required fields missing from result # %s."
"Expected the following fields: file_digest" % idx)
# output the complete results sent back to splunk
splunk.Intersplunk.outputResults(results)
if __name__ == "__main__":
if environment.run_by_splunk():
main_splunk()
else:
main_cli()
|
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks
|
SplunkforPaloAltoNetworks/bin/retrieveWildFireReport.py
|
Python
|
isc
| 5,572
|
[
"Brian"
] |
c356a97be83082c1a54e3812770b4eefc09c2f5e995578ef547c33dd8efb56c5
|
#! /usr/bin/env python
#
# plot_connections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
## Python script that creates a set of Mayavi2 graphs that gives
## an overview of the connection profile of a layer.
## Mayavi2 is required to run this script!
# The histogram2d function must be loaded before calling the
# functions in this file.
#execfile(plotting_folder+'histogram2d.py')
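# Note: the code below assumes histogram2d(data, min, max, bins) returns a
# three-element sequence [X, Y, counts] of 2-D arrays (meshgrid style);
# plot_connections() indexes it as histogram_data[0..2] and normalises the
# counts before plotting.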
import numpy as np
import enthought.mayavi.mlab as mlab # Load Mayavi2
## Function that checks if a node satisfies certain criterias.
## Returns true if that is the case.
##
## Input:
## gid - node
## params - dictionary with specification of layer and model type
##
def check_node(gid, params):
    import nest  # assumed to be importable here (PyNEST); this helper queries NEST directly
    if 'layer' in params:
if nest.GetLayer(gid) != params['layer']:
return False
if 'model' in params:
if nest.GetStatus(gid)[0]['model'] != params['model']:
return False
return True
##
## Creates a Mayavi2 plot of connection data.
##
## Input:
## data_file - data file created with the PrintLayerConnections command
## min/max - lower left and upper right corner - [x, y]
## bins - number of histogram bins - [x_number, y_number]
##                 should in most cases be quite a lot smaller than the number
## of rows and columns in the layer
## params - restriction on connection type (see check_node(..) above)
## output - output directory
##
## Example: plot_connections('out.txt', [-1.0, -1.0], [1.0, 1.0], [9, 9],
##                           {'model': 'iaf_neuron'}, output='folder/')
##
def plot_connections(data_file, min, max, bins,
params=None, output=''):
print("Creating connection profile graphs.")
# Read data points from file
f = open(data_file, 'r')
# Ignore first line
f.readline()
data = []
for line in f:
temp = line.split(' ')
        if params is not None:
            if check_node([int(temp[1])], params):
                data.append([float(temp[4]), float(temp[5])])
        else:
            data.append([float(temp[4]), float(temp[5])])
# Create histogram data based on the retrieved data.
histogram_data = histogram2d(data, min, max, bins)
# Open a new Mayavi2 figure
f = mlab.figure()
# Convert histogram bin count to relative densities.
m = np.max(histogram_data[2].max(axis=0))
histogram_data[2] = histogram_data[2]/float(m)
# Plot histogram data
mlab.mesh(histogram_data[0], histogram_data[1], histogram_data[2])
#surf(histogram_data[0], histogram_data[1], histogram_data[2])
# Create and save various viewpoints of histogram figure
mlab.axes(z_axis_visibility=False)
mlab.view(azimuth=0, elevation=90) # X
mlab.savefig(output+"xaxis.eps", size=[600,400])
mlab.view(azimuth=90, elevation=270) # Y
mlab.savefig(output+"yaxis.eps", size=[600,400])
mlab.view(azimuth=45, elevation=45) # Perspective
mlab.savefig(output+"perspective.eps", size=[600,400])
mlab.colorbar(orientation="vertical")
mlab.view(azimuth=0, elevation=0) # Z
mlab.savefig(output+"above.eps", size=[600,400])
|
QJonny/CyNest
|
topology/doc/old_doc/plotting_tools/plot_connections.py
|
Python
|
gpl-2.0
| 3,753
|
[
"Mayavi"
] |
505c4f98b9521fa4fca1aa2258f985c3cfd6493ae3511d75ade8ce3927e9dcf7
|
#!/usr/bin/env python3
#
# Copyright (c) 2022, NVIDIA
#
import sys
if sys.version_info >= (3, 3):
from time import process_time as timer
else:
from timeit import default_timer as timer
import numpy as np
def nttk_sd_t_s1_1(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
for h1,h2,h3,p4,p5,p6 in np.ndindex((d1,d2,d3,d4,d5,d6)):
triplesx[h3,h2,h1,p6,p5,p4] += t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
def nttk_sd_t_s1_2(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
for h1,h2,h3,p4,p5,p6 in np.ndindex((d1,d2,d3,d4,d5,d6)):
triplesx[h3,h1,h2,p6,p5,p4] -= t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
def nttk_sd_t_s1_3(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
for h1,h2,h3,p4,p5,p6 in np.ndindex((d1,d2,d3,d4,d5,d6)):
triplesx[h1,h3,h2,p6,p5,p4] += t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
def nttk_sd_t_s1_4(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
for h1,h2,h3,p4,p5,p6 in np.ndindex((d1,d2,d3,d4,d5,d6)):
triplesx[h3,h2,h1,p6,p4,p5] -= t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
def nttk_sd_t_s1_5(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
for h1,h2,h3,p4,p5,p6 in np.ndindex((d1,d2,d3,d4,d5,d6)):
triplesx[h3,h1,h2,p6,p4,p5] += t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
def nttk_sd_t_s1_6(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
for h1,h2,h3,p4,p5,p6 in np.ndindex((d1,d2,d3,d4,d5,d6)):
triplesx[h1,h3,h2,p6,p4,p5] -= t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
def nttk_sd_t_s1_7(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
for h1,h2,h3,p4,p5,p6 in np.ndindex((d1,d2,d3,d4,d5,d6)):
triplesx[h3,h2,h1,p4,p6,p5] += t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
def nttk_sd_t_s1_8(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
for h1,h2,h3,p4,p5,p6 in np.ndindex((d1,d2,d3,d4,d5,d6)):
triplesx[h3,h1,h2,p4,p6,p5] -= t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
def nttk_sd_t_s1_9(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
for h1,h2,h3,p4,p5,p6 in np.ndindex((d1,d2,d3,d4,d5,d6)):
triplesx[h1,h3,h2,p4,p6,p5] += t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
def nttk_sd_t_d1_1(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,h7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h3,h2,h1,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
def nttk_sd_t_d1_2(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,h7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h3,h1,h2,p6,p5,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
def nttk_sd_t_d1_3(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,h7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h1,h3,h2,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
def nttk_sd_t_d1_4(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,h7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h3,h2,h1,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
def nttk_sd_t_d1_5(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,h7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h3,h1,h2,p5,p4,p6] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
def nttk_sd_t_d1_6(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,h7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h1,h3,h2,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
def nttk_sd_t_d1_7(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,h7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h3,h2,h1,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
def nttk_sd_t_d1_8(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,h7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h3,h1,h2,p5,p6,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
def nttk_sd_t_d1_9(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,h7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h1,h3,h2,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
def nttk_sd_t_d2_1(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,p7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h3,h2,h1,p6,p5,p4] -= t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
def nttk_sd_t_d2_2(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,p7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h2,h1,h3,p6,p5,p4] -= t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
def nttk_sd_t_d2_3(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,p7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h2,h3,h1,p6,p5,p4] += t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
def nttk_sd_t_d2_4(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,p7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h3,h2,h1,p6,p4,p5] += t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
def nttk_sd_t_d2_5(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,p7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h2,h1,h3,p6,p4,p5] += t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
def nttk_sd_t_d2_6(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,p7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h2,h3,h1,p6,p4,p5] -= t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
def nttk_sd_t_d2_7(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,p7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h3,h2,h1,p4,p6,p5] -= t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
def nttk_sd_t_d2_8(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,p7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h2,h1,h3,p4,p6,p5] -= t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
def nttk_sd_t_d2_9(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
for h1,h2,h3,p4,p5,p6,p7 in np.ndindex((d1,d2,d3,d4,d5,d6,d7)):
triplesx[h2,h3,h1,p4,p6,p5] += t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
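# The tensor_* kernels below use numpy.einsum's interleaved ("sublist") calling
# convention, in which lists of integers label the axes of each operand and of
# the output array.  For example, np.einsum(a, [0, 1], b, [1, 2], [0, 2]) is the
# matrix product of a and b; here the labels 1..7 mirror the h1..h3/p4..p7 loop
# indices used by the explicit-loop (nttk_*) kernels above.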
def tensor_sd_t_s1_1(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
#triplesx[h3,h2,h1,p6,p5,p4] += t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
triplesx += np.einsum(t1sub,[4,1],v2sub,[3,2,6,5],[3,2,1,6,5,4])
def tensor_sd_t_s1_2(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
#triplesx[h3,h1,h2,p6,p5,p4] -= t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
triplesx -= np.einsum(t1sub,[4,1],v2sub,[3,2,6,5],[3,1,2,6,5,4])
def tensor_sd_t_s1_3(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
#triplesx[h1,h3,h2,p6,p5,p4] += t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
triplesx += np.einsum(t1sub,[4,1],v2sub,[3,2,6,5],[1,3,2,6,5,4])
def tensor_sd_t_s1_4(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
#triplesx[h3,h2,h1,p6,p4,p5] -= t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
triplesx -= np.einsum(t1sub,[4,1],v2sub,[3,2,6,5],[3,2,1,6,4,5])
def tensor_sd_t_s1_5(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
#triplesx[h3,h1,h2,p6,p4,p5] += t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
triplesx += np.einsum(t1sub,[4,1],v2sub,[3,2,6,5],[3,1,2,6,4,5])
def tensor_sd_t_s1_6(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
#triplesx[h1,h3,h2,p6,p4,p5] -= t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
triplesx -= np.einsum(t1sub,[4,1],v2sub,[3,2,6,5],[1,3,2,6,4,5])
def tensor_sd_t_s1_7(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
#triplesx[h3,h2,h1,p4,p6,p5] += t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
triplesx += np.einsum(t1sub,[4,1],v2sub,[3,2,6,5],[3,2,1,4,6,5])
def tensor_sd_t_s1_8(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
#triplesx[h3,h1,h2,p4,p6,p5] -= t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
triplesx -= np.einsum(t1sub,[4,1],v2sub,[3,2,6,5],[3,1,2,4,6,5])
def tensor_sd_t_s1_9(d3,d2,d1,d6,d5,d4,triplesx,t1sub,v2sub):
#triplesx[h1,h3,h2,p4,p6,p5] += t1sub[p4,h1] * v2sub[h3,h2,p6,p5]
triplesx += np.einsum(t1sub,[4,1],v2sub,[3,2,6,5],[1,3,2,4,6,5])
def tensor_sd_t_d1_1(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h3,h2,h1,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
triplesx -= np.einsum(t2sub,[7,4,5,1],v2sub,[3,2,6,7],[3,2,1,6,5,4])
def tensor_sd_t_d1_2(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h3,h1,h2,p6,p5,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
triplesx += np.einsum(t2sub,[7,4,5,1],v2sub,[3,2,6,7],[3,1,2,6,5,4])
def tensor_sd_t_d1_3(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h1,h3,h2,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
triplesx -= np.einsum(t2sub,[7,4,5,1],v2sub,[3,2,6,7],[1,3,2,6,5,4])
def tensor_sd_t_d1_4(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h3,h2,h1,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
triplesx -= np.einsum(t2sub,[7,4,5,1],v2sub,[3,2,6,7],[3,2,1,5,4,6])
def tensor_sd_t_d1_5(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h3,h1,h2,p5,p4,p6] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
triplesx += np.einsum(t2sub,[7,4,5,1],v2sub,[3,2,6,7],[3,1,2,5,4,6])
def tensor_sd_t_d1_6(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h1,h3,h2,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
triplesx -= np.einsum(t2sub,[7,4,5,1],v2sub,[3,2,6,7],[1,3,2,5,4,6])
def tensor_sd_t_d1_7(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h3,h2,h1,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
triplesx += np.einsum(t2sub,[7,4,5,1],v2sub,[3,2,6,7],[3,2,1,5,6,4])
def tensor_sd_t_d1_8(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h3,h1,h2,p5,p6,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
triplesx -= np.einsum(t2sub,[7,4,5,1],v2sub,[3,2,6,7],[3,1,2,5,6,4])
def tensor_sd_t_d1_9(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h1,h3,h2,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
triplesx += np.einsum(t2sub,[7,4,5,1],v2sub,[3,2,6,7],[1,3,2,5,6,4])
def tensor_sd_t_d2_1(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h3,h2,h1,p6,p5,p4] -= t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
triplesx -= np.einsum(t2sub,[7,4,1,2],v2sub,[7,3,6,5],[3,2,1,6,5,4])
def tensor_sd_t_d2_2(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h2,h1,h3,p6,p5,p4] -= t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
triplesx -= np.einsum(t2sub,[7,4,1,2],v2sub,[7,3,6,5],[2,1,3,6,5,4])
def tensor_sd_t_d2_3(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h2,h3,h1,p6,p5,p4] += t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
triplesx += np.einsum(t2sub,[7,4,1,2],v2sub,[7,3,6,5],[2,3,1,6,5,4])
def tensor_sd_t_d2_4(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h3,h2,h1,p6,p4,p5] += t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
triplesx += np.einsum(t2sub,[7,4,1,2],v2sub,[7,3,6,5],[3,2,1,6,4,5])
def tensor_sd_t_d2_5(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h2,h1,h3,p6,p4,p5] += t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
triplesx += np.einsum(t2sub,[7,4,1,2],v2sub,[7,3,6,5],[2,1,3,6,4,5])
def tensor_sd_t_d2_6(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h2,h3,h1,p6,p4,p5] -= t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
triplesx -= np.einsum(t2sub,[7,4,1,2],v2sub,[7,3,6,5],[2,3,1,6,4,5])
def tensor_sd_t_d2_7(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h3,h2,h1,p4,p6,p5] -= t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
triplesx -= np.einsum(t2sub,[7,4,1,2],v2sub,[7,3,6,5],[3,2,1,4,6,5])
def tensor_sd_t_d2_8(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h2,h1,h3,p4,p6,p5] -= t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
triplesx -= np.einsum(t2sub,[7,4,1,2],v2sub,[7,3,6,5],[2,1,3,4,6,5])
def tensor_sd_t_d2_9(d3,d2,d1,d6,d5,d4,d7,triplesx,t2sub,v2sub):
#triplesx[h2,h3,h1,p4,p6,p5] += t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
triplesx += np.einsum(t2sub,[7,4,1,2],v2sub,[7,3,6,5],[2,3,1,4,6,5])
def main():
print("NTTK Python")
reps = 3
tilesize = 16
kernel = -1
if len(sys.argv) > 1:
tilesize = int(sys.argv[1])
if len(sys.argv) > 2:
kernel = int(sys.argv[2])
tile6 = tilesize**6
tile7 = tilesize**7
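    # Flop counts used for the GF/s figures below: each s1 kernel does one
    # multiply and one add per (h1,h2,h3,p4,p5,p6) tuple, i.e. 2*tilesize**6
    # flops; the d1/d2 kernels contract over an extra h7/p7 index, giving
    # 2*tilesize**7 flops.  GF/s is then flops * 1e-9 / elapsed seconds.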
print("testing NWChem CCSD(T) kernels with tilesize ", tilesize)
tt0 = timer()
t1 = np.random.rand(tilesize,tilesize)
t2 = np.random.rand(tilesize,tilesize,tilesize,tilesize)
v2 = np.random.rand(tilesize,tilesize,tilesize,tilesize)
tt1 = timer()
print("allocation and initialization time =",(tt1-tt0)*1e-9," seconds")
# TENSOR
print("STARTING TENSOR KERNELS");
t3t = np.zeros((tilesize,tilesize,tilesize,tilesize,tilesize,tilesize),dtype=np.float64)
for i in range(reps):
totalflops = 0
ttt0 = timer()
if kernel<0 or kernel==1 :
tt0 = timer()
tensor_sd_t_s1_1(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_1", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==2 :
tt0 = timer()
tensor_sd_t_s1_2(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_2", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==3 :
tt0 = timer()
tensor_sd_t_s1_3(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_3", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==4 :
tt0 = timer()
tensor_sd_t_s1_4(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_4", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==5 :
tt0 = timer()
tensor_sd_t_s1_5(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_5", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==6 :
tt0 = timer()
tensor_sd_t_s1_6(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_6", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==7 :
tt0 = timer()
tensor_sd_t_s1_7(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_7", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==8 :
tt0 = timer()
tensor_sd_t_s1_8(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_8", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==9 :
tt0 = timer()
tensor_sd_t_s1_9(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_9", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==1 :
tt0 = timer()
tensor_sd_t_d1_1(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_1", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==2 :
tt0 = timer()
tensor_sd_t_d1_2(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_2", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==3 :
tt0 = timer()
tensor_sd_t_d1_3(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_3", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==4 :
tt0 = timer()
tensor_sd_t_d1_4(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_4", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==5 :
tt0 = timer()
tensor_sd_t_d1_5(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_5", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==6 :
tt0 = timer()
tensor_sd_t_d1_6(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_6", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==7 :
tt0 = timer()
tensor_sd_t_d1_7(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_7", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==8 :
tt0 = timer()
tensor_sd_t_d1_8(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_8", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==9 :
tt0 = timer()
tensor_sd_t_d1_9(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_9", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==1 :
tt0 = timer()
tensor_sd_t_d2_1(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_1", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==2 :
tt0 = timer()
tensor_sd_t_d2_2(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_2", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==3 :
tt0 = timer()
tensor_sd_t_d2_3(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_3", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==4 :
tt0 = timer()
tensor_sd_t_d2_4(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_4", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==5 :
tt0 = timer()
tensor_sd_t_d2_5(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_5", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==6 :
tt0 = timer()
tensor_sd_t_d2_6(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_6", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==7 :
tt0 = timer()
tensor_sd_t_d2_7(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_7", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==8 :
tt0 = timer()
tensor_sd_t_d2_8(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_8", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==9 :
tt0 = timer()
tensor_sd_t_d2_9(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3t, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_9", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
ttt1 = timer()
dt = ttt1-ttt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"total", dt, 1e-9*totalflops/dt))
# LOOPS
print("STARTING LOOPS KERNELS");
t3l = np.zeros((tilesize,tilesize,tilesize,tilesize,tilesize,tilesize),dtype=np.float64)
for i in range(reps):
totalflops = 0
ttt0 = timer()
if kernel<0 or kernel==1 :
tt0 = timer()
nttk_sd_t_s1_1(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_1", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==2 :
tt0 = timer()
nttk_sd_t_s1_2(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_2", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==3 :
tt0 = timer()
nttk_sd_t_s1_3(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_3", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==4 :
tt0 = timer()
nttk_sd_t_s1_4(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_4", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==5 :
tt0 = timer()
nttk_sd_t_s1_5(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_5", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==6 :
tt0 = timer()
nttk_sd_t_s1_6(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_6", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==7 :
tt0 = timer()
nttk_sd_t_s1_7(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_7", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==8 :
tt0 = timer()
nttk_sd_t_s1_8(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_8", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==9 :
tt0 = timer()
nttk_sd_t_s1_9(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t1, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_s1_9", dt, 2e-9*tile6/dt))
totalflops += 2*tile6
if kernel<0 or kernel==1 :
tt0 = timer()
nttk_sd_t_d1_1(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_1", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==2 :
tt0 = timer()
nttk_sd_t_d1_2(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_2", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==3 :
tt0 = timer()
nttk_sd_t_d1_3(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_3", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==4 :
tt0 = timer()
nttk_sd_t_d1_4(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_4", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==5 :
tt0 = timer()
nttk_sd_t_d1_5(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_5", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==6 :
tt0 = timer()
nttk_sd_t_d1_6(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_6", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==7 :
tt0 = timer()
nttk_sd_t_d1_7(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_7", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==8 :
tt0 = timer()
nttk_sd_t_d1_8(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_8", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==9 :
tt0 = timer()
nttk_sd_t_d1_9(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d1_9", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==1 :
tt0 = timer()
nttk_sd_t_d2_1(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_1", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==2 :
tt0 = timer()
nttk_sd_t_d2_2(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_2", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==3 :
tt0 = timer()
nttk_sd_t_d2_3(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_3", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==4 :
tt0 = timer()
nttk_sd_t_d2_4(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_4", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==5 :
tt0 = timer()
nttk_sd_t_d2_5(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_5", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==6 :
tt0 = timer()
nttk_sd_t_d2_6(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_6", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==7 :
tt0 = timer()
nttk_sd_t_d2_7(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_7", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==8 :
tt0 = timer()
nttk_sd_t_d2_8(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_8", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
if kernel<0 or kernel==9 :
tt0 = timer()
nttk_sd_t_d2_9(tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, tilesize, t3l, t2, v2)
tt1 = timer()
dt = tt1-tt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"sd_t_d2_9", dt, 2e-9*tile7/dt))
totalflops += 2*tile7
ttt1 = timer()
dt = ttt1-ttt0
print("{:1}: {:.10} time = {:10.5} s GF/s = {:10.5}".format(i,"total", dt, 1e-9*totalflops/dt))
error = np.linalg.norm(np.reshape(t3l-t3t,tile6),ord=1)
print("diff = ",error)
print("END")
if __name__ == '__main__':
main()
|
jeffhammond/nwchem-tce-triples-kernels
|
new-languages/nttk.py
|
Python
|
apache-2.0
| 32,514
|
[
"NWChem"
] |
2161f0292a50c01fa73ea2c999fc48caed8aaa3d748948c9cef43d96c11b6509
|
#!/usr/bin/python
# Copyright 2011-2012 Erik Reckase <e.reckase@gmail.com>,
# Steven Robertson <steven@strobe.cc>.
import numpy as np
from copy import deepcopy
from itertools import izip_longest
import spectypes
import specs
from use import Wrapper
from util import get, json_encode, resolve_spec, flatten, unflatten
import variations
def node_to_anim(gdb, node, half):
node = resolve(gdb, node)
if half:
osrc, odst = -0.25, 0.25
else:
osrc, odst = 0, 1
src = apply_temporal_offset(node, osrc)
dst = apply_temporal_offset(node, odst)
edge = dict(blend=dict(duration=odst-osrc, xform_sort='natural'))
return blend(src, dst, edge)
def edge_to_anim(gdb, edge):
edge = resolve(gdb, edge)
src, osrc = _split_ref_id(edge['link']['src'])
dst, odst = _split_ref_id(edge['link']['dst'])
src = apply_temporal_offset(resolve(gdb, gdb.get(src)), osrc)
dst = apply_temporal_offset(resolve(gdb, gdb.get(dst)), odst)
return blend(src, dst, edge)
def resolve(gdb, item):
"""
Given an item, recursively retrieve its base items, then merge according
to type. Returns the merged dict.
"""
is_edge = (item['type'] == 'edge')
spec = specs.toplevels[item['type']]
def go(i):
if i.get('base') is not None:
return go(gdb.get(i['base'])) + [i]
return [i]
items = map(flatten, go(item))
out = {}
for k in set(ik for i in items for ik in i.keys()):
sp = resolve_spec(spec, k.split('.'))
vs = [i.get(k) for i in items if k in i]
# TODO: dict and list negation; early-stage removal of negated knots?
if is_edge and isinstance(sp, (spectypes.Spline, spectypes.List)):
r = sum(vs, [])
else:
r = vs[-1]
out[k] = r
return unflatten(out)
def _split_ref_id(s):
sp = s.split('@')
if len(sp) == 1:
        return sp[0], 0
return sp[0], float(sp[1])
def apply_temporal_offset(node, offset=0):
"""
Given a ``node`` dict, return a node with all periodic splines rotated by
``offset * velocity``, with the same velocity.
"""
class TemporalOffsetWrapper(Wrapper):
def wrap_spline(self, path, spec, val):
if spec.period is not None and isinstance(val, list) and val[1]:
position, velocity = val
return [position + offset * velocity, velocity]
return val
wr = TemporalOffsetWrapper(node)
return wr.visit(wr)
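# Illustration of apply_temporal_offset: a periodic spline stored as
# [position, velocity], e.g. [0.0, 2.0], becomes [position + offset * velocity,
# velocity], so with offset=0.25 the knot is rotated to [0.5, 2.0].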
def blend(src, dst, edit={}):
"""
Blend two nodes to produce an animation.
``src`` and ``dst`` are the source and destination node specs for the
animation. These should be plain node dicts (hierarchical, pre-merged,
and adjusted for loop temporal offset).
    ``edit`` is an edge dict, also hierarchical and pre-merged. (It can be
empty, in violation of the spec, to support rendering straight from nodes
without having to insert anything into the genome database.)
Returns the animation spec as a plain dict.
"""
# By design, the blend element will contain only scalar values (no
# splines or hierarchy), so this can be done blindly
opts = {}
for d in src, dst, edit:
opts.update(d.get('blend', {}))
opts = Wrapper(opts, specs.blend)
blended = merge_nodes(specs.node, src, dst, edit, opts.duration)
name_map = sort_xforms(src['xforms'], dst['xforms'], opts.xform_sort,
explicit=opts.xform_map)
blended['xforms'] = {}
for (sxf_key, dxf_key) in name_map:
bxf_key = (sxf_key or 'pad') + '_' + (dxf_key or 'pad')
xf_edits = merge_edits(specs.xform,
get(edit, {}, 'xforms', 'src', sxf_key),
get(edit, {}, 'xforms', 'dst', dxf_key))
        sxf = src['xforms'].get(sxf_key)
dxf = dst['xforms'].get(dxf_key)
if sxf_key == 'dup':
sxf = dxf
xf_edits.setdefault('weight', []).extend([0, 0])
if dxf_key == 'dup':
dxf = sxf
xf_edits.setdefault('weight', []).extend([1, 0])
blended['xforms'][bxf_key] = blend_xform(
src['xforms'].get(sxf_key),
dst['xforms'].get(dxf_key),
xf_edits, opts.duration)
if 'final_xform' in src or 'final_xform' in dst:
blended['final_xform'] = blend_xform(src.get('final_xform'),
dst.get('final_xform'), edit.get('final_xform'),
opts.duration, True)
# TODO: write 'info' section
# TODO: palflip
blended['type'] = 'animation'
blended.setdefault('time', {})['duration'] = opts.duration
return blended
def merge_edits(sv, av, bv):
"""
Merge the values of ``av`` and ``bv`` according to the spec ``sv``.
"""
if isinstance(sv, (dict, spectypes.Map)):
av, bv = av or {}, bv or {}
getsv = lambda k: sv.type if isinstance(sv, spectypes.Map) else sv[k]
return dict([(k, merge_edits(getsv(k), av.get(k), bv.get(k)))
for k in set(av.keys() + bv.keys())])
elif isinstance(sv, (spectypes.List, spectypes.Spline)):
return (av or []) + (bv or [])
else:
return bv if bv is not None else av
def split_node_val(spl, val):
if val is None:
return spl.default, 0
if isinstance(val, (int, float)):
return val, 0
return val
def tospline(spl, src, dst, edit, duration):
sp, sv = split_node_val(spl, src) # position, velocity
dp, dv = split_node_val(spl, dst)
# For variation parameters, copy missing values instead of using defaults
if spl.var:
if src is None:
sp = dp
if dst is None:
dp = sp
edit = dict(zip(edit[::2], edit[1::2])) if edit else {}
e0, e1 = edit.pop(0, None), edit.pop(1, None)
edit = list(sum([(k, v) for k, v in edit.items() if v is not None], ()))
if spl.period:
# Periodic extension: compute an appropriate number of loops based on
# the angular velocities at the endpoints, and extend the destination
# position by the appropriate number of periods.
sign = lambda x: 1. if x >= 0 else -1.
movement = duration * (sv + dv) / (2.0 * spl.period)
angdiff = (float(dp - sp) / spl.period) % (sign(movement))
dp = sp + (round(movement - angdiff) + angdiff) * spl.period
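        # Worked example (assumed values): period=1, sp=0.25, dp=0.75, sv=dv=2,
        # duration=1 give movement=2.0 and angdiff=0.5, so dp becomes 2.75,
        # i.e. 2.5 periods of travel that still lands on the original phase 0.75.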
# Endpoint override: allow adjusting the number of loops as calculated
# above by locking to the nearest value with the same mod (i.e. the
# nearest value which will still line up with the node)
if e0 is not None:
sp += round(float(e0 - sp) / spl.period) * spl.period
if e1 is not None:
dp += round(float(e1 - dp) / spl.period) * spl.period
if edit or sv or dv or e0 or e1:
return [sp, sv, dp, dv] + edit
if sp != dp:
return [sp, dp]
return sp
def trace(k, cond=True):
print k,
return k
def merge_nodes(sp, src, dst, edit, duration):
if isinstance(sp, dict):
src, dst, edit = [x or {} for x in src, dst, edit]
return dict([(k, merge_nodes(sp[k], src.get(k),
dst.get(k), edit.get(k), duration))
for k in set(src.keys() + dst.keys() + edit.keys()) if k in sp])
elif isinstance(sp, spectypes.Spline):
return tospline(sp, src, dst, edit, duration)
elif isinstance(sp, spectypes.List):
if isinstance(sp.type, spectypes.Palette):
if src is not None: src = [[0] + src]
if dst is not None: dst = [[1] + dst]
return (src or []) + (dst or []) + (edit or [])
else:
return edit if edit is not None else dst if dst is not None else src
def blend_xform(sxf, dxf, edits, duration, isfinal=False):
if sxf is None:
sxf = padding_xform(dxf, isfinal)
if dxf is None:
dxf = padding_xform(sxf, isfinal)
return merge_nodes(specs.xform, sxf, dxf, edits, duration)
# If xin contains any of these, use the inverse identity
hole_variations = ('spherical ngon julian juliascope polar '
'wedge_sph wedge_julia bipolar').split()
# These variations are identity functions at their default values
ident_variations = ('rectangles fan2 blob perspective super_shape').split()
def padding_xform(xf, isfinal):
vars = {}
xout = {'variations': vars, 'pre_affine': {'angle': 45}}
if isfinal:
xout.update(weight=0, color_speed=0)
if get(xf, 45, 'pre_affine', 'spread') > 90:
xout['pre_affine'] = {'angle': 135, 'spread': 135}
if get(xf, 45, 'post_affine', 'spread') > 90:
xout['post_affine'] = {'angle': 135, 'spread': 135}
for k in xf.get('variations', {}):
if k in hole_variations:
# Attempt to correct for some known-ugly variations.
xout['pre_affine']['angle'] += 180
vars['linear'] = dict(weight=-1)
return xout
if k in ident_variations:
# Try to use non-linear variations whenever we can
vars[k] = dict([(vk, vv.default)
for vk, vv in variations.var_params[k].items()])
if vars:
n = float(len(vars))
for k in vars:
vars[k]['weight'] = 1 / n
else:
vars['linear'] = dict(weight=1)
return xout
def halfhearted_human_sort_key(key):
try:
return int(key)
except ValueError:
return key
def sort_xforms(sxfs, dxfs, sortmethod, explicit=[]):
# Walk through the explicit pairs, popping previous matches from the
# forward (src=>dst) and reverse (dst=>src) maps
fwd, rev = {}, {}
for sx, dx in explicit:
if sx not in ("pad", "dup") and sx in fwd:
rev.pop(fwd.pop(sx, None), None)
if dx not in ("pad", "dup") and dx in rev:
fwd.pop(rev.pop(dx, None), None)
fwd[sx] = dx
rev[dx] = sx
for sd in sorted(fwd.items()):
yield sd
# Classify the remaining xforms. Currently we classify based on whether
# the pre- and post-affine transforms are flipped
scl, dcl = {}, {}
for (cl, xfs, exp) in [(scl, sxfs, fwd), (dcl, dxfs, rev)]:
for k, v in xfs.items():
if k in exp: continue
xcl = (get(v, 45, 'pre_affine', 'spread') > 90,
get(v, 45, 'post_affine', 'spread') > 90)
cl.setdefault(xcl, []).append(k)
def sort(keys, dct, snd=False):
if sortmethod in ('weight', 'weightflip'):
sortf = lambda k: dct[k].get('weight', 0)
elif sortmethod == 'color':
sortf = lambda k: dct[k].get('color', 0)
else:
# 'natural' key-based sort
sortf = halfhearted_human_sort_key
return sorted(keys, key=sortf)
for cl in set(scl.keys() + dcl.keys()):
ssort = sort(scl.get(cl, []), sxfs)
dsort = sort(dcl.get(cl, []), dxfs)
if sortmethod == 'weightflip':
dsort = reversed(dsort)
for sd in izip_longest(ssort, dsort):
yield sd
def checkpalflip(gnm):
if 'final' in gnm['xforms']:
f = gnm['xforms']['final']
fcv, fcsp = f['color'], f['color_speed']
else:
fcv, fcsp = SplEval(0), SplEval(0)
sansfinal = [v for k, v in gnm['xforms'].items() if k != 'final']
lc, rc = [np.array([v['color'](t) * (1 - fcsp(t)) + fcv(t) * fcsp(t)
for v in sansfinal]) for t in (0, 1)]
rcrv = 1 - rc
# TODO: use spline integration instead of L2
dens = np.array([np.hypot(v['weight'](0), v['weight'](1))
for v in sansfinal])
return np.sum(np.abs(dens * (rc - lc))) > np.sum(np.abs(dens * (rcrv - lc)))
def palflip(gnm):
for v in gnm['xforms'].values():
c = v['color']
v['color'] = SplEval([0, c(0), 1, 1 - c(1)], c(0, 1), -c(1, 1))
pal = genome.palette_decode(gnm['palettes'][1])
gnm['palettes'][1] = genome.palette_encode(np.flipud(pal))
if __name__ == "__main__":
import sys, json
a, b, c = [json.load(open(f+'.json')) for f in 'abc']
print json_encode(blend(a, b, c))
|
stevenrobertson/cuburn
|
cuburn/genome/blend.py
|
Python
|
gpl-2.0
| 12,134
|
[
"VisIt"
] |
2620a268991e7a85d9e52d3c18828f83b3e9e7ddaecb3c769c1011579ed6fe14
|
# to be imported to access modbus registers as analogue io
# 03.04.2014 neeme
# 04.04.2014 it works, without periodical execution and without access by svc reg
# 06.04.2014 sequential register read for optimized reading, done
# 14.04.2014 mb[mbi] (multiple modbus connections) support. NOT READY!
# 16.04.2014 fixed mts problem, service messaging ok
from droidcontroller.sqlgeneral import * # SQLgeneral; also brings in time, mb, conn, etc.
s=SQLgeneral() # sql connection
import logging
log = logging.getLogger(__name__)
class Achannels(SQLgeneral): # handles aichannels and aochannels tables, using mb[] created by parent
''' Access to io by modbus analogue register addresses (and also via services?).
Modbus client must be opened before.
Able to sync input and output channels and accept changes to service members by their sta_reg code
'''
def __init__(self, in_sql = 'aichannels.sql', out_sql = 'aochannels.sql', readperiod = 10, sendperiod = 30): # period for mb reading, renotify for udpsend
self.setReadPeriod(readperiod)
self.setSendPeriod(sendperiod)
self.in_sql = in_sql.split('.')[0]
self.out_sql = out_sql.split('.')[0]
#self.s = SQLgeneral()
self.Initialize()
def setReadPeriod(self, invar):
''' Set the refresh period, executes sync if time from last read was earlier than period ago '''
self.readperiod = invar
    def setSendPeriod(self, invar):
        ''' Set the reporting (renotify/udpsend) period '''
        self.sendperiod = invar
def sqlread(self,table):
#self.s.sqlread(table) # read dichannels
s.sqlread(table)
def Initialize(self): # before using this create s=SQLgeneral()
''' initialize delta t variables, create tables and modbus connection '''
self.ts = round(time.time(),1)
self.ts_read = self.ts # time of last read
self.ts_send = self.ts -150 # time of last reporting
self.sqlread(self.in_sql) # read aichannels
self.sqlread(self.out_sql) # read aochannels if exist
def read_ai_grp(self,mba,regadd,count,mbi=0): # using self,in_sql as the table to store in. mbi - modbus channel index
''' Read sequential register group and store raw into table self.in_sql. Inside transaction! '''
msg='reading data for aichannels group from mbi '+str(mbi)+', mba '+str(mba)+', regadd '+str(regadd)+', count '+str(count)
#print(msg) # debug
if count>0 and mba != 0:
try:
if mb[mbi]:
result = mb[mbi].read(mba, regadd, count=count, type='h') # client.read_holding_registers(address=regadd, count=1, unit=mba)
except:
print('read_ai_grp: mb['+str(mbi)+'] missing, device with mba '+str(mba)+' not defined in devices.sql?')
traceback.print_exc()
return 2
else:
print('invalid parameters for read_ai_grp()!',mba,regadd,count)
return 2
if result != None:
try:
for i in range(count): # tuple to table rows. tuple len is twice count!
Cmd="UPDATE "+self.in_sql+" set raw='"+str(result[i])+"', ts='"+str(self.ts)+"' where mba='"+str(mba)+"' and mbi="+str(mbi)+" and regadd='"+str(regadd+i)+"'" # koigile korraga
#print(Cmd) # debug
conn.execute(Cmd)
return 0
except:
traceback.print_exc()
return 1
else:
msg='ai grp data reading FAILED!'
print(msg)
return 1
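    # sync_ai() below walks aichannels ordered by mbi, mba, regadd and merges
    # consecutive register addresses on the same device into a single
    # read_ai_grp() call, e.g. registers 100, 101 and 102 on one mba become
    # read_ai_grp(mba, 100, 3, mbi) instead of three separate modbus reads.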
def sync_ai(self): # analogue input readings to sqlite, to be executed regularly.
#global MBerr
mba=0
val_reg=''
mcount=0
        block=0 # error count
#self.ts = time.time()
        ts_created=self.ts # use this as the service timestamp
value=0
ovalue=0
Cmd = ''
Cmd3= ''
cur = conn.cursor()
cur3 = conn.cursor()
bfirst=0
blast=0
bmba=0
bmbi=0
bcount=0
try:
Cmd="BEGIN IMMEDIATE TRANSACTION" # hoiab kinni kuni mb suhtlus kestab? teised seda ei kasuta samal ajal nagunii. iga tabel omaette.
conn.execute(Cmd)
#self.conn.execute(Cmd)
Cmd="select mba,regadd,mbi from "+self.in_sql+" where mba != '' and regadd != '' group by mbi,mba,regadd" # tsykkel lugemiseks, tuleks regadd kasvavasse jrk grupeerida
cur.execute(Cmd) # selle paringu alusel raw update, hiljem teha value arvutused iga teenuseliikme jaoks eraldi
for row in cur:
                mbi=int(row[2]) # already numeric
mba=int(row[0])
regadd=int(row[1])
if bfirst == 0:
bfirst = regadd
blast = regadd
bcount=1
bmba=mba
bmbi=mbi
#print('ai group mba '+str(bmba)+' start ',bfirst,'mbi',mbi) # debug
else: # not the first
if mbi == bmbi and mba == bmba and regadd == blast+1: # sequential group still growing
blast = regadd
bcount=bcount+1
#print('ai group end shifted to',blast) # debug
else: # a new group started, make a query for previous
                        #print('ai group end detected at regadd',blast,'bcount',bcount) # debug
#print('going to read ai registers from',bmbi,bmba,bfirst,'to',blast,'regcount',bcount) # debug
self.read_ai_grp(bmba,bfirst,bcount,bmbi) # reads and updates table with previous data
bfirst = regadd # new grp starts immediately
blast = regadd
bcount=1
bmba=mba
bmbi=mbi
#print('ai group mba '+str(bmba)+' start ',bfirst) # debug
if bfirst != 0: # last group yet unread
            #print('ai group end detected at regadd',blast) # debug
#print('going to read ai registers from',bmba,bfirst,'to',blast,'regcount',bcount) # debug
self.read_ai_grp(bmba,bfirst,bcount,bmbi) # reads and updates table
# raw updated for all aichannels
# now process raw -> value, by services. x1 x2 y1 y may be different even if the same mba regadd in use. DO NOT calculate status here, happens separately.
Cmd="select val_reg from "+self.in_sql+" where mba != '' and regadd != '' group by val_reg" # service list. other
cur.execute(Cmd) # selle paringu alusel raw update, hiljem teha value arvutused iga teenuseliikme jaoks eraldi
for row in cur: # services
status=0 # esialgu, aga selle jaoks vaja iga teenuse jaoks oma tsykkel.
val_reg=row[0] # teenuse nimi
Cmd3="select * from "+self.in_sql+" where val_reg='"+val_reg+"' and mba != '' and regadd != '' order by member" # loeme yhe teenuse kogu info
cur3.execute(Cmd3) # another cursor to read the same table
for srow in cur3: # value from raw and also status
#print repr(srow) # debug
mba=-1 #
regadd=-1
member=0
cfg=0
x1=0
x2=0
y1=0
y2=0
outlo=0
outhi=0
                    ostatus=0 # previous status
                    #tvalue=0 # test, for comparison
                    raw=0
                    ovalue=0 # previous (possibly averaged) value
                    ots=0 # previous ts for value, status and raw
                    avg=0 # averaging factor, takes effect from 2 up
desc=''
comment=''
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
#mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment # "+self.in_sql+"
if srow[0] != '':
mba=int(srow[0]) # must be int! will be -1 if empty (setpoints)
if srow[1] != '':
regadd=int(srow[1]) # must be int! will be -1 if empty
                    val_reg=srow[2] # this is a string
if srow[3] != '':
member=int(srow[3])
if srow[4] != '':
                        cfg=int(srow[4]) # config byte, individual and group bits together; present in hex form later
if srow[5] != '':
x1=int(srow[5])
if srow[6] != '':
x2=int(srow[6])
if srow[7] != '':
y1=int(srow[7])
if srow[8] != '':
y2=int(srow[8])
#if srow[9] != '':
# outlo=int(srow[9])
#if srow[10] != '':
# outhi=int(srow[10])
if srow[11] != '':
avg=int(srow[11]) # averaging strength, values 0 and 1 do not average!
if srow[12] != '': # block - count errors here; if it grows above 3, stop sending
block=int(srow[12]) #
if srow[13] != '': #
raw=int(srow[13])
if srow[14] != '':
ovalue=eval(srow[14]) # ovalue=int(srow[14])
#if srow[15] != '':
# ostatus=int(srow[15])
if srow[16] != '':
ots=eval(srow[16])
#desc=srow[17]
#comment=srow[18]
#instead of the following, see pid interpolate
if x1 != x2 and y1 != y2: # scaling configuration is valid
value=(raw-x1)*(y2-y1)/(x2-x1) # linear conversion
value=y1+value
msg=val_reg
#print 'raw',raw,', value',value, # debug
if avg>1 and abs(value-ovalue)<value/2: # average; the jump is not large
#if avg>1: # averaging of the readings is needed; the value could also be kept with decimals!
value=((avg-1)*ovalue+value)/avg # averaging
msg=msg+', averaged '+str(int(value))
else: # no averaging for big jumps
msg=msg+', nonavg value '+str(int(value))
else:
print("val_reg",val_reg,"member",member,"ai2scale PARAMETERS INVALID:",x1,x2,'->',y1,y2,'value not used!')
value=0
status=3 # not to be sent status=3! or send member as NaN?
msg=val_reg+', invalid scaling, value not used' # make sure msg is defined on this path as well
print(msg) # temporarily off. HERE IT REPORTS THE RAW AI READING
#print 'status for AI val_reg, member',val_reg,member,status,'due to cfg',cfg,'and value',value,'while limits are',outlo,outhi # debug
#"+self.in_sql+" update with new value and status
Cmd="UPDATE "+self.in_sql+" set status='"+str(status)+"', value='"+str(value)+"' where val_reg='"+val_reg+"' and member='"+str(member)+"' and mbi='"+str(mbi)+"'" # remember
#print Cmd
conn.execute(Cmd)
conn.commit()
#self.conn.commit() # "+self.in_sql+" transaction end
sys.stdout.write('a')
return 0
except:
msg='PROBLEM with '+self.in_sql+' reading or processing: '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
traceback.print_exc()
sys.stdout.flush()
time.sleep(0.5)
return 1
def sync_ao(self): # synchronizes AI registers with data in aochannels table
#print('write_aochannels start') # debug
# and use write_register() write modbus registers to get the desired result (all ao channels must be also defined in aichannels table!)
respcode=0
mbi=0
mba=0
omba=0 # previous value
val_reg=''
desc=''
value=0
word=0 # 16 bit register value
#comment=''
mcount=0
cur = conn.cursor()
cur3 = conn.cursor()
ts_created=self.ts # use this as the service timestamp
try:
Cmd="BEGIN IMMEDIATE TRANSACTION"
conn.execute(Cmd)
# 0 1 2 3 4 5 6 7
#mba,regadd,bit,bootvalue,value,rule,desc,comment
Cmd="select aochannels.mba,aochannels.regadd,aochannels.value,aochannels.mbi from aochannels left join aichannels \
on aochannels.mba = aichannels.mba AND aochannels.mbi = aichannels.mbi AND aochannels.regadd = aichannels.regadd \
where aochannels.value != aichannels.value" #
# the command above retrieves mba, regadd and value where values do not match in aichannels and aochannels
#print "Cmd=",Cmd
cur.execute(Cmd)
for row in cur: # got mba, regadd and value for registers that need to be updated / written
regadd=0
mba=0
if row[0] != '':
mba=int(row[0]) # must be a number
if row[1] != '':
regadd=int(row[1]) # must be a number
if row[2] != '':
value=int(float(row[2])) # may be a decimal number, convert to int!
msg='write_aochannels: going to write value '+str(value)+' to register mba.regadd '+str(mba)+'.'+str(regadd)
print(msg) # debug
#syslog(msg)
#client.write_register(address=regadd, value=value, unit=mba)
''' write(self, mba, reg, type = 'h', **kwargs):
:param 'mba': Modbus device address
:param 'reg': Modbus register address
:param 'type': Modbus register type, h = holding, c = coil
:param kwargs['count']: Modbus registers count for multiple register write
:param kwargs['value']: Modbus register value to write
:param kwargs['values']: Modbus registers values array to write
'''
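# Illustrative sketch only (not from the original file): how the write() call described
# above might look for a single holding register and for a multi-register write. The
# dispatcher index 0, device address 1 and register 100 are example values, not values
# taken from this repository.
#   res = mb[0].write(mba=1, reg=100, value=2048)
#   res = mb[0].write(mba=1, reg=100, count=2, values=[0, 2048])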
try:
if mb[mbi]:
respcode=respcode+mb[mbi].write(mba=mba, reg=regadd,value=value)
except:
print('device mbi,mba',mbi,mba,'not defined in devices.sql')
return 2
conn.commit() # transaction end - why?
return 0
except:
msg='problem with aochannel - aichannel sync!'
print(msg)
#syslog(msg)
traceback.print_exc()
sys.stdout.flush()
return 1
# sync_ao() end. WROTE MODBUS REGISTERS WHERE AOCHANNELS AND AICHANNELS VALUES DIFFERED
def get_aivalue(self,svc,member): # returns raw,value,lo,hi,status values based on service name and member number
#(mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment,type integer)
Cmd3="BEGIN IMMEDIATE TRANSACTION" # conn3, so the data cannot change while being read
conn3.execute(Cmd3)
Cmd3="select value,outlo,outhi,status from "+self.in_sql+" where val_reg='"+svc+"' and member='"+str(member)+"'"
#Cmd3="select raw,value,outlo,outhi,status,mba,regadd,val_reg,member from aichannels where val_reg='"+svc+"' and member='"+str(member)+"'" # debug. raw not returned?
#print(Cmd3) # debug
cursor3.execute(Cmd3)
raw=0
value=None
outlo=0
outhi=0
status=0
found=0
for row in cursor3: # should be one row only
#print(repr(row)) # debug
found=1
#raw=int(float(row[0])) if row[0] != '' and row[0] != None else 0
value=int(float(row[0])) if row[0] != '' and row[0] != None else 0
outlo=int(float(row[1])) if row[1] != '' and row[1] != None else 0
outhi=int(float(row[2])) if row[2] != '' and row[2] != None else 0
status=int(float(row[3])) if row[3] != '' and row[3] != None else 0
if found == 0:
msg='get_aivalue failure, no member '+str(member)+' for '+svc+' found!'
print(msg)
#syslog(msg)
conn3.commit()
#print('get_aivalue ',svc,member,'value,outlo,outhi,status',value,outlo,outhi,status) # debug
return value,outlo,outhi,status
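# Illustrative usage sketch (assumes an already constructed instance `ac` of this class
# and a service 'ABCW' with members 1 and 2 defined in the aichannels table):
#   value, outlo, outhi, status = ac.get_aivalue('ABCW', 1)  # current value, limits and status
#   if status == 0 and value is not None:
#       ac.set_aivalue('ABCW', 2, value + 10)                # update a setpoint-like member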
def set_aivalue(self,svc,member,value): # sets variables like setpoints or limits to be reported within services, based on service name and member number
#(mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment,type integer)
Cmd="BEGIN IMMEDIATE TRANSACTION" # conn3
conn.execute(Cmd)
Cmd="update aichannels set value='"+str(value)+"' where val_reg='"+svc+"' and member='"+str(member)+"'"
#print(Cmd) # debug
try:
conn.execute(Cmd)
conn.commit()
return 0
except:
msg='set_aivalue failure: '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
return 1 # update failure
def set_aovalue(self, value,mba,reg): # sets variables to control, based on physical addresses
#(mba,regadd,bootvalue,value,ts,rule,desc,comment)
Cmd="BEGIN IMMEDIATE TRANSACTION" # conn3
conn.execute(Cmd)
Cmd="update aochannels set value='"+str(value)+"' where regadd='"+str(reg)+"' and mba='"+str(mba)+"'"
try:
conn.execute(Cmd)
conn.commit()
return 0
except:
msg='set_aovalue failure: '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
return 1 # update failure
def set_aosvc(self,svc,member,value): # to set a readable output channel by the service name and member using the aichannels table
#(mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment,type integer) # ai
Cmd="BEGIN IMMEDIATE TRANSACTION"
conn.execute(Cmd)
Cmd="select mba,regadd from "+self.in_sql+" where val_reg='"+svc+"' and member='"+str(member)+"'"
cur=conn.cursor()
cur.execute(Cmd)
mba=None
reg=None
for row in cur: # should be one row only
try:
mba=row[0]
reg=row[1]
self.set_aovalue(value,mba,reg)
conn.commit()
return 0
except:
msg='set_aovalue failed for reg '+str(reg)+': '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
return 1
def report(self,svc = ''): # send the ai service messages to the monitoring server (only if fresh enough, not older than 2xappdelay). all or just one svc.
mba=0
val_reg=''
desc=''
cur=conn.cursor()
ts_created=self.ts # use this as the service timestamp
try:
Cmd="BEGIN IMMEDIATE TRANSACTION" # conn3, handling this whole service group (aichannels) is one transaction
conn.execute(Cmd)
if svc == '': # all services
Cmd="select val_reg from "+self.in_sql+" group by val_reg"
else: # just one
Cmd="select val_reg from "+self.in_sql+" where val_reg='"+svc+"'"
cur.execute(Cmd)
for row in cur: # services
val_reg=row[0] # service name
sta_reg=val_reg[:-1]+"S" # name without the last character plus S - the status service name, for analogue values and temperatures
if self.make_aichannel_svc(val_reg,sta_reg) == 0: # successful svc insertion into buff2server
pass
#print('tried to report svc',val_reg,sta_reg)
else:
print('make_aichannel FAILED to report svc',val_reg,sta_reg)
return 1 #cancel
conn.commit() # aichannels transaction end
return 0 # success
except:
msg='PROBLEM with aichannels reporting '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
traceback.print_exc()
sys.stdout.flush()
time.sleep(0.5)
return 1
def make_aichannel_svc(self,val_reg,sta_reg): #
''' make a single service record (with status chk) based on aichannel members and send it away to UDPchannel '''
status=0 # initially
cur=conn.cursor()
lisa=''
Cmd="select * from "+self.in_sql+" where val_reg='"+val_reg+"'" # read all info for one service again
#print('make_aichannel_svc:',Cmd) # debug
cur.execute(Cmd) # another cursor to read the same table
mts=0 # max timestamp for svc members. if too old, skip messaging to server
for srow in cur: # service members
#print repr(srow) # debug
mba=-1 #
regadd=-1
member=0
cfg=0
#x1=0
#x2=0
#y1=0
#y2=0
outlo=0
outhi=0
ostatus=0 # previous
#tvalue=0 # test, comparison
oraw=0
ovalue=0 # previous (possibly averaged) value
ots=0 # eelmine ts value ja status ja raw oma
avg=0 # keskmistamistegur, mojub alates 2
#desc=''
#comment=''
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
#mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment # aichannels
mba=int(srow[0]) if srow[0] != '' else 0 # must be int! will be -1 if empty (setpoints)
regadd=int(srow[1]) if srow[1] != '' else 0 # must be int! will be -1 if empty
val_reg=srow[2] # this is a string
member=int(srow[3]) if srow[3] != '' else 0
cfg=int(srow[4]) if srow[4] != '' else 0 # config byte for both individual and group flags; show in hex form later
#x1=int(srow[5]) if srow[5] != '' else 0
#x2=int(srow[6]) if srow[6] != '' else 0
#y1=int(srow[7]) if srow[7] != '' else 0
#y2=int(srow[8]) if srow[8] != '' else 0
outlo=int(srow[9]) if srow[9] != '' else None
outhi=int(srow[10]) if srow[10] != '' else None
avg=int(srow[11]) if srow[11] != '' else 0 # averaging strength, values 0 and 1 do not average!
#block=int(srow[12]) if srow[12] != '' else 0 # - count errors here; if it grows above 3, stop sending
oraw=int(srow[13]) if srow[13] != '' else 0
value=float(srow[14]) if srow[14] != '' else 0 # value of the service member
ostatus=int(srow[15]) if srow[15] != '' else 0 # status of the service component - not used
ots=eval(srow[16]) if srow[16] != '' else 0
#desc=srow[17]
#comment=srow[18]
################ sat
# ai svc STATUS CHK. check the value limits and set the status, according to configuration byte cfg bits values
# use hysteresis to return from non-zero status values
status=0 # initially for each member
if outhi != None:
if value>outhi: # above hi limit
if (cfg&4) and status == 0: # warning
status=1
if (cfg&8) and status<2: # critical
status=2
if (cfg&12) == 12: # not to be sent
status=3
#block=block+1 # error count incr
else: # return with hysteresis 5%
if outlo != None:
if value>outlo and value<outhi-0.05*(outhi-outlo): # value must not be below lo limit in order for status to become normal
status=0 # back to normal
else:
if value<outhi: # value must not be below lo limit in order for status to become normal
status=0 # back to normal
if outlo != None:
if value<outlo: # below lo limit
if (cfg&1) and status == 0: # warning
status=1
if (cfg&2) and status<2: # critical
status=2
if (cfg&3) == 3: # not to be sent, unknown
status=3
#block=block+1 # error count incr
else: # back with hysteresis 5%
if outhi != None:
if value<outhi and value>outlo+0.05*(outhi-outlo):
status=0 # back to normal
else:
if value>outlo:
status=0 # back to normal
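# Worked example of the limit/hysteresis logic above (numbers are illustrative only):
# with outlo=10, outhi=50 and cfg bits 4+8 set, value 55 gives status 2 (critical high);
# status only returns to 0 once the value drops below outhi-0.05*(outhi-outlo)=48
# while staying above outlo - the 5% band prevents status flapping around the limit.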
#############
#print 'make ai mba ots mts',mba,ots,mts # debug
if mba>0:
if ots>mts:
mts=ots # latest member timestamp for the current service
if lisa != '': # not the first member
lisa=lisa+' ' # separator between member values
lisa=lisa+str(int(round(value,1))) # adding member values into one string, use values without decimal point
# service done
#print('ai svc '+val_reg+' - VALUE to use in sendtuple:',lisa) # debug
if self.ts-mts < 3*self.readperiod and status<3: # data fresh enough to be sent
sendtuple=[sta_reg,status,val_reg,lisa] # sending service to buffer
# print('ai svc - going to report',sendtuple) # debug
udp.send(sendtuple) # to uniscada instance
else:
msg='skipping ai data send (buff2server wr) due to stale aichannels data, reg '+val_reg+',mts '+str(mts)+', ts '+str(self.ts)
#syslog(msg) # incl syslog
print(msg)
return 1
return 0
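# Illustrative note (service names are examples only): for val_reg 'ABCW' the status
# register sta_reg becomes 'ABCS', and the tuple handed to udp.send() looks like
#   ['ABCS', 0, 'ABCW', '215 220 231']
# i.e. one space separated value per service member; sending is skipped entirely if the data is stale.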
def doall(self): # do this regularly; executes only if the time is right
''' Does everything on time if executed regularly '''
res=0 # returncode, 0 = ok
self.ts = round(time.time(),1)
if self.ts - self.ts_read > self.readperiod:
self.ts_read = self.ts
try:
res=self.sync_ai() #
res=res+self.sync_ao() # writes output registers to be changed via modbus, based on feedback on di bits
except:
traceback.print_exc()
return 1
if self.ts - self.ts_send > self.sendperiod:
self.ts_send = self.ts
try:
res=res+self.report()
return res
except:
traceback.print_exc()
return 2
|
dcneeme/droidcontroller
|
droidcontroller/achannels.py
|
Python
|
gpl-3.0
| 28,405
|
[
"BLAST"
] |
c9380f8b7b03ae6856e8ce6959dd06901d20d3c44634c888f5fff6673603db09
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import socket
import struct
from Crypto.Cipher import AES
from threading import Thread
equalisers = ["Standard", "Bass", "Flat", "Boost", "Treble and Bass", "User",
"Music", "Cinema", "Night", "News", "Voice", "ia_sound",
"Adaptive Sound Control", "Movie", "Bass Blast", "Dolby Atmos",
"DTS Virtual X", "Bass Boost Plus", "DTS X"]
STANDARD = 0
BASS = 1
FLAT = 2
BOOST = 3
TREBLE_BASS = 4
USER_EQ = 5
MUSIC = 6
CINEMA = 7
NIGHT = 8
NEWS = 9
VOICE = 10
IA_SOUND = 11
ASC = 12
MOVIE = 13
BASS_BLAST = 14
DOLBY_ATMOS = 15
DTS_VIRTUAL_X = 16
BASS_BOOST_PLUS = 17
DTS_X = 18
functions = ["Wifi", "Bluetooth", "Portable", "Aux", "Optical", "CP", "HDMI",
"ARC", "Spotify", "Optical2", "HDMI2", "HDMI3", "LG TV", "Mic",
"Chromecast", "Optical/HDMI ARC", "LG Optical", "FM", "USB", "USB2"]
WIFI = 0
BLUETOOTH = 1
PORTABLE = 2
AUX = 3
OPTICAL = 4
CP = 5
HDMI = 6
ARC = 7
SPOTIFY = 8
OPTICAL_2 = 9
HDMI_2 = 10
HDMI_3 = 11
LG_TV = 12
MIC = 13
C4A = 14
OPTICAL_HDMIARC = 15
LG_OPTICAL = 16
FM = 17
USB = 18
USB_2 = 19
class temescal:
def __init__(self, address, port=9741, callback=None, logger=None):
self.iv = b'\'%^Ur7gy$~t+f)%@'
self.key = b'T^&*J%^7tr~4^%^&I(o%^!jIJ__+a0 k'
self.address = address
self.port = port
self.callback = callback
self.logger = logger
self.socket = None
self.connect()
if callback is not None:
self.thread = Thread(target=self.listen, daemon=True)
self.thread.start()
def connect(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((self.address, self.port))
def listen(self):
while True:
try:
data = self.socket.recv(1)
except Exception:
self.connect()
continue # skip this iteration; data would be stale or undefined after a reconnect
if len(data) == 0: # the soundbar closed the connection, recreate it
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
self.connect()
continue
if data[0] == 0x10:
data = self.socket.recv(4)
length = struct.unpack(">I", data)[0]
data = self.socket.recv(length)
if len(data) % 16 != 0:
continue
response = self.decrypt_packet(data)
if response is not None:
self.callback(json.loads(response))
def encrypt_packet(self, data):
padlen = 16 - (len(data) % 16)
for i in range(padlen):
data = data + chr(padlen)
data = data.encode('utf-8')
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
encrypted = cipher.encrypt(data)
length = len(encrypted)
prelude = bytearray([0x10, 0x00, 0x00, 0x00, length])
return prelude + encrypted
def decrypt_packet(self, data):
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
decrypt = cipher.decrypt(data)
padding = decrypt[-1:]
decrypt = decrypt[:-ord(padding)]
return str(decrypt, 'utf-8')
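# Round-trip sketch of the framing used above (illustrative only; key and iv are the
# constants from __init__, the payload is an example message):
#   from Crypto.Cipher import AES
#   key = b'T^&*J%^7tr~4^%^&I(o%^!jIJ__+a0 k'
#   iv = b'\'%^Ur7gy$~t+f)%@'
#   msg = '{"cmd": "get", "msg": "EQ_VIEW_INFO"}'
#   pad = 16 - len(msg) % 16
#   ct = AES.new(key, AES.MODE_CBC, iv).encrypt((msg + chr(pad) * pad).encode())
#   # on the wire this ciphertext is prefixed with 0x10 and a 4-byte length, see encrypt_packet()
#   pt = AES.new(key, AES.MODE_CBC, iv).decrypt(ct)
#   assert pt[:-pt[-1]].decode() == msg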
def send_packet(self, data):
packet = self.encrypt_packet(json.dumps(data))
try:
self.socket.send(packet)
except Exception:
try:
self.connect()
self.socket.send(packet)
except Exception:
pass
def get_eq(self):
data = {"cmd": "get", "msg": "EQ_VIEW_INFO"}
self.send_packet(data)
def set_eq(self, eq):
data = {"cmd": "set", "data": {"i_curr_eq": eq }, "msg": "EQ_VIEW_INFO"}
self.send_packet(data)
def get_info(self):
data = {"cmd": "get", "msg": "SPK_LIST_VIEW_INFO"}
self.send_packet(data)
def get_play(self):
data = {"cmd": "get", "msg": "PLAY_INFO"}
self.send_packet(data)
def get_func(self):
data = {"cmd": "get", "msg": "FUNC_VIEW_INFO"}
self.send_packet(data)
def get_settings(self):
data = {"cmd": "get", "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def get_product_info(self):
data = {"cmd": "get", "msg": "PRODUCT_INFO"}
self.send_packet(data)
def get_c4a_info(self):
data = {"cmd": "get", "msg": "C4A_SETTING_INFO"}
self.send_packet(data)
def get_radio_info(self):
data = {"cmd": "get", "msg": "RADIO_VIEW_INFO"}
self.send_packet(data)
def get_ap_info(self):
data = {"cmd": "get", "msg": "SHARE_AP_INFO"}
self.send_packet(data)
def get_update_info(self):
data = {"cmd": "get", "msg": "UPDATE_VIEW_INFO"}
self.send_packet(data)
def get_build_info(self):
data = {"cmd": "get", "msg": "BUILD_INFO_DEV"}
self.send_packet(data)
def get_option_info(self):
data = {"cmd": "get", "msg": "OPTION_INFO_DEV"}
self.send_packet(data)
def get_mac_info(self):
data = {"cmd": "get", "msg": "MAC_INFO_DEV"}
self.send_packet(data)
def get_mem_mon_info(self):
data = {"cmd": "get", "msg": "MEM_MON_DEV"}
self.send_packet(data)
def get_test_info(self):
data = {"cmd": "get", "msg": "TEST_DEV"}
self.send_packet(data)
def test_tone(self):
data = {"cmd": "set", "msg": "TEST_TONE_REQ"}
self.send_packet(data)
def set_night_mode(self, enable):
data = {"cmd": "set", "data": {"b_night_mode": enable}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_avc(self, enable):
data = {"cmd": "set", "data": {"b_auto_vol": enable}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_drc(self, enable):
data = {"cmd": "set", "data": {"b_drc": enable}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_neuralx(self, enable):
data = {"cmd": "set", "data": {"b_neuralx": enable}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_av_sync(self, value):
data = {"cmd": "set", "data": {"i_av_sync": value}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_woofer_level(self, value):
data = {"cmd": "set", "data": {"i_woofer_level": value}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_rear_control(self, enable):
data = {"cmd": "set", "data": {"b_rear": enable}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_rear_level(self, value):
data = {"cmd": "set", "data": {"i_rear_level": value}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_top_level(self, value):
data = {"cmd": "set", "data": {"i_top_level": value}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_center_level(self, value):
data = {"cmd": "set", "data": {"i_center_level": value}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_tv_remote(self, enable):
data = {"cmd": "set", "data": {"b_tv_remote": enable}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_auto_power(self, enable):
data = {"cmd": "set", "data": {"b_auto_power": enable}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_auto_display(self, enable):
data = {"cmd": "set", "data": {"b_auto_display": enable}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_bt_standby(self, enable):
data = {"cmd": "set", "data": {"b_bt_standby": enable}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_bt_restrict(self, enable):
data = {"cmd": "set", "data": {"b_conn_bt_limit": enable}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_sleep_time(self, value):
data = {"cmd": "set", "data": {"i_sleep_time": value}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_func(self, value):
data = {"cmd": "set", "data": {"i_curr_func": value}, "msg": "FUNC_VIEW_INFO"}
self.send_packet(data)
def set_volume(self, value):
data = {"cmd": "set", "data": {"i_vol": value}, "msg": "SPK_LIST_VIEW_INFO"}
self.send_packet(data)
def set_mute(self, enable):
data = {"cmd": "set", "data": {"b_mute": enable}, "msg": "SPK_LIST_VIEW_INFO"}
self.send_packet(data)
def set_name(self, name):
data = {"cmd": "set", "data": {"s_user_name": name}, "msg": "SETTING_VIEW_INFO"}
self.send_packet(data)
def set_factory(self):
data = {"cmd": "set", "msg": "FACTORY_SET_REQ"}
self.send_packet(data)
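# Illustrative usage sketch (the IP address is an example; a reachable soundbar is assumed):
#   def on_message(msg):
#       print(msg)                 # parsed JSON responses arrive here from the listener thread
#   bar = temescal("192.168.1.50", callback=on_message)
#   bar.get_eq()                   # response delivered asynchronously to on_message
#   bar.set_volume(12)             # i_vol in SPK_LIST_VIEW_INFO
#   bar.set_func(OPTICAL)          # switch input using the function constants above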
|
google/python-temescal
|
temescal/__init__.py
|
Python
|
apache-2.0
| 9,284
|
[
"BLAST"
] |
b3f28f5dc6f38119f6302a06c97fa398323656a95308966907198580bc0fab5d
|
""" NormalizeMethodCalls turns built in method calls into function calls. """
from pythran.analyses import Globals
from pythran.passmanager import Transformation
from pythran.syntax import PythranSyntaxError
from pythran.tables import attributes, functions, methods, MODULES
import ast
class NormalizeMethodCalls(Transformation):
'''
Turns built in method calls into function calls.
>>> import ast
>>> from pythran import passmanager, backend
>>> node = ast.parse("l.append(12)")
>>> pm = passmanager.PassManager("test")
>>> _, node = pm.apply(NormalizeMethodCalls, node)
>>> print pm.dump(backend.Python, node)
__builtin__.list.append(l, 12)
'''
def __init__(self):
Transformation.__init__(self, Globals)
self.imports = set()
self.to_import = set()
def visit_Module(self, node):
"""
When we normalize a call, we need to add the correct import for the
method-to-function transformation.
a.max()
for numpy array will become:
numpy.max(a)
so we have to import numpy.
"""
self.generic_visit(node)
new_imports = self.to_import - self.globals
imports = [ast.Import(names=[ast.alias(name=mod, asname=None)])
for mod in new_imports]
node.body = imports + node.body
return node
def visit_FunctionDef(self, node):
self.imports = self.globals.copy()
[self.imports.discard(arg.id) for arg in node.args.args]
self.generic_visit(node)
return node
def visit_Import(self, node):
for alias in node.names:
self.imports.add(alias.asname or alias.name)
return node
def visit_Assign(self, node):
n = self.generic_visit(node)
for t in node.targets:
if isinstance(t, ast.Name):
self.imports.discard(t.id)
return n
def visit_For(self, node):
node.iter = self.visit(node.iter)
if isinstance(node.target, ast.Name):
self.imports.discard(node.target.id)
if node.body:
node.body = [self.visit(n) for n in node.body]
if node.orelse:
node.orelse = [self.visit(n) for n in node.orelse]
return node
def visit_Attribute(self, node):
node = self.generic_visit(node)
# storing in an attribute -> not a getattr
if type(node.ctx) is not ast.Load:
return node
# method name -> not a getattr
elif node.attr in methods:
return node
# imported module -> not a getattr
elif type(node.value) is ast.Name and node.value.id in self.imports:
if node.attr not in MODULES[node.value.id]:
msg = ("`" + node.attr + "' is not a member of " +
node.value.id + " or Pythran does not support it")
raise PythranSyntaxError(msg, node)
return node
# not listed as attributed -> not a getattr
elif node.attr not in attributes:
return node
# A getattr !
else:
return ast.Call(ast.Attribute(ast.Name('__builtin__', ast.Load()),
'getattr',
ast.Load()),
[node.value, ast.Str(node.attr)],
[], None, None)
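# Illustrative sketch of the rewrite above (standard ast module, Python 2 node layout as
# used by this file; 'real' is only an example attribute assumed to be in `attributes`):
#   expr = ast.parse("a.real", mode="eval").body          # an ast.Attribute in Load context
#   call = ast.Call(ast.Attribute(ast.Name('__builtin__', ast.Load()),
#                                 'getattr', ast.Load()),
#                   [expr.value, ast.Str(expr.attr)], [], None, None)
#   # `call` is the kind of node this pass returns, i.e. __builtin__.getattr(a, 'real')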
@staticmethod
def renamer(v, cur_module):
"""
Rename function path to fit Pythonic naming.
"""
name = v + '_'
if name in cur_module:
return name
else:
return v
def visit_Call(self, node):
"""
Transform call site to have normal function call.
Examples
--------
For methods:
>> a = [1, 2, 3]
>> a.append(1)
Becomes
>> __list__.append(a, 1)
For functions:
>> __builtin__.dict.fromkeys([1, 2, 3])
Becomes
>> __builtin__.__dict__.fromkeys([1, 2, 3])
"""
node = self.generic_visit(node)
# Only attributes function can be Pythonic and should be normalized
if isinstance(node.func, ast.Attribute):
if node.func.attr in methods:
# Get object targeted by methods
obj = lhs = node.func.value
# Get the most left identifier to check if it is not an
# imported module
while isinstance(obj, ast.Attribute):
obj = obj.value
is_not_module = (not isinstance(obj, ast.Name) or
obj.id not in self.imports)
if is_not_module:
# As it was a methods call, push targeted object as first
# arguments and add correct module prefix
node.args.insert(0, lhs)
mod = methods[node.func.attr][0]
# Submodules import full module
self.to_import.add(mod[0])
node.func = reduce(
lambda v, o: ast.Attribute(v, o, ast.Load()),
mod[1:] + (node.func.attr,),
ast.Name(mod[0], ast.Load())
)
# else methods have been called using function syntax
if node.func.attr in methods or node.func.attr in functions:
# Now, methods and function have both function syntax
def rec(path, cur_module):
"""
Recursively rename path content looking in matching module.
Prefers __module__ to module if it exists.
This recursion is done as modules are visited top->bottom
while attributes have to be visited bottom->top.
"""
err = "Function path is chained attributes and name"
assert isinstance(path, (ast.Name, ast.Attribute)), err
if isinstance(path, ast.Attribute):
new_node, cur_module = rec(path.value, cur_module)
new_id = self.renamer(path.attr, cur_module)
return (ast.Attribute(new_node, new_id, ast.Load()),
cur_module[new_id])
else:
new_id = self.renamer(path.id, cur_module)
return ast.Name(new_id, ast.Load()), cur_module[new_id]
# Rename module path to avoid naming issue.
node.func.value, _ = rec(node.func.value, MODULES)
return node
|
artas360/pythran
|
pythran/transformations/normalize_method_calls.py
|
Python
|
bsd-3-clause
| 6,717
|
[
"VisIt"
] |
65df8747c3456b08189110269e61288108b936a624a6cfb8e2124676c56eeaa1
|
import os
import unittest
from __main__ import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
#
# VesselDisplay
#
class VesselDisplay(ScriptedLoadableModule):
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Vessel Display"
self.parent.categories = ["Examples"]
self.parent.dependencies = ["SubjectHierarchy"]
self.parent.contributors = [""]
self.parent.helpText = """ """
self.parent.acknowledgementText = """ """
#
# VesselDisplayWidget
#
class VesselDisplayWidget(ScriptedLoadableModuleWidget):
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
# Instantiate and connect widgets ...
#Display Region. Obtained from Subject Hierarchy
displayCollapsibleButton = ctk.ctkCollapsibleButton()
displayCollapsibleButton.text = "Display"
self.layout.addWidget(displayCollapsibleButton)
# Layout within the display collapsible button
displayFormLayout = qt.QHBoxLayout()
displayCollapsibleButton.setLayout(displayFormLayout)
self.subjectHierarchyTreeView = slicer.qMRMLSubjectHierarchyTreeView()
self.subjectHierarchyTreeView.setMRMLScene(slicer.app.mrmlScene())
self.subjectHierarchyTreeView.setColumnHidden(self.subjectHierarchyTreeView.sceneModel().idColumn,True)
displayFormLayout.addWidget(self.subjectHierarchyTreeView)
self.subjectHierarchyTreeView.connect("currentNodeChanged(vtkMRMLNode*)", self.onSubjectHierarchyNodeSelect)
#Properties Region
self.displayPropertiesCollapsibleButton = ctk.ctkCollapsibleButton()
self.displayPropertiesCollapsibleButton.text = "Display Properties"
self.layout.addWidget(self.displayPropertiesCollapsibleButton)
self.displayPropertiesCollapsibleButton.enabled = False
# Layout within the display-properties collapsible button
displayPropertiesFormLayout = qt.QHBoxLayout()
self.displayPropertiesCollapsibleButton.setLayout(displayPropertiesFormLayout)
# Volume display properties
self.volumeDisplayWidget = slicer.qSlicerVolumeDisplayWidget()
displayPropertiesFormLayout.addWidget(self.volumeDisplayWidget)
self.volumeDisplayWidget.hide()
#Spacial Objects display properties
self.spacialObjectsWidget = slicer.qSlicerSpatialObjectsModuleWidget()
displayPropertiesFormLayout.addWidget(self.spacialObjectsWidget)
self.spacialObjectsWidget.hide()
def onSubjectHierarchyNodeSelect(self):
self.displayPropertiesCollapsibleButton.enabled = True
#get current node from subject hierarchy
currentInstance = slicer.qSlicerSubjectHierarchyPluginHandler().instance()
currentNode = currentInstance.currentNode()
if currentNode != None:
#current node is subject hierarchy node
currentAssociatedNode = currentNode.GetAssociatedNode()
if currentAssociatedNode !=None:
currentNodetype = currentAssociatedNode.GetNodeTagName()
print currentNodetype
if 'Volume' in currentNodetype :
self.volumeDisplayWidget.show()
self.spacialObjectsWidget.hide()
self.volumeDisplayWidget.setMRMLVolumeNode(currentAssociatedNode)
slicer.app.layoutManager().setLayout(3)
return
elif 'Spatial' in currentNodetype :
self.volumeDisplayWidget.hide()
self.spacialObjectsWidget.show()
self.spacialObjectsWidget.setSpatialObjectsNode(currentAssociatedNode)
slicer.app.layoutManager().setLayout(4)
return
self.displayPropertiesCollapsibleButton.enabled = False
#
# VesselDisplayLogic
#
class VesselDisplayLogic(ScriptedLoadableModuleLogic):
def hasImageData(self,volumeNode):
"""This is an example logic method that
returns true if the passed in volume
node has valid image data
"""
if not volumeNode:
logging.debug('hasImageData failed: no volume node')
return False
if volumeNode.GetImageData() == None:
logging.debug('hasImageData failed: no image data in volume node')
return False
return True
#
# VesselDisplayTest
#
class VesselDisplayTest(ScriptedLoadableModuleTest):
def runTest(self):
self.test_VesselDisplay1()
def test_VesselDisplay1(self):
self.delayDisplay("Starting the test")
#
# first, get some data
#
import urllib
downloads = (
('http://slicer.kitware.com/midas3/download?items=5767', 'FA.nrrd', slicer.util.loadVolume),
)
for url,name,loader in downloads:
filePath = slicer.app.temporaryPath + '/' + name
if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
logging.info('Requesting download %s from %s...\n' % (name, url))
urllib.urlretrieve(url, filePath)
if loader:
logging.info('Loading %s...' % (name,))
loader(filePath)
self.delayDisplay('Finished with download and loading')
volumeNode = slicer.util.getNode(pattern="FA")
logic = VesselDisplayLogic()
logic.hasImageData(volumeNode)
self.delayDisplay('Test passed!')
|
KitwareMedical/VesselView
|
Modules/Scripted/VesselDisplay/VesselDisplay.py
|
Python
|
apache-2.0
| 5,072
|
[
"VTK"
] |
0e8c65cae1098e6cb76cc32a360d04069365dc646c9a10befbb813a9063526f0
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import test_util
import time
from absl import app
from selenium import webdriver
from pywinauto.application import Application
UnsafePageLink = "http://testsafebrowsing.appspot.com/s/malware.html"
UnsafePageLinkTabText = "Security error"
UnsafeDownloadLink = "http://testsafebrowsing.appspot.com/s/badrep.exe"
UnsafeDownloadTextRe = ".* is dangerous,\s*so\s*Chrom.* has blocked it"
def visit(window, url):
"""Visit a specific URL through pywinauto.Application.
SafeBrowsing intercepts HTTP requests & hangs WebDriver.get(), which prevents
us from getting the page source. Using pywinauto to visit the pages instead.
"""
window.Edit.set_edit_text(url).type_keys("%{ENTER}")
time.sleep(10)
def main(argv):
exclude_switches = ["disable-background-networking"]
chrome_options = webdriver.ChromeOptions()
chrome_options.add_experimental_option("excludeSwitches", exclude_switches)
driver = test_util.create_chrome_webdriver(chrome_options=chrome_options)
app = Application(backend="uia")
app.connect(title_re='.*Chrome|.*Chromium')
window = app.top_window()
# Wait for Chrome to download SafeBrowsing lists in the background.
# There's no trigger to force this operation or synchronize on it, but quick
# experiments have shown 3-4 minutes in most cases, so 5 should be plenty.
time.sleep(60 * 5)
print "Visiting unsafe page: %s" % UnsafePageLink
visit(window, UnsafePageLink)
unsafe_page = False
for desc in app.top_window().descendants():
if desc.window_text():
print "unsafe_page.item: %s" % desc.window_text()
if UnsafePageLinkTabText in desc.window_text():
unsafe_page = True
break
print "Downloading unsafe file: %s" % UnsafeDownloadLink
visit(window, UnsafeDownloadLink)
unsafe_download = False
for desc in app.top_window().descendants():
if desc.window_text():
print "unsafe_download.item: %s" % desc.window_text()
if re.search(UnsafeDownloadTextRe, desc.window_text()):
unsafe_download = True
break
print "RESULTS.unsafe_page: %s" % unsafe_page
print "RESULTS.unsafe_download: %s" % unsafe_download
driver.quit()
if __name__ == '__main__':
app.run(main)
|
endlessm/chromium-browser
|
chrome/test/enterprise/e2e/policy/safe_browsing/safe_browsing_ui_test.py
|
Python
|
bsd-3-clause
| 2,371
|
[
"VisIt"
] |
ee82781da096b78e56580a0567a2b880272cd4a88fdc1b11adeecf432e4a515e
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
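# Small illustrative example (not part of the original file): a constant feature yields a
# zero scale, which is replaced by 1 so that later divisions are safe.
#   >>> _handle_zeros_in_scale(np.array([2.0, 0.0, 0.5]))
#   array([ 2. ,  1. ,  0.5])
#   >>> _handle_zeros_in_scale(0.0)
#   1.0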
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead."
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_* instead of deprecated *data_min*.
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_* instead of deprecated *data_max*.
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_* instead of deprecated *data_range*.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
@property
@deprecated("Attribute data_range will be removed in "
"0.19. Use ``data_range_`` instead")
def data_range(self):
return self.data_range_
@property
@deprecated("Attribute data_min will be removed in "
"0.19. Use ``data_min_`` instead")
def data_min(self):
return self.data_min_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does not support sparse input. "
"You may consider using MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X -= self.min_
X /= self.scale_
return X
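# Worked example of the transformation above (illustrative single-feature data):
# training values [1, 3, 5] with feature_range=(0, 1) give data_min_=1, data_max_=5,
# scale_=(1-0)/(5-1)=0.25 and min_=0-1*0.25=-0.25, so transform() maps 1->0.0, 3->0.5
# and 5->1.0 via X*scale_ + min_.
#   >>> MinMaxScaler().fit_transform(np.array([[1.], [3.], [5.]])).ravel()
#   array([ 0. ,  0.5,  1. ])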
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* is recommended instead of deprecated *std_*.
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
@property
@deprecated("Attribute ``std_`` will be removed in 0.19. Use ``scale_`` instead")
def std_(self):
return self.scale_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
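# Illustrative sketch of incremental fitting (values are examples only):
#   scaler = StandardScaler()
#   scaler.partial_fit(np.array([[0.], [2.]]))   # first batch:  mean_=1.0, n_samples_seen_=2
#   scaler.partial_fit(np.array([[4.], [6.]]))   # second batch: mean_=3.0, n_samples_seen_=4
#   # var_ is now the (biased) variance of [0, 2, 4, 6], i.e. 5.0, and scale_ = sqrt(5.0)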
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
# For backward compatibility, we handle the case of 1D input here.
# From 0.17, 1D input is deprecated in the scaler objects, but we still
# want to allow users to keep calling this function with 1D input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
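# Illustrative usage sketch (assumed helper, not part of the original module):
# how MaxAbsScaler and maxabs_scale are typically used, including incremental
# fitting via ``partial_fit``. The helper name and the toy data are assumptions
# made for illustration only.
def _demo_maxabs_scaling():
    import numpy as np
    from sklearn.preprocessing import MaxAbsScaler, maxabs_scale
    X = np.array([[1., -2.], [2., 4.], [-4., 1.]])
    scaler = MaxAbsScaler()
    X_scaled = scaler.fit_transform(X)  # each column now has max |value| == 1
    assert np.allclose(np.abs(X_scaled).max(axis=0), 1.0)
    # Incremental fitting: feed the data in two batches; the resulting scale_
    # equals the one obtained from a single fit on the full data.
    inc = MaxAbsScaler()
    inc.partial_fit(X[:2])
    inc.partial_fit(X[2:])
    assert np.allclose(inc.scale_, scaler.scale_)
    # Functional interface, equivalent to MaxAbsScaler().fit_transform(X) for axis=0.
    assert np.allclose(maxabs_scale(X), X_scaled)
    return X_scaled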
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set `with_centering=False`
explicitly (in that case, only scaling by the interquartile range will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if the materialized dense array is expected to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
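# Illustrative sketch (assumed helper, not part of the original module):
# RobustScaler centers on the median and scales by the IQR, so a single extreme
# outlier barely changes the scaling, unlike mean/variance based scaling.
def _demo_robust_scaling():
    import numpy as np
    from sklearn.preprocessing import RobustScaler
    X = np.array([[1.], [2.], [3.], [4.], [1000.]])  # 1000. is an outlier
    scaler = RobustScaler().fit(X)
    # center_ is the per-feature median, scale_ the interquartile range.
    assert scaler.center_[0] == 3.0
    assert scaler.scale_[0] == np.percentile(X, 75) - np.percentile(X, 25)
    X_scaled = scaler.transform(X)
    # Round-trip back to the original representation.
    assert np.allclose(scaler.inverse_transform(X_scaled), X)
    return X_scaled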
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
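# Illustrative sketch (assumed helper, not part of the original module): the
# powers_ attribute records, for every output column, the exponent applied to
# each input feature, which is exactly how transform builds each column as a
# product of input powers.
def _demo_polynomial_powers():
    import numpy as np
    from sklearn.preprocessing import PolynomialFeatures
    X = np.arange(6).reshape(3, 2)  # two input features [a, b]
    poly = PolynomialFeatures(degree=2).fit(X)
    # Output columns correspond to 1, a, b, a^2, a*b, b^2.
    expected_powers = np.array([[0, 0],
                                [1, 0],
                                [0, 1],
                                [2, 0],
                                [1, 1],
                                [0, 2]])
    assert np.array_equal(poly.powers_, expected_powers)
    assert poly.n_output_features_ == len(expected_powers)
    return poly.transform(X)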
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering. For instance, the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
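# Illustrative sketch (assumed helper, not part of the original module): after
# l2 normalization every row has unit Euclidean norm, so the plain dot product
# of two rows is their cosine similarity, as noted in the Normalizer docstring.
def _demo_unit_norm_cosine():
    import numpy as np
    from sklearn.preprocessing import normalize
    X = np.array([[3., 4.], [1., 0.]])
    Xn = normalize(X, norm='l2')  # rows scaled to unit length
    assert np.allclose(np.linalg.norm(Xn, axis=1), 1.0)
    cosine = Xn[0].dot(Xn[1])
    expected = X[0].dot(X[1]) / (np.linalg.norm(X[0]) * np.linalg.norm(X[1]))
    assert np.isclose(cosine, expected)
    return cosine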
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
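# Illustrative sketch (assumed helper, not part of the original module):
# thresholding with binarize / Binarizer. Values strictly greater than the
# threshold become 1, everything else becomes 0.
def _demo_binarize():
    import numpy as np
    from sklearn.preprocessing import Binarizer, binarize
    X = np.array([[0.2, -1.0, 2.0],
                  [0.5, 0.5, 0.0]])
    out = binarize(X, threshold=0.5)
    expected = np.array([[0., 0., 1.],
                         [0., 0., 0.]])
    assert np.array_equal(out, expected)
    # The Transformer API gives the same result and fits into Pipelines.
    assert np.array_equal(Binarizer(threshold=0.5).fit_transform(X), expected)
    return out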
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
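# Illustrative sketch (assumed helper, not part of the original module):
# centering a linear kernel K = X.dot(X.T) with KernelCenterer gives the same
# matrix as first centering the features (StandardScaler(with_std=False)) and
# then computing the linear kernel, which is the equivalence stated in the
# class docstring.
def _demo_kernel_centering():
    import numpy as np
    from sklearn.preprocessing import KernelCenterer, StandardScaler
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    K = X.dot(X.T)  # uncentered linear kernel
    K_centered = KernelCenterer().fit_transform(K)
    X_centered = StandardScaler(with_std=False).fit_transform(X)
    assert np.allclose(K_centered, X_centered.dot(X_centered.T))
    return K_centered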
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be in ``range(n_values[i])``
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that were seen during fit,
# i.e. values less than n_values_, selected via the mask below.
# This means that if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical features are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either 'error' or "
"'ignore', got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
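# Illustrative sketch (assumed helper, not part of the original module; it uses
# the older attribute names defined in this module): how the fitted attributes
# of OneHotEncoder relate to the output layout. The column block
# feature_indices_[i]:feature_indices_[i+1] is reserved for input feature i
# before the active_features_ mask is applied (only relevant for n_values='auto').
def _demo_one_hot_layout():
    import numpy as np
    from sklearn.preprocessing import OneHotEncoder
    X = np.array([[0, 0, 3],
                  [1, 1, 0],
                  [0, 2, 1],
                  [1, 0, 2]])
    enc = OneHotEncoder(sparse=False).fit(X)
    assert np.array_equal(enc.n_values_, [2, 3, 4])  # max value + 1 per feature
    assert np.array_equal(enc.feature_indices_, [0, 2, 5, 9])  # cumulative offsets
    out = enc.transform([[0, 1, 1]])
    # 9 columns in total: 2 for feature 0, 3 for feature 1, 4 for feature 2.
    assert out.shape == (1, 9)
    return out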
|
kashif/scikit-learn
|
sklearn/preprocessing/data.py
|
Python
|
bsd-3-clause
| 67,091
|
[
"Gaussian"
] |
be8a75df89231e2cf719a033304ae5a7e3135dd75bf47248563f9ffa0452e3be
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import gto, lib, scf
from pyscf.prop import gtensor
from pyscf.data import nist
def make_dia_gc2e(gobj, dm0, gauge_orig, sso_qed_fac=1):
mol = gobj.mol
dma, dmb = dm0
effspin = mol.spin * .5
muB = .5 # Bohr magneton
alpha2 = nist.ALPHA ** 2
#sso_qed_fac = (nist.G_ELECTRON - 1)
nao = dma.shape[0]
# int2e_ip1v_r1 = (ij|\frac{\vec{r}_{12}}{r_{12}^3} \vec{r}_1|kl)
if gauge_orig is None:
gc2e_ri = mol.intor('int2e_ip1v_r1', comp=9, aosym='s1').reshape(3,3,nao,nao,nao,nao)
else:
with mol.with_common_origin(gauge_orig):
gc2e_ri = mol.intor('int2e_ip1v_rc1', comp=9, aosym='s1').reshape(3,3,nao,nao,nao,nao)
ej = numpy.zeros((3,3))
ek = numpy.zeros((3,3))
if isinstance(gobj.para_soc2e, str) and 'SSO' in gobj.dia_soc2e.upper():
# spin-density should be contracted to electron 1 (associated to operator r_i)
ej += sso_qed_fac * numpy.einsum('xyijkl,ji,lk->xy', gc2e_ri, dma-dmb, dma+dmb)
ek += sso_qed_fac * numpy.einsum('xyijkl,jk,li->xy', gc2e_ri, dma, dma)
ek -= sso_qed_fac * numpy.einsum('xyijkl,jk,li->xy', gc2e_ri, dmb, dmb)
if isinstance(gobj.para_soc2e, str) and 'SOO' in gobj.dia_soc2e.upper():
# spin-density should be contracted to electron 2
ej += 2 * numpy.einsum('xyijkl,ji,lk->xy', gc2e_ri, dma+dmb, dma-dmb)
ek += 2 * numpy.einsum('xyijkl,jk,li->xy', gc2e_ri, dma, dma)
ek -= 2 * numpy.einsum('xyijkl,jk,li->xy', gc2e_ri, dmb, dmb)
gc2e = ej - ek
gc2e -= numpy.eye(3) * gc2e.trace()
gc2e *= (alpha2/8) / effspin / muB
# giao2e1 = ([GIAO-i j] + [i GIAO-j]|\frac{\vec{r}_{12}}{r_{12}^3} x p1|kl)
# giao2e2 = (ij|\frac{\vec{r}_{12}}{r_{12}^3} x p1|[GIAO-k l] + [k GIAO-l])
if gauge_orig is None:
giao2e1 = mol.intor('int2e_ipvg1_xp1', comp=9, aosym='s1').reshape(3,3,nao,nao,nao,nao)
giao2e2 = mol.intor('int2e_ipvg2_xp1', comp=9, aosym='s1').reshape(3,3,nao,nao,nao,nao)
giao2e = giao2e1 + giao2e2.transpose(1,0,2,3,4,5)
ej = numpy.zeros((3,3))
ek = numpy.zeros((3,3))
if isinstance(gobj.para_soc2e, str) and 'SSO' in gobj.dia_soc2e.upper():
ej += sso_qed_fac * numpy.einsum('xyijkl,ji,lk->xy', giao2e, dma-dmb, dma+dmb)
ek += sso_qed_fac * numpy.einsum('xyijkl,jk,li->xy', giao2e, dma, dma)
ek -= sso_qed_fac * numpy.einsum('xyijkl,jk,li->xy', giao2e, dmb, dmb)
if isinstance(gobj.para_soc2e, str) and 'SOO' in gobj.dia_soc2e.upper():
ej += 2 * numpy.einsum('xyijkl,ji,lk->xy', giao2e, dma+dmb, dma-dmb)
ek += 2 * numpy.einsum('xyijkl,jk,li->xy', giao2e, dma, dma)
ek -= 2 * numpy.einsum('xyijkl,jk,li->xy', giao2e, dmb, dmb)
gc2e -= (ej - ek) * (alpha2/4) / effspin / muB
if gobj.mb: # correction of order c^{-2} from MB basis, does it exist?
vj, vk = gobj._scf.get_jk(mol, dm0)
vhf = vj[0] + vj[1] - vk
gc_mb = numpy.einsum('ij,ji', vhf[0], dma)
gc_mb-= numpy.einsum('ij,ji', vhf[1], dmb)
gc2e += gc_mb * (alpha2/4) / effspin / muB * numpy.eye(3)
return gc2e
def make_para_soc2e(gobj, dm0, dm10, sso_qed_fac=1):
mol = gobj.mol
alpha2 = nist.ALPHA ** 2
effspin = mol.spin * .5
muB = .5 # Bohr magneton
#sso_qed_fac = (nist.G_ELECTRON - 1)
dm0a, dm0b = dm0
dm10a, dm10b = dm10
nao = dm0a.shape[0]
# hso2e is the imaginary part of SSO
# SSO term of JCP, 122, 034107 Eq (3) = 1/4c^2 hso2e
#
# Different approximations for the spin operator part are used in
# JCP, 122, 034107 Eq (15) and JCP, 115, 11080 Eq (34). The formula for the
# so-called spin-averaging in JCP, 122, 034107 Eq (15) is not well documented
# and its effects are not fully tested. The approximation of JCP, 115, 11080
# Eq (34) is adopted here.
hso2e = mol.intor('int2e_p1vxp1', 3).reshape(3,nao,nao,nao,nao)
ej = numpy.zeros((3,3))
ek = numpy.zeros((3,3))
if isinstance(gobj.para_soc2e, str) and 'SSO' in gobj.para_soc2e.upper():
ej += sso_qed_fac * numpy.einsum('yijkl,ji,xlk->xy', hso2e, dm0a-dm0b, dm10a+dm10b)
ej += sso_qed_fac * numpy.einsum('yijkl,xji,lk->xy', hso2e, dm10a-dm10b, dm0a+dm0b)
ek += sso_qed_fac * numpy.einsum('yijkl,jk,xli->xy', hso2e, dm0a, dm10a)
ek -= sso_qed_fac * numpy.einsum('yijkl,jk,xli->xy', hso2e, dm0b, dm10b)
ek += sso_qed_fac * numpy.einsum('yijkl,xjk,li->xy', hso2e, dm10a, dm0a)
ek -= sso_qed_fac * numpy.einsum('yijkl,xjk,li->xy', hso2e, dm10b, dm0b)
if isinstance(gobj.para_soc2e, str) and 'SOO' in gobj.para_soc2e.upper():
ej += 2 * numpy.einsum('yijkl,ji,xlk->xy', hso2e, dm0a+dm0b, dm10a-dm10b)
ej += 2 * numpy.einsum('yijkl,xji,lk->xy', hso2e, dm10a+dm10b, dm0a-dm0b)
ek += 2 * numpy.einsum('yijkl,jk,xli->xy', hso2e, dm0a, dm10a)
ek -= 2 * numpy.einsum('yijkl,jk,xli->xy', hso2e, dm0b, dm10b)
ek += 2 * numpy.einsum('yijkl,xjk,li->xy', hso2e, dm10a, dm0a)
ek -= 2 * numpy.einsum('yijkl,xjk,li->xy', hso2e, dm10b, dm0b)
# ~ <H^{01},MO^1> = - Tr(Im[H^{01}],Im[MO^1])
gpara2e = -(ej - ek)
gpara2e *= (alpha2/4) / effspin / muB
return gpara2e
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom = '''
H 0. , 0. , .917
F 0. , 0. , 0.'''
mol.basis = 'ccpvdz'
mol.spin = 2
mol.build()
nrhf = scf.UHF(mol)
nrhf.conv_tol_grad = 1e-6
nrhf.conv_tol = 1e-12
nrhf.kernel()
nao = mol.nao_nr()
numpy.random.seed(1)
dm0 = numpy.random.random((2,nao,nao))
dm0 = dm0 + dm0.transpose(0,2,1)
dm1 = numpy.random.random((2,3,nao,nao))
dm1 = dm1 - dm1.transpose(0,1,3,2)
class KnowValues(unittest.TestCase):
def test_nr_common_gauge_dia_gc2e(self):
g = gtensor.uhf.GTensor(nrhf)
g.dia_soc2e = 'SSO+SOO'
g.para_soc2e = 'SSO+SOO'
g.mb = True
ref = make_dia_gc2e(g, dm0, (1.2, .3, .5), 1)
dat = g.make_dia_gc2e(dm0, (1.2, .3, .5), 1)
self.assertAlmostEqual(abs(ref-dat).max(), 0, 9)
def test_nr_giao_dia_gc2e(self):
g = gtensor.uhf.GTensor(nrhf)
g.dia_soc2e = 'SSO+SOO'
g.para_soc2e = 'SSO+SOO'
g.mb = True
ref = make_dia_gc2e(g, dm0, None, 1)
dat = g.make_dia_gc2e(dm0, None, 1)
self.assertAlmostEqual(abs(ref-dat).max(), 0, 9)
def test_nr_para_soc2e(self):
g = gtensor.uhf.GTensor(nrhf)
ref = make_para_soc2e(g, dm0, dm1, 1)
dat = g.make_para_soc2e(dm0, dm1, 1)
self.assertAlmostEqual(abs(ref-dat).max(), 0, 9)
def test_nr_uhf(self):
g = gtensor.uhf.GTensor(nrhf)
g.dia_soc2e = 'SSO+SOO'
g.para_soc2e = 'SSO+SOO'
g.so_eff_charge = True
g.cphf = False
g.mb = True
dat = g.kernel()
self.assertAlmostEqual(numpy.linalg.norm(dat), 3.46802309158, 7)
if __name__ == "__main__":
print("Full Tests for HF g-tensor")
unittest.main()
|
gkc1000/pyscf
|
pyscf/prop/gtensor/test/test_uhf.py
|
Python
|
apache-2.0
| 7,596
|
[
"PySCF"
] |
f22cfeecf4ed25bfbd8ec20e84c79a310c2394c3603df8f3aac4c01c14b2b51b
|
import numpy as np
import matplotlib
import matplotlib.pylab as plt
import healpy as hp
import string
import yt
import os
import glob
from PIL import Image as PIL_Image
from images2gif import writeGif
from scipy.special import sph_harm,sph_jn
import beatbox
from beatbox.multiverse import Multiverse
# ===================================================================
def set_k_filter(self):
"""
Define a filter over the k space for the modes between kmin and kmax
"""
#Define lower & upper bounds for the filter
Universe.high_k_cutoff = Universe.truncated_nmax*Universe.Deltak
Universe.low_k_cutoff = Universe.truncated_nmin*Universe.Deltak
# Define the filter
low_k_filter = (~(Universe.n < Universe.truncated_nmin)).astype(int)
high_k_filter = (~(Universe.n > Universe.truncated_nmax)).astype(int)
Universe.kfilter = high_k_filter*low_k_filter
return
def populate_response_matrix(self):
"""
Populate the R matrix for the default range of l and n, or
over the range specified above.
"""
truncated_nmax = Universe.truncated_nmax
truncated_nmin = Universe.truncated_nmin
truncated_lmax = Universe.truncated_lmax
truncated_lmin = Universe.truncated_lmin
lms = Universe.lms
kfilter = Universe.kfilter
# Initialize R matrix:
NY = (truncated_lmax + 1)**2 - (truncated_lmin)**2
# Find the indices of the non-zero elements of the filter
ind = np.where(Universe.kfilter>0)
# The n index spans 2x that length, 1st half for the cos coefficients, 2nd half
# for the sin coefficients
NN = 2*len(ind[1])
R_long = np.zeros([NY,NN], dtype=np.complex128)
k, theta, phi = Universe.k[ind], np.arctan2(Universe.ky[ind],Universe.kx[ind]), np.arccos(Universe.kz[ind]/Universe.k[ind])
# We need to fix the 'nan' theta element that came from having ky=0
theta[np.isnan(theta)] = np.pi/2.0
# Get ready to loop over y
y = 0
A = [sph_jn(truncated_lmax,ki)[0] for ki in k]
# Loop over y, computing elements of R_yn
for i in lms:
l = i[0]
m = i[1]
trigpart = np.cos(np.pi*l/2.0)
B = np.asarray([A[ki][l] for ki in range(len(k))])
R_long[y,:NN/2] = 4.0 * np.pi * sph_harm(m,l,theta,phi).reshape(NN/2)*B.reshape(NN/2) * trigpart
trigpart = np.sin(np.pi*l/2.0)
R_long[y,NN/2:] = 4.0 * np.pi * sph_harm(m,l,theta,phi).reshape(NN/2)*B.reshape(NN/2)* trigpart
y = y+1
Universe.R = np.zeros([NY,len(ind[1])], dtype=np.complex128)
Universe.R = np.append(R_long[:,0:len(ind[1])/2], R_long[:,len(ind[1]):3*len(ind[1])/2], axis=1)
return
def get_number_of_fns(self):
'''
Get the number of fn modes.
'''
ind = np.where(Universe.kfilter>0)
fn_length = len(ind[1])
Universe.numfn = fn_length
return fn_length
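# Illustrative sketch (assumed standalone helper, not part of beatbox): the
# k-space filter defined above keeps exactly the modes whose radial index n
# satisfies truncated_nmin <= n <= truncated_nmax, i.e. a spherical shell in
# n-space.
def _demo_shell_filter(nmax=4, nmin=2):
    import numpy as np
    npix = 2 * nmax + 1
    nx, ny, nz = np.meshgrid(np.linspace(-nmax, nmax, npix),
                             np.linspace(-nmax, nmax, npix),
                             np.linspace(-nmax, nmax, npix), indexing='ij')
    n = np.sqrt(nx**2 + ny**2 + nz**2)
    kfilter = (~(n < nmin)).astype(int) * (~(n > nmax)).astype(int)
    # Every selected mode lies in the closed shell [nmin, nmax].
    assert n[kfilter > 0].min() >= nmin and n[kfilter > 0].max() <= nmax
    return kfilter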
# ====================================================================
class Universe(object):
"""
A simple model universe in a box.
"""
# ====================================================================
#Initialize the class variables
PIXSCALE = 0.1
BOXSIZE = 4.0
# Real space: define a coordinate grid:
NPIX = int(BOXSIZE/PIXSCALE) + 1
Nj = np.complex(0.0,NPIX)
#x, y, z = np.mgrid[-BOXSIZE/2.0+BOXSIZE/(2*float(NPIX)):BOXSIZE/2.0-BOXSIZE/(2*float(NPIX)):Nj, -BOXSIZE/2.0+BOXSIZE/(2*float(NPIX)):BOXSIZE/2.0-BOXSIZE/(2*float(NPIX)):Nj, -BOXSIZE/2.0+BOXSIZE/(2*float(NPIX)):BOXSIZE/2.0-BOXSIZE/(2*float(NPIX)):Nj]
x, y, z = np.mgrid[-BOXSIZE/2.0+BOXSIZE/(2*float(NPIX)):BOXSIZE/2.0-BOXSIZE/(2*float(NPIX)):Nj, -BOXSIZE/2.0+BOXSIZE/(2*float(NPIX)):BOXSIZE/2.0-BOXSIZE/(2*float(NPIX)):Nj, -BOXSIZE/2.0+BOXSIZE/(2*float(NPIX)):BOXSIZE/2.0-BOXSIZE/(2*float(NPIX)):Nj]
print beatbox.Multiverse.truncated_nmin
# Define the truncated range of modes (in n and l) we want in our Universe:
try:
truncated_nmax = beatbox.Multiverse.truncated_nmax
truncated_nmin = beatbox.Multiverse.truncated_nmin
truncated_lmax = beatbox.Multiverse.truncated_lmax
truncated_lmin = beatbox.Multiverse.truncated_lmin
except NameError:
truncated_nmax = 2
truncated_nmin = 1
truncated_lmax = 8
truncated_lmin = 0
# If only truncated_lmax is provided, calculate the largest truncated_nmax we can reconstruct
if (truncated_lmax is not None) and (truncated_nmax is None):
truncated_nmax = int(np.floor((3.0*(truncated_lmax+1)**2.0/(4.0*np.pi))**(1.0/3.0)))
# Else define a default value for truncated_nmax if not already done
elif truncated_nmax is None:
truncated_nmax = 6
# If only truncated_nmax is provided, calculate the truncated_lmax needed so that
# no information from the 3D map is lost
if (truncated_nmax is not None) and (truncated_lmax is None):
truncated_lmax = int(np.ceil(-0.5+2.0*truncated_nmax**(3.0/2.0)*np.sqrt(np.pi/3.0)))
# Make a y_max-long tuple of l and m pairs
if None not in (truncated_lmin, truncated_lmax):
lms = [(l, m) for l in range(truncated_lmin,truncated_lmax+1) for m in range(-l, l+1)]
# Fourier space: define a coordinate grid:
# The nmax we need for the resolution we want in our Universe is:
nmax = int(BOXSIZE/(2*PIXSCALE))
Deltak = 2.0*np.pi/BOXSIZE
kmax = nmax*Deltak
kx, ky, kz = np.meshgrid(np.linspace(-kmax,kmax,NPIX),np.linspace(-kmax,kmax,NPIX),np.linspace(-kmax,kmax,NPIX), indexing='ij')
k = np.sqrt(np.power(kx, 2)+np.power(ky,2)+np.power(kz,2))
nx, ny, nz = np.meshgrid(np.linspace(-nmax,nmax,NPIX),np.linspace(-nmax,nmax,NPIX),np.linspace(-nmax,nmax,NPIX), indexing='ij');
n = np.sqrt(np.power(nx, 2)+np.power(ny,2)+np.power(nz,2));
# Define the computer Fourier coordinates, used for iFFT
kmax_for_iFFt = 1/(2*PIXSCALE)
Deltak_for_iFFT = (1/BOXSIZE)
kx_for_iFFT = nx/BOXSIZE
ky_for_iFFT = ny/BOXSIZE
kz_for_iFFT = nz/BOXSIZE
# Define filter in k-space, that contains the modes we want:
kfilter = None
set_Universe_k_filter = set_k_filter
#Define and populate the R matrix:
R = None
populate_Universe_R = populate_response_matrix
numfn = None
get_numfn = get_number_of_fns
#==========================================================
def __init__(self):
# The potential map (pure real):
self.phi = self.x * 0.0
# The CMB temperature map:
self.Tmap = None
self.NSIDE = None
return
def __str__(self):
return "an empty model universe, containing a grid of 41x41x41 pixels (and the corresponding k grid in Fourier space), a k filter and the corresponding R matrix mapping between those k values and a range of l (given by the Multiverse)"
# ----------------------------------------------------------------
def read_in_CMB_T_map(self,from_this=None):
if from_this is None:
print "No CMB T map file supplied."
self.Tmapfile = None
else:
self.Tmapfile = from_this
self.Tmap = hp.read_map(from_this)
self.NSIDE = hp.npix2nside(len(self.Tmap))
return
def write_CMB_T_map(self, from_this=None, to_this='my_map'):
if from_this is None:
print "No CMB T map supplied"
else:
self.Tmapfile=to_this+".fits"
hp.write_map(self.Tmapfile, from_this)
return
def show_CMB_T_map(self,Tmap=None, max=100, title = "CMB gravitational potential fluctuations as seen from inside the LSS", from_perspective_of = "observer", cmap=None):
if Tmap is None:
self.NSIDE = 256
self.Tmap = hp.alm2map(self.alm,self.NSIDE)
else:
self.Tmap = Tmap
if from_perspective_of == "observer":
dpi = 300
figsize_inch = 60, 40
fig = plt.figure(figsize=figsize_inch, dpi=dpi)
# Sky map:
hp.mollview(self.Tmap, rot=(-90,0,0), min=-max, max=max, title=title + ", $\ell_{max}=$%d " % self.truncated_lmax, cmap=cmap, unit="$\mu$K")
plt.savefig(title+".png", dpi=dpi, bbox_inches="tight")
else:
# Interactive "external" view ([like this](http://zonca.github.io/2013/03/interactive-3d-plot-of-sky-map.html))
# beatbox.zoncaview(self.Tmap)
# This did not work, sadly. Maybe we can find a 3D
# spherical surface plot routine using matplotlib? For
# now, just use the healpix vis.
R = (0.0,0.0,0.0) # (lon,lat,psi) to specify center of map and rotation to apply
hp.orthview(self.Tmap,rot=R,flip='geo',half_sky=True,title="CMB gravitational potential fluctuations as seen from outside the LSS, $\ell_{max}$=%d" % self.truncated_lmax)
print "Ahem - we can't visualize maps on the surface of the sphere yet, sorry."
return
def decompose_T_map_into_spherical_harmonics(self,lmax=None):
"""
See healpy documentation at https://healpy.readthedocs.org/en/latest/generated/healpy.sphtfunc.map2alm.html
self.alm is a 1D numpy array of dtype=complex128.
Indexing is described at https://healpy.readthedocs.org/en/latest/generated/healpy.sphtfunc.Alm.html
"""
if lmax is None:
self.lmax = 3*self.NSIDE - 1
else:
self.lmax = lmax
self.mmax = self.lmax
self.alm = hp.sphtfunc.map2alm(self.Tmap,lmax=self.lmax,mmax=self.mmax)
return
def show_one_spherical_harmonic_of_CMB_T_map(self,l=1,m=1,max=20):
"""
To do this we need to make a healpy-format alm array, with
just one non-zero complex value in it, which we extract
from the parent alm array. Since healpy only returns positive
m coefficients, we just ask to see component with that |m|.
"""
projected_alm = self.alm * 0.0
i = hp.Alm.getidx(self.lmax, l, np.abs(m)) # Note |m| here
projected_alm[i] = self.alm[i]
projected_map = hp.alm2map(projected_alm,self.NSIDE)
hp.mollview(projected_map)
return
def show_lowest_spherical_harmonics_of_CMB_T_map(self,lmax=10,max=20, cmap=None, title=None):
"""
To do this, we construct a healpy-formatted alm array based on
a subset of the parent one, again observing the positive m-only
convention.
"""
truncated_alm = self.alm * 0.0
i = []
for l in range(lmax+1):
for m in range(l+1):
i.append(hp.Alm.getidx(self.lmax, l, m))
print "Displaying sky map of the l = ",l," and lower spherical harmonics only..."
truncated_alm[i] = self.alm[i]
self.truncated_map = hp.alm2map(truncated_alm, 256)
dpi = 300
figsize_inch = 60, 40
fig = plt.figure(figsize=figsize_inch, dpi=dpi)
hp.mollview(self.truncated_map,rot=(-90,0,0),min=-max,max=max, cmap=cmap, unit="$10^{-6}c^2$", title=title)
plt.savefig("lmax"+str(lmax)+".png", dpi=dpi, bbox_inches="tight")
return
def get_alm(self,l=None,m=None,lms=None):
"""
hp.map2alm only returns the positive m coefficients - we need
to derive the negative ones ourselves if we are going to
do anything with them outside healpy. See
http://stackoverflow.com/questions/30888908/healpy-map2alm-function-does-not-return-expected-number-of-alm-values?lq=1
for discussion.
"""
if (l is None or m is None) and lms is None:
return None
elif l is None and m is None:
ay = np.zeros(len(lms),dtype=np.complex128)
for i in lms:
if i[1] >= 0:
index = hp.Alm.getidx(self.lmax, i[0], i[1])
prefactor = 1.0
value = self.alm[index]
else:
index = hp.Alm.getidx(self.lmax, i[0], -i[1])
prefactor = (-1.0)**i[1]
value = np.conjugate(self.alm[index])
ay[i[0]**2+i[0]+i[1]-(lms[0][0])**2] = prefactor * value
return ay
elif m >= 0:
index = hp.Alm.getidx(self.lmax, l, m)
prefactor = 1.0
value = self.alm[index]
else:
index = hp.Alm.getidx(self.lmax, l, -m)
prefactor = (-1.0)**m
value = np.conjugate(self.alm[index])
return prefactor * value
def put_alm(self,value,l=None,m=None,lms=None):
'''
Re-arranges the value or vector of a_y values into the
correct order to be used by healpy as a_lm.
If lms is given, len(lms) must equal len(value), while
if l and m are specified, value must be a scalar.
'''
if (l is None or m is None) and lms is None:
return None
elif l is None and m is None:
if len(lms) != len(value):
print 'a_y and (l, m) are of unequal lengths, cannot proceed'
return
index = np.zeros(len(lms), dtype=int)
count = 0
for i in lms:
index[count] = hp.Alm.getidx(max(lms)[0], i[0], i[1])
count = count+1
lmax = max(lms)[0]
mmax = max(lms)[1]
self.alm = np.zeros(mmax*(2*lmax+1-mmax)/2+lmax+1, dtype=np.complex128)
# Throw away the negative indices (which correspond to the negative m's)
# since the maps are real, negative m coefficients can be deduced
# from the positive ones.
index_positive = index[~(index<0)]
ind1 = np.arange(len(value))
self.alm[index_positive] = value[ind1[~(index<0)]]
return
index = hp.Alm.getidx(self.truncated_lmax, l, m)
self.alm[index] = value
return
def alm2ay(self, truncated_lmax=None, truncated_lmin=None, usedefault=1):
"""
Read its own a_lm array, and return the corresponding
a_y array (in the correct order).
The conversion between the y index and (l, m) is:
(l+1)**2 - (2*l+1)/2 + 1/2 + m = l**2 + 2*l + 1 - l - 1/2 + 1/2 + m = l**2 + l + 1 + m
and the first element has index 0, so subtract 1, giving
y = l**2 + l + m; we then need to subtract the number of elements below lmin,
so y = l**2 + l + m - lmin**2
"""
if usedefault == 1:
truncated_lmax = self.truncated_lmax
truncated_lmin = self.truncated_lmin
lms = self.lms
# Make a y_max-long tuple of l and m pairs
else:
lms = [(l, m) for l in range(truncated_lmin,truncated_lmax+1) for m in range(-l, l+1)]
ay = np.zeros((truncated_lmax+1)**2-(truncated_lmin)**2,dtype=np.complex128)
ay = self.get_alm(lms=lms)
self.ay=ay
return ay
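# Illustrative worked check (comment only, added for clarity; not original
# code) of the index mapping used above: with lmin = 0 the list lms runs
#   (0,0), (1,-1), (1,0), (1,1), (2,-2), ...
# and y = l**2 + l + m - lmin**2 gives 0, 1, 2, 3, 4, ..., matching each
# pair's position in lms; with lmin = 1 the first pair (1,-1) maps to y = 0.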
def ay2alm(self, ay,truncated_lmax=None, truncated_lmin=None, usedefault=1):
"""
Repackage the a_y array into healpy-readable a_lm's
"""
if usedefault == 1:
truncated_lmax = self.truncated_lmax
truncated_lmin = self.truncated_lmin
lms=self.lms
# Make a y_max-long tuple of l and m pairs
else:
lms = [(l, m) for l in range(truncated_lmin,truncated_lmax+1) for m in range(-l, l+1)]
self.put_alm(ay, lms=lms)
return
def ay2ayreal_for_inference(self,value):
'''
Reorganize the ays so that only independent measurements are kept,
and split the real and imaginary values into different elements.
The negative m values are dependent on the positive m, so they must
be discarded, and all but the m=0 values are complex.
Therefore, we replace the positive m values by their respective real
part, and the negative m values by the imaginary part of the
corresponding positive m. This way, each l retains 2l+1 independent
real degrees of freedom.
'''
#Select the m values out of the lms tuples
m = np.array([m[1] for m in self.lms])
#Find the indices of the positive ms
pos_ind = (m>0)
#Find the indices of the m=0
zero_ind = (m==0)
#Find the indices of the negative ms
neg_ind = (m<0)
ay_real = np.zeros(len(self.lms), dtype=np.float)
ay_real[pos_ind] = value[pos_ind].real
ay_real[neg_ind] = value[pos_ind].imag
ay_real[zero_ind] = value[zero_ind].astype(np.float)
return ay_real
def ayreal2ay_for_mapping(self,ay_real):
#Select the m values out of the lms tuples
m = np.array([m[1] for m in self.lms])
#Find the indices of the positive ms
pos_ind = (m>0)
#Find the indices of the m=0
zero_ind = (m==0)
#Find the indices of the negative ms
neg_ind = (m<0)
ay = np.zeros(len(self.lms), dtype=np.complex128)
ay[pos_ind] = ay_real[pos_ind].real+1j*ay_real[neg_ind]
ay[neg_ind] = ((ay_real[pos_ind].T-1j*ay_real[neg_ind].T) * (-1.)**m[neg_ind]).T
ay[zero_ind] = ay_real[zero_ind].astype(np.complex128)
self.ay=ay
return
def write_out_spherical_harmonic_coefficients(self,lmax=10):
outfile = string.join(string.split(self.Tmapfile,'.')[0:-1],'.') + '_alm_lmax' + str(lmax) + '.txt'
f = open(outfile, 'w')
f.write("# l m alm_real alm_imag\n")
count = 0
for l in range(lmax+1):
for m in range(-l,l+1):
alm = self.get_alm(l,m)
line = " {0:d} {1:d} {2:g} {3:g}\n".format(l,m,float(np.real(alm)),float(np.imag(alm)))
f.write(line)
count += 1
f.close()
print count,"alm's (lmax =",lmax,") written to",outfile
return
# ----------------------------------------------------------------
def populate_instance_response_matrix(self,truncated_nmax=None, truncated_nmin=None,truncated_lmax=None, truncated_lmin=None, usedefault=1):
"""
Populate the R matrix for the default range of l and n, or
over the range specified above.
"""
if usedefault == 1:
truncated_nmax = self.truncated_nmax
truncated_nmin = self.truncated_nmin
truncated_lmax = self.truncated_lmax
truncated_lmin = self.truncated_lmin
lms = self.lms
kfilter = self.kfilter
else:
low_k_cutoff = truncated_nmin*self.Deltak
high_k_cutoff = truncated_nmax*self.Deltak
self.set_instance_k_filter(truncated_nmax=truncated_nmax,truncated_nmin=truncated_nmin)
lms = [(l, m) for l in range(truncated_lmin,truncated_lmax+1) for m in range(-l, l+1)]
# Initialize R matrix:
NY = (truncated_lmax + 1)**2-(truncated_lmin)**2
# Find the indices of the non-zero elements of the filter
ind = np.where(self.kfilter>0)
# The n index spans 2x that length, 1st half for the cos coefficients, 2nd half
# for the sin coefficients
NN = 2*len(ind[1])
R_long = np.zeros([NY,NN], dtype=np.complex128)
# In case we need n1, n2, n3 at some point...:
# n1, n2, n3 = self.kx[ind]/self.Deltak , self.ky[ind]/self.Deltak, self.kz[ind]/self.Deltak
k, theta, phi = self.k[ind], np.arctan2(self.ky[ind],self.kx[ind]), np.arccos(self.kz[ind]/self.k[ind])
# We need to fix the 'nan' theta element that came from having ky=0
theta[np.isnan(theta)] = np.pi/2.0
# Get ready to loop over y
y = 0
A = [sph_jn(truncated_lmax,ki)[0] for ki in k]
# Loop over y, computing elements of R_yn
for i in lms:
l = i[0]
m = i[1]
trigpart = np.cos(np.pi*l/2.0)
B = np.asarray([A[ki][l] for ki in range(len(k))])
R_long[y,:NN/2] = 4.0 * np.pi * sph_harm(m,l,theta,phi).reshape(NN/2)*B.reshape(NN/2) * trigpart
trigpart = np.sin(np.pi*l/2.0)
R_long[y,NN/2:] = 4.0 * np.pi * sph_harm(m,l,theta,phi).reshape(NN/2)*B.reshape(NN/2)* trigpart
y = y+1
self.R = np.zeros([NY,len(ind[1])], dtype=np.complex128)
self.R = np.append(R_long[:,0:len(ind[1])/2], R_long[:,len(ind[1]):3*len(ind[1])/2], axis=1)
return
# ----------------------------------------------------------------
def load_mathematica_data(self):
f= open("data/f_ns.txt", 'r')
data = f.read()
f.close()
columns = data.split()
f_n=np.zeros(len(columns))
for count in range(int(len(columns))):
f_n[count] = float(columns[count])
g= open("data/fncoordinates.txt", 'r')
data2 = g.read()
g.close()
columns2 = data2.split()
k_vec=np.zeros(len(columns2))
for count2 in range(int(len(columns2))):
k_vec[count2] = float(columns2[count2])
k_x=k_vec[0::3]
k_y=k_vec[1::3]
k_z=k_vec[2::3]
return f_n, k_x, k_y, k_z
# ----------------------------------------------------------------
def set_instance_k_filter(self,truncated_nmax=None,truncated_nmin=None):
"""
Define a filter over the k space for the modes between kmin and kmax
"""
#Make sure we have lower & upper bounds for the filter
if truncated_nmax is None:
self.high_k_cutoff = self.truncated_nmax*self.Deltak
else:
self.truncated_nmax = truncated_nmax
self.high_k_cutoff = truncated_nmax*self.Deltak
if truncated_nmin is None:
self.low_k_cutoff=self.truncated_nmin*self.Deltak
else:
self.truncated_nmin = truncated_nmin
self.low_k_cutoff = truncated_nmin*self.Deltak
# Define the filter
low_k_filter = (~(self.n < self.truncated_nmin)).astype(int)
high_k_filter = (~(self.n > self.truncated_nmax)).astype(int)
self.kfilter = high_k_filter*low_k_filter
return
def generate_a_random_potential_field(self,truncated_nmax=6,truncated_nmin=2,n_s=0.97,kstar=0.02,PSnorm=2.43e-9,Pdist=1,Pmax=2*np.pi,Pvar=0.0, printout=1, do_fft=1):
#is this really necessary since the filter definition moved up into the __init__ function??
# Set the k filter:
if (beatbox.Universe.kfilter is None) or (truncated_nmax != beatbox.Universe.truncated_nmax) or (truncated_nmin != beatbox.Universe.truncated_nmin):
self.set_instance_k_filter(truncated_nmax=truncated_nmax,truncated_nmin=truncated_nmin)
# Define the constants that go in the power spectrum
# scalar spectral index
self.n_s = n_s
# power spectrum normalization
self.PSnorm = PSnorm
# Change units of the pivot scale kstar from Mpc^-1 to normalize the smallest k
# mode to 1 (i.e. the radius of the CMB photosphere at 13.94Gpc)
self.kstar = kstar*1.394e4
# Draw Gaussian random Fourier coefficients with a k^{-3+(n_s-1)} power spectrum:
self.Power_Spectrum = self.PSnorm*10000*np.power((self.k/self.kstar) ,(-3+(self.n_s-1)))
self.Power_Spectrum[np.isinf(self.Power_Spectrum)] = 10**-9
fn_Norm = np.random.rayleigh(np.sqrt(self.Power_Spectrum/2.))*self.kfilter
        # Draw the phases for the modes: use Pdist=1 for a uniform distribution in [0,Pmax],
        # and Pdist=0 for a Gaussian distribution with mean Pmax and variance Pvar
self.Pdist = Pdist
self.Pvar = Pvar
self.Pmax = Pmax
if Pdist == 1:
fn_Phase = np.random.uniform(0, Pmax*np.ones(self.k.shape,dtype=np.float_) )*self.kfilter
else:
fn_Phase = np.random.normal(Pmax, np.sqrt(Pvar)*np.ones(self.k.shape,dtype=np.float_) )*self.kfilter
self.fn_Phase = fn_Phase
self.fn_Norm = fn_Norm
# Need to ensure that f_-k = f^*_k
# FT = fn_R + fn_I*1j
FT = fn_Norm*np.cos(fn_Phase)+fn_Norm*np.sin(fn_Phase)*1j
self.FT = FT
X = np.concatenate((np.append(FT[:self.nmax, self.nmax ,self.nmax ], 0), np.conjugate(FT[:self.nmax, self.nmax ,self.nmax ])[::-1]), axis=0)
Z = np.concatenate( ( FT[:, :self.nmax ,self.nmax ], X.reshape(2*self.nmax+1,1), np.conjugate(FT[:, :self.nmax ,self.nmax ])[::-1,::-1]), axis=1 )
self.fngrid = np.concatenate( (FT[:,:,:self.nmax], Z.reshape(2*self.nmax+1,2*self.nmax+1,1), np.conjugate( FT[:,:,:self.nmax])[::-1,::-1,::-1] ), axis=2 )
        if printout == 1:
print "Generated ",self.fngrid[~(self.fngrid[:,:,:] == 0)].size," potential Fourier coefficients"
if Pdist == 1:
print " with phases uniformly distributed between 0 and ", Pmax
else:
print " with phases sampled from a Gaussian distribution with mean ", Pmax," and variance ", Pvar
# Evaluate it on our Phi grid:
if do_fft == 1:
self.evaluate_potential_given_fourier_coefficients(printout=printout)
return
def evaluate_potential_given_fourier_coefficients(self,printout=1):
self.phi = np.zeros(self.x.shape,dtype=np.float_)
ComplexPhi = np.zeros(self.x.shape,dtype=np.complex128)
        # The commented-out block below did the iFFT manually:
# for i in range((2*self.nmax+1)**3):
# phase = self.kx.reshape((2*self.nmax+1)**3,1)[i] * self.x + self.ky.reshape((2*self.nmax+1)**3,1)[i] * self.y + self.kz.reshape((2*self.nmax+1)**3,1)[i] * self.z
# ComplexPhi += self.fngrid.reshape((2*self.nmax+1)**3,1)[i] * (np.cos(phase)+np.sin(phase)*1j)
#Now use iFFT to invert the Fourier coefficients f_n to a real space potential
ComplexPhi = np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(self.fngrid* self.Deltak_for_iFFT**3)))
# Throw out the residual imaginary part of the potential [< O(10^-16)]
self.phi = ComplexPhi.real*(self.kx_for_iFFT.shape[0])**3
        if printout == 1:
print "Built potential grid, with dimensions ",self.phi.shape,\
" and mean value ", round(np.mean(self.phi),4),"+/-",round(np.std(self.phi),7)
return
def rearrange_fn_from_grid_to_vector(self):
'''
It's easiest to generate a potential from the prior on a 3D
grid, so we can use the iFFT. For the linear algebra in the
        inference, we need the Fourier coefficients arranged in a
vector.
'''
ind = np.where(self.kfilter>0)
fn_long = np.zeros(2*len(ind[1]))
fn_long[:len(ind[1])] = (self.fngrid[ind]).real
fn_long[len(ind[1]):] = (self.fngrid[ind]).imag
self.fn = np.zeros(len(ind[1]))
self.fn[:len(ind[1])/2] = fn_long[:len(ind[1])/2]
self.fn[len(ind[1])/2:] = fn_long[len(ind[1]):3*len(ind[1])/2]
return
def rearrange_fn_from_vector_to_grid(self):
'''
        This is the inverse of rearrange_fn_from_grid_to_vector: unpack
        the fn vector used in the linear algebra of the inference back
        onto the 3D k-space grid, restoring the f_-k = f^*_k symmetry
        so that the iFFT returns a real potential.
'''
self.fn=np.squeeze(self.fn)
ind = np.where(self.kfilter>0)
fn_long = np.zeros((2*len(ind[1])))
fn_long[:len(ind[1])/2] = self.fn[:len(ind[1])/2]
fn_long[len(ind[1])-1:len(ind[1])/2-1 :-1] = self.fn[:len(ind[1])/2]
fn_long[len(ind[1]):3*len(ind[1])/2] = self.fn[len(ind[1])/2:]
fn_long[:3*len(ind[1])/2-1 :-1] = -self.fn[:len(ind[1])/2]
self.fngrid = np.zeros(self.kfilter.shape, dtype=np.complex128)
self.fngrid[ind]=fn_long[:len(ind[1])] + 1j*fn_long[len(ind[1]):]
return
def get_ordered_fn_indices(self):
'''
        Get the indices of the Fourier modes in the vector used
for the inference and sort them by increasing k value.
'''
ind = np.where(self.kfilter>0)
k, theta, phi = self.k[ind], np.arctan2(self.ky[ind], self.kx[ind]), np.arccos(self.kz[ind]/self.k[ind])
kvec_long = np.zeros(2*len(ind[1]))
kvec_long[:len(ind[1])] = k
kvec_long[len(ind[1]):] = k
kvec = np.zeros(len(ind[1]))
kvec[:len(ind[1])/2] = kvec_long[:len(ind[1])/2]
kvec[len(ind[1])/2:] = kvec_long[len(ind[1]):3*len(ind[1])/2]
ind_for_ordered_fn = np.argsort(kvec)
return ind_for_ordered_fn
def get_instance_numfn(self):
'''
Get the number of fn modes.
'''
ind = np.where(self.kfilter>0)
fn_length = len(ind[1])
return fn_length
def transform_3D_potential_into_alm(self, truncated_nmax=None, truncated_nmin=None,truncated_lmax=None, truncated_lmin=None, usedefault=1, fn=None):
'''
From the f_n on a 3D grid, rearrange the Fourier coefficients
        into a vector and generate the R matrix. From these, calculate the a_y
        and finally rearrange them into an a_lm vector usable by healpy to
        make a T map.
        The method can do this either for the harmonics corresponding to the
full range of n values of the 3D potential (if usedefault=1), or else
for the specified values. If truncated_nmax is too large for the
specified truncated_lmax, some information will be lost.
'''
# Make a vector out of the fn grid of Fourier coefficients
if fn is None:
self.rearrange_fn_from_grid_to_vector()
if usedefault == 1:
# Populate the R matrix
if beatbox.Universe.R is None:
self.populate_instance_response_matrix(truncated_nmax=truncated_nmax, truncated_nmin=truncated_nmin,truncated_lmax=truncated_lmax, truncated_lmin=truncated_lmin,usedefault=usedefault)
# Calculate the a_y matrix
ay = np.dot(self.R,self.fn)
self.ay = ay
# Reorganize a_y into a_lm
self.ay2alm(ay, usedefault=usedefault)
else:
# Populate the R matrix
self.populate_instance_response_matrix(truncated_nmax=truncated_nmax, truncated_nmin=truncated_nmin,truncated_lmax=truncated_lmax, truncated_lmin=truncated_lmin, usedefault=0)
# Calculate the a_y matrix
ay = np.dot(self.R,self.fn)
self.ay = ay
# Reorganize a_y into a_lm
self.ay2alm(ay,truncated_lmax=truncated_lmax, truncated_lmin=truncated_lmin, usedefault=0)
return
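    # ----------------------------------------------------------------
    # A minimal usage sketch for the pipeline described above, assuming
    # ay2alm() stores the healpy-ordered coefficients on the instance;
    # the attribute name and the Universe constructor arguments are
    # assumptions, not confirmed in this file:
    #
    #   U = beatbox.Universe()
    #   U.generate_a_random_potential_field(truncated_nmax=6, truncated_nmin=2)
    #   U.transform_3D_potential_into_alm(usedefault=1)
    #   # e.g. with healpy:  Tmap = hp.alm2map(U.alm, nside=64); hp.mollview(Tmap)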
def show_potential_with_yt(self,output='',angle=np.pi/4.0, N_layer=5, alpha_norm=5.0, cmap='BrBG', Proj=0, Slice=0, gifmaking=0, show3D=0, continoursshade = 50.0, boxoutput='scratch/opac_phi3D_Gauss_phases_mean', slicerad=1):
"""
Visualize the gravitational potential using yt. We're after something
like http://yt-project.org/doc/_images/vr_sample.jpg - described
at http://yt-project.org/doc/visualizing/volume_rendering.html
"""
# Load the potential field into a yt data structure,
# offsetting such that minimum value is zero.
# First get extrema of phi array:
mi = np.min(self.phi)
ma = np.max(self.phi)
print mi, ma
# Symmetrize to put zero at center of range:
ma = np.max(np.abs([mi,ma]))
mi = -ma
# Offset to make minimum value zero:
offset = ma
ma = 2.0*ma
mi = 0.0
# Size of the box containing the phi
# Physical -2 to 2 box
# bbox = np.array([[-2, 2], [-2, 2], [-2, 2]])
# Physical box from the iFFT
bbox = np.array([[np.min(self.x), np.max(self.x)], [np.min(self.y), np.max(self.y)], [np.min(self.z), np.max(self.z)]])
# Apply offset and store phi array in a yt data structure,
# I'm putting some random density units here
# (seems to be needed to display properly):
xnorm=np.sqrt(self.x**2 + self.y**2 + self.z**2);
        if (Slice != 1) and (Proj != 1):
indgtr = (~(xnorm < 0.9)).astype(int)
indsmlr = (~(xnorm > 1.1)).astype(int)
ind = indgtr*indsmlr
sphere = np.ones(self.phi.shape)
sphere = 5.*ind
#sphere = 0.0007*ind
negsphere = -self.phi*ind
else:
sphere = np.zeros(self.phi.shape)
negsphere = np.zeros(self.phi.shape)
#self.phi[0,0,200]=-40
#self.phi[-1,-1,200]=20
#phiprime=self.phi
#phiprime[np.where(self.phi<-18)]=-20
# ds = yt.load_uniform_grid((dict(density=(self.phi+sphere, 'g/cm**3'), Xnorm=(xnorm, 'g/cm**3'))), self.phi.shape, bbox=bbox, nprocs=1)
ds = yt.load_uniform_grid((dict(density=(self.phi+offset+sphere, 'g/cm**3'), Xnorm=(xnorm, 'g/cm**3'))), self.phi.shape, bbox=bbox, nprocs=1)
field = 'density'
#Check that the loaded field is recognized by yt
# print ds.field_list
# Here's Sam's gist, from https://gist.github.com/samskillman/0e574d1a4f67d3a3b1b1
# im, sc = yt.volume_render(ds, field='phi')
# sc.annotate_domain(ds)
# sc.annotate_axes()
# im = sc.render()
# im.write_png(output, background='white')
# volume_render is not yet available, though.
# Following the example at http://yt-project.org/doc/visualizing/volume_rendering.html
# Set minimum and maximum of plotting range (in proper yt units):
dd = ds.all_data()
mi2, ma2 = dd.quantities.extrema(field)
#print "Extrema of ds phi:",mi,ma, mi2, ma2
use_log = False
# Instantiate the ColorTransferFunction.
# tf = yt.ColorTransferFunction((mi2, ma2))
# tf.grey_opacity=True
# Add some isopotential surface layers:
# tf.add_layers(N_layer, 0.0000005*(ma2 - mi2) / N_layer, alpha=alpha_norm*np.ones(N_layer,dtype='float64'), colormap = cmap)
# Instantiate the ColorTransferFunction using the transfer function helper.
from IPython.core.display import Image
from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper
        tfh = TransferFunctionHelper(ds)  # use the helper class imported above
tfh.set_field('density')
tfh.set_log(False)
tfh.set_bounds()
tfh.build_transfer_function()
tfh.tf.grey_opacity=True
#For small units, wide Gaussians:
tfh.tf.add_layers(N_layer, w=0.0005*(ma2 - mi2) /N_layer, mi=0.2*ma, ma=ma-0.2*ma, alpha=alpha_norm*np.ones(N_layer,dtype='float64'), col_bounds=[0.2*ma,ma-0.2*ma] , colormap=cmap)
#For big units, small Gaussians
#tfh.tf.add_layers(N_layer, w=0.00000005*(ma2 - mi2) /N_layer, mi=0.3*ma, ma=ma-0.2*ma, alpha=alpha_norm*np.ones(N_layer,dtype='float64'), col_bounds=[0.3*ma,ma-0.3*ma] , colormap=cmap)
        if (Slice != 1) and (Proj != 1):
tfh.tf.map_to_colormap(5., 10.0, colormap='jet', scale=continoursshade)
#tfh.tf.map_to_colormap(0.001, 0.0014, colormap='jet', scale=continoursshade)
#tfh.tf.add_layers(1, w=0.001*ma2, mi=0.0108, ma=0.012, colormap='Pastel1', col_bounds=[0.01, 0.012])
# Check if the transfer function captures the data properly:
densityplot1 = tfh.plot('densityplot1')
densityplot2 = tfh.plot('densityplot2', profile_field='cell_mass')
# Set up the camera parameters: center, looking direction, width, resolution
c = (np.max(self.x)+np.min(self.x))/2.0
Lx = np.sqrt(2.0)*np.cos(angle)
Ly = np.sqrt(2.0)*np.sin(angle)
Lz = 0.75
L = np.array([Lx, Ly, Lz])
W = ds.quan(1.6, 'unitary')
N = 512
# Create a camera object
cam = ds.camera(c, L, W, N, tfh.tf, fields=[field], log_fields = [use_log], no_ghost = False)
cam.transfer_function = tfh.tf
if self.Pdist == 1:
im1 = cam.snapshot('scratch/opac_phi3D_Uniform_phases_0-'+str(self.Pmax)+'.png', clip_ratio=5)
else:
im1 = cam.snapshot('scratch/'+boxoutput+str(self.Pmax)+'_var'+str(self.Pvar)+'.png', clip_ratio=5)
im1.write_png('scratch/transparent_bg.png', background=[0.,0.,0.,0.])
im1.write_png('scratch/white1_bg.png', background=[1.,1.,1.,1.])
nim = cam.draw_grids(im1)
#im=cam.snapshot
#nim = cam.draw_box(im, np.array([0.25,0.25,0.25]), np.array([0.75,0.75,0.75]))
if show3D == 1:
nim.write_png(boxoutput)
cam.show()
# Make a color bar with the colormap.
# cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)
self.cam = cam
if gifmaking == 1:
# Add the domain box to the image:
nim = cam.draw_domain(im1)
# Save the image to a file:
nim.write_png(output)
if Proj == 1:
s = yt.ProjectionPlot(ds, "z", "density")
            # this still doesn't work:
s.annotate_sphere([0., 0., 0.], radius=(1, 'kpc'),
circle_args={'color':'red', "linewidth": 3})
s.show()
s.save('phi')
if Slice == 1:
w = yt.SlicePlot(ds, "z", "density", center=[0,0,slicerad])
w.set_cmap(field="density", cmap=cmap)
circrad = np.sqrt(1-slicerad*slicerad)
w.annotate_sphere([0., 0., 0.], radius=(circrad, 'cm'),
circle_args={'color':'red',"linewidth": 3})
w.show()
w.save('phi')
return
def show_potential_from_all_angles_with_yt(self,output='phi.gif'):
# Create 36 frames for the animated gif, one for each angle:
steps = 36
angles = np.arange(steps)*np.pi/np.float(steps)/2.0+np.pi/4
# current bug: the frames jump at pi/4, 3pi/4 etc..
# book-keeping:
folder = 'frames/'
os.system("rm -rf "+folder)
os.system("mkdir -p "+folder)
# Now create the individual frames:
for k,angle in enumerate(angles):
framefile = folder+str(k).zfill(3)
print "Making frame",k,": ",framefile,"at viewing angle",angle
self.show_potential_with_yt(output=framefile,angle=angle, N_layer=6, alpha_norm=5.0, cmap='BrBG', Proj=0, Slice=0, gifmaking=1)
# Create an animated gif of all the frames:
images = [PIL_Image.open(framefile) for framefile in glob.glob(folder+'*.png')]
writeGif(output, images, duration=0.2)
return
def make_gif_from_frames_with_yt(self,folder='../frames/', output='phi.gif'):
# Create an animated gif of all the frames:
images = [PIL_Image.open(framefile) for framefile in glob.glob(folder+'*.png')]
writeGif(output, images, duration=0.2)
return
# ====================================================================
"""
Response matrix from Roger's mathematica notebook:
# Construct the klst:
nmax = 6;
klst = {};
Do[
If[0 < n1^2 + n2^2 + n3^2 <= nmax^2, klst = Append[klst, {n1, n2, n3}]],
{n1, -nmax, nmax}, {n2, -nmax, nmax}, {n3, -nmax, nmax}
];
NN = Length[klst];
# Set size of box, via separation in k space:
\[CapitalDelta]k = .5 \[Pi];
# Construct llst, an array of l's and m's for use in Spherical Harmonics:
# Note that the monopole and dipole are ignored!
lmax = 10;
llst = {};
Do[
If[1 < l <= lmax, llst = Append[llst, {l, m}]], {l, 2, lmax}, {m, -l, l}
];
llst; # Not sure what this line does.
L = Length[llst];
# Construct R matrix:
R = Chop[ # Clean out rounding errors (esp in imaginary parts)
Table[4. \[Pi] I^llst[[y, 1]] # i^l - imaginary i!
SphericalHarmonicY[llst[[y, 1]],
llst[[y, 2]],
ArcCos[klst[[n, 3]]/Norm[klst[[n]]]], # theta'
If[klst[[n, 1]] == klst[[n, 2]] == 0, 0, ArcTan[klst[[n, 1]], klst[[n, 2]]]]] # phi'
\[Conjugate] # Take complex conjugate of the Ylm
SphericalBesselJ[llst[[y, 1]], \[CapitalDelta]k Norm[klst[[n]]]], # Norm gives the length of the k vector
{y, 1, L}, # for y in range 1 to L
{n, 1, NN} # for n in range 1 to NN
] # End of Table command
];
# Write it out:
(*Export["myn.txt",R]*)
"""
|
LaurencePeanuts/Music
|
beatbox/universe.py
|
Python
|
mit
| 41,104
|
[
"Gaussian"
] |
e6661f064a65cb7e2acb0440e3a3824a2500187e5f366101b049eab523c14ec4
|
import urllib
from galaxy import datatypes, config
def exec_before_job( trans, inp_data, out_data, param_dict, tool=None):
"""Sets the name of the data"""
data_name = param_dict.get( 'name', 'Biomart query' )
data_type = param_dict.get( 'type', 'text' )
name, data = out_data.items()[0]
data = datatypes.change_datatype(data, data_type)
data.name = data_name
out_data[name] = data
def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
"""Verifies the data after the run"""
URL = param_dict.get( 'URL', None )
    if not URL:
        raise Exception('Datasource has not sent back a URL parameter')
    URL = URL + '&_export=1&GALAXY_URL=0'
CHUNK_SIZE = 2**20 # 1Mb
MAX_SIZE = CHUNK_SIZE * 100
try:
        # Undo the sanitizer's character substitutions:
URL = URL.replace('martX', 'mart&')
URL = URL.replace('0X_', '0&_')
page = urllib.urlopen(URL)
except Exception, exc:
raise Exception('Problems connecting to %s (%s)' % (URL, exc) )
name, data = out_data.items()[0]
fp = open(data.file_name, 'wb')
size = 0
while 1:
chunk = page.read(CHUNK_SIZE)
if not chunk:
break
if size > MAX_SIZE:
raise Exception('----- maximum datasize exceeded ---')
size += len(chunk)
fp.write(chunk)
fp.close()
data.set_peek()
|
jmchilton/galaxy-central
|
tools/data_source/biomart_filter.py
|
Python
|
mit
| 1,427
|
[
"Galaxy"
] |
a863f5eeef938439d5fe747f50155ca2e36fc9f988d6cd93f502f484dfd29ce4
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Tcoffee(MakefilePackage):
"""T-Coffee is a multiple sequence alignment program."""
homepage = "http://www.tcoffee.org/"
git = "https://github.com/cbcrg/tcoffee.git"
version('2017-08-17', commit='f389b558e91d0f82e7db934d9a79ce285f853a71')
depends_on('perl', type=('build', 'run'))
depends_on('blast-plus')
depends_on('dialign-tx')
depends_on('viennarna')
depends_on('clustalw')
depends_on('tmalign')
depends_on('muscle')
depends_on('mafft')
depends_on('pcma')
depends_on('poamsa')
depends_on('probconsrna')
build_directory = 'compile'
def build(self, spec, prefix):
with working_dir(self.build_directory):
make('t_coffee')
def install(self, spec, prefix):
mkdirp(prefix.bin)
with working_dir(self.build_directory):
install('t_coffee', prefix.bin)
|
krafczyk/spack
|
var/spack/repos/builtin/packages/tcoffee/package.py
|
Python
|
lgpl-2.1
| 2,140
|
[
"BLAST"
] |
2fb2b9da990d7daba18ffeed95cf34a7fcce5a46e2ba74e0bcc3f959eedf26c1
|