| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
import neurokernel.mpi_relaunch
import scipy.io as io
from libSpineML2NK import nk_executable
from libSpineML import smlExperiment
import numpy as np
import pdb
e = nk_executable.Executable('./experiment0.xml')
exp = e.bundle.experiments[0].Experiment[0]
ai = exp.AbstractInput[0]
mutant = io.loadmat('data/MutantBG6Data.mat')
m_input = mutant['recorded_input'][0:36000,0]
l = m_input.tolist()
l.insert(0,0)
m_input = np.array(l)
net = e.bundle.networks[0]
pop = net.Population[0]
pop.Neuron.Property
pars = {
'Contrast_K_1': 1.0000931884041385e-06,
'Contrast_K_2': 0.017438565232583827,
'Contrast_K_3': 0.00053921972167124226,
'Contrast_X0_1': 0.78950438106528775,
'Contrast_X0_2': 0.33112848144438961,
'Contrast_X0_3': 7.7300101281352198,
'Contrast_alpha_1': -2.2093228258778383,
'Contrast_alpha_2': -1.3444700959816109,
'Contrast_alpha_3': -1.794266702113672,
'Contrast_beta_1': 6999.9905331388572,
'Contrast_beta_2': 496.14805024208408,
'Contrast_beta_3': 3100.0004971668282,
"Contrast_beta_G']": 16.405345948449689,
'Contrast_tau_1': 1.0401071826364103,
'Contrast_A_1':0.0012003566315597568,
'Contrast_B_1': -0.9975992867368805,
'Contrast_tau_2': 0.49999997136200214,
'Contrast_A_2': 0.0024937657285116362,
'Contrast_B_2': -0.9950124685429768,
'Contrast_tau_3': 1.7319623248428329,
'Contrast_A_3': 0.0007212041952871236,
'Contrast_B_3': -0.9985575916094257,
'Gralbeta_G': 1000,
'Mean_K_1': 0.0073778950460808802,
'Mean_K_2': 0.022266684710357632,
'Mean_K_3': 0.0066009688542815071,
'Mean_X0_1': 0.00014552091408427518,
'Mean_X0_2': 0.08958786615222121,
'Mean_X0_3': 0.1033351339097275,
'Mean_alpha_1': -1.6952757942013319,
'Mean_alpha_2': -1.2384563460468387,
'Mean_alpha_3': -1.1660506692817525,
'Mean_beta_1': 56.70165385724556,
'Mean_beta_2': 416.19002756957138,
'Mean_beta_3': 2000,
'Mean_beta_G': 15.581626909473325,
'Mean_tau_1': 8.2361520984230552,
'Mean_A_1': 0.00015174687177639374,
'Mean_B_1': -0.9996965062564472,
'Mean_tau_2': 0.60169556672600977,
'Mean_A_2': 0.002073155636233452,
'Mean_B_2': -0.9958536887275331,
'Mean_tau_3': 3.707965580158564,
'Mean_A_3': 0.0003369984766284639,
'Mean_B_3': -0.9993260030467431,
'Am': 0.0012484394506866417,
'Bm' : -0.9975031210986267
}
model = io.loadmat("../data/MutantModel.mat")
theta = np.ravel(model['mutantNARXparamsBG6'])
# Overwrite default values with Python Script Values
for prop in pop.Neuron.Property:
if prop.name in pars:
prop.AbstractValue.value = pars[prop.name]
print "Replaced %s" % prop.name
else:
# Set initial values:
if prop.name == 'Contrast_ym_1':
prop.AbstractValue.value = pars['Contrast_X0_1']
print "Replaced %s" % prop.name
if prop.name == 'Contrast_ym_2':
prop.AbstractValue.value = pars['Contrast_X0_2']
print "Replaced %s" % prop.name
if prop.name == 'Contrast_ym_3':
prop.AbstractValue.value = pars['Contrast_X0_3']
print "Replaced %s" % prop.name
if prop.name == 'Mean_ym_1':
prop.AbstractValue.value = pars['Mean_X0_1']
print "Replaced %s" % prop.name
if prop.name == 'Mean_ym_2':
prop.AbstractValue.value = pars['Mean_X0_2']
print "Replaced %s" % prop.name
if prop.name == 'Mean_ym_3':
prop.AbstractValue.value = pars['Mean_X0_3']
print "Replaced %s" % prop.name
if prop.name[0:2] == 'th':
# Inject Narx Parameters
num = int(prop.name[2:])  # numeric index after the 'th' prefix
prop.AbstractValue.value = float(theta[num])
print "Replaced %s" % theta[num]
print prop.name + " is " + str(prop.AbstractValue.value)
# Saturate Input! Nothing below 1e-10
m_input[m_input < 1e-10] = 1e-10
#Rewrite Input Dynamically from mutant data
ai.TimePointValue = [] # Get rid of default input
for time, inj in enumerate(m_input):
tp = smlExperiment.TimePointValueType(time=time,value=inj)
ai.add_TimePointValue(tp)
exp.Simulation.duration = len(m_input)/1000.0
e.set_debug()
e.execute()
| (code above) | AdamRTomkins/libSpineML2NK | libSpineML2NK/examples/Narx/Narx_Python/run_python_params.py | Python | gpl-3.0 | 4,162 | ["NEURON"] | a12c6d6f89baaad01acc869139cf4e58900fdb64aa6b0b3f63028a2115423918 |
#!/usr/bin/env python
"""
Author: Shane Bussmann, T. K. Daisy Leung
Similar to uvmcmcfit.py, but here we edited it to
- throw out a pre-defined number of burn-in samples;
- save acceptance_fraction + misc stuff as a separate file
- only save samples every so many iterations (instead of every iteration)
- email ourselves once a certain number of samples have been obtained, so we can decide whether or not to stop sampling instead of interrupting the code
Last modified: 2016 Dec 15
Note: This is experimental software that is in a very active stage of
development. If you are interested in using this for your research, please
contact me first at tleung@astro.cornell.edu! Thanks.
Purpose: Fit a parametric model to interferometric data using Dan
Foreman-Mackey's emcee routine. Gravitationally lensed sources are accounted
for using ray-tracing routines based on Adam Bolton's lensdemo_script and
lensdemo_func python scripts. Here is the copyright license from
lensdemo_script.py:
Copyright 2009 by Adam S. Bolton
Creative Commons Attribution-Noncommercial-ShareAlike 3.0 license applies:
http://creativecommons.org/licenses/by-nc-sa/3.0/
All redistributions, modified or otherwise, must include this
original copyright notice, licensing statement, and disclaimer.
DISCLAIMER: ABSOLUTELY NO WARRANTY EXPRESS OR IMPLIED.
AUTHOR ASSUMES NO LIABILITY IN CONNECTION WITH THIS COMPUTER CODE.
--------------------------
USAGE
python $PYSRC/uvmcmcfit2.py
--------------------------
SETUP PROCEDURES
1. Establish a directory that contains data for the specific target for which
you wish to measure a lens model. This is the directory from which you will
run the software.
I call this "uvfit00" for the first run on a given dataset, "uvfit01" for
the second, etc.
2. Inside this directory, you must ensure the following files are present:
- "config.yaml": This is the configuration file that describes where the source
of interest is located, what type of model to use for the lens and source, the
name of the image of the target from your interferometric data, the name of
the uvfits files containing the interferometric visibilities, and a few
important processing options as well. Syntax is yaml.
- Image of the target from your interferometric data. The spatial resolution
of this image (arcseconds per pixel), modified by an optional oversampling
parameter, defines the spatial resolution in both the unlensed and lensed
surface brightness maps.
- interferometric visibilities for every combination of array configuration,
sideband, and date observed that you want to model.
3. More info about the constraints and priors input files.
- Lenses: The lenses are assumed to have singular isothermal ellipsoid
profiles.
- Sources: Sources are represented by Gaussian profiles.
--------
OUTPUTS
"posteriorpdf.fits": model parameters for every MCMC iteration, in fits
format.
"summary.txt": contains mean acceptance fraction
"""
from __future__ import print_function
# import the required modules
import os
import os.path
import sys
from astropy.io import fits
import numpy
from astropy.table import Table
import emcee
#import pyximport
#pyximport.install(setup_args={"include_dirs":numpy.get_include()})
import sample_vis
import lensutil
import uvutil
import setuputil
import yaml
from subprocess import call
import time
#cwd = os.getcwd()
#sys.path.append(cwd)
#import config
def lnprior(pzero_regions, paramSetup):
"""
Function that computes the ln prior probabilities of the model parameters.
"""
priorln = 0.0
mu = 1
# import pdb; pdb.set_trace()
# ensure all parameters are finite
if (pzero_regions * 0 != 0).any():
priorln = -numpy.inf
return priorln, mu
# Uniform priors
uniform_regions = paramSetup['PriorShape'] == 'Uniform'
if uniform_regions.any():
p_l_regions = paramSetup['p_l'][uniform_regions]
p_u_regions = paramSetup['p_u'][uniform_regions]
pzero_uniform = pzero_regions[uniform_regions]
if (pzero_uniform > p_l_regions).all() and (pzero_uniform < p_u_regions).all():
# log prior
priorln += numpy.log(1.0/numpy.abs(p_l_regions - p_u_regions)).sum()
else:
priorln = -numpy.inf
return priorln, mu
# Gaussian priors
gaussian_regions = paramSetup['PriorShape'] == 'Gaussian'
if gaussian_regions.any():
import scipy.stats as stats
# for Gaussian priors, p_l stores the mean and p_u stores the sigma
mean_regions = paramSetup['p_l'][gaussian_regions]
rms_regions = paramSetup['p_u'][gaussian_regions]
pzero_gauss = pzero_regions[gaussian_regions]
priorln += numpy.log(stats.norm(scale=rms_regions, loc=mean_regions).pdf(pzero_gauss)).sum()
# Gaussian pos (for parameter that must be positive e.g. flux density)
gaussPos_regions = paramSetup['PriorShape'] == 'GaussianPos'
if gaussPos_regions.any():
pzero_gaussPos = pzero_regions[gaussPos_regions]
if (pzero_gaussPos < 0.0).any():
priorln = -numpy.inf
return priorln, mu
else:
import scipy.stats as stats
# for Gaussian priors, p_l stores the mean and p_u stores the sigma
mean_regions = paramSetup['p_l'][gaussPos_regions]
rms_regions = paramSetup['p_u'][gaussPos_regions]
priorln += numpy.log(stats.norm(scale=rms_regions, loc=mean_regions).pdf(pzero_gaussPos)).sum()
# if not isinstance(priorln, float):
# priorln = priorln.sum()
return priorln, mu
def lnlike(pzero_regions, vis_complex, wgt, uuu, vvv, pcd,
fixindx, paramSetup, computeamp=True, miriad=False):
""" Function that computes the Ln likelihood of the data"""
# search poff_models for parameters fixed relative to other parameters
fixindx = numpy.array(fixindx)
fixed = (numpy.where(fixindx >= 0))[0]
nfixed = fixindx[fixed].size
p_u_regions = paramSetup['p_u']
poff_regions = p_u_regions.copy()
poff_regions[:] = 0.
#for ifix in range(nfixed):
# poff_regions[fixed[ifix]] = pzero_regions[fixindx[fixed[ifix]]]
for ifix in range(nfixed):
ifixed = int(fixed[ifix])
subindx = int(fixindx[ifixed])
par0 = 0
if fixindx[subindx] > 0:
par0 = pzero_regions[fixindx[subindx]]
poff_regions[ifixed] = pzero_regions[subindx] + par0
parameters_regions = pzero_regions + poff_regions
npar_previous = 0
amp = [] # Will contain the 'blobs' we compute
g_image_all = 0.
g_lensimage_all = 0.
e_image_all = 0.
e_lensimage_all = 0.
nregions = paramSetup['nregions']
for regioni in range(nregions):
# get the model info for this model
x = paramSetup['x'][regioni]
y = paramSetup['y'][regioni]
headmod = paramSetup['modelheader'][regioni]
nlens = paramSetup['nlens_regions'][regioni]
nsource = paramSetup['nsource_regions'][regioni]
model_types = paramSetup['model_types'][regioni]
# get pzero, p_u, and p_l for this specific model
nparlens = 5 * nlens
nparsource = 6 * nsource
npar = nparlens + nparsource + npar_previous
parameters = parameters_regions[npar_previous:npar]
npar_previous = npar
#-----------------------------------------------------------------
# Create a surface brightness map of lensed emission for the given set
# of foreground lens(es) and background source parameters.
#-----------------------------------------------------------------
g_image, g_lensimage, e_image, e_lensimage, amp_tot, amp_mask = \
lensutil.sbmap(x, y, nlens, nsource, parameters, model_types,
computeamp=computeamp)
e_image_all += e_image
e_lensimage_all += e_lensimage
g_image_all += g_image
g_lensimage_all += g_lensimage
amp.extend(amp_tot)
amp.extend(amp_mask)
# --------------------------------------------------------------------
# Python version of UVMODEL:
# "Observe" the lensed emission with the interferometer
# --------------------------------------------------------------------
if nlens > 0:
if computeamp:
# Evaluate amplification for each region
lensmask = e_lensimage != 0
mask = e_image != 0
numer = g_lensimage[lensmask].sum()
denom = g_image[mask].sum()
amp_mask = numer / denom
numer = g_lensimage.sum()
denom = g_image.sum()
amp_tot = numer / denom
if amp_tot > 1e2:
amp_tot = 1e2
if amp_mask > 1e2:
amp_mask = 1e2
amp.extend([amp_tot])
amp.extend([amp_mask])
else:
amp.extend([1.0])
amp.extend([1.0])
if miriad:
# save the fits image of the lensed source
ptag = str(os.getpid())
SBmapLoc = 'LensedSBmap' + ptag + '.fits'
fits.writeto(SBmapLoc, g_lensimage_all, header=headmod, clobber=True)
# convert fits format to miriad format
SBmapMiriad = 'LensedSBmap' + ptag + '.miriad'
os.system('rm -rf ' + SBmapMiriad)
cmd = 'fits op=xyin in=' + SBmapLoc + ' out=' + SBmapMiriad
call(cmd + ' > /dev/null 2>&1', shell=True)
# compute simulated visibilities
modelvisfile = 'SimulatedVisibilities' + ptag + '.miriad'
call('rm -rf ' + modelvisfile, shell=True)
cmd = 'uvmodel options=subtract vis=' + visfilemiriad + \
' model=' + SBmapMiriad + ' out=' + modelvisfile
call(cmd + ' > /dev/null 2>&1', shell=True)
# convert simulated visibilities to uvfits format
mvuvfits = 'SimulatedVisibilities' + ptag + '.uvfits'
call('rm -rf ' + mvuvfits, shell=True)
cmd = 'fits op=uvout in=' + modelvisfile + ' out=' + mvuvfits
call(cmd + ' > /dev/null 2>&1', shell=True)
# read simulated visibilities
mvuv = fits.open(mvuvfits)
diff_real = mvuv[0].data['DATA'][:, 0, 0, 0, 0, 0]
diff_imag = mvuv[0].data['DATA'][:, 0, 0, 0, 0, 1]
wgt = mvuv[0].data['DATA'][:, 0, 0, 0, 0, 2]
#model_complex = model_real[goodvis] + 1.0j * model_imag[goodvis]
diff_all = numpy.append(diff_real, diff_imag)
wgt = numpy.append(wgt, wgt)
goodvis = wgt > 0
diff_all = diff_all[goodvis]
wgt = wgt[goodvis]
chi2_all = wgt * diff_all * diff_all
else:
model_complex = sample_vis.uvmodel(g_lensimage_all, headmod,
uuu, vvv, pcd)
diff_all = numpy.abs(vis_complex - model_complex)
chi2_all = wgt * diff_all * diff_all
#model_real += numpy.real(model_complex)
#model_imag += numpy.imag(model_complex)
#fits.writeto('g_lensimage.fits', g_lensimage_all, headmod, clobber=True)
#import matplotlib.pyplot as plt
#print(pzero_regions)
#plt.imshow(g_lensimage, origin='lower')
#plt.colorbar()
#plt.show()
#plt.imshow(g_image, origin='lower')
#plt.colorbar()
#plt.show()
# calculate chi^2 assuming natural weighting
#fnuisance = 0.0
#modvariance_real = 1 / wgt #+ fnuisance ** 2 * model_real ** 2
#modvariance_imag = 1 / wgt #+ fnuisance ** 2 * model_imag ** 2
#wgt = wgt / 4.
#chi2_real_all = (real - model_real) ** 2. / modvariance_real
#chi2_imag_all = (imag - model_imag) ** 2. / modvariance_imag
#chi2_all = numpy.append(chi2_real_all, chi2_imag_all)
# compute the sigma term
#sigmaterm_real = numpy.log(2 * numpy.pi / wgt)
#sigmaterm_imag = numpy.log(2 * numpy.pi * modvariance_imag)
# compute the ln likelihood
lnlikemethod = paramSetup['lnlikemethod']
if lnlikemethod == 'chi2':
lnlike = chi2_all
else:
# by definition, loglike = -n/2*ln(2pi sigma^2) - 1/(2sigma^2) sum of (data-model)^2 over i=1 to n; but the constant term doesn't matter
sigmaterm_all = len(wgt) * numpy.log(2 * numpy.pi / wgt)
lnlike = chi2_all # + sigmaterm_all
# * -1/2 factor in latter step
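# Written out, the quantity eventually returned (after the -0.5 factor
# applied below) is, up to the dropped constant,
#   ln L = -0.5 * sum_i w_i * |V_i^data - V_i^model|^2
# where w_i are the visibility weights.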
# compute number of degrees of freedom
#nmeasure = lnlike.size
#nparam = (pzero != 0).size
#ndof = nmeasure - nparam
# assert that lnlike is equal to -1 * maximum likelihood estimate
# use visibilities where weight is greater than 0
#goodvis = wgt > 0
#likeln = -0.5 * lnlike[goodvis].sum()
likeln = -0.5 * lnlike.sum()
#print(pcd, likeln)
if likeln * 0 != 0:
likeln = -numpy.inf
return likeln, amp
def lnprob(pzero_regions, vis_complex, wgt, uuu, vvv, pcd,
fixindx, paramSetup, computeamp=True, miriad=False):
"""
Computes ln probabilities via ln prior + ln likelihood
"""
lp, mu = lnprior(pzero_regions, paramSetup)
if not numpy.isfinite(lp):
probln = -numpy.inf
mu = 1
return probln, mu
ll, mu = lnlike(pzero_regions, vis_complex, wgt, uuu, vvv, pcd,
fixindx, paramSetup, computeamp=computeamp, miriad=miriad)
normalization = 1.0#2 * real.size
probln = lp * normalization + ll
# print(probln, lp*normalization, ll) # remove
return probln, mu
configloc = 'config.yaml'
configfile = open(configloc, 'r')
config = yaml.load(configfile)
# Determine if we are going to compute the amplification of every model
if config.keys().count('ComputeAmp') > 0:
computeamp = config['ComputeAmp']
else:
computeamp = True
# Determine parallel processing options
if config.keys().count('MPI') > 0:
mpi = config['MPI']
else:
mpi = False
# multiple processors on a cluster using MPI
if mpi:
from emcee.utils import MPIPool
# One thread per slot
Nthreads = 1
# Initialize the pool object
pool = MPIPool()
# If this process is not running as master, wait for instructions, then exit
if not pool.is_master():
pool.wait()
sys.exit(0)
# Single processor with Nthreads cores
else:
if config.keys().count('Nthreads') > 0:
# set the number of threads to use for parallel processing
Nthreads = config['Nthreads']
else:
Nthreads = 1
# Initialize the pool object
pool = ''
#--------------------------------------------------------------------------
# Read in ALMA image and beam
#im = fits.getdata(config['ImageName'])
#im = im[0, 0, :, :].copy()
headim = fits.getheader(config['ImageName'])
# get resolution in ALMA image
#celldata = numpy.abs(headim['CDELT1'] * 3600)
#--------------------------------------------------------------------------
# read in visibility data
visfile = config['UVData']
# Determine if we will use miriad to compute simulated visibilities
if config.keys().count('UseMiriad') > 0:
miriad = config['UseMiriad']
if miriad:
interactive = False
index = visfile.index('uvfits')
visfilemiriad = visfile[0:index] + 'miriad'
# scale the weights
newvisfile = visfile[0:index] + 'scaled.uvfits'
uvutil.scalewt(visfile, newvisfile)
visfile = newvisfile
else:
miriad = False
else:
miriad = False
# attempt to process multiple visibility files. This won't work if miriad=True
try:
filetype = visfile[-6:]
if filetype == 'uvfits':
uvfits = True
else:
uvfits = False
uuu, vvv, www = uvutil.uvload(visfile)
pcd = uvutil.pcdload(visfile)
vis_complex, wgt = uvutil.visload(visfile)
except:
try:
for i, ivisfile in enumerate(visfile):
filetype = ivisfile[-6:]
if filetype == 'uvfits':
uvfits = True
else:
uvfits = False
iuuu, ivvv, iwww = uvutil.uvload(ivisfile)
ipcd = uvutil.pcdload(ivisfile)
ivis_complex, iwgt = uvutil.visload(ivisfile)
if i == 0:
uuu = iuuu
vvv = ivvv
pcd = ipcd
vis_complex = ivis_complex
wgt = iwgt
else:
uuu = numpy.append(uuu, iuuu)
vvv = numpy.append(vvv, ivvv)
if ipcd != pcd:
data1 = visfile[0]
data2 = ivisfile
msg = 'Phase centers in ' + data1 + ' and ' + data2 \
+ ' do not match. Please ensure phase ' \
+ 'centers in all visibility datasets are equal.'
print(msg)
raise TypeError
vis_complex = numpy.append(vis_complex, ivis_complex)
wgt = numpy.append(wgt, iwgt)
except:
msg = "Visibility datasets must be specified as either a string or "\
"a list of strings."
print(msg)
raise TypeError
# remove the data points with zero or negative weight
positive_definite = wgt > 0
assert len(positive_definite[positive_definite]) > 0, " --- Find no data to fit, check the weights --- "
vis_complex = vis_complex[positive_definite]
wgt = wgt[positive_definite]
uuu = uuu[positive_definite]
vvv = vvv[positive_definite]
#www = www[positive_definite]
npos = wgt.size
#----------------------------------------------------------------------------
# Load input parameters
paramSetup = setuputil.loadParams(config)
nwalkers = paramSetup['nwalkers']
nregions = paramSetup['nregions']
nparams = paramSetup['nparams']
pname = paramSetup['pname']
nsource_regions = paramSetup['nsource_regions']
# Use an intermediate posterior PDF to initialize the walkers if it exists
posteriorloc = 'posteriorpdf.fits'
if os.path.exists(posteriorloc):
# read the latest posterior PDFs
print("Found existing posterior PDF file: {:s}".format(posteriorloc))
posteriordat = Table.read(posteriorloc)
if len(posteriordat) > 1:
# assign values to pzero
nlnprob = 1
pzero = numpy.zeros((nwalkers, nparams))
startindx = nlnprob
for j in range(nparams):
namej = posteriordat.colnames[j + startindx]
pzero[:, j] = posteriordat[namej][-nwalkers:]
# number of mu measurements
nmu = len(posteriordat.colnames) - nparams - nlnprob
# output name is based on most recent burnin file name
realpdf = True
else:
realpdf = False
else:
realpdf = False
if not realpdf:
extendedpname = ['lnprob']
extendedpname.extend(pname)
nmu = 0
for regioni in range(nregions):
ri = str(regioni)
if paramSetup['nlens_regions'][regioni] > 0:
nsource = nsource_regions[regioni]
for i in range(nsource):
si = '.Source' + str(i) + '.Region' + ri
extendedpname.append('mu_tot' + si)
nmu += 1
for i in range(nsource):
si = '.Source' + str(i) + '.Region' + ri
extendedpname.append('mu_aper' + si)
nmu += 1
extendedpname.append('mu_tot.Region' + ri)
extendedpname.append('mu_aper.Region' + ri)
nmu += 2
posteriordat = Table(names = extendedpname)
pzero = numpy.array(paramSetup['pzero'])
# make sure no parts of pzero exceed p_u or p_l
#arrayp_u = numpy.array(p_u)
#arrayp_l = numpy.array(p_l)
#for j in range(nwalkers):
# exceed = arraypzero[j] >= arrayp_u
# arraypzero[j, exceed] = 2 * arrayp_u[exceed] - arraypzero[j, exceed]
# exceed = arraypzero[j] <= arrayp_l
# arraypzero[j, exceed] = 2 * arrayp_l[exceed] - arraypzero[j, exceed]
#pzero = arraypzero
#p_u = arrayp_u
#p_l = arrayp_l
# determine the indices for fixed parameters
fixindx = setuputil.fixParams(paramSetup)
fixindx = map(int, fixindx)
# Initialize the sampler with the chosen specs.
if mpi:
sampler = emcee.EnsembleSampler(nwalkers, nparams, lnprob, pool=pool, \
args=[vis_complex, wgt, uuu, vvv, pcd, \
fixindx, paramSetup, computeamp, miriad])
else:
sampler = emcee.EnsembleSampler(nwalkers, nparams, lnprob, \
args=[vis_complex, wgt, uuu, vvv, pcd, \
fixindx, paramSetup, computeamp, miriad], threads=Nthreads)
# Sample, outputting to a file
#os.system('date')
currenttime = time.time()
# do burn-in if posteriorpdf.fits doesn't exist or doesn't contain any samples yet
# But it's difficult to judge how many steps are needed;
# we need to make sure later that we are sampling for longer than the autocorrelation time
if not realpdf:
burnin = 150
print("*** Running Burn in phase of steps {:d} ***".format(burnin))
try:
pos0, lnprob0, rstate0 = sampler.run_mcmc(pzero, burnin)
except ValueError:
pos0, lnprob0, rstate0, _ = sampler.run_mcmc(pzero, burnin)
sampler.reset() # reset chain
else:
pos0 = pzero
class AlarmException(Exception):
pass
def alarmHandler(signum, frame):
raise AlarmException
def nonBlockingRawInput(prompt='', timeout=20, response='yes'):
'''Prompt with raw_input(), but return the default ``response`` if no
input is received within ``timeout`` seconds.'''
import signal
signal.signal(signal.SIGALRM, alarmHandler)
signal.alarm(timeout)
try:
text = raw_input(prompt)
signal.alarm(0)
return text
except AlarmException:
print('\nPrompt timeout. Continuing...')
signal.signal(signal.SIGALRM, signal.SIG_IGN)
return response
def query_yes_no(question, default=None):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
import sys
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
# sys.stdout.flush()
def email_self(msg, receiver='tleung@astro.cornell.edu'):
'''
Parameters
----------
msg: str
in email
'''
import os
#email
SENDMAIL = "/usr/sbin/sendmail"
p = os.popen("%s -t" % SENDMAIL, "w")
p.write("To: "+receiver+"\n")
p.write("Subject: uvmcmcfit needs a respond to continuue. \n")
p.write("\n") # blank line separating headers from body
message = msg + "\n\n" + ' Continue?'
p.write(message)
sts = p.close()
if sts != 0:
print("Sendmail exit status {}".format(sts))
import cPickle as pickle
import os
# pos - the current positions of the walkers in parameter space; shape (nwalkers, dim)
# prob - the log posterior probabilities of the walkers at the positions given by pos; shape (nwalkers,)
# state - the random number generator state
# amp - metadata 'blobs' associated with the current position
# below for testing..
# nsamples = 1000
# nsessions = 2
# in general, we want many samples.
# niter & nsessions depend on nwalkers
#
nsamples = 1e6
niter = int(round(nsamples/nwalkers))
nsessions = 10
saveint = niter/nsessions/3
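# For example (with purely illustrative numbers): nwalkers = 100 would give
# niter = 10000 iterations in total, niter/nsessions = 1000 iterations per
# session, an intermediate save roughly every saveint = 333 iterations, and
# niter * nwalkers = 1e6 samples overall.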
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
for i in range(nsessions):
saveidx = 0
for pos, prob, state, amp in sampler.sample(pos0, iterations=int(niter/nsessions)):
# with sampler.sample() the chain array is pre-allocated with zeros (cf. run_mcmc())
walkers, steps, dim = sampler.chain.shape
result = [
"Mean Acceptance fraction across all walkers of this iteration: {:.2f}".format(numpy.mean(sampler.acceptance_fraction)),
"Mean lnprob and Max lnprob values: {:f} {:f}".format(numpy.mean(prob), numpy.max(prob)),
"Time to run previous set of walkers (seconds): {:f}".format(time.time() - currenttime)
]
print('\n'.join(result))
f = open('summary.txt', 'a')
f.write('\n'.join(result))
f.write('\n')
f.close()
currenttime = time.time()
#ff.write(str(prob))
superpos = numpy.zeros(1 + nparams + nmu)
for wi in range(nwalkers):
superpos[0] = prob[wi]
superpos[1:nparams + 1] = pos[wi]
superpos[nparams + 1:nparams + nmu + 1] = amp[wi]
posteriordat.add_row(superpos)
# only save once every saveint iterations, or on the last sample of the session
if not sampler.chain[:, numpy.any(sampler.chain[0, :, :] != 0, axis=1), :].shape[1] % saveint or (sampler.chain[:, numpy.any(sampler.chain[0, :, :] != 0, axis=1), :].shape[1] == int(niter/nsessions)):
print("Ran {:d} iterations in this session. Saving data".format(sampler.chain[:, numpy.any(sampler.chain[0, :, :] != 0, axis=1), :].shape[1]))
posteriordat.write('posteriorpdf.fits', overwrite=True)
#posteriordat.write('posteriorpdf.txt', format='ascii')
saveidx = sampler.chain[:, numpy.any(sampler.chain[0, :, :] != 0, axis=1), :].shape[1]
message = "We have finished {:d} iterations with {:d} walkers. ".format(sampler.chain[:, numpy.any(sampler.chain[0, :, :] != 0, axis=1), :].shape[1], nwalkers)
if i < nsessions-1:
email_self(message)
print(message)
ret = nonBlockingRawInput("Shall we continue with the next session? (Y/N)", timeout=600).lower()
while ret not in valid:
print("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
ret = nonBlockingRawInput("Shall we continue with the next session? (Y/N)", timeout=600).lower()
if not valid[ret]:
import sys
sys.exit("Quiting... ")
if mpi: pool.close()
sampler.reset()
pos0 = pos
f = open('summary.txt', 'a')
f.write("Finish all {:d} sessions \n".format(nsessions))
f.write("Total number of samples: {:d} \n".format(niter/nsessions * nsessions * nwalkers))
f.write('\n')
f.close()
if mpi: pool.close()
| (code above) | astro313/uvmcmcfit | uvmcmcfit.py | Python | mit | 26,614 | ["Gaussian"] | 6da86e0e84db89ff68fc2d2435fc82cb7e4800f4233c22f94e80359d3d83590e |
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Postprocessing step that extracts representation from trained model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from disentanglement_lib.data.ground_truth import named_data
from disentanglement_lib.postprocessing import methods # pylint: disable=unused-import
from disentanglement_lib.utils import convolute_hub
from disentanglement_lib.utils import results
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
import gin.tf
def postprocess_with_gin(model_dir,
output_dir,
overwrite=False,
gin_config_files=None,
gin_bindings=None):
"""Postprocess a trained model based on the provided gin configuration.
This function will set the provided gin bindings, call the postprocess()
function and clear the gin config. Please see the postprocess() for required
gin bindings.
Args:
model_dir: String with path to directory where the model is saved.
output_dir: String with the path where the representation should be saved.
overwrite: Boolean indicating whether to overwrite output directory.
gin_config_files: List of gin config files to load.
gin_bindings: List of gin bindings to use.
"""
if gin_config_files is None:
gin_config_files = []
if gin_bindings is None:
gin_bindings = []
gin.parse_config_files_and_bindings(gin_config_files, gin_bindings)
postprocess(model_dir, output_dir, overwrite)
gin.clear_config()
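# Example usage (paths and bindings below are illustrative only; in addition,
# postprocess_fn must be bound to one of the representation extraction
# functions defined in methods.py):
#
#   postprocess_with_gin(
#       model_dir="output/model",
#       output_dir="output/postprocessed",
#       overwrite=True,
#       gin_bindings=["postprocess.random_seed = 0",
#                     "postprocess.name = 'mean'"])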
@gin.configurable(
"postprocess", blacklist=["model_dir", "output_dir", "overwrite"])
def postprocess(model_dir,
output_dir,
overwrite=False,
postprocess_fn=gin.REQUIRED,
random_seed=gin.REQUIRED,
name=""):
"""Loads a trained Gaussian encoder and extracts representation.
Args:
model_dir: String with path to directory where the model is saved.
output_dir: String with the path where the representation should be saved.
overwrite: Boolean indicating whether to overwrite output directory.
postprocess_fn: Function used to extract the representation (see methods.py
for examples).
random_seed: Integer with random seed used for postprocessing (may be
unused).
name: Optional string with name of the representation (can be used to name
representations).
"""
# We do not use the variable 'name'. Instead, it can be used to name
# representations as it will be part of the saved gin config.
del name
# Delete the output directory if it already exists.
if tf.gfile.IsDirectory(output_dir):
if overwrite:
tf.gfile.DeleteRecursively(output_dir)
else:
raise ValueError("Directory already exists and overwrite is False.")
# Set up timer to keep track of elapsed time in results.
experiment_timer = time.time()
# Automatically set the proper data set if necessary. We replace the active
# gin config as this will lead to a valid gin config file where the data set
# is present.
if gin.query_parameter("dataset.name") == "auto":
# Obtain the dataset name from the gin config of the previous step.
gin_config_file = os.path.join(model_dir, "results", "gin", "train.gin")
gin_dict = results.gin_dict(gin_config_file)
with gin.unlock_config():
gin.bind_parameter("dataset.name", gin_dict["dataset.name"].replace(
"'", ""))
dataset = named_data.get_named_ground_truth_data()
# Path to TFHub module of previously trained model.
module_path = os.path.join(model_dir, "tfhub")
with hub.eval_function_for_module(module_path) as f:
def _gaussian_encoder(x):
"""Encodes images using trained model."""
# Push images through the TFHub module.
output = f(dict(images=x), signature="gaussian_encoder", as_dict=True)
# Convert to numpy arrays and return.
return {key: np.array(values) for key, values in output.items()}
# Run the postprocessing function which returns a transformation function
# that can be used to create the representation from the mean and log
# variance of the Gaussian distribution given by the encoder. Also returns
# path to a checkpoint if the transformation requires variables.
transform_fn, transform_checkpoint_path = postprocess_fn(
dataset, _gaussian_encoder, np.random.RandomState(random_seed),
output_dir)
# Takes the "gaussian_encoder" signature, extracts the representation and
# then saves under the signature "representation".
tfhub_module_dir = os.path.join(output_dir, "tfhub")
convolute_hub.convolute_and_save(
module_path, "gaussian_encoder", tfhub_module_dir, transform_fn,
transform_checkpoint_path, "representation")
# We first copy over all the prior results and configs.
original_results_dir = os.path.join(model_dir, "results")
results_dir = os.path.join(output_dir, "results")
results_dict = dict(elapsed_time=time.time() - experiment_timer)
results.update_result_directory(results_dir, "postprocess", results_dict,
original_results_dir)
| (code above) | google-research/disentanglement_lib | disentanglement_lib/postprocessing/postprocess.py | Python | apache-2.0 | 5,854 | ["Gaussian"] | 63a743e9a2a53619ac050af1fbf2f93320d6536cc6bc2a777f1fdbaa27383cfb |
# -*- coding: utf-8 -*-
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **Shake Event Test Cases.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'tim@kartoza.com'
__version__ = '0.5.0'
__date__ = '2/08/2012'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import difflib
import logging
import os
import shutil
import unittest
import datetime
import pytz
import requests
from qgis.core import QgsFeatureRequest
from realtime.push_rest import InaSAFEDjangoREST
from realtime.earthquake.push_shake import \
push_shake_event_to_rest
from realtime.earthquake.shake_event import ShakeEvent
from realtime.earthquake.make_map import process_event
from realtime.utilities import base_data_dir
from realtime.utilities import (
shakemap_extract_dir,
data_dir,
realtime_logger_name)
from safe.common.utilities import temp_dir, unique_filename
from safe.common.version import get_version
from safe.test.utilities import standard_data_path, get_qgis_app
# The logger is initialised in realtime.__init__
LOGGER = logging.getLogger(realtime_logger_name())
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
# Shake ID for this test
shakes_id = ['20131105060809', '20150918201057']
SHAKE_ID = shakes_id[0]
SHAKE_ID_2 = shakes_id[1]
class TestShakeEvent(unittest.TestCase):
"""Tests relating to shake events"""
# noinspection PyPep8Naming
def setUp(self):
"""Copy our cached dataset from the fixture dir to the cache dir."""
# Since ShakeEvent will be using sftp_shake_data, we'll copy the grid
# file inside 20131105060809 folder to
# shakemap_extract_dir/20131105060809/grid.xml
shake_path = standard_data_path('hazard', 'shake_data')
for shake_id in shakes_id:
input_path = os.path.abspath(
os.path.join(shake_path, shake_id, 'output/grid.xml'))
target_folder = os.path.join(
shakemap_extract_dir(), shake_id)
if not os.path.exists(target_folder):
os.makedirs(target_folder)
target_path = os.path.abspath(
os.path.join(target_folder, 'grid.xml'))
shutil.copyfile(input_path, target_path)
# noinspection PyPep8Naming
def tearDown(self):
"""Delete the cached data."""
for shake_id in shakes_id:
target_path = os.path.join(shakemap_extract_dir(), shake_id)
shutil.rmtree(target_path)
def test_grid_file_path(self):
"""Test grid_file_path works using cached data."""
expected_path = os.path.join(
shakemap_extract_dir(), SHAKE_ID, 'grid.xml')
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
data_is_local_flag=True)
grid_path = shake_event.grid_file_path()
self.assertEquals(expected_path, grid_path)
def test_to_string(self):
"""Test __str__ works properly."""
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
data_is_local_flag=True)
expected_state = (
'latitude: -2.43\n'
'longitude: 140.62\n'
'event_id: 20131105060809\n'
'magnitude: 3.6\n'
'depth: 10.0\n'
'description: None\n'
'location: Papua\n'
'day: 5\n'
'month: 11\n'
'year: 2013\n'
'time: 2013-11-05 06:08:09+07:07\n'
'time_zone: WIB\n'
'x_minimum: 139.37\n'
'x_maximum: 141.87\n'
'y_minimum: -3.67875\n'
'y_maximum: -1.18125\n'
'rows: 101.0\n'
'columns: 101.0\n'
'mmi_data: Populated\n'
'population_raster_path: None\n'
'impact_file: None\n'
'impact_keywords_file: None\n'
'fatality_counts: None\n'
'displaced_counts: None\n'
'affected_counts: None\n'
'extent_with_cities: Not set\n'
'zoom_factor: 1.25\n'
'search_boxes: None\n')
state = str(shake_event)
message = (('Expected:\n----------------\n%s'
'\n\nGot\n------------------\n%s\n') %
(expected_state, state))
self.assertEqual(state, expected_state, message)
def test_local_cities(self):
"""Test that we can retrieve the cities local to the event"""
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
data_is_local_flag=True)
# Get the memory layer
cities_layer = shake_event.local_cities_memory_layer()
provider = cities_layer.dataProvider()
expected_feature_count = 2
self.assertEquals(provider.featureCount(), expected_feature_count)
strings = []
request = QgsFeatureRequest()
for feature in cities_layer.getFeatures(request):
# fetch map of attributes
attributes = cities_layer.dataProvider().attributeIndexes()
for attribute_key in attributes:
strings.append("%d: %s\n" % (
attribute_key, feature[attribute_key]))
strings.append('------------------\n')
LOGGER.debug('Mem table:\n %s' % strings)
file_path = unique_filename(prefix='test_local_cities',
suffix='.txt',
dir=temp_dir('test'))
cities_file = file(file_path, 'w')
cities_file.writelines(strings)
cities_file.close()
fixture_path = os.path.join(data_dir(),
'tests',
'test_local_cities.txt')
cities_file = file(fixture_path)
expected_string = cities_file.readlines()
cities_file.close()
diff = difflib.unified_diff(expected_string, strings)
diff_list = list(diff)
diff_string = ''
for _, myLine in enumerate(diff_list):
diff_string += myLine
message = ('Diff is not zero length:\n'
'Control file: %s\n'
'Test file: %s\n'
'Diff:\n%s' %
(fixture_path,
file_path,
diff_string))
self.assertEqual(diff_string, '', message)
def test_mmi_potential_damage(self):
"""Test mmi_potential_damage function."""
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
data_is_local_flag=True)
values = range(1, 11)
expected_result = ['None', 'None', 'None', 'None', 'Very light',
'Light', 'Moderate', 'Mod/Heavy', 'Heavy',
'Very heavy']
result = []
for value in values:
result.append(shake_event.mmi_potential_damage(value))
message = 'Got:\n%s\nExpected:\n%s\n' % (result, expected_result)
self.assertEqual(result, expected_result, message)
def test_cities_to_shape(self):
"""Test that we can retrieve the cities local to the event."""
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
data_is_local_flag=True)
file_path = shake_event.cities_to_shapefile()
self.assertTrue(os.path.exists(file_path))
def test_cities_search_boxes_to_shape(self):
"""Test that we can retrieve the search boxes used to find cities."""
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
data_is_local_flag=True)
file_path = shake_event.city_search_boxes_to_shapefile()
self.assertTrue(os.path.exists(file_path))
def test_calculate_fatalities(self):
"""Test that we can calculate fatalities."""
LOGGER.debug(QGIS_APP.showSettings())
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
data_is_local_flag=True)
result, fatalities_html = shake_event.calculate_impacts()
# Get the os environment INASAFE_WORK_DIR if it exists
inasafe_work_dir = base_data_dir()
expected_result = ('%s/shakemaps-extracted/20131105060809/impact'
'-nearest.tif') % inasafe_work_dir
message = 'Got: %s, Expected: %s' % (result, expected_result)
self.assertEqual(result, expected_result, message)
expected_result = ('%s/shakemaps-extracted/20131105060809/impacts'
'.html') % inasafe_work_dir
message = 'Got: %s, Expected: %s' % (fatalities_html, expected_result)
self.assertEqual(fatalities_html, expected_result, message)
expected_fatalities = {2: 0.0,
3: 0.0,
4: 0.0,
5: 0.0,
6: 0.0,
7: 0.0,
8: 0.0,
9: 0.0,
10: 0.0}
message = 'Got: %s, Expected: %s' % (
shake_event.fatality_counts, expected_fatalities)
self.assertDictEqual(
shake_event.fatality_counts, expected_fatalities, message)
def test_sorted_impacted_cities(self):
"""Test getting impacted cities sorted by mmi then population."""
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
data_is_local_flag=True)
table = shake_event.sorted_impacted_cities()
file_path = unique_filename(
prefix='test_sorted_impacted_cities',
suffix='.txt',
dir=temp_dir('test'))
cities_file = file(file_path, 'w')
cities_file.writelines(str(table))
cities_file.close()
table = str(table).replace(', \'', ',\n\'')
table += '\n'
fixture_path = os.path.join(
data_dir(), 'tests', 'test_sorted_impacted_cities.txt')
cities_file = file(fixture_path)
expected_string = cities_file.read()
cities_file.close()
expected_string = expected_string.replace(', \'', ',\n\'')
self.maxDiff = None
message = 'Expectation:\n%s, Got\n%s' % (expected_string, table)
self.assertEqual(expected_string, table, message)
def test_impacted_cities_table(self):
"""Test getting impacted cities table."""
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
data_is_local_flag=True)
table, path = shake_event.impacted_cities_table()
table_dict = table.to_dict()
expected_string = [
{
'name': 'Jayapura',
'population': '134'
},
{
'name': 'Abepura',
'population': '62'
}
]
for i in range(1, len(table.rows)):
self.assertEqual(
table_dict['rows'][i]['cells'][1]
['content']['text'][0]['text'],
expected_string[i - 1].get('name'))
self.assertEqual(
table_dict['rows'][i]['cells'][2]
['content']['text'][0]['text'],
expected_string[i - 1].get('population'))
self.maxDiff = None
# Get the os environment INASAFE_WORK_DIR if it exists
inasafe_work_dir = base_data_dir()
expected_path = (
'%s/shakemaps-extracted/20131105060809/affected-cities.html' %
inasafe_work_dir)
message = 'Got:\n%s\nExpected:\n%s\n' % (path, expected_path)
self.assertEqual(path, expected_path, message)
def test_fatalities_table(self):
"""Test rendering a fatalities table."""
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
data_is_local_flag=True)
shake_event.calculate_impacts()
result = shake_event.impact_table()
# TODO compare actual content of impact table...
# Get the os environment INASAFE_WORK_DIR if it exists
inasafe_work_dir = base_data_dir()
expected_result = (
'%s/shakemaps-extracted/20131105060809/impacts.html' %
inasafe_work_dir)
message = 'Got:\n%s\nExpected:\n%s' % (result, expected_result)
self.assertEqual(result, expected_result, message)
def test_event_info_dict(self):
"""Test we can get a dictionary of location info nicely."""
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
data_is_local_flag=True)
result = shake_event.event_dict()
software_tag = ('This report was created using InaSAFE version %s. '
'Visit http://inasafe.org for more information.' %
get_version())
# noinspection PyUnresolvedReferences
expected_dict = {
'place-name': u'Jayapura',
'shake-grid-location': u'Papua',
'depth-name': u'Depth',
'fatalities-name': u'Estimated fatalities',
'fatalities-count': u'0', # 44 only after render
'elapsed-time': u'', # empty as it will change
'legend-name': u'Population count per grid cell',
'fatalities-range': '0 - 100',
'longitude-name': u'Longitude',
'located-label': u'Located',
'distance-unit': u'km',
'bearing-compass': u'NW',
'elapsed-time-name': u'Elapsed time since event',
'exposure-table-name': u'Estimated number of people '
u'affected by each MMI level',
'longitude-value': u'140\xb037\u203212.00\u2033E',
'city-table-name': u'Nearby Places',
'bearing-text': u'bearing',
'limitations': (
u'This impact estimation is automatically generated and only '
u'takes into account the population and cities affected by '
u'different levels of ground shaking. The estimate is based '
u'on ground shaking data from BMKG, population count data '
u'derived by Australian Government from worldpop.org.uk, '
u'place information from geonames.org and software developed '
u'by BNPB. Limitations in the estimates of ground shaking, '
u'population and place names datasets may result in '
u'significant misrepresentation of the on-the-ground '
u'situation in the figures shown here. Consequently '
u'decisions should not be made solely on the information '
u'presented here and should always be verified by ground '
u'truthing and other reliable information sources. The '
u'fatality calculation assumes that no fatalities occur '
u'for shake levels below MMI 4. Fatality counts of less than '
u'50 are disregarded.'),
'depth-unit': u'km',
'latitude-name': u'Latitude',
'mmi': '3.6',
'map-name': u'Estimated Earthquake Impact',
'date': '5-11-2013',
'bearing-degrees': '-37.75\xb0',
'formatted-date-time': '05-Nov-13 06:08:09 +0707',
'distance': '0.02',
'direction-relation': u'of',
'software-tag': software_tag,
'credits': (
u'Supported by the Australian Government, Geoscience '
u'Australia and the World Bank-GFDRR.'),
'latitude-value': u'2\xb025\u203248.00\u2033S',
'time': '6:8:9',
'depth-value': '10.0'}
result['elapsed-time'] = u''
message = 'Got:\n%s\nExpected:\n%s\n' % (result, expected_dict)
self.maxDiff = None
difference = DictDiffer(result, expected_dict)
LOGGER.debug(difference.all())
self.assertDictEqual(expected_dict, result, message)
def test_event_info_string(self):
"""Test we can get a location info string nicely."""
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
data_is_local_flag=True)
expected_result = (
u"M 3.6 5-11-2013 6:8:9 "
u"Latitude: 2°25′48.00″S "
u"Longitude: 140°37′12.00″E "
u"Depth: 10.0km Located 0.02km NW of Papua")
result = shake_event.event_info()
message = ('Got:\n%s\nExpected:\n%s\n' %
(result, expected_result))
self.assertEqual(result, expected_result, message)
def test_render_map(self):
"""Test render_map function in shake_event."""
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
data_is_local_flag=True)
# Render Map
shake_event.render_map()
# There should be exist:
# 1. SHAKE_ID-en.pdf
# 2. SHAKE_ID-en.png
# 3. SHAKE_ID-thumb-en.png
# 4. SHAKE_ID-metadata-en.pickle
# 5. mmi-cities.shp, shx, dbf, prj, qml
# 6. mmi-contours-nearest.shp, shx, dbf, prj, qml
# 7. city-search-boxes.shp, shx, dbf, prj, qml
# 8. composer-template.qpt
# 9. project.qgs
target_dir = os.path.join(shakemap_extract_dir(), SHAKE_ID)
shapefile_extension = ['shp', 'shx', 'dbf', 'prj', 'qml']
# 1
pdf_path = os.path.join(target_dir, '%s-en.pdf' % SHAKE_ID)
message = 'PDF Report is not generated successfully in %s' % pdf_path
self.assertTrue(os.path.exists(pdf_path), message)
# 2
png_path = os.path.join(target_dir, '%s-en.png' % SHAKE_ID)
message = 'PNG Report is not generated successfully in %s' % png_path
self.assertTrue(os.path.exists(png_path), message)
# 3
thumbnail_path = os.path.join(target_dir, '%s-thumb-en.png' % SHAKE_ID)
message = 'PNG Thumbnail is not generated successfully in %s' % (
thumbnail_path)
self.assertTrue(os.path.exists(thumbnail_path), message)
# 4
metadata_path = os.path.join(
target_dir, '%s-metadata-en.pickle' % SHAKE_ID)
message = 'Metadata file is not generated successfully in %s' % (
metadata_path)
self.assertTrue(os.path.exists(metadata_path), message)
# 5. mmi-cities.shp, shx, dbf, prj, qml
mmi_cities_path = os.path.join(target_dir, 'mmi-cities.shp')
for extension in shapefile_extension:
file_path = mmi_cities_path.replace('shp', extension)
message = 'mmi-cities.%s is not generated successfully in %s' % (
extension, file_path)
self.assertTrue(os.path.exists(file_path), message)
# 6. mmi-contours-nearest.shp, shx, dbf, prj, qml
mmi_contours_path = os.path.join(
target_dir, 'mmi-contours-nearest.shp')
for extension in shapefile_extension:
file_path = mmi_contours_path.replace('shp', extension)
message = (
'mmi-contours-nearest.%s is not generated successfully in '
'%s') % (extension, file_path)
self.assertTrue(os.path.exists(file_path), message)
# 7. city-search-boxes.shp, shx, dbf, prj, qml
city_search_boxes_path = os.path.join(
target_dir, 'city-search-boxes.shp')
for extension in shapefile_extension:
file_path = city_search_boxes_path.replace('shp', extension)
message = (
'city-search-boxes.%s is not generated successfully in '
'%s') % (extension, file_path)
self.assertTrue(os.path.exists(file_path), message)
# 8
composer_template_path = os.path.join(
target_dir, 'composer-template.qpt')
message = (
'Composer template file is not generated successfully in %s' %
composer_template_path)
self.assertTrue(os.path.exists(composer_template_path), message)
# 9
qgs_project_path = os.path.join(
target_dir, 'project.qgs')
message = 'QGIS Project file is not generated successfully in %s' % (
qgs_project_path)
self.assertTrue(os.path.exists(qgs_project_path), message)
def test_bearing_to_cardinal(self):
"""Test we can convert a bearing to a cardinal direction."""
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
data_is_local_flag=True)
# Ints should work
expected_result = 'SSE'
result = shake_event.bearing_to_cardinal(160)
message = ('Got:\n%s\nExpected:\n%s\n' %
(result, expected_result))
self.assertEqual(result, expected_result, message)
# Floats should work
expected_result = 'SW'
result = shake_event.bearing_to_cardinal(225.4)
message = ('Got:\n%s\nExpected:\n%s\n' %
(result, expected_result))
self.assertEqual(result, expected_result, message)
# non numeric data as input should return None
expected_result = None
result = shake_event.bearing_to_cardinal('foo')
message = ('Got:\n%s\nExpected:\n%s\n' %
(result, expected_result))
self.assertEqual(result, expected_result, message)
def test_i18n(self):
"""See if internationalisation is working."""
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
locale='id',
data_is_local_flag=True)
shaking = shake_event.mmi_shaking(5)
expected_shaking = 'Sedang'
self.assertEqual(expected_shaking, shaking)
def test_login_to_realtime(self):
# get logged in session
inasafe_django = InaSAFEDjangoREST()
self.assertTrue(inasafe_django.is_logged_in)
def test_push_to_realtime(self):
# only do the test if realtime test server is configured
inasafe_django = InaSAFEDjangoREST()
if inasafe_django.is_configured():
working_dir = shakemap_extract_dir()
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID,
locale='en',
data_is_local_flag=True)
# generate report
shake_event.render_map()
# push to realtime django
push_shake_event_to_rest(shake_event)
# check shake event exists
session = inasafe_django.rest
response = session.earthquake(SHAKE_ID).GET()
self.assertEqual(response.status_code, requests.codes.ok)
event_dict = shake_event.event_dict()
earthquake_data = {
'shake_id': shake_event.event_id,
'magnitude': float(event_dict.get('mmi')),
'depth': float(event_dict.get('depth-value')),
'time': shake_event.shake_grid.time,
'location': {
'type': 'Point',
'coordinates': [
shake_event.shake_grid.longitude,
shake_event.shake_grid.latitude
]
},
'location_description': event_dict.get('shake-grid-location')
}
for key, value in earthquake_data.iteritems():
if isinstance(value, datetime.datetime):
self.assertEqual(
datetime.datetime.strptime(
response.json()[key], '%Y-%m-%dT%H:%M:%SZ'
).replace(tzinfo=pytz.utc),
value
)
else:
self.assertEqual(response.json()[key], value)
def test_uses_grid_location(self):
"""Test regarding issue #2438
"""
working_dir = shakemap_extract_dir()
# population_path =
shake_event = ShakeEvent(
working_dir=working_dir,
event_id=SHAKE_ID_2,
locale='en',
force_flag=True,
data_is_local_flag=True,
# population_raster_path=population_path
)
expected_location = 'Yogyakarta'
self.assertEqual(
shake_event.event_dict()['shake-grid-location'],
expected_location)
inasafe_django = InaSAFEDjangoREST()
if inasafe_django.is_configured():
# generate report
shake_event.render_map()
# push to realtime django
push_shake_event_to_rest(shake_event)
# check shake event exists
session = inasafe_django.rest
response = session.earthquake(SHAKE_ID_2).GET()
self.assertEqual(response.status_code, requests.codes.ok)
self.assertEqual(
response.json()['location_description'],
shake_event.event_dict()['shake-grid-location'])
class DictDiffer(object):
"""
Taken from
http://stackoverflow.com/questions/1165352/
fast-comparison-between-two-python-dictionary
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(
past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
"""Differences between two dictionaries as items added."""
return self.set_current - self.intersect
def removed(self):
"""Differences between two dictionaries as items removed."""
return self.set_past - self.intersect
def changed(self):
"""Differences between two dictionaries as values changed."""
return set(o for o in self.intersect if
self.past_dict[o] != self.current_dict[o])
def unchanged(self):
"""Differences between 2 dictionaries as values not changed."""
return set(o for o in self.intersect if
self.past_dict[o] == self.current_dict[o])
def all(self):
"""Test all."""
string = 'Added: %s\n' % self.added()
string += 'Removed: %s\n' % self.removed()
string += 'changed: %s\n' % self.changed()
return string
if __name__ == '__main__':
suite = unittest.makeSuite(TestShakeEvent, 'test_local_cities')
runner = unittest.TextTestRunner(verbosity=2)
unittest.main()
| (code above) | easmetz/inasafe | realtime/test/test_shake_event.py | Python | gpl-3.0 | 28,049 | ["VisIt"] | 0886aa4f29310128d6884467fa5be5144c9003e8070e60064b61407566c47f25 |
#!/usr/bin/env python
# coding: utf-8
# ---
# syncID: e6ccf19a4b454ca594388eeaa88ebe12
# title: "Calculate Vegetation Biomass from LiDAR Data in Python"
# description: "Learn to calculate the biomass of standing vegetation using a canopy height model data product."
# dateCreated: 2017-06-21
# authors: Tristan Goulden
# contributors: Donal O'Leary
# estimatedTime: 1 hour
# packagesLibraries: numpy, gdal, matplotlib, matplotlib.pyplot, os
# topics: lidar,remote-sensing
# languagesTool: python
# dataProduct: DP1.10098.001, DP3.30015.001,
# code1: https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/tutorials/Python/Lidar/lidar-biomass/calc-biomass_py/calc-biomass_py.ipynb
# tutorialSeries: intro-lidar-py-series
# urlTitle: calc-biomass-py
# ---
# <div id="ds-objectives" markdown="1">
#
# In this tutorial, we will calculate the biomass for a section of the SJER site. We
# will be using the Canopy Height Model discrete LiDAR data product as well as NEON
# field data on vegetation data. This tutorial will calculate Biomass for individual
# trees in the forest.
#
# ### Objectives
# After completing this tutorial, you will be able to:
#
# * Learn how to apply a Gaussian smoothing kernel for high-frequency spatial filtering
# * Apply a watershed segmentation algorithm for delineating tree crowns
# * Calculate biomass predictor variables from a CHM
# * Setup training data for Biomass predictions
# * Apply a Random Forest machine learning approach to calculate biomass
#
#
# ### Install Python Packages
#
# * **numpy**
# * **gdal**
# * **matplotlib**
# * **matplotlib.pyplot**
# * **os**
#
#
# ### Download Data
#
# If you have already downloaded the data set for the Data Institute, you have the
# data for this tutorial within the SJER directory. If you would like to just
# download the data for this tutorial use the following link.
#
# <a href="https://neondata.sharefile.com/d-s58db39240bf49ac8" class="link--button link--arrow">
# Download the Biomass Calculation teaching data subset</a>
#
# </div>
# In this tutorial, we will calculate the biomass for a section of the SJER site. We
# will be using the Canopy Height Model discrete LiDAR data product as well as NEON
# field data on vegetation data. This tutorial will calculate Biomass for individual
# trees in the forest.
#
# The calculation of biomass consists of four primary steps:
#
# 1. Delineating individual tree crowns
# 2. Calculating predictor variables for all individuals
# 3. Collecting training data
# 4. Applying a regression model to estimate biomass from predictors
#
# In this tutorial we will use a watershed segmentation algorithm for delineating
# tree crowns (step 1) and a Random Forest (RF) machine learning algorithm for
# relating the predictor variables to biomass (step 4). The predictor variables were
# selected following suggestions by Gleason et al. (2012) and biomass estimates were
# determined from DBH (diameter at breast height) measurements following relationships
# given in Jenkins et al. (2003).
#
# ## Get Started
#
# First, we need to specify the directory where we will find and save the data needed for this tutorial. You will need to change this line to suit your local machine. I have decided to save my data in the following directory:
# In[1]:
data_path = '/Users/olearyd/Git/data/'
# Next, we will import several of the typical libraries.
# In[2]:
import numpy as np
import os
import gdal, osr
import matplotlib.pyplot as plt
import sys
from scipy import ndimage as ndi
get_ipython().run_line_magic('matplotlib', 'inline')
# Next, we will add libraries from scikit-image and scikit-learn which will help with the watershed delineation, determination of predictor variables, and the random forest algorithm.
# In[3]:
#Import biomass specific libraries
from skimage.morphology import watershed
from skimage.feature import peak_local_max
from skimage.measure import regionprops
from sklearn.ensemble import RandomForestRegressor
# ## Define functions
#
# Now we will define a few functions that allow us to more easily work with the NEON data.
#
# * `plot_band_array`: function to plot NEON spatial data.
# In[4]:
#Define plot band array function
def plot_band_array(band_array,image_extent,title,cmap_title,colormap,colormap_limits):
plt.imshow(band_array,extent=image_extent)
cbar = plt.colorbar(); plt.set_cmap(colormap); plt.clim(colormap_limits)
cbar.set_label(cmap_title,rotation=270,labelpad=20)
plt.title(title); ax = plt.gca()
ax.ticklabel_format(useOffset=False, style='plain')
rotatexlabels = plt.setp(ax.get_xticklabels(),rotation=90)
# * `array2raster`: function to output geotiff files.
# In[5]:
def array2raster(newRasterfn,rasterOrigin,pixelWidth,pixelHeight,array,epsg):
cols = array.shape[1]
rows = array.shape[0]
originX = rasterOrigin[0]
originY = rasterOrigin[1]
driver = gdal.GetDriverByName('GTiff')
outRaster = driver.Create(newRasterfn, cols, rows, 1, gdal.GDT_Float32)
outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
outband = outRaster.GetRasterBand(1)
outband.WriteArray(array)
outRasterSRS = osr.SpatialReference()
outRasterSRS.ImportFromEPSG(epsg)
outRaster.SetProjection(outRasterSRS.ExportToWkt())
outband.FlushCache()
# * `raster2array`: function to convert rasters to an array.
# In[6]:
def raster2array(geotif_file):
metadata = {}
dataset = gdal.Open(geotif_file)
metadata['array_rows'] = dataset.RasterYSize
metadata['array_cols'] = dataset.RasterXSize
metadata['bands'] = dataset.RasterCount
metadata['driver'] = dataset.GetDriver().LongName
metadata['projection'] = dataset.GetProjection()
metadata['geotransform'] = dataset.GetGeoTransform()
mapinfo = dataset.GetGeoTransform()
metadata['pixelWidth'] = mapinfo[1]
metadata['pixelHeight'] = mapinfo[5]
metadata['ext_dict'] = {}
metadata['ext_dict']['xMin'] = mapinfo[0]
metadata['ext_dict']['xMax'] = mapinfo[0] + dataset.RasterXSize/mapinfo[1]
metadata['ext_dict']['yMin'] = mapinfo[3] + dataset.RasterYSize/mapinfo[5]
metadata['ext_dict']['yMax'] = mapinfo[3]
metadata['extent'] = (metadata['ext_dict']['xMin'],metadata['ext_dict']['xMax'],
metadata['ext_dict']['yMin'],metadata['ext_dict']['yMax'])
if metadata['bands'] == 1:
raster = dataset.GetRasterBand(1)
metadata['noDataValue'] = raster.GetNoDataValue()
metadata['scaleFactor'] = raster.GetScale()
# band statistics
metadata['bandstats'] = {} # make a nested dictionary to store band stats in same
stats = raster.GetStatistics(True,True)
metadata['bandstats']['min'] = round(stats[0],2)
metadata['bandstats']['max'] = round(stats[1],2)
metadata['bandstats']['mean'] = round(stats[2],2)
metadata['bandstats']['stdev'] = round(stats[3],2)
array = dataset.GetRasterBand(1).ReadAsArray(0,0,
metadata['array_cols'],
metadata['array_rows']).astype(np.float)
array[array==int(metadata['noDataValue'])]=np.nan
array = array/metadata['scaleFactor']
return array, metadata
elif metadata['bands'] > 1:
print('More than one band ... need to modify function for case of multiple bands')
# * `crown_geometric_volume_pth`: function to get tree crown volume.
# In[7]:
def crown_geometric_volume_pth(tree_data,min_tree_height,pth):
p = np.percentile(tree_data, pth)
    # Cap crown heights at the pth percentile (as a NumPy array so the
    # elementwise subtraction below is valid)
    tree_data_pth = np.array([v if v < p else p for v in tree_data])
    crown_geometric_volume_pth = np.sum(tree_data_pth - min_tree_height)
return crown_geometric_volume_pth, p
# * `get_predictors`: function to get the trees from the biomass data.
# In[8]:
def get_predictors(tree,chm_array, labels):
indexes_of_tree = np.asarray(np.where(labels==tree.label)).T
tree_crown_heights = chm_array[indexes_of_tree[:,0],indexes_of_tree[:,1]]
full_crown = np.sum(tree_crown_heights - np.min(tree_crown_heights))
crown50, p50 = crown_geometric_volume_pth(tree_crown_heights,tree.min_intensity,50)
crown60, p60 = crown_geometric_volume_pth(tree_crown_heights,tree.min_intensity,60)
crown70, p70 = crown_geometric_volume_pth(tree_crown_heights,tree.min_intensity,70)
return [tree.label,
np.float(tree.area),
tree.major_axis_length,
tree.max_intensity,
tree.min_intensity,
p50, p60, p70,
full_crown, crown50, crown60, crown70]
# ## Canopy Height Data
#
# With everything set up, we can now start working with our data by defining the file path to our CHM file. Note that you will need to change this and subsequent filepaths according to your local machine.
# In[9]:
chm_file = data_path+'NEON_D17_SJER_DP3_256000_4106000_CHM.tif'
# When we output the results, we will want to include the same file information as the input, so we will gather the file name information.
# In[10]:
#Get info from chm file for outputting results
just_chm_file = os.path.basename(chm_file)
just_chm_file_split = just_chm_file.split(sep="_")
# Now we will get the CHM data...
# In[11]:
chm_array, chm_array_metadata = raster2array(chm_file)
# ..., plot it, and save the figure.
# In[12]:
#Plot the original CHM
plt.figure(1)
#Plot the CHM figure
plot_band_array(chm_array,chm_array_metadata['extent'],
'Canopy height Model',
'Canopy height (m)',
'Greens',[0, 9])
plt.savefig(data_path+just_chm_file[0:-4]+'_CHM.png',dpi=300,orientation='landscape',
bbox_inches='tight',
pad_inches=0.1)
# It looks like SJER primarily has low vegetation with scattered taller trees.
#
# ## Create Filtered CHM
#
# Now we will use a Gaussian smoothing kernel (convolution) across the data set to remove spurious high vegetation points. This will help ensure we are finding the treetops properly before running the watershed segmentation algorithm.
#
# For different forest types it may be necessary to change the input parameters. Information on the function can be found in the <a href="https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.filters.gaussian_filter.html" target="_blank">SciPy documentation</a>.
#
# Of most importance are the second and fifth inputs. The second input defines the standard deviation of the Gaussian smoothing kernel. Too large a value will apply too much smoothing; too small a value may leave some spurious high points behind. The fifth, the truncate value, controls after how many standard deviations the Gaussian kernel will get cut off (since it theoretically goes to infinity).
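# For example (purely illustrative; these intermediate arrays are not used in
# the rest of the tutorial), the effect of the standard deviation can be seen
# by comparing a light and a heavy smoothing pass:
#
#   light_smooth = ndi.gaussian_filter(chm_array, 1, mode='constant', cval=0, truncate=2.0)
#   heavy_smooth = ndi.gaussian_filter(chm_array, 4, mode='constant', cval=0, truncate=2.0)
#
# The heavier pass suppresses more of the small, spurious peaks but also blurs
# real treetops together.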
# In[13]:
#Smooth the CHM using a gaussian filter to remove spurious points
chm_array_smooth = ndi.gaussian_filter(chm_array,2,
mode='constant',cval=0,truncate=2.0)
chm_array_smooth[chm_array==0] = 0
# Now save a copy of filtered CHM. We will later use this in our code, so we'll output it into our data directory.
# In[14]:
#Save the smoothed CHM
array2raster(data_path+'chm_filter.tif',
(chm_array_metadata['ext_dict']['xMin'],chm_array_metadata['ext_dict']['yMax']),
1,-1,
np.array(chm_array_smooth,dtype=float),
32611)
# ## Determine local maximums
#
# Now we will run an algorithm to determine local maximums within the image. Setting indices to 'False' returns a raster of the maximum points, as opposed to a list of coordinates. The footprint parameter is an area where only a single peak can be found. This should be approximately the size of the smallest tree. Information on more sophisticated methods to define the window can be found in Chen (2006).
# In[15]:
#Calculate local maximum points in the smoothed CHM
local_maxi = peak_local_max(chm_array_smooth,indices=False, footprint=np.ones((5, 5)))
# Our new object `local_maxi` is an array of boolean values where each pixel is identified as either being the local maximum (`True`) or not being the local maximum (`False`).
# In[16]:
local_maxi
# This is very helpful, but it can be difficult to visualize boolean values using our typical numeric plotting procedures as defined in the `plot_band_array` function above. Therefore, we will need to convert this boolean array to a numeric format to use this function. Booleans convert easily to integers with values of `False=0` and `True=1` using the `.astype(int)` method.
# In[17]:
local_maxi.astype(int)
# Next, we can plot the raster of local maximums by coercing the boolean array into an array of integers inline. The following figure shows the difference in finding local maximums for a filtered vs. non-filtered CHM.
#
# We will save the graphics (.png) in an outputs folder sister to our working directory and data outputs (.tif) to our data directory.
# In[18]:
#Plot the local maximums
plt.figure(2)
plot_band_array(local_maxi.astype(int),chm_array_metadata['extent'],
'Maximum',
'Maxi',
'Greys',
[0, 1])
plt.savefig(data_path+just_chm_file[0:-4]+ '_Maximums.png',
dpi=300,orientation='landscape',
bbox_inches='tight',pad_inches=0.1)
array2raster(data_path+'maximum.tif',
(chm_array_metadata['ext_dict']['xMin'],chm_array_metadata['ext_dict']['yMax']),
1,-1,np.array(local_maxi,dtype=np.float32),32611)
# If we were to look at the overlap between the tree crowns and the local maxima from each method, it would appear a bit like this raster.
#
# <figure>
# <a href="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-filter-vs-nonfilter.jpg">
# <img src="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-filter-vs-nonfilter.jpg"></a>
# <figcaption> The difference in finding local maximums for a filtered vs.
# non-filtered CHM.
# Source: National Ecological Observatory Network (NEON)
# </figcaption>
# </figure>
#
#
# Apply labels to all of the local maximum points
# In[19]:
#Identify all the maximum points
markers = ndi.label(local_maxi)[0]
# Next we will create a mask layer of all of the vegetation points so that the watershed segmentation will only occur on the trees and not extend into the surrounding ground points. Since 0 represents ground points in the CHM, setting the mask to 1 where the CHM is not zero will define the mask.
# In[20]:
#Create a CHM mask so the segmentation will only occur on the trees
chm_mask = chm_array_smooth.copy()
chm_mask[chm_array_smooth != 0] = 1
# ## Watershed segmentation
#
# As in a river system, watersheds are separated by ridges that divide adjacent areas. Here our watersheds are the individual tree canopies and the ridge is the delineation between each one.
#
# <figure>
# <a href="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-watershed-segments.png">
# <img src="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-watershed-segments.png"></a>
# <figcaption> A raster classified based on watershed segmentation.
# Source: National Ecological Observatory Network (NEON)
# </figcaption>
# </figure>
#
# Next, we will perform the watershed segmentation which produces a raster of labels.
# In[21]:
#Perform watershed segmentation
labels = watershed(chm_array_smooth, markers, mask=chm_mask)
labels_for_plot = labels.copy()
labels_for_plot = np.array(labels_for_plot,dtype = np.float32)
labels_for_plot[labels_for_plot==0] = np.nan
max_labels = np.max(labels)
# In[22]:
#Plot the segments
plot_band_array(labels_for_plot,chm_array_metadata['extent'],
'Crown Segmentation','Tree Crown Number',
'Spectral',[0, max_labels])
plt.savefig(data_path+just_chm_file[0:-4]+'_Segmentation.png',
dpi=300,orientation='landscape',
bbox_inches='tight',pad_inches=0.1)
array2raster(data_path+'labels.tif',
(chm_array_metadata['ext_dict']['xMin'],
chm_array_metadata['ext_dict']['yMax']),
1,-1,np.array(labels,dtype=float),32611)
# Now we will get several properties of the individual trees that will be used as predictor variables.
# In[23]:
#Get the properties of each segment
tree_properties = regionprops(labels,chm_array)
# Now we will get the predictor variables to match the (soon to be loaded) training data using the function defined above. The first column will be segment IDs, the rest will be the predictor variables.
# In[24]:
predictors_chm = np.array([get_predictors(tree, chm_array, labels) for tree in tree_properties])
X = predictors_chm[:,1:]
tree_ids = predictors_chm[:,0]
# ## Training data
#
# We now bring in the training data file, which is a simple CSV file with no header. The first column is biomass, and the remaining columns are the same predictor variables defined above. The tree diameter and maximum height are recorded in the NEON vegetation structure data along with the tree DBH. The field-validated values are used for training, while the others were determined from the CHM and camera images by manually delineating the tree crowns and pulling out the relevant information from the CHM.
#
# Biomass was calculated from DBH according to the formulas in Jenkins et al. (2003).
#
# If you didn't download this training dataset above, you can <a href="https://neondata.sharefile.com/share/view/cdc8242e24ad4517/fobd4959-4cf0-44ab-acc6-0695a04a1afc" target="_blank">Download the training dataset CSV here</a>.
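# As a point of reference (this equation is not executed anywhere in this
# tutorial, and the coefficients below are only illustrative), the Jenkins et
# al. (2003) allometric equations take the general form
#
#     biomass_kg = exp(beta_0 + beta_1 * ln(DBH_cm))
#
# where beta_0 and beta_1 are coefficients fit for broad species groups and DBH
# is the diameter at breast height in centimeters. For a hypothetical set of
# coefficients beta_0 = -2.48 and beta_1 = 2.48, a 30 cm DBH tree would have an
# estimated biomass of np.exp(-2.48 + 2.48 * np.log(30)), which is about 385 kg.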
# In[25]:
#Define the file of training data
training_data_file = data_path+'SJER_Biomass_Training.csv'
#Read in the training data from a CSV file
training_data = np.genfromtxt(training_data_file,delimiter=',')
#Grab the biomass (Y) from the first column
biomass = training_data[:,0]
#Grab the biomass predictors from the remaining columns
biomass_predictors = training_data[:,1:12]
# ## Random Forest regression
#
# We can then define the parameters of the Random Forest regressor and fit the predictor variables from the training data to the biomass estimates.
# In[26]:
#Define parameters for the Random Forest regressor
max_depth = 30
#Define regressor rules
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
#Fit the biomass to regressor variables
regr_rf.fit(biomass_predictors,biomass)
# We now apply the Random Forest model to the predictor variables to retrieve biomass.
# In[27]:
#Apply the model to the predictors
estimated_biomass = regr_rf.predict(X)
# For outputting a raster, copy the labels raster to a biomass raster, then cycle through the segments and assign the biomass estimate to each individual tree segment.
# In[28]:
#Set an out raster with the same size as the labels
biomass_map = np.array((labels),dtype=float)
#Assign the appropriate biomass to the labels
biomass_map[biomass_map==0] = np.nan
for tree_id, biomass_of_tree_id in zip(tree_ids, estimated_biomass):
biomass_map[biomass_map == tree_id] = biomass_of_tree_id
# ## Calc Biomass
# Collect some of the biomass statistics and then plot the results and save an output geotiff.
# In[29]:
#Get biomass stats for plotting
mean_biomass = np.mean(estimated_biomass)
std_biomass = np.std(estimated_biomass)
min_biomass = np.min(estimated_biomass)
sum_biomass = np.sum(estimated_biomass)
print('Sum of biomass is ',sum_biomass,' kg')
#Plot the biomass!
plt.figure(5)
plot_band_array(biomass_map,chm_array_metadata['extent'],
'Biomass (kg)','Biomass (kg)',
'winter',
[min_biomass+std_biomass, mean_biomass+std_biomass*3])
plt.savefig(data_path+just_chm_file_split[0]+'_'+just_chm_file_split[1]+'_'+just_chm_file_split[2]+'_'+just_chm_file_split[3]+'_'+just_chm_file_split[4]+'_'+just_chm_file_split[5]+'_'+'Biomass.png',
dpi=300,orientation='landscape',
bbox_inches='tight',
pad_inches=0.1)
array2raster(data_path+'biomass.tif',
(chm_array_metadata['ext_dict']['xMin'],chm_array_metadata['ext_dict']['yMax']),
1,-1,np.array(biomass_map,dtype=float),32611)
# In[ ]:
|
NEONScience/NEON-Data-Skills
|
tutorials/Python/Lidar/lidar-biomass/calc-biomass_py/calc-biomass_py.py
|
Python
|
agpl-3.0
| 20,510
|
[
"Gaussian"
] |
c955f9e13d4578e45731d9e2b2557a48a9ddb4ef00c1bf621034bb57a87a19c9
|
# Copyright (C) 2008-2009 Open Society Institute
# Thomas Moroz: tmoroz@sorosny.org
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License Version 2 as published
# by the Free Software Foundation. You may not use, modify or distribute
# this program under any other version of the GNU General Public License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Move a piece of content to another folder. Constraints:
- ** DO NOT SEND ALERT EMAILS DURING THIS PROCESS !! **
- All blog entries/comments, not a subset
- Retain the docid, owner, and create/modified dates
- If the owner can't be found, assign to the System User
- Put a <p><em>This ${content type} was originally authored in the
"Information Program Staff Community" community.</em></p> in the top
of the body text.
"""
from karl.utils import find_catalog
from karl.utils import find_community
from karl.utils import find_users
from karl.views.utils import make_unique_name
from optparse import OptionParser
from karl.scripting import get_default_config
from karl.scripting import open_root
from repoze.bfg.traversal import model_path
from repoze.bfg.traversal import find_model
from repoze.folder.interfaces import IFolder
from repoze.lemonade.content import get_content_type
from repoze.workflow import get_workflow
import logging
import sys
import transaction
log = logging.getLogger(__name__)
def postorder(startnode):
def visit(node):
if IFolder.providedBy(node):
for child in node.values():
for result in visit(child):
yield result
yield node
if hasattr(node, '_p_deactivate'):
# attempt to not run out of memory
node._p_deactivate()
return visit(startnode)
def move_content(root, src, dst, wf_state):
try:
context = find_model(root, src)
except KeyError:
print >>sys.stderr, "Source content not found: %s" % src
sys.exit(-1)
try:
dest_folder = find_model(root, dst)
except KeyError:
print >>sys.stderr, "Destination folder not found: %s" % dst
sys.exit(-1)
src_community = find_community(context)
catalog = find_catalog(root)
assert catalog is not None
users = find_users(root)
assert users is not None
if src_community is not None:
move_header = ('<p><em>This was originally authored '
'in the "%s" community.</em></p>' %
src_community.title)
else:
move_header = ''
src_folder = context.__parent__
name = context.__name__
log.info("Moving %s", model_path(context))
for obj in postorder(context):
if hasattr(obj, 'docid'):
docid = obj.docid
catalog.document_map.remove_docid(docid)
catalog.unindex_doc(docid)
del src_folder[name]
if (context.creator != 'admin'
and users.get_by_id(context.creator) is None):
# give the entry to the system admin
log.warning(
"User %s not found; reassigning to admin", context.creator)
context.creator = 'admin'
if name in dest_folder:
name = make_unique_name(dest_folder, context.title)
dest_folder[name] = context
for obj in postorder(context):
if hasattr(obj, 'docid'):
docid = obj.docid
catalog.document_map.add(model_path(obj), docid)
catalog.index_doc(docid, obj)
if wf_state is not None:
wf = get_workflow(get_content_type(context), 'security', context)
wf.transition_to_state(context, None, wf_state)
if hasattr(context, 'text'):
context.text = "%s\n%s" % (move_header, context.text)
def main(argv=sys.argv):
logging.basicConfig()
log.setLevel(logging.INFO)
parser = OptionParser(
description="Move content to another folder",
usage="%prog [options] content_path dest_folder",
)
parser.add_option('-C', '--config', dest='config', default=None,
help="Specify a paster config file. Defaults to $CWD/etc/karl.ini")
parser.add_option('-d', '--dry-run', dest='dry_run',
action="store_true", default=False,
help="Don't commit the transaction")
parser.add_option('-S', '--security-state', dest='security_state',
default=None,
help="Force workflow transition to given state. By "
"default no transition is performed.")
options, args = parser.parse_args()
if len(args) != 2:
parser.error("Source content and destination folder are required")
config = options.config
if not config:
config = get_default_config()
root, closer = open_root(config)
try:
move_content(root, args[0], args[1], options.security_state)
except:
transaction.abort()
raise
else:
if options.dry_run:
log.info("Aborting transaction.")
transaction.abort()
else:
log.info("Committing transaction.")
transaction.commit()
if __name__ == '__main__':
main()
|
boothead/karl
|
karl/scripts/mvcontent.py
|
Python
|
gpl-2.0
| 5,598
|
[
"VisIt"
] |
a6ba758a84e44b0d9b6037e16851eb9d6b8429989ac80a53b663da03d6e26aff
|
# Dataset created from this:
# Elements of Statistical Learning 2nd Ed.; Hastie, Tibshirani, Friedman; Feb 2011
# example 10.2 page 357
# Ten features, standard independent Gaussian. Target y is:
# y[i] = 1 if sum(X[i]**2) > 9.34 else -1
# 9.34 is the median of a chi-squared random variable with 10 degrees of freedom
# (sum of squares of 10 standard Gaussians)
# http://www.stanford.edu/~hastie/local.ftp/Springer/ESLII_print5.pdf
# from sklearn.datasets import make_hastie_10_2
# import numpy as np
# i = 1000000
# f = 10
# (X,y) = make_hastie_10_2(n_samples=i,random_state=None)
# y.shape = (i,1)
# Y = np.hstack((X,y))
# np.savetxt('./1mx' + str(f) + '_hastie_10_2.data', Y, delimiter=',', fmt='%.2f');
import unittest, time, sys, copy
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_util, h2o_import as h2i
def glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=30):
print "\nStarting GLM of", csvFilename
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname,
hex_key=csvFilename + ".hex", schema='put', timeoutSecs=30)
y = 10
# Took n_folds out, because GLM doesn't include n_folds time and it's slow
# wanted to compare GLM time to my measured time
# hastie has two values, 1 and -1. need to use case for one of them
kwargs = {'response': y, 'alpha': 0, 'family': 'binomial'}
h2o.nodes[0].to_enum(src_key=parseResult['destination_key'], column_index=y+1)
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "GLM in", (time.time() - start), "secs (python measured)"
h2o_glm.simpleCheckGLM(self, glm, "C8", **kwargs)
# compare this glm to the first one. since the files are replications, the results
# should be similar?
glm_model = glm['glm_model']
validation = glm_model['submodels'][0]['validation']
if self.validation1:
h2o_glm.compareToFirstGlm(self, 'auc', validation, self.validation1)
else:
self.validation1 = copy.deepcopy(validation)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
validation1 = {}
def test_GLM2_hastie_shuffle(self):
# gunzip it and cat it to create 2x and 4x replications in SYNDATASETS_DIR
# FIX! eventually we'll compare the 1x, 2x and 4x results like we do
# in other tests. (catdata?)
# This test also adds file shuffling, to see that row order doesn't matter
csvFilename = "1mx10_hastie_10_2.data.gz"
bucket = 'home-0xdiag-datasets'
csvPathname = 'standard' + '/' + csvFilename
fullPathname = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=30)
filename1x = "hastie_1x.data"
pathname1x = SYNDATASETS_DIR + '/' + filename1x
h2o_util.file_gunzip(fullPathname, pathname1x)
filename1xShuf = "hastie_1x.data_shuf"
pathname1xShuf = SYNDATASETS_DIR + '/' + filename1xShuf
h2o_util.file_shuffle(pathname1x, pathname1xShuf)
filename2x = "hastie_2x.data"
pathname2x = SYNDATASETS_DIR + '/' + filename2x
h2o_util.file_cat(pathname1xShuf, pathname1xShuf, pathname2x)
filename2xShuf = "hastie_2x.data_shuf"
pathname2xShuf = SYNDATASETS_DIR + '/' + filename2xShuf
h2o_util.file_shuffle(pathname2x, pathname2xShuf)
glm_doit(self, filename2xShuf, None, pathname2xShuf, timeoutSecs=45)
# too big to shuffle?
filename4x = "hastie_4x.data"
pathname4x = SYNDATASETS_DIR + '/' + filename4x
h2o_util.file_cat(pathname2xShuf,pathname2xShuf,pathname4x)
glm_doit(self,filename4x, None, pathname4x, timeoutSecs=120)
if __name__ == '__main__':
h2o.unit_main()
|
111t8e/h2o-2
|
py/testdir_single_jvm/test_GLM2_hastie_shuffle.py
|
Python
|
apache-2.0
| 4,072
|
[
"Gaussian"
] |
a0ff55b8108c6c52745087ee9471a857e41bc390bf689c203c30a3346c39eb02
|
import csv
from typing import Dict
import io
import random
import sys
def _sanitize_id(x: str) -> str:
return ''.join(str(random.randint(0, 9)) for _ in range(len(x)))
first_names = [
'Tom', 'Mary', 'Patricia', 'Linda', 'John', 'Brian', 'Jim', 'Nick', 'Sally',
'Cindy', 'Kelly', 'Kim', 'Maria'
]
last_names = [
'Smith', 'Johnson', 'Williams', 'Jones', 'Brown', 'Davis', 'Miller',
'Wilson', 'Moore', 'Taylor', 'Anderson', 'Thomas', 'Jackson', 'White',
'Harris', 'Martin', 'Thompson'
]
def _make_random_name():
return random.choice(first_names) + ' ' + random.choice(last_names)
def _make_random_account():
return 'Visa Debit *' + ''.join(str(random.randint(0, 9)) for _ in range(4))
class EntrySanitizer(object):
def __init__(self):
self._name_map = {'': ''} # Dict[str, str]
self._account_map = {
'': '',
'Venmo balance': 'Venmo balance'
} # Dict[str, str]
def _sanitize_name(self, name: str) -> str:
return self._name_map.setdefault(name, _make_random_name())
def _sanitize_account(self, x: str) -> str:
return self._account_map.setdefault(x, _make_random_account())
def sanitize(self, row: Dict[str, str]) -> Dict[str, str]:
new_row = row.copy()
new_row[' ID'] = _sanitize_id(row[' ID'])
for key in ['From', 'To']:
new_row[key] = self._sanitize_name(row[key])
for key in ['Funding Source', 'Destination']:
new_row[key] = self._sanitize_account(row[key])
return new_row
def sanitize_transactions_data(contents: str) -> str:
reader = csv.DictReader(io.StringIO(contents))
entry_santizer = EntrySanitizer()
assert reader.fieldnames is not None
output = io.StringIO()
writer = csv.DictWriter(output, reader.fieldnames, dialect='unix', quoting=csv.QUOTE_ALL)
writer.writeheader()
for line in reader:
writer.writerow(entry_santizer.sanitize(line))
return output.getvalue()
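# A minimal illustration (the CSV snippet below is made up, not a real Venmo
# export; only the columns touched by EntrySanitizer are shown):
#
#   sample = (
#       '" ID","From","To","Funding Source","Destination"\n'
#       '"1234567","Alice Example","Bob Example","Visa Debit *0000",""\n'
#   )
#   print(sanitize_transactions_data(sample))
#
# The output keeps the column layout but replaces the ID digits, both names,
# and the funding source with randomly generated stand-ins.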
if __name__ == '__main__':
print(sanitize_transactions_data(sys.stdin.read()))
|
jbms/beancount-import
|
beancount_import/source/venmo_sanitize.py
|
Python
|
gpl-2.0
| 2,089
|
[
"Brian"
] |
46f0dbfbcd87c524adebb60e4ab2e362cc7209c74384be475fc4054f28ec76e4
|
import pdb
import numpy as np
import math
import time
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda
from util import gaussian_logp
from util import gaussian_logp0
class VAE(chainer.Chain):
def __init__(self, dim_in, dim_hidden, dim_latent, num_layers, temperature, num_zsamples=1):
super(VAE, self).__init__()
# initialise first encoder and decoder hidden layer separately because
# the input and output dims differ from the other hidden layers
# q(a|x)
self.qlina0 = L.Linear(dim_in, dim_hidden)
self.plina0 = L.Linear(dim_in+dim_latent, dim_hidden)
self._children.append('qlina0')
self._children.append('plina0')
# q(z|a,x)
self.qlinz0 = L.Linear(dim_in+dim_latent, dim_hidden)
self.plinx0 = L.Linear(dim_latent, dim_hidden)
self._children.append('qlinz0')
self._children.append('plinx0')
# Set up the auxiliary inference model q(a|x) and the latent inference model q(z|a,x)
for i in range(num_layers-1):
# encoder for a
layer_name = 'qlina' + str(i+1)
setattr(self, layer_name, L.Linear(2*dim_hidden, dim_hidden))
self._children.append(layer_name)
# decoder for a
layer_name = 'plina' + str(i+1)
setattr(self, layer_name, L.Linear(2*dim_hidden, dim_hidden))
self._children.append(layer_name)
# encoder for z
layer_name = 'qlinz' + str(i+1)
setattr(self, layer_name, L.Linear(2*dim_hidden, dim_hidden))
self._children.append(layer_name)
# decoder for z
layer_name = 'plinx' + str(i+1)
setattr(self, layer_name, L.Linear(2*dim_hidden, dim_hidden))
self._children.append(layer_name)
# initialise the encoder and decoder output layer separately because
# the input and output dims differ from the other hidden layers
self.qlina_mu = L.Linear(2*dim_hidden, dim_latent)
self.qlina_ln_var = L.Linear(2*dim_hidden, dim_latent)
self.qlinz_mu = L.Linear(2*dim_hidden, dim_latent)
self.qlinz_ln_var = L.Linear(2*dim_hidden, dim_latent)
self.plina_mu = L.Linear(2*dim_hidden, dim_latent)
self.plina_ln_var = L.Linear(2*dim_hidden, dim_latent)
self.plinx_mu = L.Linear(2*dim_hidden, dim_in)
self.plinx_ln_var = L.Linear(2*dim_hidden, dim_in)
self._children.append('qlina_mu')
self._children.append('qlina_ln_var')
self._children.append('qlinz_mu')
self._children.append('qlinz_ln_var')
self._children.append('plina_mu')
self._children.append('plina_ln_var')
self._children.append('plinx_mu')
self._children.append('plinx_ln_var')
self.num_layers = num_layers
self.temperature = temperature
self.num_zsamples = num_zsamples
self.epochs_seen = 0
def encode_a(self, x):
a_params = F.crelu(self.qlina0(x))
for i in range(self.num_layers-1):
layer_name = 'qlina' + str(i+1)
a_params = F.crelu(self[layer_name](a_params))
self.qmu_a = self.qlina_mu(a_params)
self.qln_var_a = self.qlina_ln_var(a_params)
return self.qmu_a, self.qln_var_a
def encode_z(self, x, a):
# a = F.gaussian(self.qmu_a, self.qln_var_a) # This should be outside the encoding function. Pass the function a.
net_input = F.concat((x,a), axis=1)
h = F.crelu(self.qlinz0(net_input))
for i in range(self.num_layers-1):
layer_name = 'qlinz' + str(i+1)
h = F.crelu(self[layer_name](h))
self.qmu_z = self.qlinz_mu(h)
self.qln_var_z = self.qlinz_ln_var(h)
return self.qmu_z, self.qln_var_z
def decode_a(self, z, x):
net_input = F.concat((x,z), axis=1)
h = F.crelu(self.plina0(net_input))
for i in range(self.num_layers-1):
layer_name = 'plina' + str(i+1)
h = F.crelu(self[layer_name](h))
self.pmu_a = self.plina_mu(h)
self.pln_var_a = self.plina_ln_var(h)
return self.pmu_a, self.pln_var_a
def decode(self,z):
h = F.crelu(self.plinx0(z))
for i in range(self.num_layers-1):
layer_name = 'plinx' + str(i+1)
h = F.crelu(self[layer_name](h))
self.pmu = self.plinx_mu(h)
self.pln_var = self.plinx_ln_var(h)
return self.pmu, self.pln_var
def __call__(self, x):
# Compute parameters for q(z|x, a)
encoding_time_1 = time.time()
qmu_a, qln_var_a = self.encode_a(x)
encoding_time_1 = float(time.time() - encoding_time_1)
a_enc = F.gaussian(qmu_a, qln_var_a)
encoding_time_2 = time.time()
qmu_z, qln_var_z = self.encode_z(x, a_enc)
encoding_time_2 = float(time.time() - encoding_time_2)
encoding_time = encoding_time_1 + encoding_time_2
decoding_time_average = 0.
self.kl = 0
self.logp = 0
logp_a_xz = 0
logp_x_z = 0
logp_z = 0
logq_a_x = 0
logq_z_ax = 0
current_temperature = min(self.temperature['value'],1.0)
self.temperature['value'] += self.temperature['increment']
for j in xrange(self.num_zsamples):
# z ~ q(z|x, a)
z = F.gaussian(self.qmu_z, self.qln_var_z)
# Compute p(x|z)
decoding_time = time.time()
pmu_a, pln_var_a = self.decode_a(z, x)
pmu_x, pln_var_x = self.decode(z)
decoding_time = time.time() - decoding_time
decoding_time_average += decoding_time
logp_a_xz += gaussian_logp(a_enc, pmu_a, pln_var_a)
logp_x_z += gaussian_logp(x, pmu_x, pln_var_x)
logp_z += current_temperature*gaussian_logp0(z)
logq_a_x += gaussian_logp(a_enc, qmu_a, qln_var_a)
logq_z_ax += current_temperature*gaussian_logp(z, qmu_z, qln_var_z)
logp_a_xz /= self.num_zsamples
logp_x_z /= self.num_zsamples
logp_z /= self.num_zsamples
logq_a_x /= self.num_zsamples
logq_z_ax /= self.num_zsamples
decoding_time_average /= self.num_zsamples
self.logp /= self.num_zsamples
self.obj_batch = logp_a_xz + logp_x_z + logp_z - logq_a_x - logq_z_ax
self.kl = logq_z_ax - logp_z
self.logp = logp_x_z
self.timing_info = np.array([encoding_time,decoding_time_average])
batch_size = self.obj_batch.shape[0]
self.obj = -F.sum(self.obj_batch)/batch_size
return self.obj
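# A hedged usage sketch (the dimensions, temperature schedule, and optimiser
# below are illustrative and not taken from the original training script):
#
#   model = VAE(dim_in=784, dim_hidden=500, dim_latent=50, num_layers=2,
#               temperature={'value': 0.1, 'increment': 1e-4}, num_zsamples=1)
#   optimizer = chainer.optimizers.Adam()
#   optimizer.setup(model)
#   optimizer.update(model, x_batch)  # x_batch: float32 array of shape (batch, 784)
#
# Each optimizer.update call evaluates model(x_batch), which returns the
# negative evidence lower bound averaged over the batch, and takes one step.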
|
ashwindcruz/dgm
|
adgm_pose/model.py
|
Python
|
mit
| 6,764
|
[
"Gaussian"
] |
63f87a7ce9b4f3aa324c69b8bb534c28a383cbb4877ece19b6a002f85e81718a
|
"""
LDA: Linear Discriminant Analysis
"""
# Authors: Matthieu Perrot
# Mathieu Blondel
import warnings
import numpy as np
from scipy import linalg, ndimage
from .base import BaseEstimator, ClassifierMixin, TransformerMixin
from .utils.extmath import logsum
class LDA(BaseEstimator, ClassifierMixin, TransformerMixin):
"""
Linear Discriminant Analysis (LDA)
Parameters
----------
n_components: int
Number of components (< n_classes - 1)
priors : array, optional, shape = [n_classes]
Priors on classes
Attributes
----------
`means_` : array-like, shape = [n_classes, n_features]
Class means
`xbar_` : float, shape = [n_features]
Over all mean
`priors_` : array-like, shape = [n_classes]
Class priors (sum to 1)
`covariance_` : array-like, shape = [n_features, n_features]
Covariance matrix (shared by all classes)
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None)
>>> print clf.predict([[-0.8, -1]])
[1]
See also
--------
QDA
"""
def __init__(self, n_components=None, priors=None):
self.n_components = n_components
self.priors = np.asarray(priors) if priors is not None else None
if self.priors is not None:
if (self.priors < 0).any():
raise ValueError('priors must be non-negative')
if self.priors.sum() != 1:
print 'warning: the priors do not sum to 1. Renormalizing'
self.priors = self.priors / self.priors.sum()
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""
Fit the LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariance : boolean
If True the covariance matrix (shared by all classes) is computed
and stored in self.covariance_ attribute.
"""
X = np.asanyarray(X)
y = np.asanyarray(y)
if y.dtype.char.lower() not in ('b', 'h', 'i'):
# We need integer values to be able to use
# ndimage.measurements and np.bincount on numpy >= 2.0.
# We currently support (u)int8, (u)int16 and (u)int32.
# Note that versions of scipy >= 0.8 can also accept
# (u)int64. We however don't support it for backwards
# compatibility.
y = y.astype(np.int32)
if X.ndim != 2:
raise ValueError('X must be a 2D array')
if X.shape[0] != y.shape[0]:
raise ValueError(
'Incompatible shapes: X has %s samples, while y '
'has %s' % (X.shape[0], y.shape[0]))
n_samples = X.shape[0]
n_features = X.shape[1]
classes = np.unique(y)
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
classes_indices = [(y == c).ravel() for c in classes]
if self.priors is None:
counts = np.array(ndimage.measurements.sum(
np.ones(n_samples, dtype=y.dtype), y, index=classes))
self.priors_ = counts / float(n_samples)
else:
self.priors_ = self.priors
# Group means n_classes*n_features matrix
means = []
Xc = []
cov = None
if store_covariance:
cov = np.zeros((n_features, n_features))
for group_indices in classes_indices:
Xg = X[group_indices, :]
meang = Xg.mean(0)
means.append(meang)
# centered group data
Xgc = Xg - meang
Xc.append(Xgc)
if store_covariance:
cov += np.dot(Xgc.T, Xgc)
if store_covariance:
cov /= (n_samples - n_classes)
self.covariance_ = cov
self.means_ = np.asarray(means)
Xc = np.concatenate(Xc, 0)
# ----------------------------
        # 1) within (univariate) scaling by within-class std-dev
scaling = 1. / Xc.std(0)
fac = float(1) / (n_samples - n_classes)
# ----------------------------
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc * scaling)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
# Scaling of within covariance is: V' 1/S
scaling = (scaling * V.T[:, :rank].T).T / S[:rank]
## ----------------------------
## 3) Between variance scaling
# Overall mean
xbar = np.dot(self.priors_, self.means_)
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(means - xbar).T).T, scaling)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use svd to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
# compose the scalings
self.scaling = np.dot(scaling, V.T[:, :rank])
self.xbar_ = xbar
# weight vectors / centroids
self.coef_ = np.dot(self.means_ - self.xbar_, self.scaling)
self.intercept_ = -0.5 * np.sum(self.coef_ ** 2, axis=1) + \
np.log(self.priors_)
self.classes = classes
return self
def decision_function(self, X):
"""
This function return the decision function values related to each
class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
X = np.asanyarray(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scaling)
return np.dot(X, self.coef_.T) + self.intercept_
def transform(self, X):
"""
Project the data so as to maximize class separation (large separation
between projected class means and small variance within each class).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
"""
X = np.asanyarray(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scaling)
n_comp = X.shape[1] if self.n_components is None else self.n_components
return np.dot(X, self.coef_[:n_comp].T)
def predict(self, X):
"""
This function does classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self.decision_function(X)
y_pred = self.classes[d.argmax(1)]
return y_pred
def predict_proba(self, X):
"""
This function return posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self.decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""
This function return posterior log-probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self.decision_function(X)
loglikelihood = (values - values.max(axis=1)[:, np.newaxis])
normalization = logsum(loglikelihood, axis=1)
return loglikelihood - normalization[:, np.newaxis]
|
ominux/scikit-learn
|
sklearn/lda.py
|
Python
|
bsd-3-clause
| 8,887
|
[
"Gaussian"
] |
fd24022fbc67ee45497ce5f9d3f6c3956b145fda908c1b57c1b6dff0d789ffa8
|
#!/usr/bin/python
from numpy import ones, append, array, mean, std;
from math import sqrt, exp, pi;
from math import log10;
def checkScales(data, deg):
    'If factors have varying scales, the module recommends normalizing the data'
cols = data.shape[1];
mx, mn = [], [];
for i in range(cols): #find the exponent value for each columns' max min
mx.append( abs( log10( max( data[:, i] ) ) ) );
mn.append( abs( log10( min( data[:, i] ) ) ) );
for i in range(cols-1): #determine if columns deviate more than deg from each other
for j in range(i+1, cols):
if abs(mx[i]-mx[j]) > deg or abs(mn[i]-mn[j]) > deg:
return True; #data needs to be normalized
return False; #data does not need to be normalized
def calcMu(nums):
return sum(nums)/float(len(nums));
def calcSig(nums):
mu = calcMu(nums);
var = sum([pow(x-mu,2) for x in nums])/float(len(nums)-1);
return sqrt(var);
def normalize(data):
norm = data;
cols = data.shape[1];
mu, sig = [], [];
for i in range(cols):
m = mean(data[:, i]); mu.append(m);
s = std(data[:, i]); sig.append(s);
norm[:, i] = (data[:, i] - m) / s;
return norm, mu, sig;
class NaiveBayes(object):
def __init__(self):
self.summaries = 0;
        self.db = {}; #dictionary database of additional information for classification and prediction
def separateByClass(self, data):
        'separates training data by their outcome class and creates a dictionary \
        where the class is the key, and training examples are items'
partedClasses = {} #partition data by their respective outcome class (last column)
for dat in data:
if dat[-1] not in partedClasses:
partedClasses[ dat[-1] ] = [] #create a dictionary entry for the new class type
partedClasses[ dat[-1] ].append( dat[:-1] ) #append values for features
return partedClasses;
def summarize(self, dataset):
        'computes mean and standard deviation for a vector dataset \
if you want to normalize a matrix, use the normalize()'
return [(calcMu(attribute), calcSig(attribute)) for attribute in zip(*dataset)];
def summarizeByClass(self, dataset):
        'creates a dictionary for each class where the keys are \
        the class type (decision outcomes) and the item values are the respective data examples'
partedClasses = self.separateByClass(dataset);
self.summaries = {};
for classVal, instances in partedClasses.iteritems():
#summarize data by computing the mean and std for each column
self.summaries[classVal] = self.summarize(instances);
    def calcProbability(self, x, mu, sig): #formula is the Gaussian probability density function
'computes the probability of a single feature to occur'
#print x, mu, sig;
exponent = exp( -( pow(x-mu, 2) / ( 2*pow(sig, 2) ) ) );
return (1 / (sqrt(2.0*pi) * sig)) * exponent;
def calcClassProbabilities(self, inputVector):
'computes probability of all class to occur for the given feature values'
probabilities = {};
for classVal, classSummaries in self.summaries.iteritems(): #for each class
probabilities[classVal] = 1;
for i in range(len(classSummaries)):#for each feature of a class
mu, sig = classSummaries[i];
#compute the probability for the given input vector
x = inputVector[i];
probabilities[classVal] *= self.calcProbability(x, mu, sig);
return probabilities;
def predict(self, inputVector):
'evaluates which class is most likely to occur for the given input vector'
probabilities = self.calcClassProbabilities(inputVector);
label, maxProb = None, 0;
for classVal, prob in probabilities.iteritems():
#print classVal, prob;
if prob > maxProb:
label = classVal;
maxProb = prob;
return label, maxProb;
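# A small usage sketch (the rows below are made-up measurements; the last
# column of each training row is the class label):
#
#   nb = NaiveBayes();
#   nb.summarizeByClass([[1.0, 20.0, 0], [1.2, 22.0, 0],
#                        [3.5, 30.0, 1], [3.7, 31.0, 1]]);
#   label, prob = nb.predict([3.6, 30.5]);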
############### REGRESSION CLASSIFIER DOES NOT WORK FOR BOUNDED TARGET VALUE #######
############### PENDING MODIFICATION TO BETA-REGRESSION ############################
class LinRegress(object):
def __init__(self):
self.theta = 0; #predictor weight(s) for factors
def computeCost(self, rows, theta):
'computes square error between prediction and expected value'
hyp = self.X.dot(theta);
sqError = (hyp - self.Y)**2;
return (1.0 / (2 * rows)) * sum(sqError);
def gradientDescent(self, rows, steps, theta):
'minimizes error between prediction and expected value, descent is optimized by use of adaptive alpha'
curError = 1 + max(self.Y); #assign the expected error some large value
X_trans = self.X.T;
alpha = 0.01;
rho = 1.1;
sig = 0.5;
for i in range(steps):
hyp = self.X.dot(theta);
theta -= alpha * (1.0 / rows) * (X_trans.dot(hyp - self.Y));
#increase alpha as long as error keeps minimizing, else take a step back and continue
error = self.computeCost(rows, theta);
#print error[0], '|', curError[0];
if error < curError[0]:
curError = error;
alpha = alpha * rho;
self.theta = theta;
else:
alpha = alpha * sig;
theta = self.theta;
def train(self, steps, X, y):
self.Y = y;
rows, cols = X.shape;
self.X = ones((rows, cols+1)); #default bias is 1
self.theta = ones(shape=(cols+1,1));
self.X[:, 1:] = X;
self.gradientDescent(rows, steps, self.theta);
def predict(self, dat):
'returns a prediction for the datum.'
if type(self.theta) == int:
print '\n> Predictor is not trained Yet. Please train before making any predictions\n';
return None;
else:
datum = ones((1, self.theta.shape[0])); #insert bias into input data
datum[:, 1:] = dat;
return datum.dot(self.theta)[0][0]; #compute prediction
#X = data[:, :cols-1]; #first columns are features for the outcome
#self.Y = data[:, cols-1]; #last column is outcome
#self.Y.shape = [rows, 1]; #properly define the shape of y
#self.Y = self.Y;
#if checkScales(X, deg): #check if data needs to be normalized
# X = normalize(X);
#dat = fromiter(dat.split(self.delim), dtype = float); #split dat string to np.array for prediction
##################################################################
|
parejadan/accurp-engine
|
src/predictors.py
|
Python
|
mit
| 5,889
|
[
"Gaussian"
] |
654f3e0049336ba238df62232d21ad89b16e6d479a05cd6d809616d2604b8ae5
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_applicationprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of ApplicationProfile Avi RESTful Object
description:
- This module is used to configure ApplicationProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- User defined description for the object.
dns_service_profile:
description:
- Specifies various dns service related controls for virtual service.
dos_rl_profile:
description:
- Specifies various security related controls for virtual service.
http_profile:
description:
- Specifies the http application proxy profile parameters.
name:
description:
- The name of the application profile.
required: true
preserve_client_ip:
description:
- Specifies if client ip needs to be preserved for backend connection.
- Not compatible with connection multiplexing.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
tcp_app_profile:
description:
- Specifies the tcp application proxy profile parameters.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Specifies which application layer proxy is enabled for the virtual service.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the application profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create an Application Profile for HTTP application enabled for SSL traffic
avi_applicationprofile:
controller: ''
username: ''
password: ''
http_profile:
cache_config:
age_header: true
aggressive: false
date_header: true
default_expire: 600
enabled: false
heuristic_expire: false
max_cache_size: 0
max_object_size: 4194304
mime_types_group_refs:
- admin:System-Cacheable-Resource-Types
min_object_size: 100
query_cacheable: false
xcache_header: true
client_body_timeout: 0
client_header_timeout: 10000
client_max_body_size: 0
client_max_header_size: 12
client_max_request_size: 48
compression_profile:
compressible_content_ref: admin:System-Compressible-Content-Types
compression: false
remove_accept_encoding_header: true
type: AUTO_COMPRESSION
connection_multiplexing_enabled: true
hsts_enabled: false
hsts_max_age: 365
http_to_https: false
httponly_enabled: false
keepalive_header: false
keepalive_timeout: 30000
max_bad_rps_cip: 0
max_bad_rps_cip_uri: 0
max_bad_rps_uri: 0
max_rps_cip: 0
max_rps_cip_uri: 0
max_rps_unknown_cip: 0
max_rps_unknown_uri: 0
max_rps_uri: 0
post_accept_timeout: 30000
secure_cookie_enabled: false
server_side_redirect_to_https: false
spdy_enabled: false
spdy_fwd_proxy_mode: false
ssl_client_certificate_mode: SSL_CLIENT_CERTIFICATE_NONE
ssl_everywhere_enabled: false
websockets_enabled: true
x_forwarded_proto_enabled: false
xff_alternate_name: X-Forwarded-For
xff_enabled: true
name: System-HTTP
tenant_ref: admin
type: APPLICATION_PROFILE_TYPE_HTTP
'''
RETURN = '''
obj:
description: ApplicationProfile (api/applicationprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
description=dict(type='str',),
dns_service_profile=dict(type='dict',),
dos_rl_profile=dict(type='dict',),
http_profile=dict(type='dict',),
name=dict(type='str', required=True),
preserve_client_ip=dict(type='bool',),
tcp_app_profile=dict(type='dict',),
tenant_ref=dict(type='str',),
type=dict(type='str', required=True),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'applicationprofile',
set([]))
if __name__ == '__main__':
main()
|
bjolivot/ansible
|
lib/ansible/modules/network/avi/avi_applicationprofile.py
|
Python
|
gpl-3.0
| 6,354
|
[
"VisIt"
] |
51784f8ce6966c417c840f7c96e16a5587676837457eac4f73e8aa5b1994271d
|
"""
Acceptance tests for Studio related to the acid xblock.
"""
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from common.test.acceptance.pages.xblock.acid import AcidView
from common.test.acceptance.tests.helpers import AcceptanceTest
class XBlockAcidBase(AcceptanceTest):
"""
Base class for tests that verify that XBlock integration is working correctly
"""
__test__ = False
def setUp(self):
"""
Create a unique identifier for the course used in this test.
"""
# Ensure that the superclass sets up
super(XBlockAcidBase, self).setUp()
# Define a unique course identifier
self.course_info = {
'org': 'test_org',
'number': 'course_' + self.unique_id[:5],
'run': 'test_' + self.unique_id,
'display_name': 'Test Course ' + self.unique_id
}
self.outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_id = '{org}.{number}.{run}'.format(**self.course_info)
self.setup_fixtures()
self.auth_page = AutoAuthPage(
self.browser,
staff=False,
username=self.user.get('username'),
email=self.user.get('email'),
password=self.user.get('password')
)
self.auth_page.visit()
def validate_acid_block_preview(self, acid_block):
"""
Validate the Acid Block's preview
"""
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.resource_url_passed)
self.assertTrue(acid_block.scope_passed('user_state'))
self.assertTrue(acid_block.scope_passed('user_state_summary'))
self.assertTrue(acid_block.scope_passed('preferences'))
self.assertTrue(acid_block.scope_passed('user_info'))
def test_acid_block_preview(self):
"""
Verify that all expected acid block tests pass in studio preview
"""
self.outline.visit()
subsection = self.outline.section('Test Section').subsection('Test Subsection')
unit = subsection.expand_subsection().unit('Test Unit').go_to()
acid_block = AcidView(self.browser, unit.xblocks[0].preview_selector)
self.validate_acid_block_preview(acid_block)
def test_acid_block_editor(self):
"""
Verify that all expected acid block tests pass in studio editor
"""
self.outline.visit()
subsection = self.outline.section('Test Section').subsection('Test Subsection')
unit = subsection.expand_subsection().unit('Test Unit').go_to()
acid_block = AcidView(self.browser, unit.xblocks[0].edit().editor_selector)
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.resource_url_passed)
class XBlockAcidNoChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with no children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block')
)
)
)
).install()
self.user = course_fix.user
class XBlockAcidParentBase(XBlockAcidBase):
"""
Base class for tests that verify that parent XBlock integration is working correctly
"""
__test__ = False
def validate_acid_block_preview(self, acid_block):
super(XBlockAcidParentBase, self).validate_acid_block_preview(acid_block)
self.assertTrue(acid_block.child_tests_passed)
def test_acid_block_preview(self):
"""
Verify that all expected acid block tests pass in studio preview
"""
self.outline.visit()
subsection = self.outline.section('Test Section').subsection('Test Subsection')
unit = subsection.expand_subsection().unit('Test Unit').go_to()
container = unit.xblocks[0].go_to_container()
acid_block = AcidView(self.browser, container.xblocks[0].preview_selector)
self.validate_acid_block_preview(acid_block)
class XBlockAcidEmptyParentTest(XBlockAcidParentBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
)
)
)
)
).install()
self.user = course_fix.user
class XBlockAcidChildTest(XBlockAcidParentBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
XBlockFixtureDesc('acid', 'First Acid Child', metadata={'name': 'first'}),
XBlockFixtureDesc('acid', 'Second Acid Child', metadata={'name': 'second'}),
XBlockFixtureDesc('html', 'Html Child', data="<html>Contents</html>"),
)
)
)
)
).install()
self.user = course_fix.user
def test_acid_block_preview(self):
super(XBlockAcidChildTest, self).test_acid_block_preview()
def test_acid_block_editor(self):
super(XBlockAcidChildTest, self).test_acid_block_editor()
|
fintech-circle/edx-platform
|
common/test/acceptance/tests/studio/test_studio_acid_xblock.py
|
Python
|
agpl-3.0
| 7,011
|
[
"VisIt"
] |
f6dae4c25ccc2154ce38df7522c7b004d9dd39ebbff24797f3c4a586ae415f7d
|
# Author(s): TJ Lane (tjlane@stanford.edu) and Christian Schwantes
# (schwancr@stanford.edu)
# Contributors: Vince Voelz, Kyle Beauchamp, Robert McGibbon
# Copyright (c) 2014, Stanford University
# All rights reserved.
"""
Functions for computing forward committors for an MSM. The forward
committor is defined for a set of source states and a set of sink
states; for each state, the forward committor is the probability that
a walker starting at that state will visit the sink states before the
source states.
Some canonical references for TPT are listed below. Note that TPT
is really a specialization of ideas familiar from the
mathematical study of Markov chains, and many books and
manuscripts in the mathematical literature cover the same
concepts.
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
from mdtraj.utils.six.moves import xrange
__all__ = ['committors', 'conditional_committors',
'_committors', '_conditional_committors']
def committors(sources, sinks, msm):
"""
Get the forward committors of the reaction sources -> sinks.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
msm : msmbuilder.MarkovStateModel
MSM fit to the data.
Returns
-------
forward_committors : np.ndarray
The forward committors for the reaction sources -> sinks
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016.
"""
if hasattr(msm, 'all_transmats_'):
commits = np.zeros(msm.all_transmats_.shape[:2])
for i, tprob in enumerate(msm.all_transmats_):
commits[i, :] = _committors(sources, sinks, tprob)
return np.median(commits, axis=0)
return _committors(sources, sinks, msm.transmat_)
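# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Assuming a MarkovStateModel has been fit elsewhere with msmbuilder, the
# forward committors for a hypothetical reaction between state 0 and state 5
# could be obtained roughly like this (state indices are placeholders):
#
#     from msmbuilder.msm import MarkovStateModel
#     from msmbuilder.tpt import committors
#     msm = MarkovStateModel(lag_time=10).fit(discrete_trajectories)
#     q_plus = committors(sources=[0], sinks=[5], msm=msm)
#     # q_plus[i]: probability that a walker in state i reaches the sink
#     # set before the source set.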
def conditional_committors(source, sink, waypoint, msm,
forward_committors=None):
"""
Computes the conditional committors :math:`q^{ABC^+}`, which give the
probability of starting in one state and visiting state B before A while
also visiting state C at some point.
Note that in the notation of Dickson et al. this computes :math:`h_c(A,B)`,
with ``sources = A``, ``sinks = B``, ``waypoint = C``.
Parameters
----------
waypoint : int
The index of the intermediate state
source : int
The index of the source state
sink : int
The index of the sink state
msm : msmbuilder.MarkovStateModel
MSM to analyze.
forward_committors : ndarray
Forward committors source->sink, if pre-calculated
Returns
-------
cond_committors : np.ndarray
Conditional committors, i.e. the probability of visiting
a waypoint when on a path between source and sink.
See Also
--------
msmbuilder.tpt.fraction_visited : function
Calculate the fraction of visits to a waypoint from a given
source to a sink.
msmbuilder.tpt.hub_scores : function
Compute the 'hub score', the weighted fraction of visits for an
entire network.
Notes
-----
Employs dense linear algebra; memory use scales as N^2
and CPU time scales as N^3.
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052.
"""
# typecheck
for data in [source, sink, waypoint]:
if not isinstance(data, int):
raise ValueError("source, sink, and waypoint must be integers.")
if (source == waypoint) or (sink == waypoint) or (sink == source):
raise ValueError('source, sink, waypoint must all be disjoint!')
if hasattr(msm, 'all_transmats_'):
cond_committors = np.zeros(msm.all_transmats_.shape[:2])
for i, tprob in enumerate(msm.all_transmats_):
cond_committors[i, :] = _conditional_committors(source, sink,
waypoint, tprob,
forward_committors)
return np.median(cond_committors, axis=0)
return _conditional_committors(source, sink, waypoint, msm.transmat_,
forward_committors)
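# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Continuing the hypothetical example above, the probability of passing
# through a waypoint state (say state 3) on a 0 -> 5 path could be queried
# roughly as follows:
#
#     from msmbuilder.tpt import conditional_committors
#     h_c = conditional_committors(source=0, sink=5, waypoint=3, msm=msm)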
def _conditional_committors(source, sink, waypoint, tprob,
forward_committors=None):
"""
Computes the conditional committors :math:`q^{ABC^+}`, which give the
probability of starting in one state and visiting state B before A while
also visiting state C at some point.
Note that in the notation of Dickson et al. this computes :math:`h_c(A,B)`,
with ``sources = A``, ``sinks = B``, ``waypoint = C``.
Parameters
----------
waypoint : int
The index of the intermediate state
source : int
The index of the source state
sink : int
The index of the sink state
tprob : np.ndarray
Transition matrix
forward_committors : ndarray
Forward committors source->sink, if pre-calculated
Returns
-------
cond_committors : np.ndarray
Conditional committors, i.e. the probability of visiting
a waypoint when on a path between source and sink.
Notes
-----
Employs dense linear algebra; memory use scales as N^2
and CPU time scales as N^3.
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052.
"""
n_states = np.shape(tprob)[0]
if forward_committors is None:
forward_committors = _committors([source], [sink], tprob)
# permute the transition matrix into canonical form - send the waypoint to
# the last row, and the source + sink to the rows just before it
Bsink_indices = [source, sink, waypoint]
perm = np.array([i for i in xrange(n_states) if i not in Bsink_indices],
dtype=int)
perm = np.concatenate([perm, Bsink_indices])
permuted_tprob = tprob[perm, :][:, perm]
# extract P, R
n = n_states - len(Bsink_indices)
P = permuted_tprob[:n, :n]
R = permuted_tprob[:n, n:]
# calculate the conditional committors ( B = N*R ), B[i,j] is the prob
# state i ends in j, where j runs over the source + sink + waypoint
# (waypoint is position -1)
B = np.dot(np.linalg.inv(np.eye(n) - P), R)
# add probs for the sinks, waypoint / b[i] is P( i --> {C & not A, B} )
b = np.append(B[:, -1].flatten(), [0.0] * (len(Bsink_indices) - 1) + [1.0])
cond_committors = b * forward_committors[waypoint]
# get the original order
cond_committors = cond_committors[np.argsort(perm)]
return cond_committors
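# --- Editor's note on the calculation above (added comment) ---
# B = (I - P)^{-1} R is the absorption-probability matrix of the chain in
# which source, sink and waypoint are made absorbing; its last column gives
# P(reach the waypoint before the source or sink), which is then weighted by
# the waypoint's forward committor to obtain the conditional committors.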
def _committors(sources, sinks, tprob):
"""
Get the forward committors of the reaction sources -> sinks.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
tprob : np.ndarray
Transition matrix
Returns
-------
forward_committors : np.ndarray
The forward committors for the reaction sources -> sinks
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016.
"""
n_states = np.shape(tprob)[0]
sources = np.array(sources, dtype=int).reshape((-1, 1))
sinks = np.array(sinks, dtype=int).reshape((-1, 1))
# construct the committor problem
lhs = np.eye(n_states) - tprob
for a in sources:
lhs[a, :] = 0.0 # np.zeros(n)
lhs[:, a] = 0.0
lhs[a, a] = 1.0
for b in sinks:
lhs[b, :] = 0.0 # np.zeros(n)
lhs[:, b] = 0.0
lhs[b, b] = 1.0
ident_sinks = np.zeros(n_states)
ident_sinks[sinks] = 1.0
rhs = np.dot(tprob, ident_sinks)
rhs[sources] = 0.0
rhs[sinks] = 1.0
forward_committors = np.linalg.solve(lhs, rhs)
return forward_committors
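# --- Worked example (editor's addition, shown as comments only) ---
# For a small symmetric 3-state chain, the committor of the middle state is
# 0.5 by symmetry, which _committors reproduces:
#
#     tprob = np.array([[0.9, 0.1, 0.0],
#                       [0.1, 0.8, 0.1],
#                       [0.0, 0.1, 0.9]])
#     _committors([0], [2], tprob)   # -> array([0. , 0.5, 1. ])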
|
Eigenstate/msmbuilder
|
msmbuilder/tpt/committor.py
|
Python
|
lgpl-2.1
| 9,898
|
[
"MDTraj",
"VisIt"
] |
345067534d9109e61ffff1f87d126a1acae1eb321ce958aa2be0a5ac15c1f20b
|
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Anisotropic potentials."""
import json
from hoomd import md
from hoomd.md.pair.pair import Pair
from hoomd.logging import log
from hoomd.data.parameterdicts import ParameterDict, TypeParameterDict
from hoomd.data.typeparam import TypeParameter
from hoomd.data.typeconverter import OnlyTypes, OnlyFrom, positive_real
class AnisotropicPair(Pair):
r"""Generic anisotropic pair potential.
Users should not instantiate `AnisotropicPair` directly. It is a base
class that provides common features to all anisotropic pair forces.
All anisotropic pair potential commands specify that a given potential
energy, force and torque be computed on all non-excluded particle pairs in
the system within a short range cutoff distance :math:`r_{\mathrm{cut}}`.
The interaction energy, forces and torque depend on the inter-particle
separation :math:`\vec r` and on the orientations :math:`\vec q_i`,
:math:`\vec q_j` of the particles.
`AnisotropicPair` is similar to `hoomd.md.pair.Pair` except it does not
support the `xplor` shifting mode or `r_on`.
Args:
nlist (hoomd.md.nlist.NList) : The neighbor list.
default_r_cut (`float`, optional) : The default cutoff for the
potential, defaults to ``None`` which means no cutoff
:math:`[\mathrm{length}]`.
mode (`str`, optional) : the energy shifting mode, defaults to "none".
"""
def __init__(self, nlist, default_r_cut=None, mode="none"):
self._nlist = OnlyTypes(md.nlist.NList, strict=True)(nlist)
tp_r_cut = TypeParameter('r_cut', 'particle_types',
TypeParameterDict(positive_real, len_keys=2))
if default_r_cut is not None:
tp_r_cut.default = default_r_cut
self._param_dict.update(ParameterDict(mode=OnlyFrom(['none', 'shift'])))
self.mode = mode
self._add_typeparam(tp_r_cut)
def _return_type_shapes(self):
type_shapes = self.cpp_force.getTypeShapesPy()
ret = [json.loads(json_string) for json_string in type_shapes]
return ret
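# --- Illustrative usage (editor's sketch, assuming the HOOMD-blue 3.x API) ---
# Concrete subclasses below (e.g. Dipole or GayBerne) are attached to a
# simulation through an MD integrator; ``sim`` is a placeholder for a
# hoomd.Simulation created elsewhere.
#
#     import hoomd
#     nl = hoomd.md.nlist.Cell()
#     gb = hoomd.md.pair.GayBerne(nlist=nl, default_r_cut=2.5)
#     gb.params[('A', 'A')] = dict(epsilon=1.0, lperp=0.45, lpar=0.5)
#     integrator = hoomd.md.Integrator(dt=0.005, forces=[gb])
#     sim.operations.integrator = integrator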
class Dipole(AnisotropicPair):
r"""Screened dipole-dipole interactions.
Implements the force and energy calculations for both magnetic and
electronic dipole-dipole interactions. When particles have charge as well as
a dipole moment, the interactions are through electronic dipole moments. If
the particles have no charge then the interaction is through magnetic or
electronic dipoles. Note whether a dipole is magnetic or electronic does not
change the functional form of the potential only the units associated with
the potential parameters.
Args:
nlist (`hoomd.md.nlist.NList`): Neighbor list
default_r_cut (float): Default cutoff radius :math:`[\mathrm{length}]`.
mode (str): energy shifting/smoothing mode
`Dipole` computes the (screened) interaction between pairs of
particles with dipoles and electrostatic charges. The total energy
computed is:
.. math::
U_{dipole} = U_{dd} + U_{de} + U_{ee}
U_{dd} = A e^{-\kappa r}
\left(\frac{\vec{\mu_i}\cdot\vec{\mu_j}}{r^3}
- 3\frac{(\vec{\mu_i}\cdot \vec{r_{ji}})
(\vec{\mu_j}\cdot \vec{r_{ji}})}
{r^5}
\right)
U_{de} = A e^{-\kappa r}
\left(\frac{(\vec{\mu_j}\cdot \vec{r_{ji}})q_i}{r^3}
- \frac{(\vec{\mu_i}\cdot \vec{r_{ji}})q_j}{r^3}
\right)
U_{ee} = A e^{-\kappa r} \frac{q_i q_j}{r}
See `hoomd.md.pair.Pair` for details on how forces are calculated and the
available energy shifting and smoothing modes. Use the ``params`` dictionary
to set potential coefficients. The coefficients must be set per unique pair of
particle types.
Note:
All units are given for electronic dipole moments.
.. py:attribute:: params
The dipole potential parameters. The dictionary has the following
keys:
* ``A`` (`float`, **required**) - :math:`A` - electrostatic energy
scale (*default*: 1.0)
:math:`[\mathrm{energy} \cdot \mathrm{length} \cdot
\mathrm{charge}^{-2}]`
* ``kappa`` (`float`, **required**) - :math:`\kappa` - inverse
screening length :math:`[\mathrm{length}^{-1}]`
Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``],
`dict`]
.. py:attribute:: mu
:math:`\mu` - the dipole moment in the particle's local reference
frame, given as a tuple (i.e. :math:`(\mu_x, \mu_y, \mu_z)`)
:math:`[\mathrm{charge} \cdot \mathrm{length}]`.
Type: `TypeParameter` [``particle_type``, `tuple` [`float`, `float`,
`float` ]]
Example::
nl = nlist.Cell()
dipole = md.pair.Dipole(nl, default_r_cut=3.0)
dipole.params[('A', 'B')] = dict(A=1.0, kappa=4.0)
dipole.mu['A'] = (4.0, 1.0, 0.0)
"""
_cpp_class_name = "AnisoPotentialPairDipole"
def __init__(self, nlist, default_r_cut=None, mode='none'):
super().__init__(nlist, default_r_cut, mode)
params = TypeParameter(
'params', 'particle_types',
TypeParameterDict(A=float, kappa=float, len_keys=2))
mu = TypeParameter('mu', 'particle_types',
TypeParameterDict((float, float, float), len_keys=1))
self._extend_typeparam((params, mu))
class GayBerne(AnisotropicPair):
r"""Gay-Berne anisotropic pair potential.
Warning: The code has yet to be updated to the current API.
Args:
nlist (`hoomd.md.nlist.NList`): Neighbor list
default_r_cut (float): Default cutoff radius :math:`[\mathrm{length}]`.
mode (str): energy shifting/smoothing mode.
`GayBerne` computes the Gay-Berne potential between anisotropic
particles.
This version of the Gay-Berne potential supports identical pairs of uniaxial
ellipsoids, with orientation-independent energy-well depth. The potential
comes from the paper by Allen et al. (2006); see the `paper link`_.
.. _paper link: http://dx.doi.org/10.1080/00268970601075238
The interaction energy for this anisotropic pair potential is
.. math::
:nowrap:
\begin{eqnarray*}
V_{\mathrm{GB}}(\vec r, \vec e_i, \vec e_j)
= & 4 \varepsilon \left[ \zeta^{-12} - \zeta^{-6} \right];
& \zeta < \zeta_{\mathrm{cut}} \\
= & 0; & \zeta \ge \zeta_{\mathrm{cut}} \\
\end{eqnarray*}
.. math::
\zeta = \left(\frac{r-\sigma+\sigma_{\mathrm{min}}}
{\sigma_{\mathrm{min}}}\right)
\sigma^{-2} = \frac{1}{2} \hat{\vec{r}}
\cdot \vec{H^{-1}} \cdot \hat{\vec{r}}
\vec{H} = 2 \ell_\perp^2 \vec{1}
+ (\ell_\parallel^2 - \ell_\perp^2)
(\vec{e_i} \otimes \vec{e_i} + \vec{e_j} \otimes \vec{e_j})
with :math:`\sigma_{\mathrm{min}} = 2 \min(\ell_\perp, \ell_\parallel)`.
The cut-off parameter :math:`r_{\mathrm{cut}}` is defined for two particles
oriented parallel along the **long** axis, i.e.
:math:`\zeta_{\mathrm{cut}} = \left(\frac{r-\sigma_{\mathrm{max}}
+ \sigma_{\mathrm{min}}}{\sigma_{\mathrm{min}}}\right)`
where :math:`\sigma_{\mathrm{max}} = 2 \max(\ell_\perp, \ell_\parallel)` .
The quantities :math:`\ell_\parallel` and :math:`\ell_\perp` denote the
semi-axis lengths parallel and perpendicular to particle orientation.
Use the ``params`` dictionary to set potential coefficients. The coefficients
must be set per unique pair of particle types.
.. py:attribute:: params
The Gay-Berne potential parameters. The dictionary has the following
keys:
* ``epsilon`` (`float`, **required**) - :math:`\varepsilon`
:math:`[\mathrm{energy}]`
* ``lperp`` (`float`, **required**) - :math:`\ell_\perp`
:math:`[\mathrm{length}]`
* ``lpar`` (`float`, **required**) - :math:`\ell_\parallel`
:math:`[\mathrm{length}]`
Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``],
`dict`]
Example::
nl = nlist.Cell()
gay_berne = md.pair.GayBerne(nlist=nl, default_r_cut=2.5)
gay_berne.params[('A', 'A')] = dict(epsilon=1.0, lperp=0.45, lpar=0.5)
gay_berne.r_cut[('A', 'B')] = 2 ** (1.0 / 6.0)
"""
_cpp_class_name = "AnisoPotentialPairGB"
def __init__(self, nlist, default_r_cut=None, mode='none'):
super().__init__(nlist, default_r_cut, mode)
params = TypeParameter(
'params', 'particle_types',
TypeParameterDict(epsilon=float,
lperp=float,
lpar=float,
len_keys=2))
self._add_typeparam(params)
@log(category="object")
def type_shapes(self):
"""Get all the types of shapes in the current simulation.
Example:
>>> gay_berne.type_shapes
[{'type': 'Ellipsoid', 'a': 1.0, 'b': 1.0, 'c': 1.5}]
Returns:
A list of dictionaries, one for each particle type in the system.
"""
return super()._return_type_shapes()
|
joaander/hoomd-blue
|
hoomd/md/pair/aniso.py
|
Python
|
bsd-3-clause
| 9,444
|
[
"HOOMD-blue"
] |
cbb6c6b5bdab59a9c9977b0c4e971df08faf40c935f1bfd9acefb5e7e209f4bb
|
import json
import boto3
from botocore.exceptions import ClientError
import pytest
import sure # pylint: disable=unused-import
from moto import mock_iam
invalid_policy_document_test_cases = [
{
"document": "This is not a json document",
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
}
},
"error_message": "Policy document must be version 2012-10-17 or greater.",
},
{
"document": {
"Version": "2008-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Policy document must be version 2012-10-17 or greater.",
},
{
"document": {
"Version": "2013-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {"Version": "2012-10-17"},
"error_message": "Syntax errors in policy.",
},
{
"document": {"Version": "2012-10-17", "Statement": ["afd"]},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
"Extra field": "value",
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Extra field": "value",
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Id": ["cd3a324d2343d942772346-34234234423404-4c2242343242349d1642ee"],
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Id": {},
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "invalid",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "invalid",
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Actions/Conditions must be prefaced by a vendor, e.g., iam, sdb, ec2, etc.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"NotAction": "",
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Actions/Conditions must be prefaced by a vendor, e.g., iam, sdb, ec2, etc.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "a a:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Vendor a a is not valid",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:List:Bucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Actions/Condition can contain only one colon.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3s:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
{
"Effect": "Allow",
"Action": "s:3s:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
],
},
"error_message": "Actions/Condition can contain only one colon.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "invalid resource",
},
},
"error_message": 'Resource invalid resource must be in ARN format or "*".',
},
{
"document": {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "EnableDisableHongKong",
"Effect": "Allow",
"Action": ["account:EnableRegion", "account:DisableRegion"],
"Resource": "",
"Condition": {
"StringEquals": {"account:TargetRegion": "ap-east-1"}
},
},
{
"Sid": "ViewConsole",
"Effect": "Allow",
"Action": ["aws-portal:ViewAccount", "account:ListRegions"],
"Resource": "",
},
],
},
"error_message": 'Resource must be in ARN format or "*".',
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s:3:ListBucket",
"Resource": "sdfsadf",
},
},
"error_message": 'Resource sdfsadf must be in ARN format or "*".',
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": ["adf"],
},
},
"error_message": 'Resource adf must be in ARN format or "*".',
},
{
"document": {
"Version": "2012-10-17",
"Statement": {"Effect": "Allow", "Action": "s3:ListBucket", "Resource": ""},
},
"error_message": 'Resource must be in ARN format or "*".',
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"NotAction": "s3s:ListBucket",
"Resource": "a:bsdfdsafsad",
},
},
"error_message": 'Partition "bsdfdsafsad" is not valid for resource "arn:bsdfdsafsad:*:*:*:*".',
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"NotAction": "s3s:ListBucket",
"Resource": "a:b:cadfsdf",
},
},
"error_message": 'Partition "b" is not valid for resource "arn:b:cadfsdf:*:*:*".',
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"NotAction": "s3s:ListBucket",
"Resource": "a:b:c:d:e:f:g:h",
},
},
"error_message": 'Partition "b" is not valid for resource "arn:b:c:d:e:f:g:h".',
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "aws:s3:::example_bucket",
},
},
"error_message": 'Partition "s3" is not valid for resource "arn:s3:::example_bucket:*".',
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": [
"arn:error:s3:::example_bucket",
"arn:error:s3::example_bucket",
],
},
},
"error_message": 'Partition "error" is not valid for resource "arn:error:s3:::example_bucket".',
},
{
"document": {"Version": "2012-10-17", "Statement": []},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {"Effect": "Allow", "Action": "s3:ListBucket"},
},
"error_message": "Policy statement must contain resources.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {"Effect": "Allow", "Action": "s3:ListBucket", "Resource": []},
},
"error_message": "Policy statement must contain resources.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {"Effect": "Allow", "Action": "invalid"},
},
"error_message": "Policy statement must contain resources.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {"Effect": "Allow", "Resource": "arn:aws:s3:::example_bucket"},
},
"error_message": "Policy statement must contain actions.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {"Version": "2012-10-17", "Statement": {"Effect": "Allow"}},
"error_message": "Policy statement must contain actions.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": [],
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Policy statement must contain actions.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Deny"},
{
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
],
},
"error_message": "Policy statement must contain actions.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:iam:::example_bucket",
},
},
"error_message": 'IAM resource path must either be "*" or start with user/, federated-user/, role/, group/, instance-profile/, mfa/, server-certificate/, policy/, sms-mfa/, saml-provider/, oidc-provider/, report/, access-report/.',
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3::example_bucket",
},
},
"error_message": "The policy failed legacy parsing",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {"Effect": "Allow", "Resource": "arn:aws:s3::example_bucket"},
},
"error_message": "The policy failed legacy parsing",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws",
},
},
"error_message": "Resource vendor must be fully qualified and cannot contain regexes.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": {"a": "arn:aws:s3:::example_bucket"},
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Deny",
"Action": "s3:ListBucket",
"Resource": ["adfdf", {}],
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"NotAction": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"NotResource": [],
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Deny",
"Action": [[]],
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"NotAction": "s3s:ListBucket",
"Action": [],
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": {},
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": [],
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": "a",
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"a": "b"},
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"DateGreaterThan": "b"},
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"DateGreaterThan": []},
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"DateGreaterThan": {"a": {}}},
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"DateGreaterThan": {"a": {}}},
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"x": {"a": "1"}},
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"ForAnyValue::StringEqualsIfExists": {"a": "asf"}},
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": [
{"ForAllValues:StringEquals": {"aws:TagKeys": "Department"}}
],
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:iam:us-east-1::example_bucket",
},
},
"error_message": "IAM resource arn:aws:iam:us-east-1::example_bucket cannot contain region information.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:us-east-1::example_bucket",
},
},
"error_message": "Resource arn:aws:s3:us-east-1::example_bucket can not contain region information.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Sid": {},
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Sid": [],
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "sdf",
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
{"Sid": "sdf", "Effect": "Allow"},
],
},
"error_message": "Statement IDs (SID) in a single policy must be unique.",
},
{
"document": {
"Statement": [
{
"Sid": "sdf",
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
{"Sid": "sdf", "Effect": "Allow"},
]
},
"error_message": "Policy document must be version 2012-10-17 or greater.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"NotAction": "s3:ListBucket",
"Action": "iam:dsf",
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"NotResource": "*",
},
},
"error_message": "Syntax errors in policy.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "denY",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
"error_message": "The policy failed legacy parsing",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"DateGreaterThan": {"a": "sdfdsf"}},
},
},
"error_message": "The policy failed legacy parsing",
},
{
"document": {
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"DateGreaterThan": {"a": "sdfdsf"}},
}
},
"error_message": "Policy document must be version 2012-10-17 or greater.",
},
{
"document": {
"Statement": {
"Effect": "denY",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
}
},
"error_message": "Policy document must be version 2012-10-17 or greater.",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Condition": {"DateGreaterThan": {"a": "sdfdsf"}},
},
},
"error_message": "The policy failed legacy parsing",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"NotAction": "s3:ListBucket",
"Resource": "arn:aws::::example_bucket",
},
},
"error_message": "The policy failed legacy parsing",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "allow",
"Resource": "arn:aws:s3:us-east-1::example_bucket",
},
},
"error_message": "The policy failed legacy parsing",
},
{
"document": {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "sdf",
"Effect": "aLLow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
{"Sid": "sdf", "Effect": "Allow"},
],
},
"error_message": "The policy failed legacy parsing",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"NotResource": "arn:aws:s3::example_bucket",
},
},
"error_message": "The policy failed legacy parsing",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"DateLessThanEquals": {"a": "234-13"}},
},
},
"error_message": "The policy failed legacy parsing",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {
"DateLessThanEquals": {"a": "2016-12-13t2:00:00.593194+1"}
},
},
},
"error_message": "The policy failed legacy parsing",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {
"DateLessThanEquals": {"a": "2016-12-13t2:00:00.1999999999+10:59"}
},
},
},
"error_message": "The policy failed legacy parsing",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"DateLessThan": {"a": "9223372036854775808"}},
},
},
"error_message": "The policy failed legacy parsing",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:error:s3:::example_bucket",
"Condition": {"DateGreaterThan": {"a": "sdfdsf"}},
},
},
"error_message": "The policy failed legacy parsing",
},
{
"document": {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws::fdsasf",
},
},
"error_message": "The policy failed legacy parsing",
},
]
valid_policy_documents = [
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": ["arn:aws:s3:::example_bucket"],
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "iam: asdf safdsf af ",
"Resource": "arn:aws:s3:::example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": ["arn:aws:s3:::example_bucket", "*"],
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "*",
"Resource": "arn:aws:s3:::example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
}
],
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "service-prefix:action-name",
"Resource": "*",
"Condition": {
"DateGreaterThan": {"aws:CurrentTime": "2017-07-01T00:00:00Z"},
"DateLessThan": {"aws:CurrentTime": "2017-12-31T23:59:59Z"},
},
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "fsx:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:iam:::user/example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s33:::example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:fdsasf",
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {},
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"ForAllValues:StringEquals": {"aws:TagKeys": "Department"}},
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:cloudwatch:us-east-1::example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:ec2:us-east-1::example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:invalid-service:::example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:invalid-service:us-east-1::example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {
"DateGreaterThan": {"aws:CurrentTime": "2017-07-01T00:00:00Z"},
"DateLessThan": {"aws:CurrentTime": "2017-12-31T23:59:59Z"},
},
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"DateGreaterThan": {}},
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"DateGreaterThan": {"a": []}},
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"a": {}},
},
},
{
"Version": "2012-10-17",
"Statement": {
"Sid": "dsfsdfsdfsdfsdfsadfsd",
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "ConsoleDisplay",
"Effect": "Allow",
"Action": [
"iam:GetRole",
"iam:GetUser",
"iam:ListRoles",
"iam:ListRoleTags",
"iam:ListUsers",
"iam:ListUserTags",
],
"Resource": "*",
},
{
"Sid": "AddTag",
"Effect": "Allow",
"Action": ["iam:TagUser", "iam:TagRole"],
"Resource": "*",
"Condition": {
"StringEquals": {"aws:RequestTag/CostCenter": ["A-123", "B-456"]},
"ForAllValues:StringEquals": {"aws:TagKeys": "CostCenter"},
},
},
],
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"NotAction": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Deny",
"Action": "s3:*",
"NotResource": [
"arn:aws:s3:::HRBucket/Payroll",
"arn:aws:s3:::HRBucket/Payroll/*",
],
},
},
{
"Version": "2012-10-17",
"Id": "sdfsdfsdf",
"Statement": {
"Effect": "Allow",
"NotAction": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "aaaaaadsfdsafsadfsadfaaaaa:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3-s:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3.s:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"NotAction": "s3:ListBucket",
"NotResource": "*",
},
},
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "sdf",
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
{
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
},
],
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"DateGreaterThan": {"a": "01T"}},
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"x": {}, "y": {}},
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"StringEqualsIfExists": {"a": "asf"}},
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"ForAnyValue:StringEqualsIfExists": {"a": "asf"}},
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"DateLessThanEquals": {"a": "2019-07-01T13:20:15Z"}},
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {
"DateLessThanEquals": {"a": "2016-12-13T21:20:37.593194+00:00"}
},
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"DateLessThanEquals": {"a": "2016-12-13t2:00:00.593194+23"}},
},
},
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::example_bucket",
"Condition": {"DateLessThan": {"a": "-292275054"}},
},
},
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowViewAccountInfo",
"Effect": "Allow",
"Action": [
"iam:GetAccountPasswordPolicy",
"iam:GetAccountSummary",
"iam:ListVirtualMFADevices",
],
"Resource": "*",
},
{
"Sid": "AllowManageOwnPasswords",
"Effect": "Allow",
"Action": ["iam:ChangePassword", "iam:GetUser"],
"Resource": "arn:aws:iam::*:user/${aws:username}",
},
{
"Sid": "AllowManageOwnAccessKeys",
"Effect": "Allow",
"Action": [
"iam:CreateAccessKey",
"iam:DeleteAccessKey",
"iam:ListAccessKeys",
"iam:UpdateAccessKey",
],
"Resource": "arn:aws:iam::*:user/${aws:username}",
},
{
"Sid": "AllowManageOwnSigningCertificates",
"Effect": "Allow",
"Action": [
"iam:DeleteSigningCertificate",
"iam:ListSigningCertificates",
"iam:UpdateSigningCertificate",
"iam:UploadSigningCertificate",
],
"Resource": "arn:aws:iam::*:user/${aws:username}",
},
{
"Sid": "AllowManageOwnSSHPublicKeys",
"Effect": "Allow",
"Action": [
"iam:DeleteSSHPublicKey",
"iam:GetSSHPublicKey",
"iam:ListSSHPublicKeys",
"iam:UpdateSSHPublicKey",
"iam:UploadSSHPublicKey",
],
"Resource": "arn:aws:iam::*:user/${aws:username}",
},
{
"Sid": "AllowManageOwnGitCredentials",
"Effect": "Allow",
"Action": [
"iam:CreateServiceSpecificCredential",
"iam:DeleteServiceSpecificCredential",
"iam:ListServiceSpecificCredentials",
"iam:ResetServiceSpecificCredential",
"iam:UpdateServiceSpecificCredential",
],
"Resource": "arn:aws:iam::*:user/${aws:username}",
},
{
"Sid": "AllowManageOwnVirtualMFADevice",
"Effect": "Allow",
"Action": ["iam:CreateVirtualMFADevice", "iam:DeleteVirtualMFADevice"],
"Resource": "arn:aws:iam::*:mfa/${aws:username}",
},
{
"Sid": "AllowManageOwnUserMFA",
"Effect": "Allow",
"Action": [
"iam:DeactivateMFADevice",
"iam:EnableMFADevice",
"iam:ListMFADevices",
"iam:ResyncMFADevice",
],
"Resource": "arn:aws:iam::*:user/${aws:username}",
},
{
"Sid": "DenyAllExceptListedIfNoMFA",
"Effect": "Deny",
"NotAction": [
"iam:CreateVirtualMFADevice",
"iam:EnableMFADevice",
"iam:GetUser",
"iam:ListMFADevices",
"iam:ListVirtualMFADevices",
"iam:ResyncMFADevice",
"sts:GetSessionToken",
],
"Resource": "*",
"Condition": {"BoolIfExists": {"aws:MultiFactorAuthPresent": "false"}},
},
],
},
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "ListAndDescribe",
"Effect": "Allow",
"Action": [
"dynamodb:List*",
"dynamodb:DescribeReservedCapacity*",
"dynamodb:DescribeLimits",
"dynamodb:DescribeTimeToLive",
],
"Resource": "*",
},
{
"Sid": "SpecificTable",
"Effect": "Allow",
"Action": [
"dynamodb:BatchGet*",
"dynamodb:DescribeStream",
"dynamodb:DescribeTable",
"dynamodb:Get*",
"dynamodb:Query",
"dynamodb:Scan",
"dynamodb:BatchWrite*",
"dynamodb:CreateTable",
"dynamodb:Delete*",
"dynamodb:Update*",
"dynamodb:PutItem",
],
"Resource": "arn:aws:dynamodb:*:*:table/MyTable",
},
],
},
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["ec2:AttachVolume", "ec2:DetachVolume"],
"Resource": ["arn:aws:ec2:*:*:volume/*", "arn:aws:ec2:*:*:instance/*"],
"Condition": {
"ArnEquals": {
"ec2:SourceInstanceARN": "arn:aws:ec2:*:*:instance/instance-id"
}
},
}
],
},
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["ec2:AttachVolume", "ec2:DetachVolume"],
"Resource": "arn:aws:ec2:*:*:instance/*",
"Condition": {
"StringEquals": {"ec2:ResourceTag/Department": "Development"}
},
},
{
"Effect": "Allow",
"Action": ["ec2:AttachVolume", "ec2:DetachVolume"],
"Resource": "arn:aws:ec2:*:*:volume/*",
"Condition": {
"StringEquals": {"ec2:ResourceTag/VolumeUser": "${aws:username}"}
},
},
],
},
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "StartStopIfTags",
"Effect": "Allow",
"Action": [
"ec2:StartInstances",
"ec2:StopInstances",
"ec2:DescribeTags",
],
"Resource": "arn:aws:ec2:region:account-id:instance/*",
"Condition": {
"StringEquals": {
"ec2:ResourceTag/Project": "DataAnalytics",
"aws:PrincipalTag/Department": "Data",
}
},
}
],
},
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "ListYourObjects",
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": ["arn:aws:s3:::bucket-name"],
"Condition": {
"StringLike": {
"s3:prefix": [
"cognito/application-name/${cognito-identity.amazonaws.com:sub}"
]
}
},
},
{
"Sid": "ReadWriteDeleteYourObjects",
"Effect": "Allow",
"Action": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
"Resource": [
"arn:aws:s3:::bucket-name/cognito/application-name/${cognito-identity.amazonaws.com:sub}",
"arn:aws:s3:::bucket-name/cognito/application-name/${cognito-identity.amazonaws.com:sub}/*",
],
},
],
},
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["s3:ListAllMyBuckets", "s3:GetBucketLocation"],
"Resource": "*",
},
{
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::bucket-name",
"Condition": {
"StringLike": {"s3:prefix": ["", "home/", "home/${aws:userid}/*"]}
},
},
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::bucket-name/home/${aws:userid}",
"arn:aws:s3:::bucket-name/home/${aws:userid}/*",
],
},
],
},
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "ConsoleAccess",
"Effect": "Allow",
"Action": [
"s3:GetAccountPublicAccessBlock",
"s3:GetBucketAcl",
"s3:GetBucketLocation",
"s3:GetBucketPolicyStatus",
"s3:GetBucketPublicAccessBlock",
"s3:ListAllMyBuckets",
],
"Resource": "*",
},
{
"Sid": "ListObjectsInBucket",
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": ["arn:aws:s3:::bucket-name"],
},
{
"Sid": "AllObjectActions",
"Effect": "Allow",
"Action": "s3:*Object",
"Resource": ["arn:aws:s3:::bucket-name/*"],
},
],
},
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowViewAccountInfo",
"Effect": "Allow",
"Action": ["iam:GetAccountPasswordPolicy", "iam:GetAccountSummary"],
"Resource": "*",
},
{
"Sid": "AllowManageOwnPasswords",
"Effect": "Allow",
"Action": ["iam:ChangePassword", "iam:GetUser"],
"Resource": "arn:aws:iam::*:user/${aws:username}",
},
{
"Sid": "AllowManageOwnAccessKeys",
"Effect": "Allow",
"Action": [
"iam:CreateAccessKey",
"iam:DeleteAccessKey",
"iam:ListAccessKeys",
"iam:UpdateAccessKey",
],
"Resource": "arn:aws:iam::*:user/${aws:username}",
},
{
"Sid": "AllowManageOwnSigningCertificates",
"Effect": "Allow",
"Action": [
"iam:DeleteSigningCertificate",
"iam:ListSigningCertificates",
"iam:UpdateSigningCertificate",
"iam:UploadSigningCertificate",
],
"Resource": "arn:aws:iam::*:user/${aws:username}",
},
{
"Sid": "AllowManageOwnSSHPublicKeys",
"Effect": "Allow",
"Action": [
"iam:DeleteSSHPublicKey",
"iam:GetSSHPublicKey",
"iam:ListSSHPublicKeys",
"iam:UpdateSSHPublicKey",
"iam:UploadSSHPublicKey",
],
"Resource": "arn:aws:iam::*:user/${aws:username}",
},
{
"Sid": "AllowManageOwnGitCredentials",
"Effect": "Allow",
"Action": [
"iam:CreateServiceSpecificCredential",
"iam:DeleteServiceSpecificCredential",
"iam:ListServiceSpecificCredentials",
"iam:ResetServiceSpecificCredential",
"iam:UpdateServiceSpecificCredential",
],
"Resource": "arn:aws:iam::*:user/${aws:username}",
},
],
},
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "ec2:*",
"Resource": "*",
"Effect": "Allow",
"Condition": {"StringEquals": {"ec2:Region": "region"}},
}
],
},
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "rds:*",
"Resource": ["arn:aws:rds:region:*:*"],
},
{"Effect": "Allow", "Action": ["rds:Describe*"], "Resource": ["*"]},
],
},
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Action": "rds:*",
"Resource": ["arn:aws:rds:region:*:*"],
},
{
"Sid": "",
"Effect": "Allow",
"Action": ["rds:Describe*"],
"Resource": ["*"],
},
],
},
]
@pytest.mark.parametrize("invalid_policy_document", invalid_policy_document_test_cases)
@mock_iam
def test_create_policy_with_invalid_policy_document(invalid_policy_document):
conn = boto3.client("iam", region_name="us-east-1")
with pytest.raises(ClientError) as ex:
conn.create_policy(
PolicyName="TestCreatePolicy",
PolicyDocument=json.dumps(invalid_policy_document["document"]),
)
ex.value.response["Error"]["Code"].should.equal("MalformedPolicyDocument")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.value.response["Error"]["Message"].should.equal(
invalid_policy_document["error_message"]
)
@pytest.mark.parametrize("valid_policy_document", valid_policy_documents)
@mock_iam
def test_create_policy_with_valid_policy_document(valid_policy_document):
conn = boto3.client("iam", region_name="us-east-1")
conn.create_policy(
PolicyName="TestCreatePolicy", PolicyDocument=json.dumps(valid_policy_document)
)
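# --- Illustrative usage (editor's sketch, not part of the test suite) ---
# A single document can also be checked outside the parametrized tests, using
# the same moto mock; the policy name below is an arbitrary placeholder.
#
#     with mock_iam():
#         client = boto3.client("iam", region_name="us-east-1")
#         client.create_policy(
#             PolicyName="AdHocCheck",
#             PolicyDocument=json.dumps(valid_policy_documents[0]),
#         )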
|
spulec/moto
|
tests/test_iam/test_iam_policies.py
|
Python
|
apache-2.0
| 51,224
|
[
"ADF"
] |
1454b7287f7c39fd737d087489c9c9a3cf48a487d72e48cb28c8f5974d8ea2bb
|
import simtk.unit as u
from simtk.openmm import app
import simtk.openmm as mm
import numpy as np
from mdtraj.testing import eq
from unittest import skipIf
from openmoltools import utils
import os
import openmoltools.openeye
import pandas as pd
import mdtraj as md
from mdtraj.testing import raises
smiles_fails_with_strictStereo = "CN1CCN(CC1)CCCOc2cc3c(cc2OC)C(=[NH+]c4cc(c(cc4Cl)Cl)OC)C(=C=[N-])C=[NH+]3"
try:
oechem = utils.import_("openeye.oechem")
if not oechem.OEChemIsLicensed(): raise(ImportError("Need License for OEChem!"))
oequacpac = utils.import_("openeye.oequacpac")
if not oequacpac.OEQuacPacIsLicensed(): raise(ImportError("Need License for oequacpac!"))
oeiupac = utils.import_("openeye.oeiupac")
if not oeiupac.OEIUPACIsLicensed(): raise(ImportError("Need License for OEIupac!"))
oeomega = utils.import_("openeye.oeomega")
if not oeomega.OEOmegaIsLicensed(): raise(ImportError("Need License for OEOmega!"))
HAVE_OE = True
except:
HAVE_OE = False
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_butanol_keepconfs():
m0 = openmoltools.openeye.iupac_to_oemol("butanol")
m1 = openmoltools.openeye.get_charges(m0, keep_confs=1)
eq(m0.NumAtoms(), m1.NumAtoms())
assert m1.NumConfs() == 1, "This OEMol was created to have a single conformation."
assert m1.NumAtoms() == 15, "Butanol should have 15 atoms"
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_butanol():
m0 = openmoltools.openeye.iupac_to_oemol("butanol")
m1 = openmoltools.openeye.get_charges(m0)
eq(m0.NumAtoms(), m1.NumAtoms())
assert m1.NumConfs() >= 2, "Butanol should have multiple conformers."
assert m1.NumAtoms() == 15, "Butanol should have 15 atoms"
all_data = {}
for k, molecule in enumerate(m1.GetConfs()):
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(molecule)
all_data[k] = names_to_charges
eq(sum(names_to_charges.values()), 0.0, decimal=7) # Net charge should be zero
# Build a table of charges indexed by conformer number and atom name
all_data = pd.DataFrame(all_data)
# The standard deviation along the conformer axis should be zero if all conformers have same charges
eq(all_data.std(1).values, np.zeros(m1.NumAtoms()), decimal=7)
with utils.enter_temp_directory():
# Try saving to disk as mol2
openmoltools.openeye.molecule_to_mol2(m1, "out.mol2")
# Make sure MDTraj can read the output
t = md.load("out.mol2")
# Make sure MDTraj can read the charges / topology info
atoms, bonds = md.formats.mol2.mol2_to_dataframes("out.mol2")
# Finally, make sure MDTraj and OpenEye report the same charges.
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
q = atoms.set_index("name").charge
q0 = pd.Series(names_to_charges)
delta = q - q0  # Difference between MDTraj and OpenEye charges, indexed by atom name
eq(delta.values, np.zeros_like(delta.values), decimal=4)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_benzene():
m0 = openmoltools.openeye.iupac_to_oemol("benzene")
m1 = openmoltools.openeye.get_charges(m0)
eq(m0.NumAtoms(), m1.NumAtoms())
print(m1.NumConfs())
assert m1.NumConfs() == 1, "Benzene should have 1 conformer"
assert m1.NumAtoms() == 12, "Benzene should have 12 atoms"
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
eq(sum(names_to_charges.values()), 0.0, decimal=7) # Net charge should be zero
with utils.enter_temp_directory():
# Try saving to disk as mol2
openmoltools.openeye.molecule_to_mol2(m1, "out.mol2")
# Make sure MDTraj can read the output
t = md.load("out.mol2")
# Make sure MDTraj can read the charges / topology info
atoms, bonds = md.formats.mol2.mol2_to_dataframes("out.mol2")
# Finally, make sure MDTraj and OpenEye report the same charges.
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
q = atoms.set_index("name").charge
q0 = pd.Series(names_to_charges)
delta = q - q0  # Difference between MDTraj and OpenEye charges, indexed by atom name
eq(delta.values, np.zeros_like(delta.values), decimal=4)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_link_in_utils():
m0 = openmoltools.openeye.iupac_to_oemol("benzene")
m1 = openmoltools.openeye.get_charges(m0)
with utils.enter_temp_directory():
# This function was moved from utils to openeye, so check that the old link still works.
utils.molecule_to_mol2(m1, "out.mol2")
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_smiles():
m0 = openmoltools.openeye.smiles_to_oemol("CCCCO")
charged0 = openmoltools.openeye.get_charges(m0)
m1 = openmoltools.openeye.iupac_to_oemol("butanol")
charged1 = openmoltools.openeye.get_charges(m1)
eq(charged0.NumAtoms(), charged1.NumAtoms())
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_ffxml():
with utils.enter_temp_directory():
m0 = openmoltools.openeye.smiles_to_oemol("CCCCO")
charged0 = openmoltools.openeye.get_charges(m0)
m1 = openmoltools.openeye.smiles_to_oemol("ClC(Cl)(Cl)Cl")
charged1 = openmoltools.openeye.get_charges(m1)
trajectories, ffxml = openmoltools.openeye.oemols_to_ffxml([charged0, charged1])
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_ffxml_simulation():
"""Test converting toluene and benzene smiles to oemol to ffxml to openmm simulation."""
with utils.enter_temp_directory():
m0 = openmoltools.openeye.smiles_to_oemol("Cc1ccccc1")
charged0 = openmoltools.openeye.get_charges(m0)
m1 = openmoltools.openeye.smiles_to_oemol("c1ccccc1")
charged1 = openmoltools.openeye.get_charges(m1)
ligands = [charged0, charged1]
n_atoms = [15,12]
trajectories, ffxml = openmoltools.openeye.oemols_to_ffxml(ligands)
eq(len(trajectories),len(ligands))
pdb_filename = utils.get_data_filename("chemicals/proteins/1vii.pdb")
temperature = 300 * u.kelvin
friction = 0.3 / u.picosecond
timestep = 0.01 * u.femtosecond
protein_traj = md.load(pdb_filename)
protein_traj.center_coordinates()
protein_top = protein_traj.top.to_openmm()
protein_xyz = protein_traj.openmm_positions(0)
for k, ligand in enumerate(ligands):
ligand_traj = trajectories[k]
ligand_traj.center_coordinates()
eq(ligand_traj.n_atoms, n_atoms[k])
eq(ligand_traj.n_frames, 1)
#Move the pre-centered ligand sufficiently far away from the protein to avoid a clash.
min_atom_pair_distance = ((ligand_traj.xyz[0] ** 2.).sum(1) ** 0.5).max() + ((protein_traj.xyz[0] ** 2.).sum(1) ** 0.5).max() + 0.3
ligand_traj.xyz += np.array([1.0, 0.0, 0.0]) * min_atom_pair_distance
ligand_xyz = ligand_traj.openmm_positions(0)
ligand_top = ligand_traj.top.to_openmm()
ffxml.seek(0)
forcefield = app.ForceField("amber10.xml", ffxml, "tip3p.xml")
model = app.modeller.Modeller(protein_top, protein_xyz)
model.add(ligand_top, ligand_xyz)
model.addSolvent(forcefield, padding=0.4 * u.nanometer)
system = forcefield.createSystem(model.topology, nonbondedMethod=app.PME, nonbondedCutoff=1.0 * u.nanometers, constraints=app.HAngles)
integrator = mm.LangevinIntegrator(temperature, friction, timestep)
simulation = app.Simulation(model.topology, system, integrator)
simulation.context.setPositions(model.positions)
print("running")
simulation.step(1)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
@raises(RuntimeError)
def test_charge_fail1():
with utils.enter_temp_directory():
openmoltools.openeye.smiles_to_antechamber(smiles_fails_with_strictStereo, "test.mol2", "test.frcmod", strictStereo=True)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
@raises(RuntimeError)
def test_charge_fail2():
m = openmoltools.openeye.smiles_to_oemol(smiles_fails_with_strictStereo)
m = openmoltools.openeye.get_charges(m, strictStereo=True, keep_confs=1)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_charge_success1():
with utils.enter_temp_directory():
openmoltools.openeye.smiles_to_antechamber(smiles_fails_with_strictStereo, "test.mol2", "test.frcmod", strictStereo=False)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_charge_success2():
m = openmoltools.openeye.smiles_to_oemol(smiles_fails_with_strictStereo)
m = openmoltools.openeye.get_charges(m, strictStereo=False)
|
kyleabeauchamp/openmoltools
|
openmoltools/tests/test_openeye.py
|
Python
|
gpl-2.0
| 9,177
|
[
"MDTraj",
"OpenMM"
] |
28b2887bf33572a2c7b4eff1ad854d1f3ca9b9f3208fb1e9a8ddc62c8bfc1059
|
# Orca
#
# Copyright 2006-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for planner."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2006-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
import orca.braille_generator as braille_generator
from orca.orca_i18n import _ # for gettext support
class BrailleGenerator(braille_generator.BrailleGenerator):
"""We make this to appropiately present ribbon's toggle button in
a toolbar used to display in a menu those options that doesn't
fill in toolbar when the application is resized. Also for each one
of the grphics buttons in the main window."""
def __init__(self, script):
braille_generator.BrailleGenerator.__init__(self, script)
def _generateDisplayedText(self, obj, **args ):
"""Returns an array of strings for use by braille that represents all
the text being displayed by the object. [[[WDW - consider
returning an empty array if this is not a text object.]]]
"""
result = []
# This is the black triangle at the far right of the toolbar.
#
handleRibbonButton = \
obj and not obj.name \
and obj.getRole() == pyatspi.ROLE_TOGGLE_BUTTON \
and obj.parent.getRole() == pyatspi.ROLE_TOOL_BAR
# This is one of the Gantt, Tasks, Resources, etc., buttons on the
# left hand side of the main window.
#
handleTabButton = \
obj and not obj.name \
and obj.getRole() == pyatspi.ROLE_TOGGLE_BUTTON \
and obj.parent.getRole() == pyatspi.ROLE_FILLER \
and len(obj.parent) == 2
if handleRibbonButton:
result.append(_("Display more options"))
elif handleTabButton:
result.append(self._script.utilities.displayedText(obj.parent[1]))
else:
result.extend(
braille_generator.BrailleGenerator._generateDisplayedText(
self, obj, **args))
return result
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/scripts/apps/planner/braille_generator.py
|
Python
|
gpl-3.0
| 2,812
|
[
"ORCA"
] |
11fea9a82cd46cba7b09684557578a9206794249109b3fa3d54678a0a34dacaf
|
# Open EPW and STAT weather files together
#
# Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Ladybug.
#
# Copyright (c) 2013-2015, Chris Mackey <Chris@MackeyArchitecture.com>
# Ladybug is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Ladybug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ladybug; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to automatically download a .zip file from the Department of Energy's (DOE) database, unzip the file, and open both the .epw and .stat weather files into Grasshopper.
The component requires the URL of the zipped file for the specific climate that you want to import from the DOE's website. To open the DOE's website, use the Ladybug_download EPW Weather File component.
Note that you can copy the zip file URL to your clipboard by right-clicking on the "ZIP" link for the climate that you want on the DOE's website and choosing "Copy Link Address."
-
Provided by Ladybug 0.0.60
Args:
_weatherFileURL: A text string representing the .zip file URL from the Department of Energy's (DOE's) website. To open the DOE's website, use the Ladybug_download EPW Weather File component. Note that you can copy the zip file URL to your clipboard by right-clicking on the "ZIP" link for the climate that you want on the DOE's website and choosing "Copy Link Address."
workingDir_: An optional text string representing a file path to a working directory on your computer where you would like to download and unzip the file. If nothing is set, the weather files will be downloaded to C:/ladybug/ and placed in a folder with the name of the weather file location.
Returns:
epwFile: The file path of the downloaded epw file.
statFile: The file path of the downloaded stat file.
"""
ghenv.Component.Name = "Ladybug_Open EPW And STAT Weather Files"
ghenv.Component.NickName = 'Open EPW + STAT'
ghenv.Component.Message = 'VER 0.0.60\nSEP_11_2015'
ghenv.Component.Category = "Ladybug"
ghenv.Component.SubCategory = "0 | Ladybug"
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "2"
except: pass
import scriptcontext as sc
import urllib
import os
import zipfile,os.path
from clr import AddReference
AddReference('Grasshopper')
import Grasshopper.Kernel as gh
import time
doc = gh.GH_Document()
def checkTheInputs():
# import the classes
if sc.sticky.has_key('ladybug_release'):
try:
if not sc.sticky['ladybug_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Ladybug to use this compoent." + \
"Use updateLadybug component to update userObjects.\n" + \
"If you have already updated userObjects drag Ladybug_Ladybug component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
lb_defaultFolder = sc.sticky["Ladybug_DefaultFolder"]
#Check the inputs to make sure that a valid DOE URL has been connected.
if _weatherFileURL and _weatherFileURL.startswith('http://apps1.eere.energy.gov/buildings/energyplus/weatherdata/') and _weatherFileURL.endswith('.zip') and _weatherFileURL != 'http://apps1.eere.energy.gov/buildings/energyplus/weatherdata/Example.zip':
folderName = _weatherFileURL.split('/')[-1].split('.')[0]
checkData = True
elif _weatherFileURL == 'http://apps1.eere.energy.gov/buildings/energyplus/weatherdata/Example.zip':
checkData = False
else:
checkData = False
warning = "_weatherFileURL is not a valid web address to a DOE weather file. "
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
#If no working directory is specified, default to C:\ladybug.
if workingDir_ != None and checkData == True:
workingDir = workingDir_
elif workingDir_ == None and checkData == True:
workingDir = lb_defaultFolder + folderName + '\\'
else:
workingDir = None
return checkData, workingDir
else:
print "You should first let the Ladybug fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let the Ladybug fly...")
return False, None
def download(url, workingDir):
try:
if not os.path.isdir(workingDir):
os.mkdir(workingDir)
webFile = urllib.urlopen(url)
if webFile != None:
localFile = open(workingDir + '/' + url.split('/')[-1], 'wb')
localFile.write(webFile.read())
webFile.close()
localFile.close()
Address = workingDir + url.split('/')[-1]
return Address
else:
return None
except:
warning = 'You are not connected to the internet and you do not have the weather files already on your computer. You must be connected to the internet to download the files with this component.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
return None
def unzip(source_filename, dest_dir):
with zipfile.ZipFile(source_filename) as zf:
for member in zf.infolist():
# Path traversal defense copied from
# http://hg.python.org/cpython/file/tip/Lib/http/server.py#l789
words = member.filename.split('\\')
path = dest_dir
for word in words[:-1]:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir, ''): continue
path = os.path.join(path, word)
zf.extract(member, path)
def addresses(filename, directory):
filenamewords = filename.split('.zip')[-2]
epw = filenamewords + '.epw'
stat = filenamewords + '.stat'
return epw, stat
def checkIfAlreadyDownloaded(workingDir, url):
zipFileAddress = Address = workingDir + url.split('/')[-1]
epw, stat = addresses(zipFileAddress, workingDir)
if os.path.isfile(epw) == True and os.path.isfile(stat) == True:
return True, epw, stat
else:
return False, None, None
checkData = False
#Check the inputs to make sure that they are the correct syntax.
res = checkTheInputs()
if res!= -1:
checkData, workingDir = res
#Check to see if the file has already been downloaded to the working folder.
if checkData == True:
checkData2, epwFile, statFile = checkIfAlreadyDownloaded(workingDir, _weatherFileURL)
else: checkData2 = True
#Download the zip file to the directory.
if checkData == True and checkData2 == False:
zipFileAddress = download(_weatherFileURL, workingDir)
else: pass
#Unzip the file and load it into Grasshopper!!!!
if checkData == True and checkData2 == False and zipFileAddress:
unzip(zipFileAddress, workingDir)
epwFile, statFile = addresses(zipFileAddress, workingDir)
else: pass
|
samuto/ladybug
|
src/Ladybug_Open EPW And STAT Weather Files.py
|
Python
|
gpl-3.0
| 7,729
|
[
"EPW"
] |
16990b42a5d4b81a61e4dfa6de10daa5e45d6dd5d4cf6f292209f64944552dc0
|
# -*- coding: utf-8 -*-
#
# ConnPlotter.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices
"""
ConnPlotter is a tool to create connectivity pattern tables.
For background on ConnPlotter, please see
Eilen Nordlie and Hans Ekkehard Plesser.
Connection Pattern Tables: A new way to visualize connectivity
in neuronal network models.
Frontiers in Neuroinformatics 3:39 (2010)
doi: 10.3389/neuro.11.039.2009
Example:
# code creating population and connection lists
from ConnPlotter import ConnectionPattern, SynType
# Case A: All connections have the same "synapse_model".
#
# Connections with weight > 0 are classified as excitatory,
# weight < 0 are classified as inhibitory.
# Each sender must make either excitatory or inhibitory connection,
# not both. When computing totals, excit/inhib connections are
# weighted with +-1.
pattern = ConnectionPattern(layerList, connList)
# Case B: All connections have the same "synapse_model", but violate Dale's law
#
# Connections with weight > 0 are classified as excitatory,
# weight < 0 are classified as inhibitory.
# A single sender may have excitatory and inhibitory connections.
# When computing totals, excit/inhib connections are
# weighted with +-1.
pattern = ConnectionPattern(layerList, connList,
synTypes=(((SynType('exc', 1.0, 'b'),
SynType('inh', -1.0, 'r')),)))
# Case C: Synapse models are "AMPA", "NMDA", "GABA_A", "GABA_B".
#
# Connections are plotted by synapse model, with AMPA and NMDA
# on the top row, GABA_A and GABA_B in the bottom row when
# combining by layer. Senders must either have AMPA and NMDA or
# GABA_A and GABA_B synapses, but not both. When computing totals,
# AMPA and NMDA connections are weighted with +1, GABA_A and GABA_B
# with -1.
pattern = ConnectionPattern(layerList, connList)
# Case D: Explicit synapse types.
#
# If your network model uses other synapse types, or you want to use
# other weighting factors when computing totals, or you want different
# colormaps, you must specify synapse type information explicitly for
# ALL synapse models in your network. For each synapse model, you create
# a
#
# SynType(name, tweight, cmap)
#
# object, where "name" is the synapse model name, "tweight" the weight
# to be given to the type when computing totals (usually >0 for excit,
# <0 for inhib synapses), and "cmap" the "colormap": it may be a
# matplotlib.colors.Colormap instance or any valid matplotlib color
# specification; in the latter case, a colormap will be generated
# ranging from white to the given color.
# Synapse types are passed as a tuple of tuples. Synapses in a tuple form
# a group. ConnPlotter assumes that a sender may make synapses with all
# types in a single group, but never synapses with types from different
# groups (If you group by transmitter, this simply reflects Dale's law).
# When connections are aggregated by layer, each group is printed on one
# row.
pattern = ConnectionPattern(layerList, connList, synTypes = \
((SynType('Asyn', 1.0, 'orange'),
SynType('Bsyn', 2.5, 'r'),
SynType('Csyn', 0.5, (1.0, 0.5, 0.0))), # end first group
(SynType('Dsyn', -1.5, matplotlib.cm.jet),
SynType('Esyn', -3.2, '0.95'))))
# See documentation of class ConnectionPattern for more options.
# plotting the pattern
# show connection kernels for all sender-target pairs and all synapse models
pattern.plot()
# combine synapses of all types for each sender-target pair
# always uses red-blue (inhib-excit) color scale
pattern.plot(aggrSyns=True)
# for each sender-target layer pair, show sums for each synapse type
pattern.plot(aggrGroups=True)
# As in layer mode, but combine synapse types.
# always uses red-blue (inhib-excit) color scale
pattern.plot(aggrSyns=True, aggrGroups=True)
# Show only synapses of the selected type(s)
pattern.plot(mode=('AMPA',))
pattern.plot(mode=('AMPA', 'GABA_A'))
# use same color scales for all patches
pattern.plot(globalColors=True)
# manually specify limits for global color scale
pattern.plot(globalColors=True, colorLimits=[0, 2.5])
# save to file(s)
# NB: do not write to PDF directly, this seems to cause artifacts
pattern.plot(file='net.png')
pattern.plot(file=('net.eps','net.png'))
# You can adjust some properties of the figure by changing the
# default values in plotParams.
# Experimentally, you can dump the connection pattern into a LaTeX table
pattern.toLaTeX('pattern.tex', standalone=True)
# Figure layout can be modified by changing the global variable plotParams.
# Please see the documentation for class PlotParams for details.
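# For instance, a small sketch (attribute names as defined in the PlotParams
# class below; values here are only illustrative):
plotParams.margins.left = 20.0             # widen left margin (in mm)
plotParams.patch_size = 25.0               # enlarge the largest patch (in mm)
plotParams.layer_orientation = 'vertical'  # rotate sender and target layer labels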
# Changes 30 June 2010:
# - Singular layers (extent 0x0) are ignored as target layers.
# The reason for this is so that single-generator "layers" can be
# displayed as input.
# Problems:
# - singularity is not made clear visually
# - This messes up the diagonal shading
# - makes no sense to aggregate any longer
"""
# ----------------------------------------------------------------------------
from . import colormaps as cm
import matplotlib as mpl
import numpy as np
import warnings
import nest
__all__ = ['ConnectionPattern', 'SynType', 'plotParams', 'PlotParams']
# ----------------------------------------------------------------------------
# To do:
# - proper testsuite
# - layers of different sizes not handled properly
# (find biggest layer extent in each direction, then center;
# may run into problems with population label placement)
# - clean up main
# - color bars
# - "bad color" should be configurable
# - fix hack for colormaps import
# - use generators where possible (eg kernels?)
# ----------------------------------------------------------------------------
class SynType(object):
"""
Provide information about how synapse types should be rendered.
A singly nested list of SynType objects can be passed to the
ConnectionPattern constructor to specify layout and rendering info.
"""
def __init__(self, name, relweight, cmap):
"""
Arguments:
name Name of synapse type (string, must be unique)
relweight Relative weight of synapse type when aggregating
across synapse types. Should be negative for inhibitory
connections.
cmap Either a matplotlib.colors.Colormap instance or a
color specification. In the latter case, the colormap
will be built from white to the color given. Thus,
the color should be fully saturated. Colormaps should
have "set_bad(color='white')".
"""
self.name, self.relweight = name, relweight
if isinstance(cmap, mpl.colors.Colormap):
self.cmap = cmap
else:
self.cmap = cm.make_colormap(cmap)
# ----------------------------------------------------------------------------
class PlotParams(object):
"""
Collects parameters governing plotting.
Implemented using properties to ensure they are read-only.
"""
class Margins(object):
"""Width of outer margins, in mm."""
def __init__(self):
"""Set default values."""
self._left = 15.0
self._right = 10.0
self._top = 10.0
self._bottom = 10.0
self._colbar = 10.0
@property
def left(self):
return self._left
@left.setter
def left(self, l):
self._left = float(l)
@property
def right(self):
return self._right
@right.setter
def right(self, r):
self._right = float(r)
@property
def top(self):
return self._top
@top.setter
def top(self, t):
self._top = float(t)
@property
def bottom(self):
return self._bottom
@bottom.setter
def bottom(self, b):
self._bottom = float(b)
@property
def colbar(self):
return self._colbar
@colbar.setter
def colbar(self, b):
self._colbar = float(b)
def __init__(self):
"""Set default values"""
self._n_kern = 100
self._patch_size = 20.0 # 20 mm
self._layer_bg = {'super': '0.9', 'diag': '0.8', 'sub': '0.9'}
self._layer_font = mpl.font_manager.FontProperties(size='large')
self._layer_orient = {'sender': 'horizontal', 'target': 'horizontal'}
self._pop_font = mpl.font_manager.FontProperties(size='small')
self._pop_orient = {'sender': 'horizontal', 'target': 'horizontal'}
self._lgd_tick_font = mpl.font_manager.FontProperties(size='x-small')
self._lgd_title_font = mpl.font_manager.FontProperties(size='xx-small')
self._lgd_ticks = None
self._lgd_tick_fmt = None
self._lgd_location = None
self._cbwidth = None
self._cbspace = None
self._cbheight = None
self._cboffset = None
self._z_layer = 25
self._z_pop = 50
self._z_conn = 100
self.margins = self.Margins()
def reset(self):
"""
Reset to default values.
"""
self.__init__()
@property
def n_kern(self):
"""Sample long kernel dimension at N_kernel points."""
return self._n_kern
@n_kern.setter
def n_kern(self, n):
if n <= 0:
raise ValueError('n_kern > 0 required')
self._n_kern = n
@property
def patch_size(self):
"""Length of the longest edge of the largest patch, in mm."""
return self._patch_size
@patch_size.setter
def patch_size(self, sz):
if sz <= 0:
raise ValueError('patch_size > 0 required')
self._patch_size = sz
@property
def layer_bg(self):
"""
Dictionary of colors for layer background.
Entries "super", "diag", "sub". Each entry
can be set to any valid color specification.
If just a color is given, create dict by
brightening/dimming.
"""
return self._layer_bg
@layer_bg.setter
def layer_bg(self, bg):
if isinstance(bg, dict):
if set(bg.keys()) != set(('super', 'diag', 'sub')):
raise ValueError(
'Background dict must have keys "super", "diag", "sub"')
for bgc in bg.values():
if not mpl.colors.is_color_like(bgc):
raise ValueError('Entries in background dict must be ' +
'valid color specifications.')
self._layer_bg = bg
elif not mpl.colors.is_color_like(bg):
raise ValueError(
'layer_bg must be dict or valid color specification.')
else: # is color like
rgb = mpl.colors.colorConverter.to_rgb(bg)
self._layer_bg = {'super': [1.1 * c for c in rgb],
'diag': rgb,
'sub': [0.9 * c for c in rgb]}
@property
def layer_font(self):
"""
Font to use for layer labels.
Can be set to a matplotlib.font_manager.FontProperties instance.
"""
return self._layer_font
@layer_font.setter
def layer_font(self, font):
if not isinstance(font, mpl.font_manager.FontProperties):
raise ValueError('layer_font must be a ' +
'matplotlib.font_manager.FontProperties instance')
self._layer_font = font
@property
def layer_orientation(self):
"""
Orientation of layer labels.
Dictionary with orientation of sender and target labels. Orientation
        is either 'horizontal', 'vertical', or a value in degrees. When set
to a single string or number, this value is used for both sender and
target labels.
"""
return self._layer_orient
@layer_orientation.setter
def layer_orientation(self, orient):
if isinstance(orient, (str, float, int)):
tmp = {'sender': orient, 'target': orient}
elif isinstance(orient, dict):
tmp = self._layer_orient
tmp.update(orient)
else:
raise ValueError(
                'Orientation must be set to dict, string or number.')
if len(tmp) > 2:
raise ValueError('Orientation dictionary can only contain keys ' +
'"sender" and "target".')
self._layer_orient = tmp
@property
def pop_font(self):
"""
Font to use for population labels.
Can be set to a matplotlib.font_manager.FontProperties instance.
"""
return self._pop_font
@pop_font.setter
def pop_font(self, font):
if not isinstance(font, mpl.font_manager.FontProperties):
raise ValueError('pop_font must be a ' +
'matplotlib.font_manager.FontProperties instance')
self._pop_font = font
@property
def pop_orientation(self):
"""
Orientation of population labels.
Dictionary with orientation of sender and target labels. Orientation
is either 'horizontal', 'vertial', or a value in degrees. When set
to a single string or number, this value is used for both sender and
target labels.
"""
return self._pop_orient
@pop_orientation.setter
def pop_orientation(self, orient):
if isinstance(orient, (str, float, int)):
tmp = {'sender': orient, 'target': orient}
elif isinstance(orient, dict):
tmp = self._pop_orient
tmp.update(orient)
else:
raise ValueError(
                'Orientation must be set to dict, string or number.')
if len(tmp) > 2:
raise ValueError('Orientation dictionary can only contain keys ' +
'"sender" and "target".')
self._pop_orient = tmp
@property
def legend_tick_font(self):
"""
FontProperties for legend (colorbar) ticks.
"""
return self._lgd_tick_font
@legend_tick_font.setter
def legend_tick_font(self, font):
if not isinstance(font, mpl.font_manager.FontProperties):
raise ValueError('legend_tick_font must be a ' +
'matplotlib.font_manager.FontProperties instance')
self._lgd_tick_font = font
@property
def legend_title_font(self):
"""
FontProperties for legend (colorbar) titles.
"""
return self._lgd_title_font
@legend_title_font.setter
def legend_title_font(self, font):
if not isinstance(font, mpl.font_manager.FontProperties):
raise ValueError('legend_title_font must be a ' +
'matplotlib.font_manager.FontProperties instance')
self._lgd_title_font = font
@property
def legend_ticks(self):
"""
Ordered list of values at which legend (colorbar) ticks shall be set.
"""
return self._lgd_ticks
@legend_ticks.setter
def legend_ticks(self, ticks):
self._lgd_ticks = ticks
@property
def legend_tick_format(self):
"""
C-style format string for legend (colorbar) tick marks.
"""
return self._lgd_tick_fmt
@legend_tick_format.setter
def legend_tick_format(self, tickfmt):
self._lgd_tick_fmt = tickfmt
@property
def legend_location(self):
"""
If set to 'top', place legend label above colorbar,
if None, to the left.
"""
return self._lgd_location
@legend_location.setter
def legend_location(self, loc):
self._lgd_location = loc
@property
def cbwidth(self):
"""
Width of single colorbar, relative to figure width.
"""
return self._cbwidth
@cbwidth.setter
def cbwidth(self, cbw):
self._cbwidth = cbw
@property
def cbheight(self):
"""
Height of colorbar, relative to margins.colbar
"""
return self._cbheight
@cbheight.setter
def cbheight(self, cbh):
self._cbheight = cbh
@property
def cbspace(self):
"""
Spacing between colorbars, relative to figure width.
"""
return self._cbspace
@cbspace.setter
def cbspace(self, cbs):
self._cbspace = cbs
@property
def cboffset(self):
"""
Left offset of colorbar, relative to figure width.
"""
return self._cboffset
@cboffset.setter
def cboffset(self, cbo):
self._cboffset = cbo
@property
def z_layer(self):
"""Z-value for layer label axes."""
return self._z_layer
@property
def z_pop(self):
"""Z-value for population label axes."""
return self._z_pop
@property
def z_conn(self):
"""Z-value for connection kernel axes."""
return self._z_conn
# ----------------------------------------------------------------------------
# plotting settings, default values
plotParams = PlotParams()
# ----------------------------------------------------------------------------
class ConnectionPattern(object):
"""
Connection pattern representation for plotting.
When a ConnectionPattern is instantiated, all connection kernels
are pre-computed. They can later be plotted in various forms by
calling the plot() method.
The constructor requires layer and connection lists:
ConnectionPattern(layerList, connList, synTypes, **kwargs)
The layerList is used to:
- determine the size of patches
- determine the block structure
All other information is taken from the connList. Information
about synapses is inferred from the connList.
The following keyword arguments can also be given:
poporder : Population order. A dictionary mapping population names
to numbers; populations will be sorted in diagram in order
of increasing numbers. Otherwise, they are sorted
alphabetically.
intensity: 'wp' - use weight * probability (default)
'p' - use probability alone
'tcd' - use total charge deposited * probability
requires mList and Vmem; per v 0.7 only supported
for ht_neuron.
mList : model list; required for 'tcd'
Vmem : membrane potential; required for 'tcd'
"""
# ------------------------------------------------------------------------
class _LayerProps(object):
"""
Information about layer.
"""
def __init__(self, name, extent):
"""
name : name of layer
extent: spatial extent of the layer
"""
self.name = name
self.ext = extent
self.singular = extent[0] == 0.0 and extent[1] == 0.0
# ------------------------------------------------------------------------
class _SynProps(object):
"""
Information on how to plot patches for a synapse type.
"""
def __init__(self, row, col, tweight, cmap, idx):
"""
row, col: Position of synapse in grid of synapse patches, from 0,0
tweight : weight used when adding kernels for different synapses
cmap : colormap for synapse type (matplotlib.colors.Colormap)
idx : linear index, used to order colorbars in figure
"""
self.r, self.c = row, col
self.tw = tweight
self.cmap = cmap
self.index = idx
# --------------------------------------------------------------------
class _PlotKern(object):
"""
Representing object ready for plotting.
"""
def __init__(self, sl, sn, tl, tn, syn, kern):
"""
sl : sender layer
sn : sender neuron/population
tl : target layer
tn : target neuron/population
syn : synapse model
kern: kernel values (numpy masked array)
All arguments but kern are strings.
"""
self.sl = sl
self.sn = sn
self.tl = tl
self.tn = tn
self.syn = syn
self.kern = kern
# ------------------------------------------------------------------------
class _Connection(object):
def __init__(self, conninfo, layers, synapses, tgt_model, intensity, tcd, Vmem):
"""
Arguments:
            conninfo: connection info entry
                      (sender, target, conn_dict, syn_dict)
layers : list of _LayerProps objects
synapses: list of _SynProps objects
tgt_model: model of target neurons
intensity: 'wp', 'p', 'tcd'
tcd : tcd object
Vmem : reference membrane potential for tcd calculations
"""
self._intensity = intensity
# get source and target layer
self.slayer, self.tlayer = conninfo[:2]
lnames = [l.name for l in layers]
if self.slayer not in lnames:
raise Exception('Unknown source layer "%s".' % self.slayer)
if self.tlayer not in lnames:
raise Exception('Unknown target layer "%s".' % self.tlayer)
# if target layer is singular (extent==(0,0)),
# we do not create a full object
self.singular = False
for l in layers:
if l.name == self.tlayer and l.singular:
self.singular = True
return
# see if we connect to/from specific neuron types
cdict = conninfo[2]
sdict = conninfo[3]
if 'sources' in cdict:
if tuple(cdict['sources'].keys()) == ('model',):
self.snrn = cdict['sources']['model']
else:
raise ValueError(
'Can only handle sources in form {"model": ...}')
else:
self.snrn = None
if 'targets' in cdict:
if tuple(cdict['targets'].keys()) == ('model',):
self.tnrn = cdict['targets']['model']
else:
raise ValueError(
'Can only handle targets in form {"model": ...}')
else:
self.tnrn = None
# now get (mean) weight, we need this if we classify
# connections by sign of weight only
try:
self._mean_wght = _weighteval(sdict['weight'])
except:
raise ValueError('No or corrupt weight information.')
# synapse model
if sorted(synapses.keys()) == ['exc', 'inh']:
# implicit synapse type, we ignore value of
# 'synapse_model', it is for use by NEST only
if self._mean_wght >= 0:
self.synmodel = 'exc'
else:
self.synmodel = 'inh'
else:
try:
self.synmodel = sdict['synapse_model']
if self.synmodel not in synapses:
raise Exception('Unknown synapse model "%s".'
% self.synmodel)
except:
raise Exception('Explicit synapse model info required.')
# store information about connection
try:
self._mask = cdict['mask']
self._p_raw = cdict['p']
self._wght = sdict['weight']
# next line presumes only one layer name will match
self._textent = [tl.ext for tl in layers
if tl.name == self.tlayer][0]
if intensity == 'tcd':
self._tcd = tcd(self.synmodel, tgt_model, Vmem)
else:
self._tcd = None
except:
raise Exception('Corrupt connection dictionary')
# prepare for lazy evaluation
self._p = None
# --------------------------------------------------------------------
@property
def keyval(self):
"""
Return key and _Connection as tuple.
Useful to create dictionary via list comprehension.
"""
if self.singular:
return (None, self)
else:
return ((self.slayer, self.snrn, self.tlayer,
self.tnrn, self.synmodel),
self)
# --------------------------------------------------------------------
@property
def kernval(self):
"""Kernel value, as masked array."""
if self._p is None:
self._p = _evalkernel(self._mask, self._p_raw,
self._mean_wght,
self._textent, self._intensity,
self._tcd)
return self._p
# --------------------------------------------------------------------
@property
def mask(self):
"""Dictionary describing the mask."""
return self._mask
# --------------------------------------------------------------------
@property
def kernel(self):
"""Dictionary describing the kernel."""
return self._p_raw
# --------------------------------------------------------------------
@property
def weight(self):
"""Dictionary describing weight distribution."""
return self._wght
# --------------------------------------------------------------------
def matches(self, sl=None, sn=None, tl=None, tn=None, syn=None):
"""
Return True if all non-None arguments match.
Arguments:
sl : sender layer
sn : sender neuron type
tl : target layer
tn : target neuron type
syn: synapse type
"""
return ((sl is None or sl == self.slayer) and
(sn is None or sn == self.snrn) and
(tl is None or tl == self.tlayer) and
(tn is None or tn == self.tnrn) and
(syn is None or syn == self.synmodel))
# ------------------------------------------------------------------------
class _Patch(object):
"""
Represents a patch, i.e., an axes that will actually contain an
imshow graphic of a connection kernel.
The patch object contains the physical coordinates of the patch,
as well as a reference to the actual Axes object once it is created.
Also contains strings to be used as sender/target labels.
Everything is based on a coordinate system looking from the top left
corner down.
"""
# --------------------------------------------------------------------
def __init__(self, left, top, row, col, width, height,
slabel=None, tlabel=None, parent=None):
"""
Arguments:
left, top : Location of top-left corner
row, col : row, column location in parent block
width, height : Width and height of patch
slabel, tlabel: Values for sender/target label
parent : _Block to which _Patch/_Block belongs
"""
self.l, self.t, self.r, self.c = left, top, row, col
self.w, self.h = width, height
self.slbl, self.tlbl = slabel, tlabel
self.ax = None
self._parent = parent
# --------------------------------------------------------------------
def _update_size(self, new_lr):
"""Update patch size by inspecting all children."""
if new_lr[0] < self.l:
raise ValueError(
"new_lr[0] = %f < l = %f" % (new_lr[0], self.l))
if new_lr[1] < self.t:
raise ValueError(
"new_lr[1] = %f < t = %f" % (new_lr[1], self.t))
self.w, self.h = new_lr[0] - self.l, new_lr[1] - self.t
if self._parent:
self._parent._update_size(new_lr)
# --------------------------------------------------------------------
@property
def tl(self):
"""Top left corner of the patch."""
return (self.l, self.t)
# --------------------------------------------------------------------
@property
def lr(self):
"""Lower right corner of the patch."""
return (self.l + self.w, self.t + self.h)
# --------------------------------------------------------------------
@property
def l_patches(self):
"""Left edge of leftmost _Patch in _Block."""
if isinstance(self, ConnectionPattern._Block):
return min([e.l_patches for e in _flattened(self.elements)])
else:
return self.l
# --------------------------------------------------------------------
@property
def t_patches(self):
"""Top edge of topmost _Patch in _Block."""
if isinstance(self, ConnectionPattern._Block):
return min([e.t_patches for e in _flattened(self.elements)])
else:
return self.t
# --------------------------------------------------------------------
@property
def r_patches(self):
"""Right edge of rightmost _Patch in _Block."""
if isinstance(self, ConnectionPattern._Block):
return max([e.r_patches for e in _flattened(self.elements)])
else:
return self.l + self.w
# --------------------------------------------------------------------
@property
def b_patches(self):
"""Bottom edge of lowest _Patch in _Block."""
if isinstance(self, ConnectionPattern._Block):
return max([e.b_patches for e in _flattened(self.elements)])
else:
return self.t + self.h
# --------------------------------------------------------------------
@property
def location(self):
if self.r < self.c:
return 'super'
elif self.r == self.c:
return 'diag'
else:
return 'sub'
# ------------------------------------------------------------------------
class _Block(_Patch):
"""
Represents a block of patches.
A block is initialized with its top left corner and is then built
row-wise downward and column-wise to the right. Rows are added by
block.newRow(2.0, 1.5)
where 2.0 is the space between rows, 1.5 the space between the
first row. Elements are added to a row by
el = block.newElement(1.0, 0.6, 's', 't')
el = block.newElement(1.0, 0.6, 's', 't', size=[2.0, 3.0])
The first example adds a new _Block to the row. 1.0 is space between
blocks, 0.6 space before the first block in a row. 's' and 't' are
stored as slbl and tlbl (optional). If size is given, a _Patch with
the given size is created. _Patch is atomic. newElement() returns the
_Block or _Patch created.
"""
# --------------------------------------------------------------------
def __init__(self, left, top, row, col, slabel=None, tlabel=None,
parent=None):
ConnectionPattern._Patch.__init__(self, left, top, row, col, 0, 0,
slabel, tlabel, parent)
self.elements = []
self._row_top = None # top of current row
self._row = 0
self._col = 0
# --------------------------------------------------------------------
def newRow(self, dy=0.0, dynew=0.0):
"""
Open new row of elements.
Arguments:
dy : vertical skip before new row
dynew: vertical skip if new row is first row
"""
if self.elements:
# top of row is bottom of block so far + dy
self._row_top = self.lr[1] + dy
else:
# place relative to top edge of parent
self._row_top = self.tl[1] + dynew
self._row += 1
self._col = 0
self.elements.append([])
# --------------------------------------------------------------------
def newElement(self, dx=0.0, dxnew=0.0, slabel=None, tlabel=None,
size=None):
"""
Append new element to last row.
Creates _Block instance if size is not given, otherwise _Patch.
Arguments:
dx : horizontal skip before new element
dxnew : horizontal skip if new element is first
slabel: sender label (on y-axis)
tlabel: target label (on x-axis)
size : size of _Patch to create
Returns:
Created _Block or _Patch.
"""
assert (self.elements)
if self.elements[-1]:
# left edge is right edge of block so far + dx
col_left = self.lr[0] + dx
else:
# place relative to left edge of parent
col_left = self.tl[0] + dxnew
self._col += 1
if size is not None:
elem = ConnectionPattern._Patch(col_left, self._row_top,
self._row, self._col,
size[0], size[1], slabel,
tlabel, self)
else:
elem = ConnectionPattern._Block(col_left, self._row_top,
self._row, self._col,
slabel, tlabel, self)
self.elements[-1].append(elem)
self._update_size(elem.lr)
return elem
# --------------------------------------------------------------------
def addMargin(self, rmarg=0.0, bmarg=0.0):
"""Extend block by margin to right and bottom."""
if rmarg < 0.0:
raise ValueError('rmarg must not be negative!')
if bmarg < 0.0:
raise ValueError('bmarg must not be negative!')
lr = self.lr
self._update_size((lr[0] + rmarg, lr[1] + bmarg))
# ------------------------------------------------------------------------
def _prepareAxes(self, mode, showLegend):
"""
Prepare information for all axes, but do not create the actual axes
yet.
        mode: one of None (detailed), 'select', 'layer', 'population', 'totals'
"""
# parameters for figure, all quantities are in mm
patchmax = plotParams.patch_size # length of largest patch dimension
# actual parameters scaled from default patchmax = 20mm
lmargin = plotParams.margins.left
tmargin = plotParams.margins.top
rmargin = plotParams.margins.right
bmargin = plotParams.margins.bottom
cbmargin = plotParams.margins.colbar
blksep = 3. / 20. * patchmax # distance between blocks
popsep = 2. / 20. * patchmax # distance between populations
synsep = 0.5 / 20. * patchmax # distance between synapse types
# find maximal extents of individual patches, horizontal and vertical
maxext = max(_flattened([l.ext for l in self._layers]))
patchscale = patchmax / float(maxext) # determines patch size
# obtain number of synaptic patches per population pair
# maximum column across all synapse types, same for rows
nsyncols = max([s.c for s in self._synAttr.values()]) + 1
nsynrows = max([s.r for s in self._synAttr.values()]) + 1
        # dictionary mapping into patch-axes, so they can be found later
self._patchTable = {}
# set to store all created patches to avoid multiple
# creation of patches at same location
axset = set()
# create entire setup, top-down
self._axes = self._Block(lmargin, tmargin, 1, 1)
for sl in self._layers:
# get sorted list of populations for sender layer
spops = sorted([p[1] for p in self._pops if p[0] == sl.name],
key=lambda pn: self._poporder[pn])
self._axes.newRow(blksep, 0.0)
for tl in self._layers:
# ignore singular target layers
if tl.singular:
continue
# get sorted list of populations for target layer
tpops = sorted([p[1] for p in self._pops if p[0] == tl.name],
key=lambda pn: self._poporder[pn])
# compute size for patches
patchsize = patchscale * np.array(tl.ext)
block = self._axes.newElement(blksep, 0.0, sl.name, tl.name)
if mode == 'totals':
# single patch
block.newRow(popsep, popsep / 2.)
p = block.newElement(popsep, popsep / 2., size=patchsize)
self._patchTable[(sl.name, None, tl.name, None, None)] = p
elif mode == 'layer':
# We loop over all rows and columns in the synapse patch
# grid. For each (r,c), we find the pertaining synapse name
# by reverse lookup in the _synAttr dictionary. This is
# inefficient, but should not be too costly overall. But we
# must create the patches in the order they are placed.
# NB: We must create also those block.newElement() that are
# not registered later, since block would otherwise not
# skip over the unused location.
for r in range(nsynrows):
block.newRow(synsep, popsep / 2.)
for c in range(nsyncols):
p = block.newElement(synsep, popsep / 2.,
size=patchsize)
smod = [k for k, s in self._synAttr.items()
if s.r == r and s.c == c]
if smod:
assert (len(smod) == 1)
self._patchTable[(sl.name, None, tl.name,
None, smod[0])] = p
elif mode == 'population':
# one patch per population pair
for sp in spops:
block.newRow(popsep, popsep / 2.)
for tp in tpops:
pblk = block.newElement(popsep, popsep / 2.,
sp, tp)
pblk.newRow(synsep, synsep / 2.)
self._patchTable[(sl.name, sp,
tl.name, tp, None)] = \
pblk.newElement(synsep, blksep / 2.,
size=patchsize)
else:
# detailed presentation of all pops
for sp in spops:
block.newRow(popsep, popsep / 2.)
for tp in tpops:
pblk = block.newElement(popsep, popsep / 2.,
sp, tp)
pblk.newRow(synsep, synsep / 2.)
# Find all connections with matching properties
# all information we need here is synapse model.
# We store this in a dictionary mapping synapse
# patch column to synapse model, for use below.
syns = dict(
[(self._synAttr[c.synmodel].c, c.synmodel)
for c in _flattened(self._cTable.values())
if c.matches(sl.name, sp, tl.name, tp)])
# create all synapse patches
for n in range(nsyncols):
# Do not duplicate existing axes.
if (sl.name, sp, tl.name, tp, n) in axset:
continue
                                # Create patch. We must also create patches
                                # that do not have synapses, since spacing
                                # would go wrong otherwise.
p = pblk.newElement(synsep, 0.0,
size=patchsize)
# if patch represents existing synapse,
# register
if n in syns:
self._patchTable[(sl.name, sp, tl.name,
tp, syns[n])] = p
block.addMargin(popsep / 2., popsep / 2.)
self._axes.addMargin(rmargin, bmargin)
if showLegend:
self._axes.addMargin(0, cbmargin) # add color bar at bottom
            figwidth = self._axes.lr[0] - self._axes.tl[0] - rmargin  # keep right marg out of calc
if mode == 'totals' or mode == 'population':
# single patch at right edge, 20% of figure
if plotParams.cbwidth:
lwidth = plotParams.cbwidth * figwidth
else:
lwidth = 0.2 * figwidth
if lwidth > 100.0: # colorbar shouldn't be wider than 10cm
lwidth = 100.0
lheight = (plotParams.cbheight * cbmargin
if plotParams.cbheight else 0.3 * cbmargin)
if plotParams.legend_location is None:
cblift = 0.9 * cbmargin
else:
cblift = 0.7 * cbmargin
self._cbPatches = self._Patch(self._axes.tl[0],
self._axes.lr[1] - cblift,
None, None,
lwidth,
lheight)
else:
# one patch per synapse type, 20% of figure or less
# we need to get the synapse names in ascending order
# of synapse indices
snames = [s[0] for s in
sorted([(k, v) for k, v in self._synAttr.items()],
key=lambda kv: kv[1].index)
]
snum = len(snames)
if plotParams.cbwidth:
lwidth = plotParams.cbwidth * figwidth
if plotParams.cbspace:
lstep = plotParams.cbspace * figwidth
else:
lstep = 0.5 * lwidth
else:
if snum < 5:
lwidth = 0.15 * figwidth
lstep = 0.1 * figwidth
else:
lwidth = figwidth / (snum + 1.0)
lstep = (figwidth - snum * lwidth) / (snum - 1.0)
if lwidth > 100.0: # colorbar shouldn't be wider than 10cm
lwidth = 100.0
lstep = 30.0
lheight = (plotParams.cbheight * cbmargin
if plotParams.cbheight else 0.3 * cbmargin)
if plotParams.cboffset is not None:
offset = plotParams.cboffset
else:
offset = lstep
if plotParams.legend_location is None:
cblift = 0.9 * cbmargin
else:
cblift = 0.7 * cbmargin
self._cbPatches = {}
for j in range(snum):
self._cbPatches[snames[j]] = \
self._Patch(
self._axes.tl[0] + offset + j * (lstep + lwidth),
self._axes.lr[1] - cblift,
None, None,
lwidth,
lheight)
# ------------------------------------------------------------------------
def _scaledBox(self, p):
"""Scaled axes rectangle for patch, reverses y-direction."""
xsc, ysc = self._axes.lr
return self._figscale * np.array(
[p.l / xsc, 1 - (p.t + p.h) / ysc, p.w / xsc, p.h / ysc])
# ------------------------------------------------------------------------
def _scaledBoxNR(self, p):
"""Scaled axes rectangle for patch, does not reverse y-direction."""
xsc, ysc = self._axes.lr
return self._figscale * np.array(
[p.l / xsc, p.t / ysc, p.w / xsc, p.h / ysc])
# ------------------------------------------------------------------------
def _configSynapses(self, cList, synTypes):
"""Configure synapse information based on connections and user info."""
# compile information on synapse types and weights
synnames = set(c[3]['synapse_model'] for c in cList)
synweights = set(_weighteval(c[3]['weight']) for c in cList)
# set up synTypes for all pre-defined cases
if synTypes:
# check if there is info for all synapse types
stnames = _flattened([[s.name for s in r] for r in synTypes])
if len(stnames) != len(set(stnames)):
raise ValueError(
'Names of synapse types in synTypes must be unique!')
if len(synnames) > 1 and not synnames.issubset(set(stnames)):
                raise ValueError('synTypes must provide information about ' +
                                 'all synapse types.')
elif len(synnames) == 1:
# only one synapse type used
if min(synweights) >= 0:
# all weights positive
synTypes = ((SynType('exc', 1.0, 'red'),),)
elif max(synweights) <= 0:
# all weights negative
synTypes = ((SynType('inh', -1.0, 'blue'),),)
else:
# positive and negative weights, assume Dale holds
synTypes = ((SynType('exc', 1.0, 'red'),),
(SynType('inh', -1.0, 'blue'),))
elif synnames == set(['AMPA', 'GABA_A']):
# only AMPA and GABA_A
synTypes = ((SynType('AMPA', 1.0, 'red'),),
(SynType('GABA_A', -1.0, 'blue'),))
elif synnames.issubset(set(['AMPA', 'NMDA', 'GABA_A', 'GABA_B'])):
synTypes = ((SynType('AMPA', 1.0, 'red'),
SynType('NMDA', 1.0, 'orange'),),
(SynType('GABA_A', -1.0, 'blue'),
SynType('GABA_B', -1.0, 'purple'),))
else:
raise ValueError('Connection list contains unknown synapse ' +
'models; synTypes required.')
# now build _synAttr by assigning blocks to rows
self._synAttr = {}
row = 0
ctr = 0
for sgroup in synTypes:
col = 0
for stype in sgroup:
self._synAttr[stype.name] = self._SynProps(row, col,
stype.relweight,
stype.cmap, ctr)
col += 1
ctr += 1
row += 1
# ------------------------------------------------------------------------
def __init__(self, lList, cList, synTypes=None, intensity='wp',
mList=None, Vmem=None, poporder=None):
"""
lList : layer list
cList : connection list
synTypes : nested list of synapse types
intensity: 'wp' - weight * probability
'p' - probability
'tcd' - |total charge deposited| * probability
requires mList; currently only for ht_model
proper results only if Vmem within reversal
potentials
mList : model list; only needed with 'tcd'
Vmem : reference membrane potential for 'tcd'
poporder : dictionary mapping population names to numbers; populations
will be sorted in diagram in order of increasing numbers.
"""
# extract layers to dict mapping name to extent
self._layers = [self._LayerProps(l[0], l[3]) for l in lList]
# ensure layer names are unique
lnames = [l.name for l in self._layers]
if len(lnames) != len(set(lnames)):
raise ValueError('Layer names must be unique.')
# set up synapse attributes
self._configSynapses(cList, synTypes)
# if tcd mode, build tcd representation
if intensity != 'tcd':
tcd = None
else:
assert (mList)
from . import tcd_nest
tcd = tcd_nest.TCD(mList)
# Build internal representation of connections.
# This representation contains one entry for each sender pop,
# target pop, synapse type tuple. Creating the connection object
# implies computation of the kernel.
# Several connection may agree in all properties, these need to be
# added here. Therefore, we need to build iteratively and store
# everything in a dictionary, so we can find early instances.
self._cTable = {}
for conn in cList:
# Extract target model name
tgt_model = [layer_spec[1] for layer_spec in lList if layer_spec[0] == conn[1]][0]
print(tgt_model)
key, val = self._Connection(conn, self._layers, self._synAttr, tgt_model,
intensity, tcd, Vmem).keyval
if key:
if key in self._cTable:
self._cTable[key].append(val)
else:
self._cTable[key] = [val]
# number of layers
self._nlyr = len(self._layers)
# compile list of populations, list(set()) makes list unique
self._pops = list(
set(_flattened([[(c.slayer, c.snrn), (c.tlayer, c.tnrn)]
for c in _flattened(self._cTable.values())])))
self._npop = len(self._pops)
# store population ordering; if not given, use alphabetical ordering
# also add any missing populations alphabetically at end
# layers are ignored
# create alphabetically sorted list of unique population names
popnames = sorted(list(set([p[1] for p in self._pops])),
key=lambda x: x if x is not None else "")
if poporder:
self._poporder = poporder
next = max(self._poporder.values()) + 1 # next free sorting index
else:
self._poporder = {}
next = 0
for pname in popnames:
if pname not in self._poporder:
self._poporder[pname] = next
next += 1
# compile list of synapse types
self._synTypes = list(
set([c.synmodel for c in _flattened(self._cTable.values())]))
# ------------------------------------------------------------------------
def plot(self, aggrGroups=False, aggrSyns=False, globalColors=False,
colorLimits=None, showLegend=True,
selectSyns=None, file=None, fixedWidth=None):
"""
Plot connection pattern.
By default, connections between any pair of populations
are plotted on the screen, with separate color scales for
all patches.
Arguments:
aggrGroups If True, aggregate projections with the same synapse type
and the same source and target groups (default: False)
aggrSyns If True, aggregate projections with the same synapse model
(default: False)
globalColors If True, use global color scale, otherwise local
(default: False)
colorLimits If given, must be two element vector for lower and
upper limits of color scale. Implies globalColors
(default: None)
showLegend If True, show legend below CPT (default: True).
selectSyns If tuple of synapse models, show only connections of the
                       given types. Cannot be combined with aggregation.
file If given, save plot to given file name; file may also be a
tuple of file names, the figure will then be saved to all
files. This may be useful if you want to save the same figure
in several formats.
fixedWidth Figure will be scaled to this width in mm by changing
patch size.
Returns:
kern_min, kern_max Minimal and maximal values of kernels,
with kern_min <= 0, kern_max >= 0.
Output:
figure created
"""
import matplotlib.pyplot as plt
        # translate new to old parameter names (per v 0.5)
normalize = globalColors
if colorLimits:
normalize = True
if selectSyns:
if aggrGroups or aggrSyns:
raise ValueError(
'selectSyns cannot be combined with aggregation.')
selected = selectSyns
mode = 'select'
elif aggrGroups and aggrSyns:
mode = 'totals'
elif aggrGroups and not aggrSyns:
mode = 'layer'
elif aggrSyns and not aggrGroups:
mode = 'population'
else:
mode = None
if mode == 'layer':
# reduce to dimensions sender layer, target layer, synapse type
# add all kernels agreeing on these three attributes
plotKerns = []
for slayer in self._layers:
for tlayer in self._layers:
for synmodel in self._synTypes:
kerns = [c.kernval for c in
_flattened(self._cTable.values())
if c.matches(sl=slayer.name, tl=tlayer.name,
syn=synmodel)]
if len(kerns) > 0:
plotKerns.append(
self._PlotKern(slayer.name, None, tlayer.name,
None, synmodel,
_addKernels(kerns)))
elif mode == 'population':
            # reduce to dimensions sender population, target population
            # add all kernels, weighting according to synapse type
plotKerns = []
for spop in self._pops:
for tpop in self._pops:
kerns = [self._synAttr[c.synmodel].tw * c.kernval for c in
_flattened(self._cTable.values())
if c.matches(sl=spop[0], sn=spop[1], tl=tpop[0],
tn=tpop[1])]
if len(kerns) > 0:
plotKerns.append(
self._PlotKern(spop[0], spop[1], tpop[0], tpop[1],
None,
_addKernels(kerns)))
elif mode == 'totals':
# reduce to dimensions sender layer, target layer
            # add all kernels, weighting according to synapse type
plotKerns = []
for slayer in self._layers:
for tlayer in self._layers:
kerns = [self._synAttr[c.synmodel].tw * c.kernval for c in
_flattened(self._cTable.values())
if c.matches(sl=slayer.name, tl=tlayer.name)]
if len(kerns) > 0:
plotKerns.append(
self._PlotKern(slayer.name, None, tlayer.name,
None, None, _addKernels(kerns)))
elif mode == 'select':
# copy only those kernels that have the requested synapse type,
# no dimension reduction
# We need to sum all kernels in the list for a set of attributes
plotKerns = [
self._PlotKern(clist[0].slayer, clist[0].snrn, clist[0].tlayer,
clist[0].tnrn,
clist[0].synmodel,
_addKernels([c.kernval for c in clist]))
for clist in self._cTable.values() if
clist[0].synmodel in selected]
else:
# copy all
# We need to sum all kernels in the list for a set of attributes
plotKerns = [
self._PlotKern(clist[0].slayer, clist[0].snrn, clist[0].tlayer,
clist[0].tnrn,
clist[0].synmodel,
_addKernels([c.kernval for c in clist]))
for clist in self._cTable.values()]
self._prepareAxes(mode, showLegend)
if fixedWidth:
margs = plotParams.margins.left + plotParams.margins.right
if fixedWidth <= margs:
raise ValueError('Requested width must be less than ' +
'width of margins (%g mm)' % margs)
currWidth = self._axes.lr[0]
currPatchMax = plotParams.patch_size # store
# compute required patch size
plotParams.patch_size = ((fixedWidth - margs) /
(currWidth - margs) * currPatchMax)
# build new axes
del self._axes
self._prepareAxes(mode, showLegend)
# restore patch size
plotParams.patch_size = currPatchMax
# create figure with desired size
fsize = np.array(self._axes.lr) / 25.4 # convert mm to inches
f = plt.figure(figsize=fsize, facecolor='w')
# size will be rounded according to DPI setting, adjust fsize
dpi = f.get_dpi()
fsize = np.floor(fsize * dpi) / dpi
# check that we got the correct size
actsize = np.array([f.get_figwidth(), f.get_figheight()], dtype=float)
if all(actsize == fsize):
self._figscale = 1.0 # no scaling
else:
warnings.warn("""
WARNING: Figure shrunk on screen!
The figure is shrunk to fit onto the screen.
Please specify a different backend using the -d
option to obtain full-size figures. Your current
backend is: %s
""" % mpl.get_backend())
plt.close(f)
# determine scale: most shrunk dimension
self._figscale = np.min(actsize / fsize)
# create shrunk on-screen figure
f = plt.figure(figsize=self._figscale * fsize, facecolor='w')
# just ensure all is well now
actsize = np.array([f.get_figwidth(), f.get_figheight()],
dtype=float)
# add decoration
for block in _flattened(self._axes.elements):
ax = f.add_axes(self._scaledBox(block),
facecolor=plotParams.layer_bg[block.location],
xticks=[], yticks=[],
zorder=plotParams.z_layer)
if hasattr(ax, 'frame'):
ax.frame.set_visible(False)
else:
for sp in ax.spines.values():
# turn off axis lines, make room for frame edge
sp.set_color('none')
if block.l <= self._axes.l_patches and block.slbl:
ax.set_ylabel(block.slbl,
rotation=plotParams.layer_orientation['sender'],
fontproperties=plotParams.layer_font)
if block.t <= self._axes.t_patches and block.tlbl:
ax.set_xlabel(block.tlbl,
rotation=plotParams.layer_orientation['target'],
fontproperties=plotParams.layer_font)
ax.xaxis.set_label_position('top')
# inner blocks for population labels
if mode not in ('totals', 'layer'):
for pb in _flattened(block.elements):
if not isinstance(pb, self._Block):
continue # should not happen
ax = f.add_axes(self._scaledBox(pb),
facecolor='none', xticks=[], yticks=[],
zorder=plotParams.z_pop)
if hasattr(ax, 'frame'):
ax.frame.set_visible(False)
else:
for sp in ax.spines.values():
# turn off axis lines, make room for frame edge
sp.set_color('none')
if pb.l + pb.w >= self._axes.r_patches and pb.slbl:
ax.set_ylabel(pb.slbl,
rotation=plotParams.pop_orientation[
'sender'],
fontproperties=plotParams.pop_font)
ax.yaxis.set_label_position('right')
if pb.t + pb.h >= self._axes.b_patches and pb.tlbl:
ax.set_xlabel(pb.tlbl,
rotation=plotParams.pop_orientation[
'target'],
fontproperties=plotParams.pop_font)
# determine minimum and maximum values across all kernels,
# but set min <= 0, max >= 0
kern_max = max(0.0, max([np.max(kern.kern) for kern in plotKerns]))
kern_min = min(0.0, min([np.min(kern.kern) for kern in plotKerns]))
# determine color limits for plots
if colorLimits:
c_min, c_max = colorLimits # explicit values
else:
# default values for color limits
# always 0 as lower limit so anything > 0 is non-white,
# except when totals or populations
c_min = None if mode in ('totals', 'population') else 0.0
c_max = None # use patch maximum as upper limit
if normalize:
# use overall maximum, at least 0
c_max = kern_max
if aggrSyns:
# use overall minimum, if negative, otherwise 0
c_min = kern_min
# for c_max, use the larger of the two absolute values
c_max = kern_max
# if c_min is non-zero, use same color scale for neg values
if c_min < 0:
c_min = -c_max
# Initialize dict storing sample patches for each synapse type for use
# in creating color bars. We will store the last patch of any given
# synapse type for reference. When aggrSyns, we have only one patch
# type and store that.
if not aggrSyns:
samplePatches = dict(
[(sname, None) for sname in self._synAttr.keys()])
else:
# only single type of patches
samplePatches = None
for kern in plotKerns:
p = self._patchTable[(kern.sl, kern.sn, kern.tl,
kern.tn, kern.syn)]
p.ax = f.add_axes(self._scaledBox(p), aspect='equal',
xticks=[], yticks=[], zorder=plotParams.z_conn)
p.ax.patch.set_edgecolor('none')
if hasattr(p.ax, 'frame'):
p.ax.frame.set_visible(False)
else:
for sp in p.ax.spines.values():
# turn off axis lines, make room for frame edge
sp.set_color('none')
if not aggrSyns:
                # we have synapse information -> not totals, all vals positive
assert (kern.syn)
assert (np.min(kern.kern) >= 0.0)
# we may overwrite here, but this does not matter, we only need
# some reference patch
samplePatches[kern.syn] = p.ax.imshow(kern.kern,
vmin=c_min, vmax=c_max,
cmap=self._synAttr[
kern.syn].cmap) # ,
# interpolation='nearest')
else:
# we have totals, special color table and normalization
# we may overwrite here, but this does not matter, we only need
# some reference patch
samplePatches = p.ax.imshow(kern.kern,
vmin=c_min, vmax=c_max,
cmap=cm.bluered,
norm=cm.ZeroCenterNorm())
# interpolation='nearest')
# Create colorbars at bottom of figure
if showLegend:
# FIXME: rewrite the function to avoid comparisons with None!
f_min = float("-inf") if c_min is None else c_min
            f_max = float("inf") if c_max is None else c_max
# Do we have kernel values exceeding the color limits?
if f_min <= kern_min and kern_max <= f_max:
extmode = 'neither'
elif f_min > kern_min and kern_max <= f_max:
extmode = 'min'
elif f_min <= kern_min and kern_max > f_max:
extmode = 'max'
else:
extmode = 'both'
if aggrSyns:
cbax = f.add_axes(self._scaledBox(self._cbPatches))
# by default, use 4 ticks to avoid clogging
# according to docu, we need a separate Locator object
# for each axis.
if plotParams.legend_ticks:
tcks = plotParams.legend_ticks
else:
tcks = mpl.ticker.MaxNLocator(nbins=4)
if normalize:
# colorbar with freely settable ticks
cb = f.colorbar(samplePatches, cax=cbax,
orientation='horizontal',
ticks=tcks,
format=plotParams.legend_tick_format,
extend=extmode)
else:
# colorbar with tick labels 'Exc', 'Inh'
                    # we add the color bar here explicitly, so we get no
# problems if the sample patch includes only pos or
# only neg values
cb = mpl.colorbar.ColorbarBase(cbax, cmap=cm.bluered,
orientation='horizontal')
cbax.set_xticks([0, 1])
cbax.set_xticklabels(['Inh', 'Exc'])
cb.outline.set_linewidth(0.5) # narrower line around colorbar
# fix font for ticks
plt.setp(cbax.get_xticklabels(),
fontproperties=plotParams.legend_tick_font)
# no title in this case
else:
# loop over synapse types
for syn in self._synAttr.keys():
cbax = f.add_axes(self._scaledBox(self._cbPatches[syn]))
if plotParams.legend_location is None:
cbax.set_ylabel(
syn,
fontproperties=plotParams.legend_title_font,
rotation='horizontal')
else:
cbax.set_title(
syn,
fontproperties=plotParams.legend_title_font,
rotation='horizontal')
if normalize:
# by default, use 4 ticks to avoid clogging
# according to docu, we need a separate Locator object
# for each axis.
if plotParams.legend_ticks:
tcks = plotParams.legend_ticks
else:
tcks = mpl.ticker.MaxNLocator(nbins=4)
# proper colorbar
cb = f.colorbar(samplePatches[syn], cax=cbax,
orientation='horizontal',
ticks=tcks,
format=plotParams.legend_tick_format,
extend=extmode)
cb.outline.set_linewidth(
0.5) # narrower line around colorbar
# fix font for ticks
plt.setp(cbax.get_xticklabels(),
fontproperties=plotParams.legend_tick_font)
else:
# just a solid color bar with no ticks
cbax.set_xticks([])
cbax.set_yticks([])
# full-intensity color from color map
cbax.set_facecolor(self._synAttr[syn].cmap(1.0))
# narrower border
if hasattr(cbax, 'frame'):
cbax.frame.set_linewidth(0.5)
else:
for sp in cbax.spines.values():
sp.set_linewidth(0.5)
# save to file(s), use full size
f.set_size_inches(fsize)
if isinstance(file, (list, tuple)):
for fn in file:
f.savefig(fn)
elif isinstance(file, str):
f.savefig(file)
f.set_size_inches(actsize) # reset size for further interactive work
return kern_min, kern_max
# ------------------------------------------------------------------------
def toLaTeX(self, file, standalone=False, enumerate=False, legend=True):
"""
Write connection table to file.
Arguments:
file output file name
standalone create complete LaTeX file (default: False)
enumerate enumerate connections (default: False)
legend add explanation of functions used (default: True)
"""
lfile = open(file, 'w')
if not lfile:
raise Exception('Could not open file "%s"' % file)
if standalone:
lfile.write(
r"""
\documentclass[a4paper,american]{article}
\usepackage[pdftex,margin=1in,centering,
noheadfoot,a4paper]{geometry}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{color}
\usepackage{calc}
\usepackage{tabularx} % autom. adjusts column width in tables
\usepackage{multirow} % allows entries spanning several rows
\usepackage{colortbl} % allows coloring tables
\usepackage[fleqn]{amsmath}
\setlength{\mathindent}{0em}
\usepackage{mathpazo}
\usepackage[scaled=.95]{helvet}
\renewcommand\familydefault{\sfdefault}
\renewcommand\arraystretch{1.2}
\pagestyle{empty}
% \hdr{ncols}{label}{title}
%
% Typeset header bar across table with ncols columns
% with label at left margin and centered title
%
\newcommand{\hdr}[3]{%
\multicolumn{#1}{|l|}{%
\color{white}\cellcolor[gray]{0.0}%
\textbf{\makebox[0pt]{#2}\hspace{0.5\linewidth}%
\makebox[0pt][c]{#3}}%
}%
}
\begin{document}
""")
lfile.write(
r"""
\noindent\begin{tabularx}{\linewidth}{%s|l|l|l|c|c|X|}\hline
\hdr{%d}{}{Connectivity}\\\hline
%s \textbf{Src} & \textbf{Tgt} & \textbf{Syn} &
\textbf{Wght} & \textbf{Mask} & \textbf{Kernel} \\\hline
""" % (('|r', 7, '&') if enumerate else ('', 6, '')))
# ensure sorting according to keys, gives some alphabetic sorting
haveU, haveG = False, False
cctr = 0 # connection counter
for ckey in sorted(self._cTable.keys()):
for conn in self._cTable[ckey]:
cctr += 1
if enumerate:
lfile.write('%d &' % cctr)
# take care to escape _ in names such as GABA_A
# also remove any pending '/None'
lfile.write((r'%s/%s & %s/%s & %s' %
(conn.slayer, conn.snrn, conn.tlayer, conn.tnrn,
conn.synmodel)).replace('_', r'\_').replace(
'/None', ''))
lfile.write(' & \n')
if isinstance(conn.weight, (int, float)):
lfile.write(r'%g' % conn.weight)
elif 'uniform' in conn.weight:
cw = conn.weight['uniform']
lfile.write(
r'$\mathcal{U}[%g, %g)$' % (cw['min'], cw['max']))
haveU = True
else:
raise ValueError(
                        'Unknown weight type "%s"' % conn.weight)
lfile.write(' & \n')
if 'circular' in conn.mask:
lfile.write(r'$\leq %g$' % conn.mask['circular']['radius'])
elif 'rectangular' in conn.mask:
cmr = conn.mask['rectangular']
lfile.write(
r"""$[(%+g, %+g), (%+g, %+g)]$"""
% (cmr['lower_left'][0], cmr['lower_left'][1],
cmr['upper_right'][0], cmr['upper_right'][1]))
else:
raise ValueError(
                        'Unknown mask type "%s"' % conn.mask)
lfile.write(' & \n')
if isinstance(conn.kernel, (int, float)):
lfile.write(r'$%g$' % conn.kernel)
elif isinstance(conn.kernel, nest.Parameter):
lfile.write(r'$<Parameter>$')
elif 'gaussian' in conn.kernel:
ckg = conn.kernel['gaussian']
lfile.write(r'$\mathcal{G}(p_0 = %g, \sigma = %g)$' %
(ckg['p_center'], ckg['sigma']))
haveG = True
else:
raise ValueError(
                        'Unknown kernel type "%s"' % conn.kernel)
lfile.write('\n')
lfile.write(r'\\\hline' '\n')
if legend and (haveU or haveG):
# add bottom line with legend
lfile.write(r'\hline' '\n')
lfile.write(r'\multicolumn{%d}{|l|}{\footnotesize ' %
(7 if enumerate else 6))
if haveG:
lfile.write(r'$\mathcal{G}(p_0, \sigma)$: ' +
r'$p(\mathbf{x})=p_0 e^{-\mathbf{x}^2/2\sigma^2}$')
if haveG and haveU:
lfile.write(r', ')
if haveU:
lfile.write(
r'$\mathcal{U}[a, b)$: uniform distribution on $[a, b)$')
lfile.write(r'}\\\hline' '\n')
lfile.write(r'\end{tabularx}' '\n\n')
if standalone:
lfile.write(r'\end{document}''\n')
lfile.close()
# ----------------------------------------------------------------------------
def _evalkernel(mask, kernel, weight, extent, intensity, tcd):
"""
    Evaluate the kernel within the given extent.
Kernel values are multiplied with abs(weight). If weight is a
distribution, the mean value is used.
Result is a masked array, in which the values outside the mask are
masked.
"""
# determine resolution, number of data points
dx = max(extent) / plotParams.n_kern
    nx = int(np.ceil(extent[0] / dx))
    ny = int(np.ceil(extent[1] / dx))
x = np.linspace(-0.5 * extent[0], 0.5 * extent[0], nx)
y = np.linspace(-0.5 * extent[1], 0.5 * extent[1], ny)
X, Y = np.meshgrid(x, y)
if intensity == 'wp':
return np.ma.masked_array(abs(weight) * _kerneval(X, Y, kernel),
np.logical_not(_maskeval(X, Y, mask)))
elif intensity == 'p':
return np.ma.masked_array(_kerneval(X, Y, kernel),
np.logical_not(_maskeval(X, Y, mask)))
elif intensity == 'tcd':
return np.ma.masked_array(
abs(tcd) * abs(weight) * _kerneval(X, Y, kernel),
np.logical_not(_maskeval(X, Y, mask)))
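# A minimal usage sketch (added illustration, not part of the original module):
# evaluating a constant kernel inside a circular mask. It assumes the
# module-level plotParams object is configured, since _evalkernel reads
# plotParams.n_kern.
def _demo_evalkernel():
    mask = {'circular': {'radius': 0.25}}
    kernel = 1.0        # constant connection probability
    weight = 2.0        # scalar weight, used via abs(weight)
    vals = _evalkernel(mask, kernel, weight, extent=(1.0, 1.0),
                       intensity='wp', tcd=None)
    return vals         # masked array; values outside the circle are masked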
# ----------------------------------------------------------------------------
def _weighteval(weight):
"""Returns weight, or mean of distribution, signed."""
w = None
if isinstance(weight, (float, int)):
w = weight
elif isinstance(weight, dict):
assert (len(weight) == 1)
if 'uniform' in weight:
w = 0.5 * (weight['uniform']['min'] + weight['uniform']['max'])
elif 'gaussian' in weight:
w = weight['gaussian']['mean']
else:
raise Exception(
'Unknown weight type "%s"' % tuple(weight.keys())[0])
    if w is None:
raise Exception('Cannot handle weight.')
return float(w)
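# A small sketch (added illustration): _weighteval reduces a weight given as a
# number or as a distribution dict to a single signed scalar, using the mean
# of the distribution.
def _demo_weighteval():
    assert _weighteval(2.5) == 2.5
    assert _weighteval({'uniform': {'min': 0.0, 'max': 2.0}}) == 1.0
    assert _weighteval({'gaussian': {'mean': -1.5}}) == -1.5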
# ----------------------------------------------------------------------------
def _maskeval(x, y, mask):
"""
Evaluate mask given as topology style dict at
(x,y). Assume x,y are 2d numpy matrices.
"""
assert (len(mask) == 1)
if 'circular' in mask:
r = mask['circular']['radius']
m = x ** 2 + y ** 2 <= r ** 2
elif 'doughnut' in mask:
ri = mask['doughnut']['inner_radius']
ro = mask['doughnut']['outer_radius']
d = x ** 2 + y ** 2
m = np.logical_and(ri <= d, d <= ro)
elif 'rectangular' in mask:
ll = mask['rectangular']['lower_left']
ur = mask['rectangular']['upper_right']
m = np.logical_and(np.logical_and(ll[0] <= x, x <= ur[0]),
np.logical_and(ll[1] <= y, y <= ur[1]))
else:
raise Exception('Unknown mask type "%s"' % tuple(mask.keys())[0])
return m
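# A small sketch (added illustration): _maskeval marks which grid points fall
# inside a topology-style mask dictionary.
def _demo_maskeval():
    x, y = np.meshgrid(np.linspace(-1.0, 1.0, 5), np.linspace(-1.0, 1.0, 5))
    inside = _maskeval(x, y, {'circular': {'radius': 1.0}})
    return inside   # boolean array: True inside the unit circle, False at the corners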
# ----------------------------------------------------------------------------
def _kerneval(x, y, fun):
"""
Evaluate function given as topology style dict at
(x,y). Assume x,y are 2d numpy matrices
"""
if isinstance(fun, (float, int)):
return float(fun) * np.ones(np.shape(x))
elif isinstance(fun, nest.Parameter):
# Create a single node in origo that we can apply the Parameter on.
origo_node = nest.Create('iaf_psc_alpha', positions=nest.spatial.free([[0., 0.]]))
return np.array([fun.apply(origo_node, list(np.column_stack((xn, yn)))) for xn, yn in zip(x, y)])
# something very wrong
raise Exception('Cannot handle kernel.')
# ----------------------------------------------------------------------------
def _addKernels(kList):
"""
Add a list of kernels.
Arguments:
kList: List of masked arrays of equal size.
Returns:
Masked array of same size as input. All values are added,
setting masked values to 0. The mask for the sum is the
logical AND of all individual masks, so that only such
values are masked that are masked in all kernels.
_addKernels always returns a new array object, even if
kList has only a single element.
"""
assert (len(kList) > 0)
    if len(kList) < 2:
        return kList[0].copy()
d = np.ma.filled(kList[0], fill_value=0).copy()
m = kList[0].mask.copy()
for k in kList[1:]:
d += np.ma.filled(k, fill_value=0)
m = np.logical_and(m, k.mask)
return np.ma.masked_array(d, m)
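# A small sketch (added illustration): _addKernels sums masked kernels with
# masked entries treated as 0; a point stays masked only if it is masked in
# every input kernel.
def _demo_addKernels():
    a = np.ma.masked_array([1.0, 2.0, 3.0], mask=[False, True, True])
    b = np.ma.masked_array([4.0, 5.0, 6.0], mask=[True, False, True])
    s = _addKernels([a, b])
    return s   # data [1., 5., 0.]; only the last entry remains masked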
# ----------------------------------------------------------------------------
def _flattened(lst):
"""Returned list flattend at first level."""
return sum(lst, [])
# ----------------------------------------------------------------------------
"""
if __name__ == "__main__":
import sys
sys.path += ['./examples']
# import simple
# reload(simple)
cp = ConnectionPattern(simple.layerList, simple.connectList)
import simple2
reload(simple2)
cp2 = ConnectionPattern(simple2.layerList, simple2.connectList)
st3 = ((SynType('GABA_B', -5.0, 'orange'),
SynType('GABA_A', -1.0, 'm')),
(SynType('NMDA', 5.0, 'b'),
SynType('FOO', 1.0, 'aqua'),
SynType('AMPA', 3.0, 'g')))
cp3s = ConnectionPattern(simple2.layerList, simple2.connectList,
synTypes=st3)
import simple3
reload(simple3)
cp3 = ConnectionPattern(simple3.layerList, simple3.connectList)
# cp._prepareAxes('by layer')
# cp2._prepareAxes('by layer')
# cp3._prepareAxes('detailed')
cp2.plot()
cp2.plot(mode='layer')
cp2.plot(mode='population')
cp2.plot(mode='totals')
cp2.plot(mode=('AMPA',))
cp2.plot(mode=('AMPA','GABA_B'))
# cp3.plot()
# cp3.plot(mode='population')
# cp3.plot(mode='layer')
# cp3.plot(mode='totals')
# cp.plot(normalize=True)
# cp.plot(totals=True, normalize=True)
# cp2.plot()
# cp2.plot(file=('cp3.eps'))
# cp2.plot(byLayer=True)
# cp2.plot(totals=True)
"""
|
espenhgn/nest-simulator
|
extras/ConnPlotter/ConnPlotter.py
|
Python
|
gpl-2.0
| 83,899
|
[
"Gaussian",
"NEURON"
] |
e82e8e1895217bda93253a3556db418e5716075c2f70979a8cabda88088623da
|
# coding: utf-8
from __future__ import unicode_literals, division
# This module implements new error handlers for QChem runs.
import os
from pymatgen.io.qchem.inputs import QCInput
from pymatgen.io.qchem.outputs import QCOutput
from custodian.custodian import ErrorHandler
from custodian.utils import backup
__author__ = "Samuel Blau, Brandon Wood, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Samuel Blau"
__email__ = "samblau1@gmail.com"
__status__ = "Alpha"
__date__ = "3/26/18"
__credits__ = "Xiaohui Qu"
class QChemErrorHandler(ErrorHandler):
"""
Master QChemErrorHandler class that handles a number of common errors
that occur during QChem runs.
"""
is_monitor = False
def __init__(self,
input_file="mol.qin",
output_file="mol.qout",
scf_max_cycles=200,
geom_max_cycles=200):
"""
Initializes the error handler from a set of input and output files.
Args:
input_file (str): Name of the QChem input file.
output_file (str): Name of the QChem output file.
scf_max_cycles (int): The max iterations to set to fix SCF failure.
geom_max_cycles (int): The max iterations to set to fix geometry
optimization failure.
"""
self.input_file = input_file
self.output_file = output_file
self.scf_max_cycles = scf_max_cycles
self.geom_max_cycles = geom_max_cycles
self.outdata = None
self.errors = []
self.opt_error_history = []
def check(self):
# Checks output file for errors.
self.outdata = QCOutput(self.output_file).data
self.errors = self.outdata.get("errors")
# If we aren't out of optimization cycles, but we were in the past, reset the history
if "out_of_opt_cycles" not in self.errors and len(self.opt_error_history) > 0:
self.opt_error_history = []
# If we're out of optimization cycles and we have unconnected fragments, no need to handle any errors
if "out_of_opt_cycles" in self.errors and self.outdata["structure_change"] == "unconnected_fragments":
return False
return len(self.errors) > 0
def correct(self):
backup({self.input_file, self.output_file})
actions = []
self.qcinp = QCInput.from_file(self.input_file)
if "SCF_failed_to_converge" in self.errors:
# Check number of SCF cycles. If not set or less than scf_max_cycles,
# increase to that value and rerun. If already set, check if
# scf_algorithm is unset or set to DIIS, in which case set to GDM.
# Otherwise, tell user to call SCF error handler and do nothing.
if str(self.qcinp.rem.get("max_scf_cycles")) != str(
self.scf_max_cycles):
self.qcinp.rem["max_scf_cycles"] = self.scf_max_cycles
actions.append({"max_scf_cycles": self.scf_max_cycles})
elif self.qcinp.rem.get("scf_algorithm", "diis").lower() == "diis":
self.qcinp.rem["scf_algorithm"] = "gdm"
actions.append({"scf_algorithm": "gdm"})
elif self.qcinp.rem.get("scf_algorithm", "gdm").lower() == "gdm":
self.qcinp.rem["scf_algorithm"] = "diis_gdm"
actions.append({"scf_algorithm": "diis_gdm"})
else:
print(
"More advanced changes may impact the SCF result. Use the SCF error handler"
)
elif "out_of_opt_cycles" in self.errors:
# Check number of opt cycles. If less than geom_max_cycles, increase
# to that value, set last geom as new starting geom and rerun.
if str(self.qcinp.rem.get(
"geom_opt_max_cycles")) != str(self.geom_max_cycles):
self.qcinp.rem["geom_opt_max_cycles"] = self.geom_max_cycles
actions.append({"geom_max_cycles:": self.scf_max_cycles})
if len(self.outdata.get("energy_trajectory")) > 1:
self.qcinp.molecule = self.outdata.get(
"molecule_from_last_geometry")
actions.append({"molecule": "molecule_from_last_geometry"})
# If already at geom_max_cycles, often can just get convergence by restarting
# from the geometry of the last cycle. But we'll also save any structural
# changes that happened along the way.
else:
self.opt_error_history += [self.outdata["structure_change"]]
if len(self.opt_error_history) > 1:
if self.opt_error_history[-1] == "no_change":
                        # If no structural changes occurred in two consecutive optimizations,
# and we still haven't converged, then just exit.
return {"errors": self.errors, "actions": None, "opt_error_history": self.opt_error_history}
self.qcinp.molecule = self.outdata.get("molecule_from_last_geometry")
actions.append({"molecule": "molecule_from_last_geometry"})
elif "unable_to_determine_lamda" in self.errors:
# Set last geom as new starting geom and rerun. If no opt cycles,
# use diff SCF strat? Diff initial guess? Change basis?
if len(self.outdata.get("energy_trajectory")) > 1:
self.qcinp.molecule = self.outdata.get(
"molecule_from_last_geometry")
actions.append({"molecule": "molecule_from_last_geometry"})
elif self.qcinp.rem.get("scf_algorithm", "diis").lower() == "diis":
self.qcinp.rem["scf_algorithm"] = "rca_diis"
actions.append({"scf_algorithm": "rca_diis"})
if self.qcinp.rem.get("gen_scfman"):
self.qcinp.rem["gen_scfman"] = False
actions.append({"gen_scfman": False})
else:
print(
"Use a different initial guess? Perhaps a different basis?"
)
elif "linear_dependent_basis" in self.errors:
# DIIS -> RCA_DIIS. If already RCA_DIIS, change basis?
if self.qcinp.rem.get("scf_algorithm", "diis").lower() == "diis":
self.qcinp.rem["scf_algorithm"] = "rca_diis"
actions.append({"scf_algorithm": "rca_diis"})
if self.qcinp.rem.get("gen_scfman"):
self.qcinp.rem["gen_scfman"] = False
actions.append({"gen_scfman": False})
else:
print("Perhaps use a better basis?")
elif "failed_to_transform_coords" in self.errors:
# Check for symmetry flag in rem. If not False, set to False and rerun.
# If already False, increase threshold?
if not self.qcinp.rem.get("sym_ignore") or self.qcinp.rem.get(
"symmetry"):
self.qcinp.rem["sym_ignore"] = True
self.qcinp.rem["symmetry"] = False
actions.append({"sym_ignore": True})
actions.append({"symmetry": False})
else:
print("Perhaps increase the threshold?")
elif "input_file_error" in self.errors:
print(
"Something is wrong with the input file. Examine error message by hand."
)
return {"errors": self.errors, "actions": None}
elif "failed_to_read_input" in self.errors:
# Almost certainly just a temporary problem that will not be encountered again. Rerun job as-is.
actions.append({"rerun job as-is"})
elif "IO_error" in self.errors:
# Almost certainly just a temporary problem that will not be encountered again. Rerun job as-is.
actions.append({"rerun job as-is"})
elif "read_molecule_error" in self.errors:
# Almost certainly just a temporary problem that will not be encountered again. Rerun job as-is.
actions.append({"rerun job as-is"})
elif "never_called_qchem" in self.errors:
# Almost certainly just a temporary problem that will not be encountered again. Rerun job as-is.
actions.append({"rerun job as-is"})
elif "unknown_error" in self.errors:
print("Examine error message by hand.")
return {"errors": self.errors, "actions": None}
else:
# You should never get here. If correct is being called then errors should have at least one entry,
# in which case it should have been caught by the if/elifs above.
print(
"If you get this message, something has gone terribly wrong!")
return {"errors": self.errors, "actions": None}
os.rename(self.input_file, self.input_file + ".last")
self.qcinp.write_file(self.input_file)
return {"errors": self.errors, "actions": actions}
class QChemSCFErrorHandler(ErrorHandler):
"""
QChem ErrorHandler class that addresses SCF non-convergence.
"""
is_monitor = False
def __init__(self,
input_file="mol.qin",
output_file="mol.qout",
rca_gdm_thresh=1.0E-3,
scf_max_cycles=200):
"""
Initializes the error handler from a set of input and output files.
Args:
input_file (str): Name of the QChem input file.
output_file (str): Name of the QChem output file.
rca_gdm_thresh (float): The threshold for the prior scf algorithm.
If last deltaE is larger than the threshold try RCA_DIIS
first, else, try DIIS_GDM first.
scf_max_cycles (int): The max iterations to set to fix SCF failure.
"""
self.input_file = input_file
self.output_file = output_file
self.scf_max_cycles = scf_max_cycles
        self.rca_gdm_thresh = rca_gdm_thresh
        self.qcinp = QCInput.from_file(self.input_file)
        self.outdata = None
        self.errors = None
def check(self):
# Checks output file for errors.
self.outdata = QCOutput(self.output_file).data
self.errors = self.outdata.get("errors")
return len(self.errors) > 0
def correct(self):
print("This hasn't been implemented yet!")
return {"errors": self.errors, "actions": None}
|
specter119/custodian
|
custodian/qchem/handlers.py
|
Python
|
mit
| 10,594
|
[
"pymatgen"
] |
c108ef12022b45102447eacf188a0dcb99dbee6f199219e8a564e1db087f63f5
|
#!/usr/bin/env python
#
# Copyright (c) 2014-2016 Apple Inc. All rights reserved.
# Copyright (C) 2015 Canon Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# Builtins generator templates, which can be filled with string.Template.
class BuiltinsGeneratorTemplates:
DefaultCopyright = "2016 Apple Inc. All rights reserved."
LicenseText = (
"""Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
""")
DoNotEditWarning = (
"""// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py""")
HeaderIncludeGuard = (
"""#pragma once""")
NamespaceTop = (
"""namespace ${namespace} {""")
NamespaceBottom = (
"""} // namespace ${namespace}""")
CombinedHeaderStaticMacros = (
"""#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, overriddenName, argumentCount) \\
JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
${macroPrefix}_FOREACH_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
#undef DECLARE_BUILTIN_GENERATOR""")
SeparateHeaderStaticMacros = (
"""#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, overriddenName, argumentCount) \\
JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
#undef DECLARE_BUILTIN_GENERATOR""")
CombinedJSCImplementationStaticMacros = (
"""
#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, overriddenName, argumentCount) \\
JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \\
{\\
return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
}
${macroPrefix}_FOREACH_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
#undef DEFINE_BUILTIN_GENERATOR
""")
SeparateJSCImplementationStaticMacros = (
"""
#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, overriddenName, argumentCount) \\
JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \\
{\\
return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
}
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
#undef DEFINE_BUILTIN_GENERATOR
""")
CombinedWebCoreImplementationStaticMacros = (
"""
#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, overriddenName, argumentCount) \\
JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \\
{\\
JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \\
return clientData->builtinFunctions().${objectNameLC}Builtins().codeName##Executable()->link(vm, clientData->builtinFunctions().${objectNameLC}Builtins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \\
}
${macroPrefix}_FOREACH_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
#undef DEFINE_BUILTIN_GENERATOR
""")
SeparateWebCoreImplementationStaticMacros = (
"""
#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, overriddenName, argumentCount) \\
JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \\
{\\
JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \\
return clientData->builtinFunctions().${objectNameLC}Builtins().codeName##Executable()->link(vm, clientData->builtinFunctions().${objectNameLC}Builtins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \\
}
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
#undef DEFINE_BUILTIN_GENERATOR
""")
SeparateHeaderWrapperBoilerplate = (
"""class ${objectName}BuiltinsWrapper : private JSC::WeakHandleOwner {
public:
explicit ${objectName}BuiltinsWrapper(JSC::VM* vm)
: m_vm(*vm)
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, overriddenName, length) , m_##name##Source(JSC::makeSource(StringImpl::createFromLiteral(s_##name, length), { }))
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
{
}
#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, overriddenName, length) \\
JSC::UnlinkedFunctionExecutable* name##Executable(); \\
const JSC::SourceCode& name##Source() const { return m_##name##Source; }
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
#undef EXPOSE_BUILTIN_EXECUTABLES
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
void exportNames();
private:
JSC::VM& m_vm;
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, overriddenName, length) \\
JSC::SourceCode m_##name##Source;\\
JSC::Weak<JSC::UnlinkedFunctionExecutable> m_##name##Executable;
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
#undef DECLARE_BUILTIN_SOURCE_MEMBERS
};
#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, overriddenName, length) \\
inline JSC::UnlinkedFunctionExecutable* ${objectName}BuiltinsWrapper::name##Executable() \\
{\\
if (!m_##name##Executable) {\\
JSC::Identifier executableName = functionName##PublicName();\\
if (overriddenName)\\
executableName = JSC::Identifier::fromString(&m_vm, overriddenName);\\
m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, executableName, s_##name##ConstructAbility), this, &m_##name##Executable);\\
}\\
return m_##name##Executable.get();\\
}
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
#undef DEFINE_BUILTIN_EXECUTABLES
inline void ${objectName}BuiltinsWrapper::exportNames()
{
#define EXPORT_FUNCTION_NAME(name) m_vm.propertyNames->appendExternalName(name##PublicName(), name##PrivateName());
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_FUNCTION_NAME(EXPORT_FUNCTION_NAME)
#undef EXPORT_FUNCTION_NAME
}""")
SeparateHeaderInternalFunctionsBoilerplate = (
"""class ${objectName}BuiltinFunctions {
public:
explicit ${objectName}BuiltinFunctions(JSC::VM& vm) : m_vm(vm) { }
void init(JSC::JSGlobalObject&);
void visit(JSC::SlotVisitor&);
public:
JSC::VM& m_vm;
#define DECLARE_BUILTIN_SOURCE_MEMBERS(functionName) \\
JSC::WriteBarrier<JSC::JSFunction> m_##functionName##Function;
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_SOURCE_MEMBERS)
#undef DECLARE_BUILTIN_SOURCE_MEMBERS
};
inline void ${objectName}BuiltinFunctions::init(JSC::JSGlobalObject& globalObject)
{
#define EXPORT_FUNCTION(codeName, functionName, overriddenName, length)\\
m_##functionName##Function.set(m_vm, &globalObject, JSC::JSFunction::create(m_vm, codeName##Generator(m_vm), &globalObject));
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(EXPORT_FUNCTION)
#undef EXPORT_FUNCTION
}
inline void ${objectName}BuiltinFunctions::visit(JSC::SlotVisitor& visitor)
{
#define VISIT_FUNCTION(name) visitor.append(m_##name##Function);
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_FUNCTION_NAME(VISIT_FUNCTION)
#undef VISIT_FUNCTION
}
""")
|
teamfx/openjfx-8u-dev-rt
|
modules/web/src/main/native/Source/JavaScriptCore/Scripts/builtins/builtins_templates.py
|
Python
|
gpl-2.0
| 9,799
|
[
"VisIt"
] |
7efacfb9fe9b06691704093bb0fe60e61f75ee012618f85003992bfbc98e0319
|
"""Define a graph of nodes-links-patches.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]
>>> graph = Graph((node_y, node_x))
>>> graph.x_of_node
array([ 0., 1., 2., 0., 1., 2., 0., 1., 2.])
>>> graph.y_of_node
array([ 0., 0., 0., 1., 1., 1., 2., 2., 2.])
>>> links = ((0, 1), (1, 2),
... (0, 3), (1, 4), (2, 5),
... (3, 4), (4, 5),
... (3, 6), (4, 7), (5, 8),
... (6, 7), (7, 8))
>>> graph = Graph((node_y, node_x), links=links)
>>> graph.nodes_at_link # doctest: +NORMALIZE_WHITESPACE
array([[0, 1], [1, 2],
[0, 3], [1, 4], [2, 5],
[3, 4], [4, 5],
[3, 6], [4, 7], [5, 8],
[6, 7], [7, 8]])
>>> graph.node_at_link_head
array([1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 7, 8])
>>> graph.node_at_link_tail
array([0, 1, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7])
>>> graph.links_at_node # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 2, -1, -1], [ 1, 3, 0, -1], [ 4, 1, -1, -1],
[ 5, 7, 2, -1], [ 6, 8, 5, 3], [ 9, 6, 4, -1],
[10, 7, -1, -1], [11, 10, 8, -1], [11, 9, -1, -1]])
>>> graph.link_dirs_at_node # doctest: +NORMALIZE_WHITESPACE
array([[-1, -1, 0, 0], [-1, -1, 1, 0], [-1, 1, 0, 0],
[-1, -1, 1, 0], [-1, -1, 1, 1], [-1, 1, 1, 0],
[-1, 1, 0, 0], [-1, 1, 1, 0], [ 1, 1, 0, 0]])
>>> patches = ((5, 3, 0, 2), (6, 4, 1, 3), (10, 8, 5, 7), (11, 9, 6, 8))
>>> graph = Graph((node_y, node_x), links=links, patches=patches)
>>> graph.links_at_patch
array([[ 3, 5, 2, 0],
[ 4, 6, 3, 1],
[ 8, 10, 7, 5],
[ 9, 11, 8, 6]])
>>> graph.nodes_at_patch
array([[4, 3, 0, 1],
[5, 4, 1, 2],
[7, 6, 3, 4],
[8, 7, 4, 5]])
"""
import six
from six.moves import range
import numpy as np
import xarray as xr
import json
from ..core.utils import as_id_array, argsort_points_by_x_then_y
from ..utils.jaggedarray import flatten_jagged_array
from ..utils.decorators import store_result_in_grid, read_only_array
from .sort import sort_graph, reindex_by_xy, reorder_links_at_patch
from .object.at_node import get_links_at_node
from .object.at_patch import get_nodes_at_patch
from .quantity.of_link import (get_angle_of_link, get_length_of_link,
get_midpoint_of_link)
from .quantity.of_patch import get_centroid_of_patch, get_area_of_patch
from .sort.sort import reverse_one_to_many, reorient_link_dirs
from .ugrid import update_nodes_at_patch, ugrid_from_unstructured
def _parse_sorting_opt(sorting):
SORTING_OPTS = ('xy', 'ccw', 'ne')
as_dict = None
if isinstance(sorting, bool):
        as_dict = dict([(opt, sorting) for opt in SORTING_OPTS])
    elif isinstance(sorting, dict):
        as_dict = dict(sorting.items())
        for opt in SORTING_OPTS:
            as_dict.setdefault(opt, True)
return as_dict
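# A small sketch (added illustration): _parse_sorting_opt normalizes the
# sorting argument into a dict with one boolean per sorting option.
def _demo_parse_sorting_opt():
    assert _parse_sorting_opt(True) == {'xy': True, 'ccw': True, 'ne': True}
    assert _parse_sorting_opt({'xy': False}) == {'xy': False, 'ccw': True,
                                                 'ne': True}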
def find_perimeter_nodes(graph):
"""Find nodes on the perimeter of a graph.
Uses a convex hull to locate the perimeter nodes of a graph.
Parameters
----------
graph : graph_like
A Graph of nodes (just requires *xy_of_node*).
Returns
-------
ndarray of int
Identifiers of the perimeter nodes.
"""
from scipy.spatial import ConvexHull
hull = ConvexHull(graph.xy_of_node, qhull_options='Qt')
return as_id_array(hull.vertices)
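# A hedged usage sketch (added illustration): on a small structured set of
# points the perimeter nodes are simply the convex-hull vertices.
def _demo_find_perimeter_nodes():
    graph = Graph(([0, 0, 0, 1, 1, 1, 2, 2, 2],
                   [0, 1, 2, 0, 1, 2, 0, 1, 2]))
    return find_perimeter_nodes(graph)   # ids of the outer (hull) nodes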
class thawed(object):
def __init__(self, graph):
self._graph = graph
self._initially_frozen = graph.frozen
def __enter__(self):
self._graph.thaw()
def __exit__(self, ex_type, ex_value, traceback):
if self._initially_frozen:
self._graph.freeze()
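# A hedged usage sketch (added illustration): `thawed` lets callers modify a
# frozen graph inside a with-block and restores the frozen state afterwards.
def _demo_thawed(graph):
    with graph.thawed():
        graph.ds['x_of_node'].values[0] += 0.0   # arrays are writable here
    # if the graph was frozen before, its arrays are read-only again here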
class Graph(object):
"""Define the connectivity of a graph of nodes, links, and patches."""
def __init__(self, mesh, **kwds):
"""Define a graph of connected nodes.
Parameters
----------
mesh : Dataset
xarray Dataset that defines the topology in ugrid format.
"""
if not isinstance(mesh, xr.Dataset):
node_y_and_x = mesh
links = kwds.get('links', None)
patches = kwds.get('patches', None)
mesh = ugrid_from_unstructured(node_y_and_x, links=links,
patches=patches)
self._ds = mesh
self._frozen = False
self.freeze()
if kwds.get('sort', True):
Graph.sort(self)
self._origin = (0., 0.)
@property
def frozen(self):
return self._frozen
def thawed(self):
return thawed(self)
def sort(self):
with self.thawed():
reorient_link_dirs(self)
sorted_nodes, sorted_links, sorted_patches = reindex_by_xy(self)
reorder_links_at_patch(self)
return sorted_nodes, sorted_links, sorted_patches
def freeze(self):
"""Freeze the graph by making arrays read-only."""
for var in self.ds:
self.ds[var].values.flags.writeable = False
self._frozen = True
def thaw(self):
"""Thaw the graph by making arrays writable."""
for var in self.ds:
self.ds[var].values.flags.writeable = True
self._frozen = False
def _add_variable(self, name, var, dims=None, attrs=None):
kwds = dict(data=var, dims=dims, attrs=attrs)
self.ds.update({name: xr.DataArray(**kwds)})
if self._frozen:
self.freeze()
@property
def ds(self):
return self._ds
def to_dict(self):
return self.ds.to_dict()
def to_json(self):
return json.dumps(self.ds.to_dict())
def to_netcdf(self, *args, **kwds):
"""Write graph contents to a netCDF file.
See xarray.Dataset.to_netcdf for a complete list of parameters.
Below are only the most common.
Parameters
----------
path : str, optional
Path to which to save this graph.
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any
existing file at this location will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', 'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
"""
self.ds.to_netcdf(*args, **kwds)
@classmethod
def from_netcdf(cls, fname):
return cls.from_dataset(xr.open_dataset(fname))
@classmethod
def from_dict(cls, meta):
return cls((meta['y_of_node'], meta['x_of_node']),
links=meta.get('nodes_at_link', None),
patches=meta.get('links_at_patch', None))
@classmethod
def load(cls, source):
if isinstance(source, six.string_types):
return cls.from_netcdf(source)
elif isinstance(source, (dict, xr.Dataset)):
return cls.from_dict(source)
else:
raise ValueError('source must be dict-like or NetCDF ({type})'.format(type=type(source)))
def __str__(self):
return str(self.ds)
def __repr__(self):
return repr(self.ds)
@property
def ndim(self):
return 2
@property
@store_result_in_grid()
@read_only_array
def xy_of_node(self):
"""Get x and y-coordinates of node.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = [0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1]
>>> graph = Graph((node_y, node_x))
>>> graph.xy_of_node[:, 0]
array([ 0., 1., 2., 0., 1., 2.])
>>> graph.xy_of_node[:, 1]
array([ 0., 0., 0., 1., 1., 1.])
"""
return np.stack((self.x_of_node, self.y_of_node)).T.copy()
@property
def x_of_node(self):
"""Get x-coordinate of node.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = [0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1]
>>> graph = Graph((node_y, node_x))
>>> graph.x_of_node
array([ 0., 1., 2., 0., 1., 2.])
"""
return self.ds['x_of_node'].values
@property
def y_of_node(self):
"""Get y-coordinate of node.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = [0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1]
>>> graph = Graph((node_y, node_x))
>>> graph.y_of_node
array([ 0., 0., 0., 1., 1., 1.])
"""
return self.ds['y_of_node'].values
@property
def nodes(self):
"""Get identifier for each node.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = [0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1]
>>> graph = Graph((node_y, node_x))
>>> graph.nodes
array([0, 1, 2, 3, 4, 5])
"""
return self.ds['node'].values
@property
@store_result_in_grid()
@read_only_array
def perimeter_nodes(self):
return find_perimeter_nodes(self)
@property
def number_of_nodes(self):
"""Get total number of nodes.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = [0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1]
>>> graph = Graph((node_y, node_x))
>>> graph.number_of_nodes == 6
True
"""
return self.ds.dims['node']
@property
def nodes_at_link(self):
"""Get nodes at either end of links.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = [0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2]
>>> links = ((0, 1), (1, 2),
... (0, 3), (1, 4), (2, 5),
... (3, 4), (4, 5),
... (3, 6), (4, 7), (5, 8),
... (6, 7), (7, 8))
>>> graph = Graph((node_y, node_x), links=links)
>>> graph.nodes_at_link # doctest: +NORMALIZE_WHITESPACE
array([[0, 1], [1, 2],
[0, 3], [1, 4], [2, 5],
[3, 4], [4, 5],
[3, 6], [4, 7], [5, 8],
[6, 7], [7, 8]])
"""
return self.ds['nodes_at_link'].values
@property
def node_at_link_tail(self):
"""Get nodes at link tail.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = [0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2]
>>> links = ((0, 1), (1, 2),
... (0, 3), (1, 4), (2, 5),
... (3, 4), (4, 5),
... (3, 6), (4, 7), (5, 8),
... (6, 7), (7, 8))
>>> graph = Graph((node_y, node_x), links=links)
>>> graph.node_at_link_tail
array([0, 1, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7])
"""
return self.nodes_at_link[:, 0]
@property
def node_at_link_head(self):
"""Get nodes at link head.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = [0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2]
>>> links = ((0, 1), (1, 2),
... (0, 3), (1, 4), (2, 5),
... (3, 4), (4, 5),
... (3, 6), (4, 7), (5, 8),
... (6, 7), (7, 8))
>>> graph = Graph((node_y, node_x), links=links)
>>> graph.node_at_link_head
array([1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 7, 8])
"""
return self.nodes_at_link[:, 1]
@property
def number_of_links(self):
"""Get nodes at link head.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = [0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2]
>>> links = ((0, 1), (1, 2),
... (0, 3), (1, 4), (2, 5),
... (3, 4), (4, 5),
... (3, 6), (4, 7), (5, 8),
... (6, 7), (7, 8))
>>> graph = Graph((node_y, node_x), links=links)
>>> graph.number_of_links == 12
True
"""
try:
return self.ds.dims['link']
except KeyError:
return 0
@property
def links_at_patch(self):
"""Get the links that define a patch.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = [0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2]
>>> links = ((0, 1), (1, 2),
... (0, 3), (1, 4), (2, 5),
... (3, 4), (4, 5),
... (3, 6), (4, 7), (5, 8),
... (6, 7), (7, 8))
>>> patches = ((0, 3, 5, 2), (1, 4, 6, 3))
>>> graph = Graph((node_y, node_x), links=links, patches=patches)
>>> graph.links_at_patch
array([[3, 5, 2, 0],
[4, 6, 3, 1]])
"""
return self.ds['links_at_patch'].values
@property
# @store_result_in_grid()
@read_only_array
def nodes_at_patch(self):
"""Get the nodes that define a patch.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = ([0, 1, 2, 0, 1, 2, 0, 1, 2],
... [0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> links = ((0, 1), (1, 2),
... (0, 3), (1, 4), (2, 5),
... (3, 4), (4, 5),
... (3, 6), (4, 7), (5, 8),
... (6, 7), (7, 8))
>>> patches = ((0, 3, 5, 2), (1, 4, 6, 3))
>>> graph = Graph((node_y, node_x), links=links, patches=patches)
>>> graph.nodes_at_patch
array([[4, 3, 0, 1],
[5, 4, 1, 2]])
"""
return get_nodes_at_patch(self)
@property
@store_result_in_grid()
@read_only_array
def patches_at_node(self):
"""Get the patches that touch each node.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = ([0, 1, 2, 0, 1, 2],
... [0, 0, 0, 1, 1, 1])
>>> links = ((0, 1), (1, 2),
... (0, 3), (1, 4), (2, 5),
... (3, 4), (4, 5))
>>> patches = ((0, 3, 5, 2), (1, 4, 6, 3))
>>> graph = Graph((node_y, node_x), links=links, patches=patches)
>>> graph.patches_at_node # doctest: +NORMALIZE_WHITESPACE
array([[ 0, -1], [ 0, 1], [ 1, -1],
[ 0, -1], [ 0, 1], [ 1, -1]])
"""
return reverse_one_to_many(self.nodes_at_patch)
@property
@store_result_in_grid()
@read_only_array
def patches_at_link(self):
"""Get the patches on either side of each link.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = ([0, 1, 2, 0, 1, 2],
... [0, 0, 0, 1, 1, 1])
>>> links = ((0, 1), (1, 2),
... (0, 3), (1, 4), (2, 5),
... (3, 4), (4, 5))
>>> patches = ((0, 3, 5, 2), (1, 4, 6, 3))
>>> graph = Graph((node_y, node_x), links=links, patches=patches)
>>> graph.patches_at_link # doctest: +NORMALIZE_WHITESPACE
array([[ 0, -1], [ 1, -1],
[ 0, -1], [ 0, 1], [ 1, -1],
[ 0, -1], [ 1, -1]])
"""
try:
return self.ds['patches_at_link'].values
except KeyError:
patches_at_link = xr.DataArray(
data=reverse_one_to_many(self.links_at_patch, min_counts=2),
dims=('link', 'Two'),
attrs={'cf_role': 'edge_node_connectivity',
'long_name': 'patches on either side of a link',
'start_index': 0})
self.ds.update({'patches_at_link': patches_at_link})
return self.ds['patches_at_link'].values
@property
def number_of_patches(self):
"""Get the number of patches.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = [0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2]
>>> links = ((0, 1), (1, 2),
... (0, 3), (1, 4), (2, 5),
... (3, 4), (4, 5),
... (3, 6), (4, 7), (5, 8),
... (6, 7), (7, 8))
>>> patches = ((0, 3, 5, 2), (1, 4, 6, 3))
>>> graph = Graph((node_y, node_x), links=links, patches=patches)
>>> graph.number_of_patches == 2
True
"""
try:
return self.ds.dims['patch']
except KeyError:
return 0
@property
def links_at_node(self):
"""Get links touching a node.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x = [0, 1, 2, 0, 1, 2, 0, 1, 2]
>>> node_y = [0, 0, 0, 1, 1, 1, 2, 2, 2]
>>> links = ((0, 1), (1, 2),
... (0, 3), (1, 4), (2, 5),
... (3, 4), (4, 5),
... (3, 6), (4, 7), (5, 8),
... (6, 7), (7, 8))
>>> graph = Graph((node_y, node_x), links=links)
>>> graph.links_at_node # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 2, -1, -1], [ 1, 3, 0, -1], [ 4, 1, -1, -1],
[ 5, 7, 2, -1], [ 6, 8, 5, 3], [ 9, 6, 4, -1],
[10, 7, -1, -1], [11, 10, 8, -1], [11, 9, -1, -1]])
"""
try:
return self._links_at_node
except AttributeError:
(self._links_at_node,
self._link_dirs_at_node) = self._create_links_and_dirs_at_node()
return self._links_at_node
def _create_links_and_dirs_at_node(self):
return get_links_at_node(self, sort=True)
# return get_links_at_node(self, sort=self._sorting['ccw'])
@property
def link_dirs_at_node(self):
"""Get directions of links touching a node.
Examples
--------
>>> from landlab.graph import Graph
>>> node_x, node_y = [0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2]
>>> links = ((0, 1), (1, 2),
... (0, 3), (1, 4), (2, 5),
... (3, 4), (4, 5),
... (3, 6), (4, 7), (5, 8),
... (6, 7), (7, 8))
>>> graph = Graph((node_y, node_x), links=links)
>>> graph.link_dirs_at_node # doctest: +NORMALIZE_WHITESPACE
array([[-1, -1, 0, 0], [-1, -1, 1, 0], [-1, 1, 0, 0],
[-1, -1, 1, 0], [-1, -1, 1, 1], [-1, 1, 1, 0],
[-1, 1, 0, 0], [-1, 1, 1, 0], [ 1, 1, 0, 0]])
"""
try:
return self._link_dirs_at_node
except AttributeError:
(self._links_at_node,
self._link_dirs_at_node) = self._create_links_and_dirs_at_node()
return self._link_dirs_at_node
@property
@store_result_in_grid()
@read_only_array
def angle_of_link(self):
"""Get the angle of each link.
Examples
--------
>>> import numpy as np
>>> from landlab.graph import Graph
>>> node_x, node_y = ([0, 1, 2, 0, 1, 2],
... [0, 0, 0, 1, 1, 1])
>>> links = ((0, 1), (1, 2),
... (0, 3), (1, 4), (2, 5),
... (3, 4), (4, 5))
>>> graph = Graph((node_y, node_x), links=links)
>>> graph.angle_of_link * 180. / np.pi
array([ 0., 0., 90., 90., 90., 0., 0.])
"""
return get_angle_of_link(self)
@property
@store_result_in_grid()
@read_only_array
def length_of_link(self):
"""Get the length of links.
Examples
--------
>>> import numpy as np
>>> from landlab.graph import UniformRectilinearGraph
>>> graph = UniformRectilinearGraph((2, 3), spacing=(1, 2))
>>> graph.length_of_link
array([ 2., 2., 1., 1., 1., 2., 2.])
"""
return get_length_of_link(self)
@property
@store_result_in_grid()
@read_only_array
def midpoint_of_link(self):
"""Get the middle of links.
Examples
--------
>>> import numpy as np
>>> from landlab.graph import UniformRectilinearGraph
>>> graph = UniformRectilinearGraph((2, 3), spacing=(1, 2))
>>> graph.midpoint_of_link # doctest: +NORMALIZE_WHITESPACE
array([[ 1. , 0. ], [ 3. , 0. ],
[ 0. , 0.5], [ 2. , 0.5], [ 4. , 0.5],
[ 1. , 1. ], [ 3. , 1. ]])
"""
return get_midpoint_of_link(self)
@property
@store_result_in_grid()
@read_only_array
def xy_of_link(self):
return get_midpoint_of_link(self)
@property
@store_result_in_grid()
@read_only_array
def xy_of_patch(self):
return get_centroid_of_patch(self)
@property
@store_result_in_grid()
@read_only_array
def area_of_patch(self):
return get_area_of_patch(self)
|
csherwood-usgs/landlab
|
landlab/graph/graph.py
|
Python
|
mit
| 22,050
|
[
"NetCDF"
] |
c72f82b3683d02ce3f9b034c858d04249df4cd8a1b8274279424bd754186e5dd
|
"""
Test of the classical LM model for language modelling
"""
from groundhog.datasets import LMIterator
from groundhog.trainer.SGD_momentum import SGD as SGD_m
from groundhog.trainer.SGD import SGD
from groundhog.mainLoop import MainLoop
from groundhog.layers import MultiLayer, \
RecurrentMultiLayer, \
RecurrentMultiLayerInp, \
RecurrentMultiLayerShortPath, \
RecurrentMultiLayerShortPathInp, \
RecurrentMultiLayerShortPathInpAll, \
SoftmaxLayer, \
LastState,\
UnaryOp, \
DropOp, \
Operator, \
Shift, \
GaussianNoise, \
SigmoidLayer
from groundhog.layers import maxpool, \
maxpool_ntimes, \
last, \
last_ntimes,\
tanh, \
sigmoid, \
rectifier,\
hard_sigmoid, \
hard_tanh
from groundhog.models import LM_Model
from theano.sandbox.scan import scan
import numpy
import theano
import theano.tensor as TT
linear = lambda x:x
rect = lambda x:TT.maximum(0., x)
theano.config.allow_gc = False
def get_text_data(state):
def out_format (x, y, r):
return {'x':x, 'y' :y, 'reset': r}
def out_format_valid (x, y, r):
return {'x':x, 'y' :y, 'reset': r}
train_data = LMIterator(
batch_size=state['bs'],
path = state['path'],
stop=-1,
seq_len = state['seqlen'],
mode="train",
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format,
can_fit=True)
valid_data = LMIterator(
batch_size=state['bs'],
path=state['path'],
stop=-1,
use_infinite_loop=False,
allow_short_sequences = True,
seq_len= state['seqlen'],
mode="valid",
reset =state['reset'],
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format_valid,
can_fit=True)
test_data = LMIterator(
batch_size=state['bs'],
path = state['path'],
stop=-1,
use_infinite_loop=False,
allow_short_sequences=True,
seq_len= state['seqlen'],
mode="test",
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format_valid,
can_fit=True)
if 'wiki' in state['path']:
test_data = None
return train_data, valid_data, test_data
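# A hedged sketch (added illustration; the concrete values below are
# placeholders, only the keys are the ones read by get_text_data above):
def demo_text_data_state():
    state = {'bs': 1,
             'seqlen': 100,
             'chunks': 'chars',
             'shift': 1,
             'reset': -1,
             'path': '/path/to/pentree_char_and_word.npz'}
    return get_text_data(state)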
def jobman(state, channel):
# load dataset
rng = numpy.random.RandomState(state['seed'])
# declare the dimensionalies of the input and output
if state['chunks'] == 'words':
state['n_in'] = 10000
state['n_out'] = 10000
else:
state['n_in'] = 50
state['n_out'] = 50
train_data, valid_data, test_data = get_text_data(state)
## BEGIN Tutorial
### Define Theano Input Variables
x = TT.lvector('x')
y = TT.lvector('y')
h0 = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
### Neural Implementation of the Operators: \oplus
#### Word Embedding
emb_words = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['inp_nhids']),
activation=eval(state['inp_activ']),
init_fn='sample_weights_classic',
weight_noise=state['weight_noise'],
rank_n_approx = state['rank_n_approx'],
scale=state['inp_scale'],
sparsity=state['inp_sparse'],
learn_bias = True,
bias_scale=eval(state['inp_bias']),
name='emb_words')
#### Deep Transition Recurrent Layer
rec = eval(state['rec_layer'])(
rng,
eval(state['nhids']),
activation = eval(state['rec_activ']),
#activation = 'TT.nnet.sigmoid',
bias_scale = eval(state['rec_bias']),
scale=eval(state['rec_scale']),
sparsity=eval(state['rec_sparse']),
init_fn=eval(state['rec_init']),
weight_noise=state['weight_noise'],
name='rec')
#### Stiching them together
##### (1) Get the embedding of a word
x_emb = emb_words(x, no_noise_bias=state['no_noise_bias'])
##### (2) Embedding + Hidden State via DT Recurrent Layer
reset = TT.scalar('reset')
rec_layer = rec(x_emb, n_steps=x.shape[0],
init_state=h0*reset,
no_noise_bias=state['no_noise_bias'],
truncate_gradient=state['truncate_gradient'],
batch_size=1)
## BEGIN Exercise: DOT-RNN
### Neural Implementation of the Operators: \lhd
#### Exercise (1)
#### Hidden state -> Intermediate Layer
emb_state = MultiLayer(
rng,
n_in=eval(state['nhids'])[-1],
n_hids=eval(state['dout_nhid']),
activation=linear,
init_fn=eval(state['dout_init']),
weight_noise=state['weight_noise'],
scale=state['dout_scale'],
sparsity=state['dout_sparse'],
learn_bias = True,
bias_scale=eval(state['dout_bias']),
name='emb_state')
#### Exercise (1)
#### Input -> Intermediate Layer
emb_words_out = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['dout_nhid']),
activation=linear,
init_fn='sample_weights_classic',
weight_noise=state['weight_noise'],
scale=state['dout_scale'],
sparsity=state['dout_sparse'],
rank_n_approx=state['dout_rank_n_approx'],
learn_bias = False,
bias_scale=eval(state['dout_bias']),
name='emb_words_out')
#### Hidden State: Combine emb_state and emb_words_out
#### Exercise (1)
outhid_activ = UnaryOp(activation=eval(state['dout_activ']))
#### Exercise (2)
outhid_dropout = DropOp(dropout=state['dropout'], rng=rng)
#### Softmax Layer
output_layer = SoftmaxLayer(
rng,
eval(state['dout_nhid']),
state['n_out'],
scale=state['out_scale'],
bias_scale=state['out_bias_scale'],
init_fn="sample_weights_classic",
weight_noise=state['weight_noise'],
sparsity=state['out_sparse'],
sum_over_time=True,
name='out')
### Few Optional Things
#### Direct shortcut from x to y
if state['shortcut_inpout']:
shortcut = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['inpout_nhids']),
activations=eval(state['inpout_activ']),
init_fn='sample_weights_classic',
weight_noise = state['weight_noise'],
scale=eval(state['inpout_scale']),
sparsity=eval(state['inpout_sparse']),
learn_bias=eval(state['inpout_learn_bias']),
bias_scale=eval(state['inpout_bias']),
name='shortcut')
#### Learning rate scheduling (1/(1+n/beta))
state['clr'] = state['lr']
def update_lr(obj, cost):
stp = obj.step
if isinstance(obj.state['lr_start'], int) and stp > obj.state['lr_start']:
time = float(stp - obj.state['lr_start'])
new_lr = obj.state['clr']/(1+time/obj.state['lr_beta'])
obj.lr = new_lr
if state['lr_adapt']:
rec.add_schedule(update_lr)
### Neural Implementations of the Language Model
#### Training
if state['shortcut_inpout']:
additional_inputs = [rec_layer, shortcut(x)]
else:
additional_inputs = [rec_layer]
##### Exercise (1): Compute the output intermediate layer
outhid = outhid_activ(emb_state(rec_layer) + emb_words_out(x))
##### Exercise (2): Apply Dropout
outhid = outhid_dropout(outhid)
train_model = output_layer(outhid,
no_noise_bias=state['no_noise_bias'],
additional_inputs=additional_inputs).train(target=y,
scale=numpy.float32(1./state['seqlen']))
nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
if state['carry_h0']:
train_model.updates += [(h0, nw_h0)]
#### Validation
h0val = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
rec_layer = rec(emb_words(x, use_noise=False),
n_steps = x.shape[0],
batch_size=1,
init_state=h0val*reset,
use_noise=False)
nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
##### Exercise (1): Compute the output intermediate layer
outhid = outhid_activ(emb_state(rec_layer) + emb_words_out(x))
##### Exercise (2): Apply Dropout
outhid = outhid_dropout(outhid, use_noise=False)
if state['shortcut_inpout']:
additional_inputs=[rec_layer, shortcut(x, use_noise=False)]
else:
additional_inputs=[rec_layer]
valid_model = output_layer(outhid,
additional_inputs=additional_inputs,
use_noise=False).validate(target=y, sum_over_time=True)
valid_updates = []
if state['carry_h0']:
valid_updates = [(h0val, nw_h0)]
valid_fn = theano.function([x,y, reset], valid_model.out,
name='valid_fn', updates=valid_updates)
#### Sampling
##### single-step sampling
def sample_fn(word_tm1, h_tm1):
x_emb = emb_words(word_tm1, use_noise = False, one_step=True)
h0 = rec(x_emb, state_before=h_tm1, one_step=True, use_noise=False)[-1]
outhid = outhid_dropout(outhid_activ(emb_state(h0, use_noise=False, one_step=True) +
emb_words_out(word_tm1, use_noise=False, one_step=True), one_step=True),
use_noise=False, one_step=True)
word = output_layer.get_sample(state_below=outhid, additional_inputs=[h0], temp=1.)
return word, h0
##### scan for iterating the single-step sampling multiple times
[samples, summaries], updates = scan(sample_fn,
states = [
TT.alloc(numpy.int64(0), state['sample_steps']),
TT.alloc(numpy.float32(0), 1, eval(state['nhids'])[-1])],
n_steps= state['sample_steps'],
name='sampler_scan')
##### build a Theano function for sampling
sample_fn = theano.function([], [samples],
updates=updates, profile=False, name='sample_fn')
##### Load a dictionary
dictionary = numpy.load(state['dictionary'])
if state['chunks'] == 'chars':
dictionary = dictionary['unique_chars']
else:
dictionary = dictionary['unique_words']
def hook_fn():
sample = sample_fn()[0]
print 'Sample:',
if state['chunks'] == 'chars':
print "".join(dictionary[sample])
else:
for si in sample:
print dictionary[si],
print
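# A minimal sketch of the decoding done in hook_fn above: sampled integer
# indices are mapped back to text through the dictionary array. The toy
# arrays in the comment are made up for illustration only.
def demo_decode(sample_idx, dict_array):
    # chunks == 'chars': join single characters into a string
    return "".join(dict_array[sample_idx])
# demo_decode(numpy.array([2, 0, 1]), numpy.array(list("abc"))) -> "cab"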
### Build and Train a Model
#### Define a model
model = LM_Model(
cost_layer = train_model,
weight_noise_amount=state['weight_noise_amount'],
valid_fn = valid_fn,
clean_before_noise_fn = False,
noise_fn = None,
rng = rng)
if state['reload']:
model.load(state['prefix']+'model.npz')
#### Define a trainer
##### Training algorithm (SGD)
if state['moment'] < 0:
algo = SGD(model, state, train_data)
else:
algo = SGD_m(model, state, train_data)
##### Main loop of the trainer
main = MainLoop(train_data,
valid_data,
test_data,
model,
algo,
state,
channel,
train_cost = False,
hooks = hook_fn,
validate_postprocess = eval(state['validate_postprocess']))
## Run!
main.main()
if __name__=='__main__':
state = {}
# complete path to data (cluster specific)
state['seqlen'] = 100
state['path']= "/data/lisa/data/PennTreebankCorpus/pentree_char_and_word.npz"
state['dictionary']= "/data/lisa/data/PennTreebankCorpus/dictionaries.npz"
state['chunks'] = 'chars'
state['seed'] = 123
# Flag; no need to change it. It says what to do if the cost becomes
# NaN: you could raise an exception, though I would leave it as is
state['on_nan'] = 'warn'
# DATA
# For Wikipedia the validation set is extremely large, which is very time
# wasteful. This value is only used for the validation set, and IMHO should
# be something like seqlen * 10000 (i.e. the validation should only be
# 10000 steps)
state['reset'] = -1
# For music / word level I think 50 is a good idea. For character level this
# should be at least 100 (I think there are problems with getting state
# of the art otherwise). Note most people use 200!
# The job stops when the learning rate declines to this value. It can be
# useful, because sometimes it is hopeless to wait for the validation error to
# get below minerr, or for the time to expire
state['minlr'] = float(5e-7)
# Layers
# Input
# Input weights are sampled from a gaussian with std=scale; this is the
# standard way to initialize
state['rank_n_approx'] = 0
state['inp_nhids'] = '[200]'
state['inp_activ'] = '[linear]'
state['inp_bias'] = '[0.]'
state['inp_sparse']= -1 # dense
state['inp_scale'] = .1
# This is for the output weights
state['out_scale'] = .1
state['out_bias_scale'] = -.5
state['out_sparse'] = -1
state['dout_nhid'] = '200'
state['dout_activ'] = '"TT.nnet.sigmoid"'
state['dout_sparse']= 20
state['dout_scale'] = 1.
state['dout_bias'] = '[0]'
state['dout_init'] = "'sample_weights'"
state['dout_rank_n_approx'] = 0
state['dropout'] = .5
# HidLayer
# Hidden units for the internal layers of the DT-RNN. Having a single
# value results in a standard RNN
state['nhids'] = '[100, 100]'
# Activation of each layer
state['rec_activ'] = '"TT.nnet.sigmoid"'
state['rec_bias'] = '.0'
state['rec_sparse'] ='20'
state['rec_scale'] = '1.'
# sample_weights - you rescale the weights such that the largest
# singular value is scale
# sample_weights_classic : just sample weights from a gaussian with std
# equal to scale
state['rec_init'] = "'sample_weights'"
state['rec_layer'] = 'RecurrentMultiLayerShortPathInpAll'
# SGD params
state['bs'] = 1 # the size of the minibatch
state['lr'] = 1. # initial learning rate
state['cutoff'] = 1. # threshold for gradient rescaling
state['moment'] = 0.995 #-.1 # momentum
# Do not optimize these
state['weight_noise'] = True # white Gaussian noise in weights
state['weight_noise_amount'] = 0.075 # standard deviation
# maximal number of updates
state['loopIters'] = int(1e8)
# maximal number of minutes to wait until killing job
state['timeStop'] = 48*60 # 48 hours
# Construct linear connections from input to output. These are factored
# (like the rank_n) to deal with the possible high dimensionality of the
# input, but it is a linear projection that feeds into the softmax
state['shortcut_inpout'] = False
state['shortcut_rank'] = 200
# Main Loop
# Make this a decently large value. Otherwise you waste a lot of
# memory keeping track of the training error (and other things) at each
# step, and the stdout becomes extremely large
state['trainFreq'] = 100
state['hookFreq'] = 5000
state['validFreq'] = 1000
state['saveFreq'] = 15 # save every 15 minutes
state['prefix'] = 'model_' # prefix of the save files
state['reload'] = False # reload
state['overwrite'] = 1
# Threshold should be 1.004 for PPL; for entropy (which is what
# everything returns) it should be much smaller. Running value is 1.0002.
# We should not hyperoptimize this
state['divide_lr'] = 2.
state['cost_threshold'] = 1.0002
state['patience'] = 1
state['validate_postprocess'] = 'lambda x:10**(x/numpy.log(10))'
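# The validate_postprocess entry above converts the per-symbol cost from nats
# into perplexity: 10 ** (x / ln 10) is algebraically exp(x). Quick check,
# assuming numpy is imported as elsewhere in this script:
demo_ppl = lambda x: 10 ** (x / numpy.log(10))
# demo_ppl(numpy.log(100.)) -> ~100.0, identical to numpy.exp(numpy.log(100.))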
state['truncate_gradient'] = 80 # truncated BPTT
state['lr_adapt'] = 0 # 1/(1 + n/n0) scheduling
state['lr_beta'] = 10*1900.
state['lr_start'] = 'on_error'
state['no_noise_bias'] = True # do not use weight noise for biases
state['carry_h0'] = True # carry over h0 across updates
state['sample_steps'] = 80
# Do not change these
state['minerr'] = -1
state['shift'] = 1 # n-step forward prediction
state['cutoff_rescale_length'] = False
jobman(state, None)
|
wavelets/GroundHog
|
scripts/DT_RNN_Tut_Ex.py
|
Python
|
bsd-3-clause
| 16,496
|
[
"Gaussian"
] |
b405704d2b3ffbed22bcc10f46ea817ec179eede50d635f7c83201ded5aec0f3
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Author: Pauli Virtanen, 2016
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
import operator
from .util import inf, nan, is_na
def compute_stats(samples, number):
"""
Statistical analysis of measured samples.
Parameters
----------
samples : list of float
List of total times (y) of benchmarks.
number : int
Repeat number for each sample.
Returns
-------
beta_hat : float
Estimated time per iteration
stats : dict
Information on statistics of the estimator.
"""
if len(samples) < 1:
return None, None
Y = list(samples)
# Median and quantiles
y_50, ci_50 = quantile_ci(Y, 0.5, alpha_min=0.99)
y_25 = quantile(Y, 0.25)
y_75 = quantile(Y, 0.75)
# If nonparametric CI estimation didn't give an estimate,
# use the credible interval of a bayesian posterior distribution.
a, b = ci_50
if (math.isinf(a) or math.isinf(b)) and len(Y) > 1:
# Compute posterior distribution for location, assuming
# exponential noise. The MLE is equal to the median.
c = LaplacePosterior(Y)
# Use the CI from that distribution to extend beyond sample
# bounds
if math.isinf(a):
a = min(c.ppf(0.01/2), min(Y))
if math.isinf(b):
b = max(c.ppf(1 - 0.01/2), max(Y))
ci_50 = (a, b)
# Produce results
result = y_50
stats = {'ci_99_a': ci_50[0],
'ci_99_b': ci_50[1],
'q_25': y_25,
'q_75': y_75,
'repeat': len(Y),
'number': number}
return result, stats
def get_err(result, stats):
"""
Return an 'error measure' suitable for informing the user
about the spread of the measurement results.
"""
a, b = stats['q_25'], stats['q_75']
return (b - a)/2
def get_weight(stats):
"""
Return a data point weight for the result.
"""
if stats is None or 'ci_99_a' not in stats or 'ci_99_b' not in stats:
return None
try:
a = stats['ci_99_a']
b = stats['ci_99_b']
return 2 / abs(b - a)
except ZeroDivisionError:
return None
def is_different(samples_a, samples_b, stats_a, stats_b, p_threshold=0.002):
"""Check whether the samples are statistically different.
If sample data is not provided, or the sample is too small, falls
back to a pessimistic CI-based check. If it returns True, then the
difference is statistically significant. If it returns False, it
might or might not be statistically significant.
Parameters
----------
samples_a, samples_b
Input samples
stats_a, stats_b
Input stats data
"""
if samples_a is not None and samples_b is not None:
# Raw data present: Mann-Whitney U test, but only if there's
# enough data so that the test can return True
a = [x for x in samples_a if not is_na(x)]
b = [x for x in samples_b if not is_na(x)]
p_min = 1 / binom(len(a) + len(b), min(len(a), len(b)))
if p_min < p_threshold:
_, p = mann_whitney_u(a, b)
return p < p_threshold
# If confidence intervals overlap, reject.
# Corresponds to a test with ill-specified threshold p-value,
# which generally can be significantly smaller than p <= 0.01
# depending on the actual data. For normal test (known variance),
# 0.00027 <= p <= 0.01.
ci_a = (stats_a['ci_99_a'], stats_a['ci_99_b'])
ci_b = (stats_b['ci_99_a'], stats_b['ci_99_b'])
if ci_a[1] >= ci_b[0] and ci_a[0] <= ci_b[1]:
return False
return True
def quantile_ci(x, q, alpha_min=0.01):
"""
Compute a quantile and a confidence interval.
Assumes independence, but otherwise nonparametric.
Parameters
----------
x : list of float
Samples
q : float
Quantile to compute, in [0,1].
alpha_min : float, optional
Limit for coverage.
The result has coverage >= 1 - alpha_min.
Returns
-------
m : float
Quantile of x
ci : tuple of floats
Confidence interval (a, b), of coverage >= alpha_min.
"""
y = sorted(x)
n = len(y)
alpha_min = min(alpha_min, 1 - alpha_min)
pa = alpha_min / 2
pb = 1 - pa
a = -inf
b = inf
# It's known that
#
# Pr[X_{(r)} < m < X_{(s)}] = Pr[r <= K <= s-1], K ~ Bin(n,p)
#
# where cdf(m) = p defines the quantile.
#
# Simplest median CI follows by picking r,s such that
#
# F(r;n,q) <= alpha/2
# F(s;n,q) >= 1 - alpha/2
#
# F(k;n,q) = sum(binom_pmf(n, j, q) for j in range(k))
#
# Then (y[r-1], y[s-1]) is a CI.
# If no such r or s exists, replace by +-inf.
F = 0
for k, yp in enumerate(y):
F += binom_pmf(n, k, q)
# F = F(k+1;n,q)
if F <= pa:
a = yp
if F >= pb:
b = yp
break
m = quantile(y, q)
return m, (a, b)
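# Standalone worked example of the order-statistic CI construction described in
# the comments inside quantile_ci above, for the median (q = 0.5) of n = 10
# samples at alpha = 0.05; plain math only, and the demo_* name is illustrative.
def demo_median_ci(n=10, q=0.5, alpha=0.05):
    def F(k):
        # F(k; n, q) = sum of the binomial pmf over j < k
        c = lambda j: math.factorial(n) // (math.factorial(j) * math.factorial(n - j))
        return sum(c(j) * q**j * (1 - q)**(n - j) for j in range(k))
    r = max(k for k in range(n + 1) if F(k) <= alpha / 2)
    s = min(k for k in range(n + 1) if F(k) >= 1 - alpha / 2)
    return r, s
# demo_median_ci() -> (2, 9): for 10 sorted samples, (y[1], y[8]) is a
# confidence interval for the median with coverage >= 95%.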
def quantile(x, q):
"""
Compute quantile/percentile of the data
Parameters
----------
x : list of float
Data set
q : float
Quantile to compute, 0 <= q <= 1
"""
if not 0 <= q <= 1:
raise ValueError("Invalid quantile")
y = sorted(x)
n = len(y)
z = (n - 1) * q
j = int(math.floor(z))
z -= j
if j == n - 1:
m = y[-1]
else:
m = (1 - z)*y[j] + z*y[j+1]
return m
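# Quick worked example of the interpolation above: for x = [1, 2, 3, 4] the
# fractional rank at q = 0.5 is z = 1.5, so the result lies halfway between
# y[1] and y[2].
assert quantile([1, 2, 3, 4], 0.5) == 2.5
assert quantile([1, 2, 3, 4], 0.25) == 1.75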
_mann_whitney_u_memo = {}
def mann_whitney_u(x, y, method='auto'):
"""
Mann-Whitney U test
Ties are handled conservatively, returning the least significant
tie breaking.
Parameters
----------
x, y : list of float
Samples to test
method : {'auto', 'exact', 'normal'}
Whether to compute the p-value exactly or via normal approximation.
The option 'auto' switches to approximation for sample size > 20.
Returns
-------
u : int
U-statistic
p : float
p-value for two-sided alternative
References
----------
.. [1] Mann & Whitney, Ann. Math. Statist. 18, 50 (1947).
.. [2] Gibbons & Chakraborti, "Nonparametric statistical inference". (2003)
"""
memo = _mann_whitney_u_memo
if len(memo) > 100000:
memo.clear()
m = len(x)
n = len(y)
if method == 'auto':
if max(m, n) > 20:
method = 'normal'
else:
method = 'exact'
u, ties = mann_whitney_u_u(x, y)
# Conservative tie breaking
if u <= m*n//2 and u + ties >= m*n//2:
ties = m*n//2 - u
ux1 = min(u, m*n - u)
ux2 = min(u + ties, m*n - (u + ties))
if ux1 >= ux2:
ux = ux1
else:
u = u + ties
ux = ux2
# Get p-value
if method == 'exact':
p1 = mann_whitney_u_cdf(m, n, ux, memo)
p2 = 1.0 - mann_whitney_u_cdf(m, n, max(m*n//2, m*n - ux - 1), memo)
p = p1 + p2
elif method == 'normal':
N = m + n
var = m*n*(N + 1) / 12
z = (ux - m*n/2) / math.sqrt(var)
cdf = 0.5 * math.erfc(-z / math.sqrt(2))
p = 2 * cdf
else:
raise ValueError("Unknown method {!r}".format(method))
return u, p
def mann_whitney_u_u(x, y):
u = 0
ties = 0
for xx in x:
for yy in y:
if xx > yy:
u += 1
elif xx == yy:
ties += 1
return u, ties
def mann_whitney_u_cdf(m, n, u, memo=None):
if memo is None:
memo = {}
cdf = 0
for uu in range(u + 1):
cdf += mann_whitney_u_pmf(m, n, uu, memo)
return cdf
def mann_whitney_u_pmf(m, n, u, memo=None):
if memo is None:
memo = {}
return mann_whitney_u_r(m, n, u, memo) / binom(m + n, m)
def mann_whitney_u_r(m, n, u, memo=None):
"""
Number of orderings in Mann-Whitney U test.
The PMF of U for samples of sizes (m, n) is given by
p(u) = r(m, n, u) / binom(m + n, m).
References
----------
.. [1] Mann & Whitney, Ann. Math. Statist. 18, 50 (1947).
"""
if u < 0:
value = 0
elif m == 0 or n == 0:
value = 1 if u == 0 else 0
else:
# Don't bother figuring out table construction, memoization
# sorts it out
if memo is None:
memo = {}
key = (m, n, u)
value = memo.get(key)
if value is not None:
return value
value = (mann_whitney_u_r(m, n - 1, u, memo)
+ mann_whitney_u_r(m - 1, n, u - n, memo))
memo[key] = value
return value
def binom_pmf(n, k, p):
"""Binomial pmf = (n choose k) p**k (1 - p)**(n - k)"""
if not (0 <= k <= n):
return 0
if p == 0:
return 1.0 * (k == 0)
elif p == 1.0:
return 1.0 * (k == n)
logp = math.log(p)
log1mp = math.log(1 - p)
return math.exp(lgamma(1 + n) - lgamma(1 + n - k) - lgamma(1 + k)
+ k*logp + (n - k)*log1mp)
_BERNOULLI = [1.0, -0.5, 0.166666666667, 0.0, -0.0333333333333, 0.0, 0.0238095238095]
def lgamma(x):
"""
Log gamma function. Only implemented at integers.
"""
if x <= 0:
raise ValueError("Domain error")
if x > 100:
# DLMF 5.11.1
r = 0.5 * math.log(2*math.pi) + (x - 0.5) * math.log(x) - x
for k in range(1, len(_BERNOULLI)//2 + 1):
r += _BERNOULLI[2*k] / (2*k*(2*k - 1) * x**(2*k - 1))
return r
# Fall back to math.factorial
int_x = int(x)
err_int = abs(x - int_x)
if err_int < 1e-12 * abs(x):
return math.log(math.factorial(int_x - 1))
# Would need full implementation
return nan
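# Quick check of lgamma above: at small integers it reduces to log((x - 1)!);
# for x > 100 the asymptotic Stirling + Bernoulli branch is used instead and
# agrees with the exact value to roughly 1e-12.
assert lgamma(6) == math.log(math.factorial(5))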
def binom(n, k):
"""
Binomial coefficient (n over k)
"""
n = operator.index(n)
k = operator.index(k)
if not 0 <= k <= n:
return 0
m = n + 1
num = 1
den = 1
for j in range(1, min(k, n - k) + 1):
num *= m - j
den *= j
return num // den
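# Worked example for mann_whitney_u above (all of its helpers are defined by
# this point): with x = [1, 2] and y = [3, 4] the U statistic is 0 and the
# exact two-sided p-value is 1/6 + 1/6 = 1/3. With only two samples per group
# no outcome can be significant, which is why is_different first checks the
# smallest attainable p-value (p_min) before running the test.
demo_u, demo_p = mann_whitney_u([1, 2], [3, 4])   # -> (0, ~0.333)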
class LaplacePosterior(object):
"""
Univariate distribution::
p(beta|y) = N [sum(|y_j - beta|)]**(-nu-1)
where N is the normalization factor.
Parameters
----------
y : list of float
Samples
nu : float, optional
Degrees of freedom. Default: len(y)-1
Notes
-----
This is the posterior distribution in the Bayesian model assuming
Laplace distributed noise::
p(y|beta,sigma) = N exp(- sum_j (1/sigma) |y_j - beta|)
p(sigma) ~ 1/sigma
nu = len(y) - 1
The MLE for beta is median(y).
Note that the same approach applied to a Gaussian model::
p(y|beta,sigma) = N exp(- sum_j 1/(2 sigma^2) (y_j - beta)^2)
results to::
p(beta|y) = N T(t, m-1); t = (beta - mean(y)) / (sstd(y) / sqrt(m))
where ``T(t, nu)`` is the Student t-distribution pdf, which then gives
the standard textbook formulas for the mean.
"""
def __init__(self, y, nu=None):
if len(y) == 0:
raise ValueError("empty input")
if nu is None:
self.nu = len(y) - 1
else:
self.nu = nu
# Sort input
y = sorted(y)
# Get location and scale so that data is centered at MLE, and
# the unnormalized PDF at MLE has amplitude ~ 1/nu.
#
# Proper scaling of inputs is important to avoid overflows
# when computing the unnormalized CDF integrals below.
self.mle = quantile(y, 0.5)
self._y_scale = sum(abs(yp - self.mle) for yp in y)
self._y_scale *= self.nu**(1/(self.nu + 1))
# Shift and scale
if self._y_scale != 0:
self.y = [(yp - self.mle)/self._y_scale for yp in y]
else:
self.y = [0 for yp in y]
self._cdf_norm = None
self._cdf_memo = {}
def _cdf_unnorm(self, beta):
"""
Unnormalized CDF of this distribution::
cdf_unnorm(b) = int_{-oo}^{b} 1/(sum_j |y - b'|)**(m+1) db'
"""
if beta != beta:
return beta
for k, y in enumerate(self.y):
if y > beta:
k0 = k
break
else:
k0 = len(self.y)
cdf = 0
nu = self.nu
# Save some work by memoizing intermediate results
if k0 - 1 in self._cdf_memo:
k_start = k0
cdf = self._cdf_memo[k0 - 1]
else:
k_start = 0
cdf = 0
# Do the integral piecewise, resolving the absolute values
for k in range(k_start, k0 + 1):
c = 2*k - len(self.y)
y = sum(self.y[k:]) - sum(self.y[:k])
if k == 0:
a = -inf
else:
a = self.y[k-1]
if k == k0:
b = beta
else:
b = self.y[k]
if c == 0:
term = (b - a) / y**(nu+1)
else:
term = 1/(nu*c) * ((a*c + y)**(-nu) - (b*c + y)**(-nu))
cdf += max(0, term) # avoid rounding error
if k != k0:
self._cdf_memo[k] = cdf
if beta == inf:
self._cdf_memo[len(self.y)] = cdf
return cdf
def _ppf_unnorm(self, cdfx):
"""
Inverse function for _cdf_unnorm
"""
# Find interval
for k in range(len(self.y) + 1):
if cdfx <= self._cdf_memo[k]:
break
# Invert on interval
c = 2*k - len(self.y)
y = sum(self.y[k:]) - sum(self.y[:k])
nu = self.nu
if k == 0:
term = cdfx
else:
a = self.y[k-1]
term = cdfx - self._cdf_memo[k-1]
if k == 0:
z = -nu*c*term
if z > 0:
beta = (z**(-1/nu) - y) / c
else:
beta = -inf
elif c == 0:
beta = a + term * y**(nu+1)
else:
z = (a*c + y)**(-nu) - nu*c*term
if z > 0:
beta = (z**(-1/nu) - y)/c
else:
beta = inf
if k < len(self.y):
beta = min(beta, self.y[k])
return beta
def pdf(self, beta):
"""
Probability distribution function
"""
return math.exp(self.logpdf(beta))
def logpdf(self, beta):
"""
Logarithm of probability distribution function
"""
if self._y_scale == 0:
return inf if beta == self.mle else -inf
beta = (beta - self.mle) / self._y_scale
if self._cdf_norm is None:
self._cdf_norm = self._cdf_unnorm(inf)
ws = sum(abs(yp - beta) for yp in self.y)
m = self.nu
return -(m+1)*math.log(ws) - math.log(self._cdf_norm) - math.log(self._y_scale)
def cdf(self, beta):
"""
Cumulative probability distribution function
"""
if self._y_scale == 0:
return 1.0*(beta > self.mle)
beta = (beta - self.mle) / self._y_scale
if self._cdf_norm is None:
self._cdf_norm = self._cdf_unnorm(inf)
return self._cdf_unnorm(beta) / self._cdf_norm
def ppf(self, cdf):
"""
Percent point function (inverse function for cdf)
"""
if cdf < 0 or cdf > 1.0:
return nan
if self._y_scale == 0:
return self.mle
if self._cdf_norm is None:
self._cdf_norm = self._cdf_unnorm(inf)
cdfx = min(cdf * self._cdf_norm, self._cdf_norm)
beta = self._ppf_unnorm(cdfx)
return beta * self._y_scale + self.mle
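# Minimal usage sketch of LaplacePosterior above: the MLE is the sample median,
# and ppf inverts cdf (up to rounding), which is how compute_stats extends the
# 99% interval via ppf(0.01/2) and ppf(1 - 0.01/2) when the nonparametric CI
# is unbounded.
demo_post = LaplacePosterior([1.0, 2.0, 10.0])
# demo_post.mle -> 2.0
# abs(demo_post.cdf(demo_post.ppf(0.25)) - 0.25) -> ~0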
|
qwhelan/asv
|
asv/statistics.py
|
Python
|
bsd-3-clause
| 15,797
|
[
"Gaussian"
] |
95a3dc9069b244fd12f56b344f40d3d95fab0cc44d83848630e57c1943babeef
|
# Example: How to prepare a new payment with the Mollie API, and render the QRcode.
#
import os
import time
from app import database_write, get_public_url
from mollie.api.client import Client
from mollie.api.error import Error
PUBLIC_URL = get_public_url()
def main():
try:
#
# Initialize the Mollie API library with your API key.
#
# See: https://www.mollie.com/dashboard/settings/profiles
#
api_key = os.environ.get("MOLLIE_API_KEY", "test_test")
mollie_client = Client()
mollie_client.set_api_key(api_key)
#
# Generate a unique webshop order id for this example. It is important to include this unique attribute
# in the redirectUrl (below) so a proper return page can be shown to the customer.
#
my_webshop_id = int(time.time())
#
# Payment parameters:
# amount Currency and value. This example creates a € 120,- payment.
# description Description of the payment.
# webhookUrl Webhook location, used to report when the payment changes state.
# redirectUrl Redirect location. The customer will be redirected there after the payment.
# metadata Custom metadata that is stored with the payment.
#
# include=... Request an optional QRcode from Mollie.
#
payment = mollie_client.payments.create(
{
"amount": {"currency": "EUR", "value": "120.00"},
"description": "My first API payment",
"webhookUrl": f"{PUBLIC_URL}02-webhook-verification",
"redirectUrl": f"{PUBLIC_URL}03-return-page?my_webshop_id={my_webshop_id}",
"metadata": {"my_webshop_id": str(my_webshop_id)},
},
include="details.qrCode",
)
#
# In this example we store the order with its payment status in a database.
#
data = {"status": payment.status}
database_write(my_webshop_id, data)
#
# Display the QRcode to the customer, to complete the payment.
#
qr_code = payment.details["qrCode"]
body = f"""
Use the QRcode or visit the checkout page to complete the payment.
<hr/>
<img src='{qr_code["src"]}' />
<hr/>
<a href="{payment.checkout_url}">Visit checkout page</a>.
"""
return body
except Error as err:
return f"API call failed: {err}"
if __name__ == "__main__":
print(main())
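#
# Rough companion sketch for the webhookUrl used above. Mollie calls that
# endpoint with the payment id once the status changes; the handler re-fetches
# the payment and stores the new status. This is only an outline: it assumes a
# Client configured as in main(), that the payment's metadata is returned on
# the fetched object, and that the surrounding framework delivers payment_id
# (e.g. from the webhook POST body). The real flow lives in
# 02-webhook-verification.py.
#
def on_webhook_sketch(mollie_client, payment_id):
    payment = mollie_client.payments.get(payment_id)
    database_write(payment.metadata["my_webshop_id"], {"status": payment.status})
    return "OK"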
|
mollie/mollie-api-python
|
examples/01-new-payment-using-qrcode.py
|
Python
|
bsd-2-clause
| 2,550
|
[
"VisIt"
] |
12060e8b41630d5a487d12790095b24baa963114d4753f93c8c33407b9efc626
|
"""
Infinite mixture model : A generalization of Bayesian mixture models
with an unspecified number of classes
"""
import numpy as np
from bgmm import BGMM, detsh
from scipy.special import gammaln
def co_labelling(z, kmax=None, kmin=None):
"""
return a sparse co-labelling matrix given the label vector z
Parameters
----------
z: array of shape(n_samples),
the input labels
kmax: int, optional,
considers only the labels strictly below kmax
kmin: int, optional,
considers only the labels strictly above kmin
Returns
-------
colabel: a sparse coo_matrix,
yields the co labelling of the data
i.e. c[i,j]= 1 if z[i]==z[j], 0 otherwise
"""
from scipy.sparse import coo_matrix
n = z.size
colabel = coo_matrix((n, n))
if kmax == None:
kmax = z.max() + 1
if kmin == None:
kmin = z.min() - 1
for k in np.unique(z):
if (k < kmax) & (k > kmin):
i = np.array(np.nonzero(z == k))
row = np.repeat(i, i.size)
col = np.ravel(np.tile(i, i.size))
data = np.ones((i.size) ** 2)
colabel = colabel + coo_matrix((data, (row, col)), shape=(n, n))
return colabel
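# Quick worked example of co_labelling above: for z = [0, 0, 1] the dense form
# of the returned matrix is
#   [[1, 1, 0],
#    [1, 1, 0],
#    [0, 0, 1]]
# i.e. ones exactly where z[i] == z[j].
demo_colabel = co_labelling(np.array([0, 0, 1]))   # demo_colabel.todense() gives the above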
class IMM(BGMM):
"""
The class implements the Infinite Gaussian Mixture model,
also known as the Dirichlet Process Mixture Model.
This is simply a generalization of Bayesian Gaussian Mixture Models
with an unknown number of classes.
"""
def __init__(self, alpha=.5, dim=1):
"""
Parameters
----------
alpha: float, optional,
the parameter for cluster creation
dim: int, optional,
the dimension of the data
Note: use the function set_priors() to set adapted priors
"""
self.dim = dim
self.alpha = alpha
self.k = 0
self.prec_type = 'full'
# initialize weights
self.weights = [1]
def set_priors(self, x):
""" Set the priors in order of having them weakly uninformative
this is from Fraley and raftery;
Journal of Classification 24:155-181 (2007)
Parameters
----------
x, array of shape (n_samples,self.dim)
the data used in the estimation process
"""
# a few parameters
small = 0.01
elshape = (1, self.dim, self.dim)
mx = np.reshape(x.mean(0), (1, self.dim))
dx = x - mx
vx = np.maximum(1.e-15, np.dot(dx.T, dx) / x.shape[0])
px = np.reshape(np.diag(1.0 / np.diag(vx)), elshape)
# set the priors
self._prior_means = mx
self.prior_means = mx
self.prior_weights = self.alpha
self._prior_scale = px
self.prior_scale = px
self._prior_dof = self.dim + 2
self.prior_dof = [self._prior_dof]
self._prior_shrinkage = small
self.prior_shrinkage = [self._prior_shrinkage]
# cache some pre-computations
self._dets_ = detsh(px[0])
self._dets = [self._dets_]
self._inv_prior_scale_ = np.reshape(np.linalg.inv(px[0]), elshape)
self.prior_dens = None
def set_constant_densities(self, prior_dens=None):
"""Set the null and prior densities as constant
(assuming a compact domain)
Parameters
----------
prior_dens: float, optional
constant for the prior density
"""
self.prior_dens = prior_dens
def sample(self, x, niter=1, sampling_points=None, init=False,
kfold=None, verbose=0):
"""sample the indicator and parameters
Parameters
----------
x: array of shape (n_samples, self.dim)
the data used in the estimation process
niter: int,
the number of iterations to perform
sampling_points: array of shape(nbpoints, self.dim), optional
points where the likelihood will be sampled
this defaults to x
kfold: int or array, optional,
parameter of cross-validation control
by default, no cross-validation is used
the procedure is faster but less accurate
verbose=0: verbosity mode
Returns
-------
likelihood: array of shape(nbpoints)
total likelihood of the model
"""
self.check_x(x)
if sampling_points == None:
average_like = np.zeros(x.shape[0])
else:
average_like = np.zeros(sampling_points.shape[0])
splike = self.likelihood_under_the_prior(sampling_points)
plike = self.likelihood_under_the_prior(x)
if init:
self.k = 1
z = np.zeros(x.shape[0])
self.update(x, z)
like = self.likelihood(x, plike)
z = self.sample_indicator(like)
for i in range(niter):
if kfold == None:
like = self.simple_update(x, z, plike)
else:
like = self.cross_validated_update(x, z, plike, kfold)
if sampling_points == None:
average_like += like
else:
average_like += np.sum(
self.likelihood(sampling_points, splike), 1)
average_like /= niter
return average_like
def simple_update(self, x, z, plike):
"""
One step in the sampling procedure (one data sweep),
without cross-validation
Parameters
----------
x: array of shape(n_samples, dim),
the input data
z: array of shape(n_samples),
the associated membership variables
plike: array of shape(n_samples),
the likelihood under the prior
Returns
-------
like: array of shape(n_samples),
the likelihood of the data
"""
like = self.likelihood(x, plike)
# standard + likelihood under the prior
# like has shape (x.shape[0], self.k+1)
z = self.sample_indicator(like)
# almost standard, but many new components can be created
self.reduce(z)
self.update(x, z)
return like.sum(1)
def cross_validated_update(self, x, z, plike, kfold=10):
"""
This is a step in the sampling procedure
that uses internal corss_validation
Parameters
----------
x: array of shape(n_samples, dim),
the input data
z: array of shape(n_samples),
the associated membership variables
plike: array of shape(n_samples),
the likelihood under the prior
kfold: int, or array of shape(n_samples), optional,
folds in the cross-validation loop
Returns
-------
like: array of shape(n_samples),
the (cross-validated) likelihood of the data
"""
n_samples = x.shape[0]
slike = np.zeros(n_samples)
if np.isscalar(kfold):
aux = np.argsort(np.random.rand(n_samples))
idx = - np.ones(n_samples).astype(np.int)
j = np.ceil(n_samples / kfold)
kmax = kfold
for k in range(kmax):
idx[aux[k * j:min(n_samples, j * (k + 1))]] = k
else:
if np.array(kfold).size != n_samples:
raise ValueError('kfold and x do not have the same size')
uk = np.unique(kfold)
np.random.shuffle(uk)
idx = np.zeros(n_samples).astype(np.int)
for i, k in enumerate(uk):
idx += (i * (kfold == k))
kmax = uk.max() + 1
for k in range(kmax):
test = np.zeros(n_samples).astype('bool')
test[idx == k] = 1
train = np.logical_not(test)
# remove a fraction of the data
# and re-estimate the clusters
z[train] = self.reduce(z[train])
self.update(x[train], z[train])
# draw the membership for the left-out data
alike = self.likelihood(x[test], plike[test])
slike[test] = alike.sum(1)
# standard + likelihood under the prior
# like has shape (x.shape[0], self.k+1)
z[test] = self.sample_indicator(alike)
# almost standard, but many new components can be created
return slike
def reduce(self, z):
"""Reduce the assignments by removing empty clusters and update self.k
Parameters
----------
z: array of shape(n),
a vector of membership variables changed in place
Returns
-------
z: the remapped values
"""
uz = np.unique(z[z > - 1])
for i, k in enumerate(uz):
z[z == k] = i
self.k = z.max() + 1
return z
def update(self, x, z):
""" Update function (draw a sample of the IMM parameters)
Parameters
----------
x array of shape (n_samples,self.dim)
the data used in the estimation process
z array of shape (n_samples), type = np.int
the corresponding classification
"""
# re-dimension the priors in order to match self.k
self.prior_means = np.repeat(self._prior_means, self.k, 0)
self.prior_dof = self._prior_dof * np.ones(self.k)
self.prior_shrinkage = self._prior_shrinkage * np.ones(self.k)
self._dets = self._dets_ * np.ones(self.k)
self._inv_prior_scale = np.repeat(self._inv_prior_scale_, self.k, 0)
# initialize some variables
self.means = np.zeros((self.k, self.dim))
self.precisions = np.zeros((self.k, self.dim, self.dim))
# proceed with the update
BGMM.update(self, x, z)
def update_weights(self, z):
"""
Given the allocation vector z, resample the weights parameter
Parameters
----------
z array of shape (n_samples), type = np.int
the allocation variable
"""
pop = np.hstack((self.pop(z), 0))
self.weights = pop + self.prior_weights
self.weights /= self.weights.sum()
def sample_indicator(self, like):
""" Sample the indicator from the likelihood
Parameters
----------
like: array of shape (nbitem,self.k)
component-wise likelihood
Returns
-------
z: array of shape(nbitem): a draw of the membership variable
Notes
-----
The behaviour is different from standard bgmm in that z can take
arbitrary values
"""
z = BGMM.sample_indicator(self, like)
z[z == self.k] = self.k + np.arange(np.sum(z == self.k))
return z
def likelihood_under_the_prior(self, x):
""" Computes the likelihood of x under the prior
Parameters
----------
x, array of shape (self.n_samples,self.dim)
Returns
-------
w, the likelihood of x under the prior model (unweighted)
"""
if self.prior_dens is not None:
return self.prior_dens * np.ones(x.shape[0])
a = self._prior_dof
tau = self._prior_shrinkage
tau /= (1 + tau)
m = self._prior_means
b = self._prior_scale
ib = np.linalg.inv(b[0])
ldb = np.log(detsh(b[0]))
scalar_w = np.log(tau / np.pi) * self.dim
scalar_w += 2 * gammaln((a + 1) / 2)
scalar_w -= 2 * gammaln((a - self.dim) / 2)
scalar_w -= ldb * a
w = scalar_w * np.ones(x.shape[0])
for i in range(x.shape[0]):
w[i] -= (a + 1) * np.log(detsh(ib + tau * (m - x[i:i + 1]) *
(m - x[i:i + 1]).T))
w /= 2
return np.exp(w)
def likelihood(self, x, plike=None):
"""
return the likelihood of the model for the data x
the values are weighted by the components weights
Parameters
----------
x: array of shape (n_samples, self.dim),
the data used in the estimation process
plike: array of shape (n_samples), optional,
the density of each point under the prior
Returns
-------
like, array of shape(nbitem,self.k)
component-wise likelihood
"""
if plike == None:
plike = self.likelihood_under_the_prior(x)
plike = np.reshape(plike, (x.shape[0], 1))
if self.k > 0:
like = self.unweighted_likelihood(x)
like = np.hstack((like, plike))
else:
like = plike
like *= self.weights
return like
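# Standalone sketch of the weight resampling rule used by IMM.update_weights
# above (numpy only): every occupied component gets (population + alpha) and
# the extra slot for a brand-new component gets alpha alone, before the
# weights are multiplied by the per-component likelihoods.
demo_pop = np.array([40., 10.])
demo_w = np.hstack((demo_pop, 0)) + .5
demo_w = demo_w / demo_w.sum()      # -> approx [0.786, 0.204, 0.010]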
class MixedIMM(IMM):
"""
Particular IMM with an additional null class.
The data is supplied together
with a sample-related probability of being under the null.
"""
def __init__(self, alpha=.5, dim=1):
"""
Parameters
----------
alpha: float, optional,
the parameter for cluster creation
dim: int, optional,
the dimension of the data
Note: use the function set_priors() to set adapted priors
"""
IMM.__init__(self, alpha, dim)
def set_constant_densities(self, null_dens=None, prior_dens=None):
"""
Set the null and prior densities as constant
(over a supposedly compact domain)
Parameters
----------
null_dens: float, optional
constant for the null density
prior_dens: float, optional
constant for the prior density
"""
self.null_dens = null_dens
self.prior_dens = prior_dens
def sample(self, x, null_class_proba, niter=1, sampling_points=None,
init=False, kfold=None, co_clustering=False, verbose=0):
"""
sample the indicator and parameters
Parameters
----------
x: array of shape (n_samples, self.dim),
the data used in the estimation process
null_class_proba: array of shape(n_samples),
the probability to be under the null
niter: int,
the number of iterations to perform
sampling_points: array of shape(nbpoints, self.dim), optional
points where the likelihood will be sampled
this defaults to x
kfold: int, optional,
parameter of cross-validation control
by default, no cross-validation is used
the procedure is faster but less accurate
co_clustering: bool, optional
if True,
return a model of data co-labelling across iterations
verbose=0: verbosity mode
Returns
-------
likelihood: array of shape(nbpoints)
total likelihood of the model
pproba: array of shape(n_samples),
the posterior of being in the null
(the posterior of null_class_proba)
coclust: only if co_clustering==True,
sparse_matrix of shape (n_samples, n_samples),
frequency of co-labelling of each sample pairs
across iterations
"""
self.check_x(x)
pproba = np.zeros(x.shape[0])
if sampling_points == None:
average_like = np.zeros(x.shape[0])
else:
average_like = np.zeros(sampling_points.shape[0])
splike = self.likelihood_under_the_prior(sampling_points)
plike = self.likelihood_under_the_prior(x)
if init:
self.k = 1
z = np.zeros(x.shape[0])
self.update(x, z)
like = self.likelihood(x, plike)
z = self.sample_indicator(like, null_class_proba)
if co_clustering:
from scipy.sparse import coo_matrix
coclust = coo_matrix((x.shape[0], x.shape[0]))
for i in range(niter):
if kfold == None:
like = self.simple_update(x, z, plike, null_class_proba)
else:
like, z = self.cross_validated_update(x, z, plike,
null_class_proba, kfold)
llike = self.likelihood(x, plike)
z = self.sample_indicator(llike, null_class_proba)
pproba += (z == - 1)
if co_clustering:
coclust = coclust + co_labelling(z, self.k, -1)
if sampling_points == None:
average_like += like
else:
average_like += np.sum(
self.likelihood(sampling_points, splike), 1)
average_like /= niter
pproba /= niter
if co_clustering:
coclust /= niter
return average_like, pproba, coclust
return average_like, pproba
def simple_update(self, x, z, plike, null_class_proba):
""" One step in the sampling procedure (one data sweep)
Parameters
----------
x: array of shape(n_samples, dim),
the input data
z: array of shape(n_samples),
the associated membership variables
plike: array of shape(n_samples),
the likelihood under the prior
null_class_proba: array of shape(n_samples),
prior probability to be under the null
Returns
-------
like: array of shape(n_samples),
the likelihood of the data under the H1 hypothesis
"""
like = self.likelihood(x, plike)
# standard + likelihood under the prior
# like has shape (x.shape[0], self.k+1)
z = self.sample_indicator(like, null_class_proba)
# almost standard, but many new components can be created
self.reduce(z)
self.update(x, z)
return like.sum(1)
def cross_validated_update(self, x, z, plike, null_class_proba, kfold=10):
"""
This is a step in the sampling procedure
that uses internal cross-validation
Parameters
----------
x: array of shape(n_samples, dim),
the input data
z: array of shape(n_samples),
the associated membership variables
plike: array of shape(n_samples),
the likelihood under the prior
kfold: int, optional, or array
number of folds in cross-validation loop
or set of indexes for the cross-validation procedure
null_class_proba: array of shape(n_samples),
prior probability to be under the null
Returns
-------
like: array of shape(n_samples),
the (cross-validated) likelihood of the data
z: array of shape(n_samples),
the associated membership variables
Notes
-----
When kfold is an array, there is an internal reshuffling to randomize
the order of updates
"""
n_samples = x.shape[0]
slike = np.zeros(n_samples)
if np.isscalar(kfold):
aux = np.argsort(np.random.rand(n_samples))
idx = - np.ones(n_samples).astype(np.int)
j = np.ceil(n_samples / kfold)
kmax = kfold
for k in range(kmax):
idx[aux[k * j:min(n_samples, j * (k + 1))]] = k
else:
if np.array(kfold).size != n_samples:
raise ValueError('kfold and x do not have the same size')
uk = np.unique(kfold)
np.random.shuffle(uk)
idx = np.zeros(n_samples).astype(np.int)
for i, k in enumerate(uk):
idx += (i * (kfold == k))
kmax = uk.max() + 1
for k in range(kmax):
# split at iteration k
test = np.zeros(n_samples).astype('bool')
test[idx == k] = 1
train = np.logical_not(test)
# remove a fraction of the data
# and re-estimate the clusters
z[train] = self.reduce(z[train])
self.update(x[train], z[train])
# draw the membership for the left-out data
alike = self.likelihood(x[test], plike[test])
slike[test] = alike.sum(1)
# standard + likelihood under the prior
# like has shape (x.shape[0], self.k+1)
z[test] = self.sample_indicator(alike, null_class_proba[test])
# almost standard, but many new components can be created
return slike, z
def sample_indicator(self, like, null_class_proba):
"""
sample the indicator from the likelihood
Parameters
----------
like: array of shape (nbitem,self.k)
component-wise likelihood
null_class_proba: array of shape(n_samples),
prior probability to be under the null
Returns
-------
z: array of shape(nbitem): a draw of the membership variable
Notes
-----
Here z=-1 encodes for the null class
"""
n = like.shape[0]
conditional_like_1 = ((1 - null_class_proba) * like.T).T
conditional_like_0 = np.reshape(null_class_proba *
self.null_dens, (n, 1))
conditional_like = np.hstack((conditional_like_0, conditional_like_1))
z = BGMM.sample_indicator(self, conditional_like) - 1
z[z == self.k] = self.k + np.arange(np.sum(z == self.k))
return z
def main():
""" Illustrative example of the behaviour of imm
"""
n = 100
dim = 2
alpha = .5
aff = np.random.randn(dim, dim)
x = np.dot(np.random.randn(n, dim), aff)
igmm = IMM(alpha, dim)
igmm.set_priors(x)
# warming
igmm.sample(x, niter=100, kfold=10)
print 'number of components: ', igmm.k
#
print 'number of components: ', igmm.k
if dim < 3:
from gmm import plot2D
plot2D(x, igmm, verbose=1)
return igmm
if __name__ == '__main__':
main()
|
bthirion/nipy
|
nipy/algorithms/clustering/imm.py
|
Python
|
bsd-3-clause
| 22,092
|
[
"Gaussian"
] |
ee4652332bb2c1e65cf5a02ee4d3695b47d5bc46d8cadc69938c4087ed7970b4
|
#
# create.py
#
# Python interface for the iRobot Create
#
# Zach Dodds dodds@cs.hmc.edu
# updated for SIGCSE 3/9/07
#create.py
#Written 2008-6-3 by Peter Mawhorter
#Based on pyCreate's create module... this is a minimal version of that code.
#create2.py
#Added code for wait for angle, wait for distance, wait for event (C.A. Berry) - 8/22/08
#Added code for sensor streaming, removed obsolete, unnecessary code (integrate odometery) (CAB) - 8/26/08
#create3.py
#Added the senseAndRetry code from prior versions
#Modified the moveTo(0 adn turnTo() functions to work with go instead of drive function
#NOTE: renamed back to create.py - 9/16/08
# v1.1 Added access to overcurrent; made sensor streaming optional in the Create constructor. This allows
# manual polling if desired. (M Boutell, 10/2/2008)
# v1.2 Added IR broadcast streaming functions, so a robot can send an IR signal
# over the omnidirectional IR LEDs. Also Create constructor now calls setLED,
# so the Create displays an amber light when the power is on.
# v2.0 Complete overhaul, removing many duplicate functions not used by students.
# Removed threaded sensor polling: less efficient if many sensors needed, but more predictable
# Added IR broadcast streaming using the scripting functionality of the OI,
# so it can stream while moving without needing an additional thread.
# v2.1 Added constants for array indices when sensors return an array
# v2.2 Added support for a simulator to connect via a network socket (RHIT Senior Project Team 9/12/09)
# v2.3 Added ability to retry getting sensor data.
# v3.0 (TODO: rename shutdown as disconnect)
version = 2.3
import serial
import socket
import math
import time
import select
import dummy_thread # thread libs needed to lock serial port during transmissions
from threading import *
# The Create's baudrate and timeout:
baudrate = 57600
timeout = 0.5
# some module-level definitions for the robot commands
START = chr(128) # already converted to bytes...
BAUD = chr(129) # + 1 byte
CONTROL = chr(130) # deprecated for Create
SAFE = chr(131)
FULL = chr(132)
POWER = chr(133)
SPOT = chr(134) # Same for the Roomba and Create
CLEAN = chr(135) # Clean button - Roomba
COVER = chr(135) # Cover demo - Create
MAX = chr(136) # Roomba
DEMO = chr(136) # Create
DRIVE = chr(137) # + 4 bytes
LEDS = chr(139) # + 3 bytes
SONG = chr(140) # + 2N+2 bytes, where N is the number of notes
PLAY = chr(141) # + 1 byte
SENSORS = chr(142) # + 1 byte
FORCESEEKINGDOCK = chr(143) # same on Roomba and Create
# the above command is called "Cover and Dock" on the Create
DRIVEDIRECT = chr(145) # Create only
STREAM = chr(148) # Create only
QUERYLIST = chr(149) # Create only
PAUSERESUME = chr(150) # Create only
WAITTIME = chr(155)#Added by CAB, time in 1 data byte, in 10ths of a second
WAITDIST = chr(156)#Added by CAB, distance in 16-bit signed in mm
WAITANGLE = chr(157)#Added by CAB, angle in 16-bit signed in degrees
WAITEVENT = chr(158)#Added by CAB, event in signed event number
# MB added these for scripting
DEFINE_SCRIPT = chr(152)
RUN_SCRIPT = chr(153)
# the four SCI modes
# the code will try to keep track of which mode the system is in,
# but this might not be 100% trivial...
OFF_MODE = 0
PASSIVE_MODE = 1
SAFE_MODE = 2
FULL_MODE = 3
# Command codes are opcodes sent to the Create via serial. They define the
# possible message types.
COMMANDS = {
"START":chr(128),
"BAUD":chr(129),
"MODE_PASSIVE":chr(128),
"MODE_SAFE":chr(131),
"MODE_FULL":chr(132),
"DEMO":chr(136),
"DEMO_COVER":chr(135),
"DEMO_COVER_AND_DOCK":chr(143),
"DEMO_SPOT":chr(134),
"DRIVE":chr(137),
"DRIVE_DIRECT":chr(145),
"LEDS":chr(139),
"SONG":chr(140),
"PLAY_SONG":chr(141),
"SENSORS":chr(142),
"QUERY_LIST":chr(149),
"STREAM":chr(148),
"PAUSE/RESUME_STREAM":chr(150),
"DIGITAL_OUTPUTS":chr(147),
"LOW_SIDE_DRIVERS":chr(138),
"PWM_LOW_SIDE_DRIVERS":chr(144),
"SEND_IR":chr(151),
# MB added these for scripting
"DEFINE_SCRIPT":chr(152),
"RUN_SCRIPT":chr(153)
}
#TODO: define the rest of the command codes in the SCI.
# Constants for array indices when sensors return an array
# Bumps and wheeldrops
WHEELDROP_CASTER = 0
WHEELDROP_LEFT = 1
WHEELDROP_RIGHT = 2
BUMP_LEFT = 3
BUMP_RIGHT = 4
# Buttons
BUTTON_ADVANCE = 0
BUTTON_PLAY = 1
# Overcurrents
LEFT_WHEEL = 0
RIGHT_WHEEL = 1
LD_2 = 2
LD_0 = 3
LD_1 = 4
# Use digital inputs
BAUD_RATE_CHANGE = 0
DIGITAL_INPUT_3 = 1
DIGITAL_INPUT_2 = 2
DIGITAL_INPUT_1 = 3
DIGITAL_INPUT_0 = 4
# Charging sources available
HOME_BASE = 0
INTERNAL_CHARGER = 1
# For the getSensor retry loop.
MIN_SENSOR_RETRIES = 2 # 1 s
RETRY_SLEEP_TIME = 0.5 # 500 ms
class SensorModule:
def __init__(self, packetID, parseMode, packetSize):
self.ID =packetID
self.interpret = parseMode
self.size = packetSize
# Sensor codes are used to ask for data along with a QUERY command.
SENSORS = {
"BUMPS_AND_WHEEL_DROPS":SensorModule(chr(7),"ONE_BYTE_UNPACK",1),
"WALL":SensorModule(chr(8),"ONE_BYTE_UNSIGNED",1),
"CLIFF_LEFT":SensorModule(chr(9),"ONE_BYTE_UNSIGNED",1),
"CLIFF_FRONT_LEFT":SensorModule(chr(10),"ONE_BYTE_UNSIGNED",1),
"CLIFF_FRONT_RIGHT":SensorModule(chr(11),"ONE_BYTE_UNSIGNED",1),
"CLIFF_RIGHT":SensorModule(chr(12),"ONE_BYTE_UNSIGNED",1),
"VIRTUAL_WALL":SensorModule(chr(13),"ONE_BYTE_UNSIGNED",1),
"OVERCURRENTS":SensorModule(chr(14),"ONE_BYTE_UNPACK",1),
"IR_BYTE":SensorModule(chr(17),"ONE_BYTE_UNSIGNED",1),
"BUTTONS":SensorModule(chr(18),"ONE_BYTE_UNPACK",1),
"DISTANCE":SensorModule(chr(19),"TWO_BYTE_SIGNED",2),
"ANGLE":SensorModule(chr(20),"TWO_BYTE_SIGNED",2),
"CHARGING_STATE":SensorModule(chr(21),"ONE_BYTE_UNSIGNED",1),
"VOLTAGE":SensorModule(chr(22),"TWO_BYTE_UNSIGNED",2),
"CURRENT":SensorModule(chr(23),"TWO_BYTE_SIGNED",2),
"BATTERY_TEMPERATURE":SensorModule(chr(24),"ONE_BYTE_SIGNED",1),
"BATTERY_CHARGE":SensorModule(chr(25),"TWO_BYTE_UNSIGNED",2),
"BATTERY_CAPACITY":SensorModule(chr(26),"TWO_BYTE_UNSIGNED",2),
"WALL_SIGNAL":SensorModule(chr(27),"TWO_BYTE_UNSIGNED",2),
"CLIFF_LEFT_SIGNAL":SensorModule(chr(28),"TWO_BYTE_UNSIGNED",2),
"CLIFF_FRONT_LEFT_SIGNAL":SensorModule(chr(29),"TWO_BYTE_UNSIGNED",2),
"CLIFF_FRONT_RIGHT_SIGNAL":SensorModule(chr(30),"TWO_BYTE_UNSIGNED",2),
"CLIFF_RIGHT_SIGNAL":SensorModule(chr(31),"TWO_BYTE_UNSIGNED",2),
"USER_DIGITAL_INPUTS":SensorModule(chr(32),"ONE_BYTE_UNPACK",1),
"USER_ANALOG_INPUT":SensorModule(chr(33),"TWO_BYTE_UNSIGNED",2),
"CHARGING_SOURCES_AVAILABLE":SensorModule(chr(34),"ONE_BYTE_UNSIGNED",1),
"OI_MODE":SensorModule(chr(35),"ONE_BYTE_UNSIGNED",1),
"SONG_NUMBER":SensorModule(chr(36),"ONE_BYTE_UNSIGNED",1),
"SONG_PLAYING":SensorModule(chr(37),"ONE_BYTE_UNSIGNED",1),
"NUMBER_OF_STREAM_PACKETS":SensorModule(chr(38),"ONE_BYTE_UNSIGNED",1),
"VELOCITY":SensorModule(chr(39),"TWO_BYTE_SIGNED",2),
"RADIUS":SensorModule(chr(40),"TWO_BYTE_SIGNED",2),
"RIGHT_VELOCITY":SensorModule(chr(41),"TWO_BYTE_SIGNED",2),
"LEFT_VELOCITY":SensorModule(chr(42),"TWO_BYTE_SIGNED",2)
}
# Interpretation codes are used to tell how to deal with the raw data from a sensor query
# Note a negative value implies one byte of data is being dealt with (also includes 0), a positive implies 2 bytes
INTERPRET = {
"ONE_BYTE_UNPACK":-1,
"ONE_BYTE_SIGNED":-2,
"ONE_BYTE_UNSIGNED":-3,
"NO_HANDLING":0,
"TWO_BYTE_SIGNED":1,
"TWO_BYTE_UNSIGNED":2
}
# some module-level functions for dealing with bits and bytes
#
def bytesOfR( r ):
""" for looking at the raw bytes of a sensor reply, r """
print('raw r is', r)
for i in range(len(r)):
print('byte', i, 'is', ord(r[i]))
print('finished with formatR')
def bitOfByte( bit, byte ):
""" returns a 0 or 1: the value of the 'bit' of 'byte' """
if bit < 0 or bit > 7:
print('Your bit of', bit, 'is out of range (0-7)')
print('returning 0')
return 0
return ((byte >> bit) & 0x01)
def toBinary( val, numBits ):
""" prints numBits digits of val in binary """
if numBits == 0: return
toBinary( val>>1 , numBits-1 )
print((val & 0x01)) # print least significant bit
def fromBinary( s ):
""" s is a string of 0's and 1's """
if s == '': return 0
lowbit = ord(s[-1]) - ord('0')
return lowbit + 2*fromBinary( s[:-1] )
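# Quick check of the helpers above: toBinary(5, 3) prints 1, 0, 1 (one bit per
# line, most significant bit first) and fromBinary inverts that reading.
assert fromBinary('101') == 5
assert fromBinary('1101') == 13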
def twosComplementInt1byte( byte ):
""" returns an int of the same value of the input
int (a byte), but interpreted in two's
complement
the output range should be -128 to 127
"""
# take everything except the top bit
topbit = bitOfByte( 7, byte )
lowerbits = byte & 127
if topbit == 1:
return lowerbits - (1 << 7)
else:
return lowerbits
def twosComplementInt2bytes( highByte, lowByte ):
""" returns an int which has the same value
as the twosComplement value stored in
the two bytes passed in
the output range should be -32768 to 32767
chars or ints can be input, both will be
truncated to 8 bits
"""
# take everything except the top bit
topbit = bitOfByte( 7, highByte )
lowerbits = highByte & 127
unsignedInt = lowerbits << 8 | (lowByte & 0xFF)
if topbit == 1:
# with sufficient thought, I've convinced
# myself of this... we'll see, I suppose.
return unsignedInt - (1 << 15)
else:
return unsignedInt
def toTwosComplement2Bytes( value ):
""" returns two bytes (ints) in high, low order
whose bits form the input value when interpreted in
two's complement
"""
# if positive or zero, it's OK
if value >= 0:
eqBitVal = value
# if it's negative, I think it is this
else:
eqBitVal = (1<<16) + value
return ( (eqBitVal >> 8) & 0xFF, eqBitVal & 0xFF )
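# Worked example of the two's-complement helpers above, which is how drive()
# and driveDirect() below pack signed velocities into bytes: -200 encodes as
# (1 << 16) - 200 = 65336 = 0xFF38, i.e. high byte 255 and low byte 56, and
# decoding those two bytes recovers -200.
assert toTwosComplement2Bytes(-200) == (255, 56)
assert twosComplementInt2bytes(255, 56) == -200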
def displayVersion():
print("pycreate version", version)
class CommunicationError(Exception):
'''
This error indicates that there was a problem communicating with the
Create. The string msg indicates what went wrong.
'''
def __init__(self, msg):
self.msg = msg
def __str__(self):
return str(self.msg)
def __repr__(self):
return "CommunicationError(" + repr(self.msg) + ")"
# ======================The CREATE ROBOT CLASS (modified by CAB 8/08)==========================
class Create:
""" the Create class is an abstraction of the iRobot Create's
SCI interface, including communication and a bit
of processing of the strings passed back and forth
when you create an object of type Create, the code
will try to open a connection to it - so, it will fail
if it's not attached!
"""
# TODO: check if we can start in other modes...
#======================== Starting up and Shutting Down================
def __init__(self, PORT, startingMode=SAFE_MODE, sim_mode = False):
""" the constructor which tries to open the
connection to the robot at port PORT
"""
# to do: find the shortest safe serial timeout value...
# to do: use the timeout to do more error checking than
# is currently done...
#
# the -1 here is because windows starts counting from 1
# in the hardware control panel, but not in pyserial, it seems
displayVersion()
# fields for simulator
self.in_sim_mode = False
self.sim_sock = None
self.sim_host = '127.0.0.1'
self.sim_port = 65000
self.maxSensorRetries = MIN_SENSOR_RETRIES
# if PORT is the string 'simulated' (or any string for the moment)
# we use our SRSerial class
self.comPort = PORT #we want to keep track of the port number for reconnect() calls
print('PORT is', PORT)
if type(PORT) == type('string'):
if PORT == 'sim':
self.init_sim_mode()
self.ser = None
else:
# for Mac/Linux - use whole port name
# print 'In Mac/Linux mode...'
self.ser = serial.Serial(PORT, baudrate=57600, timeout=0.5)
# otherwise, we try to open the numeric serial port...
if (sim_mode):
self.init_sim_mode()
else:
# print 'In Windows mode...'
try:
self.ser = serial.Serial(PORT-1, baudrate=57600, timeout=0.5)
if (sim_mode):
self.init_sim_mode()
except serial.SerialException:
print("unable to access the serial port - please cycle the robot's power")
# did the serial port actually open?
if self.in_sim_mode:
print("In simulator mode")
elif self.ser.isOpen():
print('Serial port did open on iRobot Create...')
else:
print('Serial port did NOT open, check the')
print(' - port number')
print(' - physical connection')
print(' - baud rate of the roomba (it\'s _possible_, if unlikely,')
print(' that it might be set to 19200 instead')
print(' of the default 57600 - removing and')
print(' reinstalling the battery should reset it.')
# define the class' Open Interface mode
self.sciMode = OFF_MODE
if (startingMode == SAFE_MODE):
print('Putting the robot into safe mode...')
self.toSafeMode()
time.sleep(0.3)
if (startingMode == FULL_MODE):
print('Putting the robot into full mode...')
self.toSafeMode()
time.sleep(0.3)
self.toFullMode()
self.serialLock = dummy_thread.allocate_lock()
#self.setLEDs(80,255,0,0) # MB: was 100, want more yellowish
def send(self, bytes1):
if self.in_sim_mode:
if self.ser:
self.ser.write( (bytes(bytes1, encoding = 'Latin-1')) )
#print(bytes1)
print (bytes(bytes1, encoding = 'Latin-1'))
self.sim_sock.send( (bytes(bytes1, encoding = 'Latin-1')) )
else:
self.ser.write( bytes1 )
def read(self, bytes):
message = ""
if self.in_sim_mode:
if self.ser:
self.ser.read( bytes )
message = self.sim_sock.recv( bytes )
else:
message = self.ser.read( bytes )
return str(message, encoding='Latin-1');
def init_sim_mode(self):
print('In simulated mode, connecting to simulator socket')
self.in_sim_mode = True # SRSerial('mapSquare.txt')
self.sim_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sim_sock.connect((self.sim_host,self.sim_port))
def reconnect(self,comPort):
'''
This method closes the existing connection and reestablishes it.
When things get bad, this is the only method of recovery.
'''
# Just in case it was stuck moving somewhere, stop the Create:
self.stop()
# Close the connection:
self._close()
# Reestablish the serial connection to the Create:
self.__init__(comPort)
self.start()
time.sleep(0.25) # The recommended 200ms+ pause after mode commands.
if (self.sciMode == SAFE_MODE):
print('Putting the robot into safe mode...')
self.toSafeMode()
time.sleep(0.3)
if (self.sciMode == FULL_MODE):
print('Putting the robot into full mode...')
self.toSafeMode()
time.sleep(0.3)
self.toFullMode()
time.sleep(.25) # The recommended 200ms+ pause after mode commands.
def start(self):
""" changes from OFF_MODE to PASSIVE_MODE """
self.send( START )
# they recommend 20 ms between mode-changing commands
time.sleep(0.25)
# change the mode we think we're in...
return
def shutdown(self):
'''
This method shuts down the connection to the Create, after first
stopping the Create and putting the Create into passive mode.
'''
self.stop()
self.__sendmsg(COMMANDS["MODE_PASSIVE"],'')
time.sleep(0.25) # The recommended 200ms+ pause after mode commands.
self.serialLock.acquire()
self.start() # send Create back to passive mode
time.sleep(0.1)
if self.in_sim_mode:
self.sim_sock.close()
else:
self.ser.close()
self.serialLock.release()
# MB: added back in as private method, since reconnect uses it.
def _close(self):
""" tries to shutdown the robot as kindly as possible, by
clearing any remaining odometric data
going to passive mode
closing the serial port
"""
self.serialLock.acquire()
self.start() # send Create back to passive mode
time.sleep(0.1)
self.ser.close()
self.serialLock.release()
return
def _closeSer(self):
""" just disconnects the serial port """
self.serialLock.acquire()
self.ser.close()
self.serialLock.release()
return
def _openSer(self):
self.serialLock.acquire()
""" opens the port again """
self.ser.open()
self.serialLock.release()
return
#=============================== Serial Communication
def __sendmsg(self, opcode, dataBytes):
'''
This method functions as the base of the protocol, sending a message
with a particular opcode and the given data bytes. opcode should be
a character; use the constants defined at the top of this file.
data_bytes must be a string, and should have the proper length
according to which opcode is used. See the Create serial protocol
manual for more details.
'''
#lock
self.serialLock.acquire() #note: blocking
successful = False
while not successful:
try:
self.send(opcode + dataBytes)
successful = True
except select.error:
pass
self.serialLock.release()
#unlock
def __sendOpCode(self, opcode):
'''
This method sends a message consisting of a single opcode and no data
bytes. opcode should be a character; use the constants defined at the
top of this file. See the Create serial protocol manual for more details.
'''
#lock
self.serialLock.acquire() #note: blocking
successful = False
while not successful:
try:
self.send(opcode)
successful = True
except select.error:
pass
self.serialLock.release()
#unlock
def __recvmsg(self, numBytes):
'''
This method is used internally for receiving data from the Create.
It blocks for at most timeout seconds, and then returns as a string
the bytes of the message received. It reads num_bytes bytes from the
serial connection. If no message exists, it returns the empty
string.
'''
#lock
self.serialLock.acquire()
successful = False
favor = None
while not successful:
try:
favor = self.read(numBytes)
successful = True
except select.error:
pass
self.serialLock.release()
#unlock
return favor
def __sendAndRecvMsg(self,opcode,dataSendBytes,numBytesExpected):
#lock
self.serialLock.acquire()
#send
successful = False
while not successful:
try:
self.send(opcode + dataSendBytes)
successful = True
except select.error:
pass
#wait?
#receive
successful = False
favor = None
while not successful:
try:
favor = self.read(numBytesExpected)
successful = True
except select.error:
pass
self.serialLock.release()
#unlock
return favor
#========================= Moving Around ================================================
def stop(self):
""" stop calls go(0,0) """
self.go(0,0)
def go( self, cmPerSec=0, degPerSec=0 ):
""" go(cmPerSec, degPerSec) sets the robot's linear velocity to
cmPerSec centimeters per second and its angular velocity to
degPerSec degrees per second
go() is equivalent to go(0,0)
"""
if cmPerSec == 0:
# just handle rotation
# convert to radians
radPerSec = math.radians(degPerSec)
# make sure the direction is correct
if radPerSec >= 0: dirstr = 'CCW'
else: dirstr = 'CW'
# compute the velocity, given that the robot's
# radius is 258mm/2.0
velMmSec = math.fabs(radPerSec) * (258.0/2.0)
# send it off to the robot
self.drive( velMmSec, 0, dirstr )
elif degPerSec == 0:
# just handle forward/backward translation
velMmSec = 10.0*cmPerSec
bigRadius = 32767
# send it off to the robot
self.drive( velMmSec, bigRadius )
else:
# move in the appropriate arc
radPerSec = math.radians(degPerSec)
velMmSec = 10.0*cmPerSec
radiusMm = velMmSec / radPerSec
# check for extremes
if radiusMm > 32767: radiusMm = 32767
if radiusMm < -32767: radiusMm = -32767
self.drive( velMmSec, radiusMm )
return
def driveDirect( self, leftCmSec=0, rightCmSec=0 ):
""" Go(cmpsec, degpsec) sets the robot's velocity to
cmpsec centimeters per second
degpsec degrees per second
Go() is equivalent to go(0,0)
"""
""" sends velocities of each wheel independently
left_cm_sec: left wheel velocity in cm/sec (capped at +- 50)
right_cm_sec: right wheel velocity in cm/sec (capped at +- 50)
"""
if leftCmSec < -50: leftCmSec = -50
if leftCmSec > 50: leftCmSec = 50
if rightCmSec < -50: rightCmSec = -50
if rightCmSec > 50: rightCmSec = 50
# convert to mm/sec, ensure we have integers
leftHighVal, leftLowVal = toTwosComplement2Bytes( int(leftCmSec*10) )
rightHighVal, rightLowVal = toTwosComplement2Bytes( int(rightCmSec*10) )
# send these bytes and set the stored velocities
byteList = (rightHighVal,rightLowVal,leftHighVal,leftLowVal)
if type(byteList) in (list, tuple, set):
temp = ''
for char in byteList:
temp += chr(char)
byteList = temp
self.__sendmsg(DRIVEDIRECT,byteList)
#self.send( DRIVEDIRECT )
#self.send( chr(rightHighVal) )
#self.send( chr(rightLowVal) )
#self.send( chr(leftHighVal) )
#self.send( chr(leftLowVal) )
return
def waitTime(self,seconds):
""" robot waits for the specified time to past (tenths of secs) before executing the next command (CAB)"""
timeVal= twosComplementInt1byte(int(seconds))
#send the command to the Creeate:
self.__sendmsg(WAITTIME,chr(timeVal))
def waitEvent(self,eventNumber):
""" robot waits for the specified event to happen before executing the next command (CAB)"""
eventVal= twosComplementInt1byte(int(eventNumber))
#Send the command to the Create:
self.__sendmsg(WAITEVENT,chr(eventVal))
def waitDistance(self,centimeters):
""" robot waits for the specified distance before executing the next command (CAB)"""
distInMm = 10*centimeters
distHighVal, distLowVal=toTwosComplement2Bytes( int(distInMm) )
#Send the command to the Create:
self.__sendmsg(WAITDIST,chr(distHighVal)+chr(distLowVal))
def waitAngle(self,degrees):
""" robot waits for the specified angle before executing the next command (CAB)"""
anglHighVal, anglLowVal=toTwosComplement2Bytes( int(degrees) )
# Send the command for data to the Create:
self.__sendmsg(WAITANGLE,chr(anglHighVal)+chr(anglLowVal))
def drive (self, roombaMmSec, roombaRadiusMm, turnDir='CCW'):
""" implements the drive command as specified
the turnDir should be either 'CW' or 'CCW' for
clockwise or counterclockwise - this is only
used if roombaRadiusMm == 0 (or rounds down to 0)
other drive-related calls are available
"""
# first, they should be ints
# in case they're being generated mathematically
if type(roombaMmSec) != type(42):
roombaMmSec = int(roombaMmSec)
if type(roombaRadiusMm) != type(42):
roombaRadiusMm = int(roombaRadiusMm)
# we check that the inputs are within limits
# if not, we cap them there
if roombaMmSec < -500:
roombaMmSec = -500
if roombaMmSec > 500:
roombaMmSec = 500
# if the radius is beyond the limits, we go straight
# it doesn't really seem to go straight, however...
if roombaRadiusMm < -2000:
roombaRadiusMm = 32768
if roombaRadiusMm > 2000:
roombaRadiusMm = 32768
# get the two bytes from the velocity
# these come back as numbers, so we will chr them
velHighVal, velLowVal = toTwosComplement2Bytes( roombaMmSec )
# get the two bytes from the radius in the same way
# note the special cases
if roombaRadiusMm == 0:
if turnDir == 'CW':
roombaRadiusMm = -1
else: # default is 'CCW' (turning left)
roombaRadiusMm = 1
radiusHighVal, radiusLowVal = toTwosComplement2Bytes( roombaRadiusMm )
#print 'bytes are', velHighVal, velLowVal, radiusHighVal, radiusLowVal
# send these bytes and set the stored velocities
byteList = (velHighVal,velLowVal,radiusHighVal,radiusLowVal)
if type(byteList) in (list, tuple, set):
temp = ''
for char in byteList:
temp += chr(char)
byteList = temp
self.__sendmsg(DRIVE,byteList)
#self.send( DRIVE )
#self.send( chr(velHighVal) )
#self.send( chr(velLowVal) )
#self.send( chr(radiusHighVal) )
#self.send( chr(radiusLowVal) )
#========================== SENSORS ==============================
def sensorDataIsOK(self):
'''Detects data incoherency. Returns false if incoherent ("sensor junk").'''
# Attempting to reconnect or shutdown the robot from within this
# function didn't work. Solution is to call the function using syntax:
# if not robot.sensorDataIsOK():
# robot.shutdown()
# return (exit before calling other robot code.)
time.sleep(1)
self.stop()
self.getSensor('DISTANCE')
distance = self.getSensor('DISTANCE')
#Both angle and distance should be ~0. If not, then the sensor was filled
#with junk initially, so we reconnect.
if abs(distance) > 10:
#self.reconnect(self.comPort)
time.sleep(1)
print("Sensors could not be validated.")
#self.shutdown()
return False
return True
def setMaxSensorTimeout(self, newTimeout):
''' Allows the user to wait longer for the robot
to return sensor data to the computer. Each retry takes 50 ms.'''
self.maxSensorRetries = newTimeout / RETRY_SLEEP_TIME
self.maxSensorRetries = max(self.maxSensorRetries, MIN_SENSOR_RETRIES)
def getSensor(self, sensorToRead):
'''Reads the value of the requested sensor from the robot and returns it.'''
# Send the request for data to the Create:
self.__sendmsg(COMMANDS["QUERY_LIST"],
chr(1) + SENSORS[sensorToRead].ID)
# Receive the reply:
# MB: Added ability to retry in case a user is querying the sensors
# while the robot is executing a wait command.
msg = self.__recvmsg(SENSORS[sensorToRead].size)
nRetries = 0
while len(msg) < SENSORS[sensorToRead].size and nRetries < self.maxSensorRetries:
# Serial receive appears to block for 0.5 sec, so we don't
# need to sleep
msg = self.__recvmsg(SENSORS[sensorToRead].size)
nRetries += 1
#print nRetries, "retries needed"
# Last resort: return None and force the user to deal with it,
# rather than crashing.
if len(msg) < SENSORS[sensorToRead].size:
#raise CommunicationError("Improper sensor query response length: ")
#self.close()
return None
msg_len = len(msg)
sensor_bytes = [ord(b) for b in msg[0:msg_len]]
return self._interpretSensor(sensorToRead,sensor_bytes)
def _interpretSensor(self, sensorToRead, raw_data):
'''interprets the raw binary data from a sensor into its appropriate form for use. This function is for internal use - DO NOT CALL'''
data = None
interpret = SENSORS[sensorToRead].interpret
if len(raw_data) < SENSORS[sensorToRead].size:
return None
if interpret == "ONE_BYTE_SIGNED":
data = self._getOneByteSigned(raw_data[0])
elif interpret == "ONE_BYTE_UNSIGNED":
data = self._getOneByteUnsigned(raw_data[0])
elif interpret == "TWO_BYTE_SIGNED":
data = self._getTwoBytesSigned(raw_data[0],raw_data[1])
elif interpret == "TWO_BYTE_UNSIGNED":
data = self._getTwoBytesUnsigned(raw_data[0],raw_data[1])
elif interpret == "ONE_BYTE_UNPACK":
if sensorToRead == "BUMPS_AND_WHEEL_DROPS":
data = self._getLower5Bits(raw_data[0])
elif sensorToRead == "BUTTONS":
data = self._getButtonBits(raw_data[0])
elif sensorToRead == "USER_DIGITAL_INPUTS":
data = self._getLower5Bits(raw_data[0])
if sensorToRead == "OVERCURRENTS":
data = self._getLower5Bits(raw_data[0])
elif interpret == "NO_HANDLING":
data = raw_data
return data
#======================= CARGO BAY OUTPUTS ==========================
def setDigitalOutputs(self, digOut2, digOut1, digOut0):
'''sets the digital output pins of the cargo bay connector to the specified value (1 or 0)'''
data_byte = int("00000"+str(digOut2)+str(digOut1)+str(digOut0),2)
self.__sendmsg(COMMANDS["DIGITAL_OUTPUTS"],chr(data_byte))
def setLowSideDrivers(self, driver2, driver1, driver0):
'''sets the low side driver output pins of the cargo bay connector to the specified value (1 or 0)'''
data_byte = int("00000"+str(driver2)+str(driver1)+str(driver0),2)
self.__sendmsg(COMMANDS["LOW_SIDE_DRIVERS"],chr(data_byte))
def setPWMLowSideDrivers(self, dutyCycle2, dutyCycle1, dutyCycle0):
'''sets the low side driver output pins of the cargo bay connector to the specified duty cycle (0 to 255)'''
self.__sendmsg(COMMANDS["PWM_LOW_SIDE_DRIVERS"],
chr(dutyCycle2)+chr(dutyCycle1)+chr(dutyCycle0))
def sendIR(self,byteValue):
''' send the requested byte out of low side driver 1 (pin 23 on Cargo Bay Connector) (0-255) '''
self.__sendmsg(COMMANDS["SEND_IR"], chr(byteValue))
def startIR(self,byteValue):
'''TODO: implement script send to begin sending passed value'''
"""Uses a script so that the robot can receive and perform other
commands concurrently. Alternative to threading. """
print("sending byte", byteValue)
byteList = chr(3); # # script has 3 bytes
byteList += COMMANDS["SEND_IR"]
byteList += chr(byteValue) # IR value
byteList += RUN_SCRIPT #(running at end of def sets up recursion)
self.__sendmsg(DEFINE_SCRIPT,byteList)
self.__sendOpCode(RUN_SCRIPT) #actually run the script
def stopIR(self):
'''TO DO: send null script to end IR streaming'''
"""Uses a script so that the robot can receive and perform other
commands concurrently. Alternative to threading. """
self.__sendmsg(DEFINE_SCRIPT, chr(0)) #define null script
#========================== LIGHTS ==================================
def setLEDs(self, powerColor, powerIntensity, play, advance ):
""" The setLEDs method sets each of the three LEDs, from left to right:
the power LED, the play LED, and the status LED.
The power LED at the left can display colors from green (0) to red (255)
and its intensity can be specified, as well. Hence, power_color and
power_intensity are values from 0 to 255. The other two LED inputs
should either be 0 (off) or 1 (on).
"""
# make sure we're within range...
if advance != 0: advance = 1
if play != 0: play = 1
try:
power = int(powerIntensity)
powercolor = int(powerColor)
except TypeError:
power = 128
powercolor = 128
print('Type exception caught in setLEDs in create.py')
print('Your powerColor or powerIntensity was not an integer.')
if power < 0: power = 0
if power > 255: power = 255
if powercolor < 0: powercolor = 0
if powercolor > 255: powercolor = 255
# create the first byte
#firstByteVal = (status << 4) | (spot << 3) | (clean << 2) | (max << 1) | dirtdetect
firstByteVal = (advance << 3) | (play << 1)
# send these as bytes
# print 'bytes are', firstByteVal, powercolor, power
self.send( LEDS )
self.send( chr(firstByteVal) )
self.send( chr(powercolor) )
self.send( chr(power) )
return
#==================== DEMOS ======================
def seekDock(self):
"""sends the force-seeking-dock signal """
self.demo(1)
def demo(self, demoNumber=-1):
""" runs one of the built-in demos for Create
if demoNumber is
<omitted> or
-1 stop current demo
0 wander the surrounding area
1 wander and dock, when the docking station is seen
2 wander a more local area
3 wander to a wall and then follow along it
4 figure 8
5 "wimp" demo: when pushed, move forward
when bumped, move back and away
6 home: will home in on a virtual wall, as
long as the back and sides of the IR receiver
are covered with tape
7 tag: homes in on sequential virtual walls
8 pachelbel: plays the first few notes of the canon in D
9 banjo: plays chord notes according to its cliff sensors
chord key is selected via the bumper
"""
if (demoNumber < -1 or demoNumber > 9):
demoNumber = -1 # stop current demo
self.send( DEMO )
if demoNumber < 0 or demoNumber > 9:
# invalid values are equivalent to stopping
self.send( chr(255) ) # -1
else:
self.send( chr(demoNumber) )
#==================== MUSIC ======================
def setSong(self, songNumber, noteList):
""" this stores a song to roomba's memory to play later
with the playSong command
songNumber must be between 0 and 15 (inclusive)
songDataList is a list of (note, duration) pairs (up to 16)
note is the midi note number, from 31 to 127
(outside this range, the note is a rest)
duration is from 0 to 255 in 1/64ths of a second
"""
# any notes to play?
if type(noteList) != type([]) and type(noteList) != type(()):
print('noteList was', noteList)
return
if len(noteList) < 1:
print('No data in the noteList')
return
if songNumber < 0: songNumber = 0
if songNumber > 15: songNumber = 15
# indicate that a song is coming
self.send( SONG )
self.send( chr(songNumber) )
L = min(len(noteList), 16)
self.send( chr(L) )
# loop through the notes, up to 16
for note in noteList[:L]:
# make sure it's a tuple, or else we rest for 1/4 second
if type(note) == type( () ):
#more error checking here!
self.send( chr(note[0]) ) # note number
self.send( chr(note[1]) ) # duration
else:
self.send( chr(30) ) # a rest note
self.send( chr(16) ) # 1/4 of a second
return
def playSong(self, noteList):
""" The input to <tt>playSong</tt> should be specified as a list
of pairs of ( note_number, note_duration ) format. Thus,
r.playSong( [(60,8),(64,8),(67,8),(72,8)] ) plays a quick C chord.
"""
# implemented by setting song #1 to the notes and then playing it
self.setSong(1, noteList)
self.playSongNumber(1)
def playSongNumber(self, songNumber):
""" plays song songNumber """
if songNumber < 0: songNumber = 0
if songNumber > 15: songNumber = 15
self.send( PLAY )
self.send( chr(songNumber) )
def playNote(self, noteNumber, duration, songNumber=0):
""" plays a single note as a song (at songNumber)
duration is in 64ths of a second (1-255)
the note number chart is on page 12 of the open interface manual
"""
# set the song
self.setSong(songNumber, [(noteNumber,duration)])
self.playSongNumber(songNumber)
#==================== Modes ======================
def toSafeMode(self):
""" changes the state (from PASSIVE_MODE or FULL_MODE)
to SAFE_MODE
"""
self.start()
time.sleep(0.03)
# now we're in PASSIVE_MODE, so we repeat the above code...
self.send( SAFE )
# they recommend 20 ms between mode-changing commands
time.sleep(0.03)
# change the mode we think we're in...
self.sciMode = SAFE_MODE
# no response here, so we don't get any...
return
def toFullMode(self):
""" changes the state from PASSIVE to SAFE to FULL_MODE
"""
self.start()
time.sleep(0.03)
self.toSafeMode()
time.sleep(0.03)
self.send( FULL )
time.sleep(0.03)
self.sciMode = FULL_MODE
return
#==================== Class Level Math functions =============
def _getButtonBits( self, r ):
""" r is one byte as an integer """
return [ bitOfByte(2,r), bitOfByte(0,r) ]
def _getLower5Bits( self, r ):
""" r is one byte as an integer """
return [ bitOfByte(4,r), bitOfByte(3,r), bitOfByte(2,r), bitOfByte(1,r), bitOfByte(0,r) ]
def _getOneBit( self, r ):
""" r is one byte as an integer """
if r == 1: return 1
else: return 0
def _getOneByteSigned( self, r ):
""" r is one byte as a signed integer """
return twosComplementInt1byte( r )
def _getOneByteUnsigned( self, r ):
""" r is one byte as an integer """
return r
def _getTwoBytesSigned( self, r1, r2 ):
""" r1, r2 are two bytes as a signed integer """
return twosComplementInt2bytes( r1, r2 )
def _getTwoBytesUnsigned( self, r1, r2 ):
""" r1, r2 are two bytes as an unsigned integer """
return r1 << 8 | r2
def _rawSend( self, listofints ):
for x in listofints:
self.send( chr(x) )
def _rawRecv( self ):
nBytesWaiting = self.ser.inWaiting()
#print 'nBytesWaiting is', nBytesWaiting
r = self.read(nBytesWaiting)
r = [ ord(x) for x in r ]
#print 'r is', r
return r
def _rawRecvStr( self ):
nBytesWaiting = self.ser.inWaiting()
#print 'nBytesWaiting is', nBytesWaiting
r = self.ser.read(nBytesWaiting)
return r
def getMode(self):
""" returns one of OFF_MODE, PASSIVE_MODE, SAFE_MODE, FULL_MODE """
# but how right is it?
return self.sciMode
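#=============================== Example Usage (illustrative) ===============
# A minimal sketch, not part of the original file: it assumes `robot` is an
# already-connected instance of the Create interface class defined above, and
# the speeds and sleep times below are arbitrary choices. `time` is already
# imported by this module (it is used in toSafeMode above).
def _exampleDriveUsage(robot):
""" hedged example: enter safe mode, drive straight, arc, then stop """
robot.toSafeMode()
robot.go(20, 0) # straight ahead at 20 cm/sec
time.sleep(2)
robot.go(10, 15) # arc: 10 cm/sec while turning at 15 deg/sec
time.sleep(2)
robot.stop()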
if __name__ == '__main__':
displayVersion()
|
squirly/turtlecore-web-controller
|
web_control/create.py
|
Python
|
mit
| 42,747
|
[
"Amber"
] |
3eed87ea72768cc5ee9be191e041519f008a5caad88e509995b97f77f56e0f97
|
"""Spectral index distributions."""
import numpy as np
def constant(value=-1.4, shape=1):
"""Good for adopting a single value."""
return np.full(shape, value).astype(np.float32)
def gauss(mean=-1.4, std=1, shape=1):
"""Generate spectral indices from a Gaussian distribution.
Args:
mean (float): Mean spectral index
std (float): Spread of spectral index
shape (tuple): Required array shape
Returns:
array: spectral indices
"""
return np.random.normal(mean, std, shape).astype(np.float32)
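# Illustrative usage sketch (not part of the original module); the sample
# size and spread below are arbitrary choices.
def example_draws(n=5):
"""Hedged example: return n constant indices and n Gaussian-drawn indices."""
fixed = constant(value=-1.4, shape=n)
drawn = gauss(mean=-1.4, std=0.5, shape=n)
return fixed, drawn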
|
davidgardenier/frbpoppy
|
frbpoppy/si_dists.py
|
Python
|
mit
| 554
|
[
"Gaussian"
] |
d6d41729907469b8d1eafb66d9ccb25b7de8bdb04d332972012958cb38ed1abb
|
#!/usr/bin/env python
""" move base utils
modified from movebasesquare
ren ye 2016-10-19
"""
import rospy
import actionlib
from actionlib_msgs.msg import *
from geometry_msgs.msg import Pose, Point, Quaternion, Twist, Vector3
from sensor_msgs.msg import NavSatFix
from nav_msgs.msg import Odometry
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import tf
from geographiclib.geodesic import Geodesic
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from visualization_msgs.msg import Marker
from math import radians, pi, sin, cos, sqrt
class MoveBaseUtil():
x0, y0, yaw0 = 0, 0, 0
lat, lon = 0, 0
cancel_id = ""
def __init__(self, nodename="nav_test", is_newnode=True):
if is_newnode:
rospy.init_node(nodename, anonymous=False)
rate = rospy.Rate(10)
else:
rate = None
self.base_frame = rospy.get_param("~base_frame", "base_link")
self.fixed_frame = rospy.get_param("~fixed_frame", "map")
# tf_listener
self.tf_listener = tf.TransformListener()
#rospy.on_shutdown(self.shutdown)
# * get parameters
# * Create a list to hold the target quaternions (orientations)
# * Create a list to hold the waypoint poses
# * create angles
# * convert the angles to quaternions
# * Append each of the four waypoints to the list. Each waypoint
# is a pose consisting of a position and orientation in the map frame.
# Initialize the visualization markers for RViz
self.init_markers()
self.odom_received = False
# rospy.wait_for_message("/odom", Odometry)
# rospy.Subscriber("/odom", Odometry, self.odom_callback, queue_size=50)
rospy.wait_for_message("/odometry/filtered/global", Odometry)
rospy.Subscriber("/odometry/filtered/global", Odometry, self.odom_callback, queue_size=50)
while not self.odom_received:
rospy.sleep(1)
rospy.Subscriber("move_base/cancel", GoalID, self.cancel_callback, queue_size=5)
# * Set a visualization marker at each waypoint
# * Publisher to manually control the robot (e.g. to stop it, queue_size=5)
# self.cmd_vel_pub = rospy.Publisher('move_base_cmd_vel', Twist, queue_size=5)
self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5)
# * Subscribe to the move_base action server
self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
# * Wait 60 seconds for the action server to become available
rospy.loginfo("Waiting for move_base action server...")
self.move_base.wait_for_server(rospy.Duration(60))
rospy.loginfo("Connected to move base server")
rospy.loginfo("Starting navigation test")
# * Cycle through the four waypoints
def get_tf(self, fixed_frame, base_frame):
""" transform from base_link to map """
trans_received = False
while not trans_received:
try:
(trans, rot) = self.tf_listener.lookupTransform(fixed_frame,
base_frame,
rospy.Time(0))
trans_received = True
return (Point(*trans), Quaternion(*rot))
except (tf.LookupException,
tf.ConnectivityException,
tf.ExtrapolationException):
pass
def transform_tf(self, x_target_base, y_target_base, yaw_target_base, fixed_frame, base_frame):
""" get the (x, y) wrt fixed frame from (x, y) wrt base frame"""
# (x, y, yaw) of the base frame wrt fixed frame
(trans, rot) = self.get_tf(fixed_frame, base_frame)
x_base_fixed, y_base_fixed = trans.x, trans.y
_, _, yaw_base_fixed = tf.transformations.euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))
# get the point wrt fixed
# final vector = fixed vector + rot_mat * base vector
x_target_fixed, y_target_fixed = x_base_fixed + \
cos(yaw_base_fixed) * x_target_base - sin(yaw_base_fixed) * y_target_base, \
y_base_fixed + \
sin(yaw_base_fixed) * x_target_base + cos(yaw_base_fixed) * y_target_base
yaw_target_fixed = yaw_target_base + yaw_base_fixed
return [x_target_fixed, y_target_fixed, yaw_target_fixed]
def convert_gps_to_absolute(self, lat, lon):
""" get current gps point of the boat,
calculate the distance and heading to the target point
remap to map frame """
# calculate distance and azimuth (angle between distance and north)
result = Geodesic.WGS84.Inverse(self.lat, self.lon, lat, lon)
r = result['s12']
azi = result['azi1'] * pi / 180.0
theta = pi / 2 - azi # wrt map's x axis
# print "r and theta", r, theta
center = [self.x0 + r * cos(theta), self.y0 + r * sin(theta), 0]
heading = theta
return [center, heading]
def navsat_fix_callback(self, msg):
""" callback navsat """
self.lat = msg.latitude
self.lon = msg.longitude
self.fix_received = True
def odom_callback(self, msg):
trans, rot = self.get_tf("map", "base_link")
self.x0 = trans.x
self.y0 = trans.y
_, _, self.yaw0 = euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))
self.odom_received=True
# """ call back to subscribe, get odometry data:
# pose and orientation of the current boat,
# suffix 0 is for origin """
# x0 = msg.pose.pose.position.x
# y0 = msg.pose.pose.position.y
# # self.x0 = msg.pose.pose.position.x
# # self.y0 = msg.pose.pose.position.y
# x = msg.pose.pose.orientation.x
# y = msg.pose.pose.orientation.y
# z = msg.pose.pose.orientation.z
# w = msg.pose.pose.orientation.w
# # _, _, self.yaw0 = euler_from_quaternion((x, y, z, w))
# _, _, yaw0 = euler_from_quaternion((x, y, z, w))
# # get odom to map transform
# self.x0, self.y0, self.yaw0 = self.transform_tf(x0, y0, yaw0, "map", "odom")
# rospy.loginfo([self.x0, self.y0, self.yaw0])
def cancel_callback(self, msg):
self.cancel_id = msg.id
print self.cancel_id
# rospy.loginfo(self.cancel_id)
def convert_relative_to_absolute(self, coordinate):
""" boat's tf is base_link
target is polar (r, theta) wrt base_link
need to spawn waypoint (x1, y1) at map
1. calculate target (xtb, ytb) wrt base_link by trigonometry
2. tf transform from base_link to map
3. calculate target (x1, y1) wrt map by vector calculus:
(x1, y1) = (xb, yb) + rot_mat*(xtb, ytb)
where rot_mat = [cos theta, -sin theta; sin theta, cos theta]
"""
# wrt base_link
# theta is the angle between base_link's x axis and r
# print "x0, y0, yaw0", self.x0, self.y0, self.yaw0
r, theta = coordinate
# print r, theta
x_target_base, y_target_base = r * cos(theta), r * sin(theta)
# print "target base", x_target_base, y_target_base
x_target_rot, y_target_rot = \
cos(self.yaw0) * x_target_base - sin(self.yaw0) * y_target_base, \
sin(self.yaw0) * x_target_base + cos(self.yaw0) * y_target_base
heading = theta + self.yaw0
center = [self.x0 + x_target_rot, self.y0 + y_target_rot, 0]
# print "target rot", x_target_rot, y_target_rot
return [center, heading]
def move(self, goal, mode, mode_param):
""" mode1: continuous movement function, mode_param is the distance from goal that will set the next goal
mode2: stop and rotate mode, mode_param is rotational angle in rad
mode3: normal stop in each waypoint mode, mode_param is unused """
# Send the goal pose to the MoveBaseAction server
self.move_base.send_goal(goal)
finished_within_time = True
go_to_next = False
if mode == 1: # continuous movement function, mode_param is the distance from goal that will set the next goal
# (trans, _) = self.get_tf()
while sqrt((self.x0 - goal.target_pose.pose.position.x) ** 2 +
(self.y0 - goal.target_pose.pose.position.y) ** 2) > mode_param:
rospy.sleep(rospy.Duration(1))
# (trans, _) = self.get_tf()
go_to_next = True
elif mode == 2: # stop and rotate mode, mode_param is rotational angle in rad
finished_within_time = self.move_base.wait_for_result(rospy.Duration(40 * 1))
self.rotation(mode_param)
self.rotation(-2 * mode_param)
self.rotation(mode_param)
else: # normal stop in each waypoint mode, mode_param is unused
finished_within_time = self.move_base.wait_for_result(rospy.Duration(60 * 1))
# If we don't get there in time, abort the goal
if not finished_within_time or go_to_next:
self.move_base.cancel_goal()
rospy.loginfo("Goal cancelled, next...")
else:
# We made it!
state = self.move_base.get_state()
if state == GoalStatus.SUCCEEDED:
rospy.loginfo("Goal succeeded!")
def rotation(self, ang):
rate = rospy.Rate(10)
an_vel = 0.1
duration = ang / an_vel
msg = Twist(Vector3(0.0, 0.0, 0.0), Vector3(0.0, 0.0, an_vel))
rate.sleep()
start_time = rospy.get_time()
while not rospy.is_shutdown():
current_time = rospy.get_time()
if (current_time - start_time) > duration:
self.cmd_vel_pub.publish(Twist(Vector3(0, 0.0, 0.0), Vector3(0.0, 0.0, -2 * an_vel)))
self.cmd_vel_pub.publish(Twist())
break
else:
self.cmd_vel_pub.publish(msg)
rate.sleep()
def reverse_tf(self, distance=5, speed=-1):
""" reverse to certain distance """
rate = rospy.Rate(10)
linear_speed = speed
if linear_speed > 0:
linear_speed = -1 * linear_speed
move_cmd = Twist()
# Set the movement command to forward motion
move_cmd.linear.x = linear_speed
# Get the starting position values
# (position, rotation) = self.get_tf()
# x_start = position.x
# y_start = position.y
rate.sleep()
x_start, y_start = self.x0, self.y0
print x_start, y_start
# Keep track of the distance traveled
d = 0
# Enter the loop to move along a side
while d < distance and not rospy.is_shutdown():
# Publish the Twist message and sleep 1 cycle
self.cmd_vel_pub.publish(move_cmd)
rate.sleep()
# Get the current position
# (position, rotation) = self.get_tf()
# Compute the Euclidean distance from the start
d = sqrt(pow((self.x0 - x_start), 2) +
pow((self.y0 - y_start), 2))
# print d
# Stop the robot before the rotation
move_cmd = Twist()
self.cmd_vel_pub.publish(move_cmd)
rospy.sleep(1)
def reverse_time(self, duration=5, speed=-1):
""" full reverse with a duration """
rate = rospy.Rate(10)
msg = Twist(Vector3(speed, 0.0, 0.0), Vector3(0.0, 0.0, 0.0))
rate.sleep()
start_time = rospy.get_time()
while not rospy.is_shutdown():
current_time = rospy.get_time()
if (current_time - start_time) > duration:
self.cmd_vel_pub.publish(Twist())
break
else:
self.cmd_vel_pub.publish(msg)
rate.sleep()
def init_markers(self):
# Set up our waypoint markers
marker_scale = 0.2
marker_lifetime = 0 # 0 is forever
marker_ns = 'waypoints'
marker_id = 0
marker_color = {'r': 1.0, 'g': 0.7, 'b': 1.0, 'a': 1.0}
# Define a marker publisher.
self.marker_pub = rospy.Publisher('waypoint_markers', Marker, queue_size=5)
# Initialize the marker points list.
self.markers = Marker()
self.markers.ns = marker_ns
self.markers.id = marker_id
# self.markers.type = Marker.ARROW
self.markers.type = Marker.CUBE_LIST
self.markers.action = Marker.ADD
self.markers.lifetime = rospy.Duration(marker_lifetime)
self.markers.scale.x = marker_scale
self.markers.scale.y = marker_scale
self.markers.scale.z = marker_scale
self.markers.color.r = marker_color['r']
self.markers.color.g = marker_color['g']
self.markers.color.b = marker_color['b']
self.markers.color.a = marker_color['a']
self.markers.header.frame_id = 'odom'
self.markers.header.stamp = rospy.Time.now()
self.markers.points = list()
def shutdown(self):
rospy.loginfo("Stopping the robot...")
# Cancel any active goals
self.move_base.cancel_goal()
rospy.sleep(2)
# Stop the robot
self.cmd_vel_pub.publish(Twist())
rospy.sleep(1)
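# --- Illustrative usage sketch (not part of the original file) ---
# Assumes `util` is a connected MoveBaseUtil instance; the polar target
# (2 m ahead, 0.5 rad to the left of base_link) is an arbitrary example.
def example_relative_goal(util, r=2.0, theta=0.5):
""" hedged example: turn a polar target wrt base_link into a map-frame
MoveBaseGoal and send it with the normal stop-at-waypoint mode (mode 3) """
center, heading = util.convert_relative_to_absolute((r, theta))
q = quaternion_from_euler(0, 0, heading)
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = util.fixed_frame
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose = Pose(Point(center[0], center[1], 0.0), Quaternion(*q))
util.move(goal, 3, None)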
if __name__ == "__main__":
util = MoveBaseUtil()
util.reverse_tf()
|
ron1818/Singaboat_RobotX2016
|
robotx_nav/nodes/move_base_util.py
|
Python
|
gpl-3.0
| 13,497
|
[
"xTB"
] |
3fa575789f887092ea58fb89eeee4cb7138f0af17c372245b3919cb16c1bc532
|
"""
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
import pylab as pl
import numpy as np
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
pl.figure(figsize=(10, 5))
# Plot the decision boundaries
pl.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = pl.contourf(xx, yy, Z, cmap=pl.cm.Paired)
pl.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
pl.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=pl.cm.Paired,
label="Class %s" % n)
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.legend(loc='upper right')
pl.xlabel("Decision Boundary")
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
pl.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
pl.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = pl.axis()
pl.axis((x1, x2, y1, y2 * 1.2))
pl.legend(loc='upper right')
pl.ylabel('Samples')
pl.xlabel('Decision Scores')
pl.subplots_adjust(wspace=0.25)
pl.show()
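# As the module docstring notes, a purer class-B subset can be built by
# keeping only samples whose decision score exceeds some threshold. A minimal
# sketch (the 0.5 cut-off is an arbitrary choice, not from the original):
pure_B_mask = twoclass_output > 0.5
X_pure_B, y_pure_B = X[pure_B_mask], y[pure_B_mask]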
|
depet/scikit-learn
|
examples/ensemble/plot_adaboost_twoclass.py
|
Python
|
bsd-3-clause
| 3,090
|
[
"Gaussian"
] |
682f7336e14f031499bcb4e788010a5d9ca0fe890c1f1f9d070df223f01496cf
|
from distutils.core import setup, Extension
import numpy as np
import os
nonempty = lambda x: len(x) > 0
if os.getenv("C_INCLUDE_PATH") is not None:
sys_includes = filter(nonempty, os.getenv("C_INCLUDE_PATH").split(':'))
else:
sys_includes = []
if os.getenv("LIBRARY_PATH") is not None:
sys_libraries = filter(nonempty, os.getenv("LIBRARY_PATH").split(':'))
else:
sys_libraries = []
print sys_includes
print sys_libraries
#extra_compile_args = ['-g', '-pg', '-O0']
extra_compile_args = ['-O3']
# extra_compile_args += [ '--stdlib=libc++'] # uncomment this for OSX/clang
#extra_link_args = ['-Wl,--strip-all']
#extra_link_args = ['-lrt',]
extra_link_args = []
ctree_root = 'src_c'
ctree_sources = ['cover_tree_point.cc', 'cover_tree_pp_debug.cc', 'distances.cc', 'vector_mult_py.cc', 'quadratic_form_py.cc', 'compile_product_tree.cc']
from imp import find_module
f, pathname, descr = find_module("pyublas")
CTREE_INCLUDE_DIRS = [os.path.join(pathname, "include"),]
covertree_module = ctree = Extension('cover_tree',
sources=[os.path.join(ctree_root, s) for s in ctree_sources],
include_dirs=CTREE_INCLUDE_DIRS,
library_dirs=['/'],
#library_dirs=['/home/dmoore/.virtualenvs/sigvisa-dbg/lib/', '/'],
libraries=['boost_python'],
extra_compile_args=extra_compile_args,
extra_link_args = extra_link_args,
)
setup(
name='treegp',
version='0.1.0',
author='Dave Moore',
author_email='dmoore@cs.berkeley.edu',
packages=['treegp'],
url='https://github.com/davmre/treegp',
license='LICENSE',
description='Gaussian Process Regression toolkit for Python/Numpy',
long_description=open('README').read(),
install_requires=[
"numpy >= 1.1.0",
],
include_dirs=[np.get_include()] + sys_includes,
ext_modules=[covertree_module,]
)
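# Illustrative usage note (assumption, not from the original file): with the
# cover_tree extension declared above, an in-place build is typically done with
# python setup.py build_ext --inplace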
|
davmre/treegp
|
setup.py
|
Python
|
gpl-3.0
| 2,119
|
[
"Gaussian"
] |
585410b11aad949fea04ad756ced0f4aa4aad2d502b7080ac60d9ebb231e94ad
|
from pybuilder.core import use_plugin, init, Author
use_plugin("python.install_dependencies")
use_plugin("python.core")
#use_plugin("python.unittest")
use_plugin("python.distutils")
use_plugin('copy_resources')
use_plugin('filter_resources')
authors = [Author('Marco Hoyer', 'marco_hoyer@gmx.de')]
description = """checl_http_yaml: a nagios/icinga check plugin requesting yaml data and checking key-value pairs in response.
for more documentation, visit https://github.com/marco-hoyer/check_http_yaml
"""
name = 'check_http_yaml'
license = 'GNU GPL v3'
summary = 'check_http_yaml - yaml key-value check via http'
url = 'https://github.com/marco-hoyer/check_http_yaml'
version = '1.0'
default_task = ['publish']
@init
def initialize(project):
project.depends_on("requests")
project.install_file('/usr/lib64/icinga/plugins', 'check_http_yaml.py')
project.install_file('/etc/icinga/conf.d/commands', 'check_http_yaml.cfg')
project.set_property('copy_resources_target', '$dir_dist')
project.get_property('copy_resources_glob').append('setup.cfg')
project.set_property('distutils_classifiers', [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Topic :: System :: Monitoring',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
])
@init(environments='teamcity')
def set_properties_for_teamcity_builds(project):
import os
project.version = '%s-%s' % (project.version, os.environ.get('BUILD_NUMBER', 0))
project.default_task = ['install_dependencies', 'package']
project.set_property('install_dependencies_use_mirrors', False)
project.get_property('distutils_commands').append('bdist_rpm')
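# Illustrative usage note (assumption, not from the original file): with
# PyBuilder installed, the default 'publish' task defined above is run from
# the project root with the `pyb` command, e.g. `pyb` or `pyb publish`.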
|
marco-hoyer/check_http_yaml
|
build.py
|
Python
|
gpl-2.0
| 2,064
|
[
"VisIt"
] |
6ffdcd8ebef98722859dec497a23af5a02592eafed3c6e0d84b7bfaf558df2fd
|
import openpathsampling as paths
import openpathsampling.netcdfplus.chaindict as cd
from openpathsampling.integration_tools import md, error_if_no_mdtraj
from openpathsampling.engines.openmm.tools import trajectory_to_mdtraj
from openpathsampling.netcdfplus import WeakKeyCache, \
ObjectJSON, create_to_dict, ObjectStore, PseudoAttribute
from openpathsampling.deprecations import (has_deprecations, deprecate,
MSMBUILDER)
import sys
if sys.version_info > (3, ):
get_code = lambda func: func.__code__
else:
get_code = lambda func: func.func_code
# ==============================================================================
# CLASS CollectiveVariable
# ==============================================================================
class CollectiveVariable(PseudoAttribute):
"""
Wrapper for a function that acts on snapshots or iterables of snapshots
Parameters
----------
name : string
A descriptive name of the collective variable. It is used in the string
representation.
cv_time_reversible : bool
If ``True`` (default) the CV assumes that reversed snapshots have the
same value. This is the default case when CVs do not depend on momenta
reversal. This will speed up computation of CVs by about a factor of
two. In rare cases you might want to set this to ``False``
Attributes
----------
name
cv_time_reversible
_cache_dict : :class:`openpathsampling.chaindict.ChainDict`
The ChainDict that will cache calculated values for fast access
"""
# do not store the settings for the disk cache. These are independent
# and stored in the cache itself
_excluded_attr = [
'diskcache_enabled',
'diskcache_allow_incomplete',
'diskcache_chunksize'
]
def __init__(
self,
name,
cv_time_reversible=False
):
super(CollectiveVariable, self).__init__(name, paths.BaseSnapshot)
self.cv_time_reversible = cv_time_reversible
self.diskcache_allow_incomplete = not self.cv_time_reversible
self.diskcache_chunksize = ObjectStore.default_store_chunk_size
self._cache_dict = cd.ReversibleCacheChainDict(
WeakKeyCache(),
reversible=cv_time_reversible
)
self._single_dict._post = self._cache_dict
# self._post = self._single_dict > self._cache_dict
to_dict = create_to_dict(['name', 'cv_time_reversible'])
class InVolumeCV(CollectiveVariable):
"""Turn a :class:`openpathsampling.volume.Volume` into a collective
variable
Attributes
----------
name
volume
"""
def __init__(self, name, volume):
"""
Parameters
----------
name : string
name of the collective variable
volume : openpathsampling.Volume
the Volume instance to be treated as a (storable) CV
"""
super(InVolumeCV, self).__init__(
name,
cv_time_reversible=True
)
self.volume = volume
self._eval_dict = cd.Function(
self._eval,
requires_lists=False
)
self._post = self._post > self._eval_dict
def _eval(self, items):
return bool(self.volume(items))
to_dict = create_to_dict(['name', 'volume'])
class CallableCV(CollectiveVariable):
"""Turn any callable object into a storable :class:`CollectiveVariable`.
Attributes
----------
_callable_dict
The ChainDict that will call the actual function in case none of the
preceding ChainDicts have returned data
"""
def __init__(
self,
name,
cv_callable,
cv_time_reversible=False,
cv_requires_lists=False,
cv_wrap_numpy_array=False,
cv_scalarize_numpy_singletons=False,
**kwargs
):
"""
Parameters
----------
name
cv_callable : callable (function or class with __call__)
The callable to be used
cv_time_reversible
cv_requires_lists : If ``True`` the internal function always receives a list
of elements instead of single values. It also means that if you call
the CV with a list of snapshots, a list of snapshot objects will be
passed. If ``False``, a list of snapshots like a trajectory will
be passed snapshot by snapshot.
cv_wrap_numpy_array : bool, default: False
if ``True`` the returned array will be wrapped with a
``numpy.array()`` which will convert a list of numpy arrays into a
single large numpy.array. This is useful for post-processing of
larger data since numpy arrays are easier to manipulate.
cv_scalarize_numpy_singletons : bool, default: True
If ``True`` then arrays of length 1 will be treated as array with
one dimension less. e.g. [[1], [2], [3]] will be turned into
[1, 2, 3]. This is often useful when you use an external function
to get only a single value.
**kwargs : kwargs
a dictionary with named arguments which should be used
with ``c``. Either for class creation or for calling the function
Notes
-----
This function is abstract and need _eval to be implemented to work.
Problem is that there are two types of callable functions:
1. direct functions: these can be called and give the wanted value
``c(snapshot, **kwargs)`` would be the typical call
2. a generating function: a function the creates the callable object
``c(**kwargs)(snapshot)`` is the typical call. This is usually used
for classes. Create the instance and then use it.
This function is very powerful, but needs some explanation if you want
the function to be stored alongside all other information in your
storage. The problem is that a python function relies (usually) on
an underlying global namespace that can be accessed. This is especially
important when using an iPython notebook. The problem is that the
function that stores your user-defined function has no knowledge
about this underlying namespace and its variables. All it can save is
the names of variables from your namespace to be used. This means you can
store arbitrary functions, but these will only work if you call the
reconstructed ones from the same context (scope). This is a powerful
feature because a function might do something different in another
context, but in our case we cannot store this additional information.
What we can do is analyse your function, determine which variables from
the global scope it uses (if any), and inform you if you might run into trouble.
To avoid problems you should try to:
1. import necessary modules inside of your function
2. create constants inside your function
3. if variables from the global scope are used these need to be stored
with the function and this can only be done if they are passed as
arguments to the function and added as kwargs to the FunctionCV
>>> import openpathsampling as paths
>>> def func(snapshot, indices):
>>> import mdtraj as md
>>> return md.compute_dihedrals(
>>> paths.Trajectory([snapshot]).to_mdtraj(), indices=indices)
>>> cv = FunctionCV('my_cv', func, indices=[[4, 6, 8, 10]])
The function will also check if non-standard modules are imported,
which are now ``numpy``, ``math``, ``msmbuilder``, ``pandas`` and
``mdtraj``
"""
super(CallableCV, self).__init__(
name,
cv_time_reversible=cv_time_reversible
)
self.cv_requires_lists = cv_requires_lists
self.cv_wrap_numpy_array = cv_wrap_numpy_array
self.cv_scalarize_numpy_singletons = cv_scalarize_numpy_singletons
self.cv_callable = cv_callable
if kwargs is None:
kwargs = dict()
self.kwargs = kwargs
self._eval_dict = cd.Function(
self._eval,
self.cv_requires_lists,
self.cv_scalarize_numpy_singletons
)
post = self._post > self._eval_dict
if cv_wrap_numpy_array:
# noinspection PyTypeChecker
post = cd.MergeNumpy() > post
self._post = post
def to_dict(self):
dct = super(CallableCV, self).to_dict()
callable_argument = self.__class__.args()[2]
dct[callable_argument] = ObjectJSON.callable_to_dict(self.cv_callable)
dct['cv_requires_lists'] = self.cv_requires_lists
dct['cv_wrap_numpy_array'] = self.cv_wrap_numpy_array
dct['cv_scalarize_numpy_singletons'] = \
self.cv_scalarize_numpy_singletons
dct['kwargs'] = self.kwargs
return dct
@classmethod
def from_dict(cls, dct):
kwargs = dct['kwargs']
del dct['kwargs']
dct.update(kwargs)
obj = cls(**dct)
return obj
# def __eq__(self, other):
# """Override the default Equals behavior"""
# if isinstance(other, self.__class__):
# if self.name != other.name:
# return False
# if self.kwargs != other.kwargs:
# return False
# if self.cv_callable is None or other.cv_callable is None:
# return False
#
# self_code = get_code(self.cv_callable)
# other_code = get_code(other.cv_callable)
# if hasattr(self_code, 'op_code') \
# and hasattr(other_code, 'op_code') \
# and self_code.op_code != other_code.op_code:
# # Compare Bytecode. Not perfect, but should be good enough
# return False
#
# return True
#
# return NotImplemented
__hash__ = CollectiveVariable.__hash__
def _eval(self, items):
return items
class FunctionCV(CallableCV):
"""Turn any function into a :class:`CollectiveVariable`.
Attributes
----------
cv_callable
"""
def __init__(
self,
name,
f,
cv_time_reversible=False,
cv_requires_lists=False,
cv_wrap_numpy_array=False,
cv_scalarize_numpy_singletons=False,
**kwargs
):
r"""
Parameters
----------
name : str
f : (callable) function
The function to be used
cv_time_reversible
cv_requires_lists
cv_wrap_numpy_array
cv_scalarize_numpy_singletons
**kwargs:
a dictionary of named arguments which should be given to
``cv_callable`` (for example, the atoms which define a specific
distance/angle). Finally ``cv_callable(snapshots, **kwargs)`` is
called
See also
--------
:class:`openpathsampling.collectivevariable.CallableCV`
"""
super(FunctionCV, self).__init__(
name,
cv_callable=f,
cv_time_reversible=cv_time_reversible,
cv_requires_lists=cv_requires_lists,
cv_wrap_numpy_array=cv_wrap_numpy_array,
cv_scalarize_numpy_singletons=cv_scalarize_numpy_singletons,
**kwargs
)
@property
def f(self):
return self.cv_callable
def _eval(self, items):
# here the kwargs are used in the callable when it is evaluated
return self.cv_callable(items, **self.kwargs)
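# Illustrative sketch (not part of the original module): wrap a plain Python
# function as a CV. The helper below and its keyword argument are made up for
# the example; it assumes snapshots expose a `coordinates` array.
def _example_function_cv():
def x_coordinate(snapshot, index=0):
return snapshot.coordinates[index][0]
return FunctionCV('x0', x_coordinate, index=0)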
class CoordinateFunctionCV(FunctionCV):
"""Turn any function into a :class:`CollectiveVariable`.
Attributes
----------
cv_callable
"""
def __init__(
self,
name,
f,
cv_requires_lists=False,
cv_wrap_numpy_array=False,
cv_scalarize_numpy_singletons=False,
**kwargs
):
"""
Parameters
----------
name
f
cv_requires_lists
cv_wrap_numpy_array
cv_scalarize_numpy_singletons
**kwargs
See also
--------
:class:`openpathsampling.collectivevariable.CallableCV`
"""
super(FunctionCV, self).__init__(
name,
cv_callable=f,
cv_time_reversible=True,
cv_requires_lists=cv_requires_lists,
cv_wrap_numpy_array=cv_wrap_numpy_array,
cv_scalarize_numpy_singletons=cv_scalarize_numpy_singletons,
**kwargs
)
def to_dict(self):
dct = super(CoordinateFunctionCV, self).to_dict()
del dct['cv_time_reversible']
return dct
class GeneratorCV(CallableCV):
"""Turn a callable class or function generating a callable object into a CV
The class instance will be called with snapshots. The instance itself
will be created using the given ``**kwargs``.
"""
def __init__(
self,
name,
generator,
cv_time_reversible=False,
cv_requires_lists=False,
cv_wrap_numpy_array=False,
cv_scalarize_numpy_singletons=False,
**kwargs
):
r"""
Parameters
----------
name
generator : callable class
a class where instances have a ``__call__`` attribute
cv_time_reversible
cv_requires_lists
cv_wrap_numpy_array
cv_scalarize_numpy_singletons
**kwargs
additional arguments which should be given to ``c`` (for example,
the atoms which define a specific distance/angle). Finally an
instance ``instance = cls(**kwargs)`` is created when the CV is
created and using the CV will call ``instance(snapshots)``
Notes
-----
Right now you cannot store user-defined classes. Only classes
from external packages can be used.
"""
super(GeneratorCV, self).__init__(
name,
cv_callable=generator,
cv_time_reversible=cv_time_reversible,
cv_requires_lists=cv_requires_lists,
cv_wrap_numpy_array=cv_wrap_numpy_array,
cv_scalarize_numpy_singletons=cv_scalarize_numpy_singletons,
**kwargs
)
# here the kwargs are used when the callable is created (so only once)
self._instance = generator(**self.kwargs)
@property
def instance(self):
return self._instance
@property
def generator(self):
return self.cv_callable
def _eval(self, items):
trajectory = paths.Trajectory(items)
return [self._instance(snap) for snap in trajectory]
class CoordinateGeneratorCV(GeneratorCV):
"""Turn a callable class or function generating a callable object into a CV
The class instance will be called with snapshots. The instance itself
will be created using the given ``**kwargs``.
"""
def __init__(
self,
name,
generator,
cv_requires_lists=False,
cv_wrap_numpy_array=False,
cv_scalarize_numpy_singletons=False,
**kwargs
):
r"""
Parameters
----------
name
generator
cv_requires_lists
cv_wrap_numpy_array
cv_scalarize_numpy_singletons
**kwargs
Notes
-----
Right now you cannot store user-defined classes. Only classes
from external packages can be used.
"""
super(CoordinateGeneratorCV, self).__init__(
name,
cv_callable=generator,
cv_time_reversible=True,
cv_requires_lists=cv_requires_lists,
cv_wrap_numpy_array=cv_wrap_numpy_array,
cv_scalarize_numpy_singletons=cv_scalarize_numpy_singletons,
**kwargs
)
def to_dict(self):
dct = super(CoordinateGeneratorCV, self).to_dict()
del dct['cv_time_reversible']
return dct
class MDTrajFunctionCV(CoordinateFunctionCV):
"""Make ``CollectiveVariable`` from ``f`` that takes
:class:`mdtraj.Trajectory` as input.
This is identical to FunctionCV except that the function is called with
an :class:`mdtraj.Trajectory` object instead of the
:class:`openpathsampling.Trajectory` one using
``f(traj.to_mdtraj(), **kwargs)``
Examples
--------
>>> # To create an order parameter which calculates the dihedral formed
>>> # by atoms [7,9,15,17] (psi in Ala dipeptide):
>>> import mdtraj as md
>>> traj = 'paths.Trajectory()'
>>> psi_atoms = [7,9,15,17]
>>> psi_orderparam = FunctionCV("psi", md.compute_dihedrals,
>>> indices=[[2,4,6,8]])
>>> print psi_orderparam( traj )
"""
def __init__(self,
name,
f,
topology,
cv_requires_lists=True,
cv_wrap_numpy_array=True,
cv_scalarize_numpy_singletons=True,
**kwargs
):
"""
Parameters
----------
name : str
f
topology : :obj:`openpathsampling.engines.topology.MDTrajTopology`
the mdtraj topology wrapper from OPS; it is used to convert OPS
trajectories to :class:`mdtraj.Trajectory` objects before ``f`` is called
cv_requires_lists
cv_wrap_numpy_array
cv_scalarize_numpy_singletons : bool, default: True
If ``True`` then arrays of length 1 will be treated as array with
one dimension less. e.g. ``[[1], [2], [3]]`` will be turned into
``[1, 2, 3]``. This is often useful when you use an external
function from mdtraj to get only a single value.
"""
super(MDTrajFunctionCV, self).__init__(
name,
f,
cv_requires_lists=cv_requires_lists,
cv_wrap_numpy_array=cv_wrap_numpy_array,
cv_scalarize_numpy_singletons=cv_scalarize_numpy_singletons,
**kwargs
)
self.topology = topology
def _eval(self, items):
trajectory = paths.Trajectory(items)
t = trajectory_to_mdtraj(trajectory, self.topology.mdtraj)
return self.cv_callable(t, **self.kwargs)
@property
def mdtraj_function(self):
return self.cv_callable
def to_dict(self):
return {
'name': self.name,
'f': ObjectJSON.callable_to_dict(self.f),
'topology': self.topology,
'kwargs': self.kwargs,
'cv_requires_lists': self.cv_requires_lists,
'cv_wrap_numpy_array': self.cv_wrap_numpy_array,
'cv_scalarize_numpy_singletons': self.cv_scalarize_numpy_singletons
}
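# Illustrative sketch (not part of the original module): build an
# MDTrajFunctionCV that computes a single interatomic distance with mdtraj.
# `ops_topology` is assumed to be an MDTrajTopology wrapper as described in
# the docstring above; the atom pair [0, 10] is arbitrary.
def _example_mdtraj_cv(ops_topology):
import mdtraj as md
return MDTrajFunctionCV('d_0_10', md.compute_distances, ops_topology,
atom_pairs=[[0, 10]])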
@has_deprecations
@deprecate(MSMBUILDER)
class MSMBFeaturizerCV(CoordinateGeneratorCV):
"""A CollectiveVariable that uses an MSMBuilder3 featurizer"""
def __init__(
self,
name,
featurizer,
topology,
cv_wrap_numpy_array=True,
cv_scalarize_numpy_singletons=True,
**kwargs
):
"""
Parameters
----------
name
featurizer : msmbuilder.Featurizer, callable
the featurizer used as a callable class
topology : :obj:`openpathsampling.engines.topology.MDTrajTopology`
the mdtraj topology wrapper from OPS; it is used to convert OPS
trajectories to mdtraj objects before the featurizer is applied
**kwargs :
a dictionary of named arguments which should be given to ``c``
(for example, the atoms which define a specific distance/angle).
Finally an instance ``instance = cls(**kwargs)`` is created when
the CV is created and using the CV will call
``instance(snapshots)``
cv_wrap_numpy_array
cv_scalarize_numpy_singletons
Notes
-----
All trajectories or snapshots passed in kwargs will be converted
to mdtraj objects for convenience
"""
md_kwargs = dict()
md_kwargs.update(kwargs)
# turn Snapshot and Trajectory into md.trajectory
for key in md_kwargs:
if isinstance(md_kwargs[key], paths.BaseSnapshot):
md_kwargs[key] = md_kwargs[key].to_mdtraj()
elif isinstance(md_kwargs[key], paths.Trajectory):
md_kwargs[key] = md_kwargs[key].to_mdtraj()
self._instance = featurizer(**md_kwargs)
self.topology = topology
super(GeneratorCV, self).__init__(
name,
cv_callable=featurizer,
cv_time_reversible=True,
cv_requires_lists=True,
cv_wrap_numpy_array=cv_wrap_numpy_array,
cv_scalarize_numpy_singletons=cv_scalarize_numpy_singletons,
**kwargs
)
@property
def featurizer(self):
return self.cv_callable
def _eval(self, items):
trajectory = paths.Trajectory(items)
# create an mdtraj trajectory out of it
ptraj = trajectory_to_mdtraj(trajectory, self.topology.mdtraj)
# run the featurizer
return self._instance.partial_transform(ptraj)
def to_dict(self):
return {
'name': self.name,
'featurizer': ObjectJSON.callable_to_dict(self.featurizer),
'topology': self.topology,
'kwargs': self.kwargs,
'cv_wrap_numpy_array': self.cv_wrap_numpy_array,
'cv_scalarize_numpy_singletons': self.cv_scalarize_numpy_singletons
}
class PyEMMAFeaturizerCV(MSMBFeaturizerCV):
"""Make a CV from a function that takes mdtraj.trajectory as input.
This is identical to :class:`CoordinateGeneratorCV` except that the
function is called with an :class:`mdtraj.Trajectory` object instead of the
:class:`openpathsampling.Trajectory` one using ``fnc(traj.to_mdtraj(),
**kwargs)``
"""
def __init__(
self,
name,
featurizer,
topology,
**kwargs
):
"""
Parameters
----------
name
featurizer : :class:`pyemma.coordinates.featurizer`
the pyemma featurizer used as a callable class
topology : :obj:`openpathsampling.engines.topology.MDTrajTopology`
the mdtraj topology wrapper from OPS that is used to initialize
the featurizer in ``pyemma.coordinates.featurizer(topology)``
**kwargs : dict
a dictionary of named arguments which should be given to the
``featurizer`` (for example, the atoms which define a specific
distance/angle).
Finally an instance ``instance = cls(**kwargs)`` is created when
the CV is created and using the CV will call
``instance(snapshots)``
Notes
-----
All trajectories or snapshots passed in kwargs will be converted
to mdtraj objects for convenience
"""
md_kwargs = dict()
md_kwargs.update(kwargs)
# turn Snapshot and Trajectory into md.trajectory
for key in md_kwargs:
if isinstance(md_kwargs[key], paths.BaseSnapshot):
md_kwargs[key] = md_kwargs[key].to_mdtraj()
elif isinstance(md_kwargs[key], paths.Trajectory):
md_kwargs[key] = md_kwargs[key].to_mdtraj()
self.topology = topology
import pyemma.coordinates
self._instance = pyemma.coordinates.featurizer(self.topology.mdtraj)
featurizer(self._instance, **md_kwargs)
super(GeneratorCV, self).__init__(
name,
cv_callable=featurizer,
cv_requires_lists=True,
cv_wrap_numpy_array=True,
cv_scalarize_numpy_singletons=True,
**kwargs
)
def _eval(self, items):
trajectory = paths.Trajectory(items)
t = trajectory_to_mdtraj(trajectory, self.topology.mdtraj)
return self._instance.transform(t)
def to_dict(self):
return {
'name': self.name,
'featurizer': ObjectJSON.callable_to_dict(self.featurizer),
'topology': self.topology,
'kwargs': self.kwargs
}
|
openpathsampling/openpathsampling
|
openpathsampling/collectivevariable.py
|
Python
|
mit
| 24,689
|
[
"MDTraj",
"OpenMM"
] |
81ec6123fecacd5641a5d0e236bb68bef9215df4de7ace23c13fce7fe22d3db7
|
import sys
import struct
import traceback
import envi
import envi.bits as e_bits
from envi.bits import binary
#import sys
#import struct
#import traceback
#import envi
#import envi.bits as e_bits
#from envi.bits import binary
from envi.archs.arm.const import *
from envi.archs.arm.regs import *
# FIXME: TODO
# FIXME ldm sp, { pc } seems to not get marked NOFALL
# FIXME ldm sp, { pc } should probably be marked IF_RET too...
# FIXME b lr / bx lr should be marked IF_RET as well!
# FIXME encoding for swi instruction ( <4 cond> 1111 <24-bit immediate> ) is totally horked (it's in p_uncond)
# FIXME some arm opcode values are ENC << and some are ENC and some are etc..
# (make all be ENC_FOO << 16 + <their index>
# FIXME the following things dont decode correctly
# 5346544e cmppl r6, #1308622848
#
# Possible future extensions:
# * VectorPointFloat subsystem (coproc 10+11)
# * Debug subsystem (coproc 14)
# * other 'default' coprocs we can handle and add value?
def chopmul(opcode):
op1 = (opcode >> 20) & 0xff
a = (opcode >> 16) & 0xf
b = (opcode >> 12) & 0xf
c = (opcode >> 8) & 0xf
d = (opcode >> 4) & 0xf
e = opcode & 0xf
return (op1<<4)+d,(a,b,c,d,e)
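# Illustrative check (not in the original): for a plain MUL r0, r1, r2
# (opcode 0xe0000291) chopmul returns key 0x009, which indexes the "mul"
# entry in iencmul_codes below, together with the fields (0, 0, 2, 9, 1).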
# FIXME this seems to be universal...
def addrToName(mcanv, va):
sym = mcanv.syms.getSymByAddr(va)
if sym != None:
return repr(sym)
return "0x%.8x" % va
# The keys in this table are made of the
# concat of bits 27-20 and 7-4 (only when
# ienc == mul!)
iencmul_codes = {
# Basic multiplication opcodes
binary("000000001001"): ("mul",(0,4,2), 0),
binary("000000011001"): ("mul",(0,4,2), IF_PSR_S),
binary("000000101001"): ("mla",(0,4,2,1), 0),
binary("000000111001"): ("mla",(0,4,2,1), IF_PSR_S),
binary("000001001001"): ("umaal",(1,0,4,2), 0),
binary("000010001001"): ("umull",(1,0,4,2), 0),
binary("000010011001"): ("umull",(1,0,4,2), IF_PSR_S),
binary("000010101001"): ("umlal",(1,0,4,2), 0),
binary("000010111001"): ("umlal",(1,0,4,2), IF_PSR_S),
binary("000011001001"): ("smull",(1,0,4,2), 0),
binary("000011011001"): ("smull",(1,0,4,2), IF_PSR_S),
binary("000011101001"): ("smlal",(1,0,4,2), 0),
binary("000011111001"): ("smlal",(1,0,4,2), IF_PSR_S),
# multiplys with <x><y>
# "B"
binary("000100001000"): ("smlabb", (0,4,2,1), 0),
binary("000100001010"): ("smlatb", (0,4,2,1), 0),
binary("000100001100"): ("smlabt", (0,4,2,1), 0),
binary("000100001110"): ("smlatt", (0,4,2,1), 0),
binary("000100101010"): ("smulwb", (0,4,2), 0),
binary("000100101110"): ("smulwt", (0,4,2), 0),
binary("000100101000"): ("smlawb", (0,4,2), 0),
binary("000100101100"): ("smlawt", (0,4,2), 0),
binary("000101001000"): ("smlalbb", (1,0,4,2), 0),
binary("000101001010"): ("smlaltb", (1,0,4,2), 0),
binary("000101001100"): ("smlalbt", (1,0,4,2), 0),
binary("000101001110"): ("smlaltt", (1,0,4,2), 0),
binary("000101101000"): ("smulbb", (0,4,2), 0),
binary("000101101010"): ("smultb", (0,4,2), 0),
binary("000101101100"): ("smulbt", (0,4,2), 0),
binary("000101101110"): ("smultt", (0,4,2), 0),
# type 2 multiplys
binary("011100000001"): ("smuad", (0,4,2), 0),
binary("011100000011"): ("smuadx", (0,4,2), 0),
binary("011100000101"): ("smusd", (0,4,2), 0),
binary("011100000111"): ("smusdx", (0,4,2), 0),
binary("011100000001"): ("smlad", (0,4,2), 0),
binary("011100000011"): ("smladx", (0,4,2), 0),
binary("011100000101"): ("smlsd", (0,4,2), 0),
binary("011100000111"): ("smlsdx", (0,4,2), 0),
binary("011101000001"): ("smlald", (0,4,2), 0),
binary("011101000011"): ("smlaldx", (0,4,2), 0),
binary("011101000101"): ("smlsld", (0,4,2), 0),
binary("011101000111"): ("smlsldx", (0,4,2), 0),
binary("011101010001"): ("smmla", (0,4,2,1), 0),
binary("011101010011"): ("smmlar", (0,4,2,1), 0),
binary("011101011101"): ("smmls", (0,4,2,1), 0),
binary("011101011111"): ("smmlsr", (0,4,2,1), 0),
binary("011101010001"): ("smmul", (0,4,2), 0),
binary("011101010011"): ("smmulr", (0,4,2), 0),
}
def sh_lsl(num, shval):
return (num&0xffffffff) << shval
def sh_lsr(num, shval):
return (num&0xffffffff) >> shval
def sh_asr(num, shval):
return num >> shval
def sh_ror(num, shval):
return (((num&0xffffffff) >> shval) | (num<< (32-shval))) & 0xffffffff
def sh_rrx(num, shval, emu=None):
half1 = (num&0xffffffff) >> shval
half2 = num<<(33-shval)
newC = (num>>(shval-1)) & 1
if emu != None:
flags = emu.getFlags()
oldC = (flags>>PSR_C) & 1
        emu.setFlags((flags & PSR_C_mask) | (newC << PSR_C))   # assumes newC must be shifted into the C bit position
else:
oldC = 0 # FIXME:
retval = (half1 | half2 | (oldC << (32-shval))) & 0xffffffff
return retval
shifters = (
sh_lsl,
sh_lsr,
sh_asr,
sh_ror,
sh_rrx,
)
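# Quick sanity examples for the shifter helpers (assuming the S_* shift-type
# constants index this tuple in declaration order, S_LSL=0 ... S_RRX=4):
#   shifters[S_LSL](0x1, 4)        == 0x10
#   shifters[S_ROR](0x1, 4)        == 0x10000000
#   shifters[S_LSR](0x80000000, 4) == 0x08000000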
####################################################################
# Mnemonic tables for opcode based mnemonic lookup
# Dataprocessing mnemonics
dp_mnem = ("and","eor","sub","rsb","add","adc","sbc","rsc","tst","teq","cmp","cmn","orr","mov","bic","mvn",
"adr") # added
# FIXME: THIS IS FUGLY but sadly it works
dp_noRn = (13,15)
dp_noRd = (8,9,10,11)
# FIXME: dp_MOV was supposed to be a tuple of opcodes that could be converted to MOV's if offset from PC.
# somehow this list has vanished into the ether. add seems like the right one here.
dp_ADR = (2, 4,)
# FIXME: !!! Don't make SBZ and SBO's part of the list of opers !!!
# first parm SBZ: mov,mvn
# second parm SBZ: tst,teq,cmp,cmn,
def dpbase(opval):
"""
Parse and return opcode,sflag,Rn,Rd for a standard
dataprocessing instruction.
"""
ocode = (opval >> 21) & 0xf
sflag = (opval >> 20) & 0x1
Rn = (opval >> 16) & 0xf
Rd = (opval >> 12) & 0xf
#print "DPBASE:",ocode,sflag,Rn,Rd
return ocode,sflag,Rn,Rd
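# For instance (illustrative, not exercised by the module itself):
#   dpbase(0xe2811001)   # "add r1, r1, #1" -> (4, 0, 1, 1) == (add, no S flag, Rn=r1, Rd=r1)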
####################################################################
# Parser functions for each of the instruction encodings
def p_dp_imm_shift(opval, va):
ocode,sflag,Rn,Rd = dpbase(opval)
Rm = opval & 0xf
shtype = (opval >> 5) & 0x3
shval = (opval >> 7) & 0x1e # effectively, rot*2
if ocode in dp_noRn:# FIXME: FUGLY (and slow...)
olist = (
ArmRegOper(Rd, va=va),
ArmRegShiftImmOper(Rm, shtype, shval, va),
)
elif ocode in dp_noRd:
olist = (
ArmRegOper(Rn, va=va),
ArmRegShiftImmOper(Rm, shtype, shval, va),
)
else:
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rn, va=va),
ArmRegShiftImmOper(Rm, shtype, shval, va),
)
opcode = (IENC_DP_IMM_SHIFT << 16) + ocode
if sflag > 0:
iflags = IF_PSR_S
else:
iflags = 0
return (opcode, dp_mnem[ocode], olist, iflags)
# specialized mnemonics for p_misc
qop_mnem = ('qadd','qsub','qdadd','qdsub')
smla_mnem = ('smlabb','smlabt','smlatb','smlatt',)
smlal_mnem = ('smlalbb','smlalbt','smlaltb','smlaltt',)
smul_mnem = ('smulbb','smulbt','smultb','smultt',)
smlaw_mnem = ('smlawb','smlawt',)
smulw_mnem = ('smulwb','smulwt',)
def p_misc(opval, va):
# 0x0f900000 = 0x01000000 or 0x01000010 (misc and misc1 are both parsed at the same time. see the footnote [2] on dp instructions in the Atmel AT91SAM7 docs
if opval & 0x0fc00000 == 0x01000000:
opcode = (IENC_MISC << 16) + 1
mnem = 'mrs'
r = (opval>>22) & 1
Rd = (opval>>12) & 0xf
olist = (
ArmRegOper(Rd, va=va),
ArmPgmStatRegOper(r),
)
elif opval & 0x0fb002f0 == 0x01200000:
opcode = (IENC_MISC << 16) + 2
        mnem = 'msr' # register form. the immediate form has its own parser in the 001 section
r = (opval>>22) & 1
Rd = (opval) & 0xf
olist = (
ArmPgmStatRegOper(r),
ArmRegOper(Rd, va=va),
)
elif opval & 0x0ff000f0 == 0x01200020:
opcode = (IENC_MISC << 16) + 5
mnem = 'bxj'
Rm = opval & 0xf
olist = ( ArmRegOper(Rm, va=va), )
elif opval & 0x0ff00090 == 0x01000080:
opcode = (IENC_MISC << 16) + 9
xy = (opval>>5)&3
mnem = smla_mnem[xy]
Rd = (opval>>16) & 0xf
Rn = (opval>>12) & 0xf
Rs = (opval>>8) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rm, va=va),
ArmRegOper(Rs, va=va),
ArmRegOper(Rn, va=va),
)
elif opval & 0x0ff000b0 == 0x01200080:
opcode = (IENC_MISC << 16) + 10
y = (opval>>6)&1
mnem = smlaw_mnem[y]
Rd = (opval>>16) & 0xf
Rn = (opval>>12) & 0xf
Rs = (opval>>8) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rm, va=va),
ArmRegOper(Rs, va=va),
ArmRegOper(Rn, va=va),
)
elif opval & 0x0ff000b0 == 0x012000a0:
opcode = (IENC_MISC << 16) + 11
y = (opval>>6)&1
mnem = smulw_mnem[y]
Rd = (opval>>16) & 0xf
Rs = (opval>>8) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rm, va=va),
ArmRegOper(Rs, va=va),
)
elif opval & 0x0ff00090 == 0x01400080:
opcode = (IENC_MISC << 16) + 12
xy = (opval>>5)&3
mnem = smlal_mnem[xy]
Rdhi = (opval>>16) & 0xf
Rdlo = (opval>>12) & 0xf
Rs = (opval>>8) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rdlo, va=va),
ArmRegOper(Rdhi, va=va),
            ArmRegOper(Rm, va=va),
            ArmRegOper(Rs, va=va),
)
elif opval & 0x0ff00090 == 0x01600080:
opcode = (IENC_MISC << 16) + 13
xy = (opval>>5)&3
mnem = smul_mnem[xy]
Rd = (opval>>16) & 0xf
Rs = (opval>>8) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rm, va=va),
ArmRegOper(Rs, va=va),
)
        #mnem = 'smul' #xy    # FIXME: this override dropped the <x><y> suffix; smul_mnem[xy] above is correct
#elif opval & 0x0fc00000 == 0x03200000:
#mnem = 'msr'
else:
raise envi.InvalidInstruction(
mesg="p_misc: invalid instruction",
bytez=struct.pack("<I", opval), va=va)
opcode = IENC_UNDEF
mnem = "undefined instruction",
olist = ()
return (opcode, mnem, olist, 0)
#### these actually belong to the media section, and already exist there. FIXME: DELETE
#misc1_mnem = ("pkhbt", "pkhtb", "rev", "rev16", "revsh", "sel", "ssat", "ssat16", "usat", "usat16", )
def p_misc1(opval, va): #
#R = (opval>>22) & 1
#Rn = (opval>>16) & 0xf
#Rd = (opval>>12) & 0xf
#rot_imm = (opval>>8) & 0xf
#imm = opval & 0xff
#Rm = opval & 0xf
if opval & 0x0ff000f0 == 0x01200010:
opcode = INS_BX
mnem = 'bx'
Rm = opval & 0xf
olist = ( ArmRegOper(Rm, va=va), )
elif opval & 0x0ff000f0 == 0x01600010:
opcode = (IENC_MISC << 16) + 4
mnem = 'clz'
Rd = (opval>>12) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rm, va=va),
)
elif opval & 0x0ff000f0 == 0x01200030:
#opcode = (IENC_MISC << 16) + 6
opcode = INS_BLX
mnem = 'blx'
Rm = opval & 0xf
olist = ( ArmRegOper(Rm, va=va), )
elif opval & 0x0f9000f0 == 0x01000050: #all qadd/qsub's
opcode = (IENC_MISC << 16) + 7
qop = (opval>>21)&3
mnem = qop_mnem[qop]
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rm, va=va),
ArmRegOper(Rn, va=va),
)
elif opval & 0x0ff000f0 == 0x01200070:
opcode = (IENC_MISC << 16) + 8
mnem = 'bkpt'
immed = ((opval>>4)&0xfff0) + (opval&0xf)
olist = ( ArmImmOper(immed), )
else:
raise envi.InvalidInstruction(
mesg="p_misc1: invalid instruction",
bytez=struct.pack("<I", opval), va=va)
return (opcode, mnem, olist, 0)
swap_mnem = ("swp","swpb",)
strex_mnem = ("strex","ldrex",) # actual full instructions
strh_mnem = (("str",IF_H),("ldr",IF_H),) # IF_H
ldrs_mnem = (("ldr",IF_S|IF_B),("ldr",IF_S|IF_H),) # IF_SH, IF_SB
ldrd_mnem = (("ldr",IF_D),("str",IF_D),) # IF_D
def p_extra_load_store(opval, va):
pubwl = (opval>>20) & 0x1f
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
Rs = (opval>>8) & 0xf
op1 = (opval>>5) & 0x3
Rm = opval & 0xf
iflags = 0
if opval&0x0fb000f0==0x01000090:# swp/swpb
idx = (pubwl>>2)&1
opcode = (IENC_EXTRA_LOAD << 16) + idx
mnem = swap_mnem[idx]
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rm, va=va),
ArmImmOffsetOper(Rn, 0, va, pubwl),
)
elif opval&0x0fe000f0==0x01800090:# strex/ldrex
idx = pubwl&1
opcode = (IENC_EXTRA_LOAD << 16) + 2 + idx
mnem = strex_mnem[idx]
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rm, va=va),
ArmRegOper(Rn, va=va),
)
elif opval&0x0e4000f0==0x000000b0:# strh/ldrh regoffset
# 000pu0w0-Rn--Rt-SBZ-1011-Rm- - STRH
# 0000u110-Rn--Rt-imm41011imm4 - STRHT (v7+)
idx = pubwl&1
opcode = (IENC_EXTRA_LOAD << 16) + 4 + idx
mnem,iflags = strh_mnem[idx]
olist = (
ArmRegOper(Rd, va=va),
ArmRegOffsetOper(Rn, Rm, va, pubwl),
)
elif opval&0x0e4000f0==0x004000b0:# strh/ldrh immoffset
idx = pubwl&1
opcode = (IENC_EXTRA_LOAD << 16) + 6 + idx
mnem,iflags = strh_mnem[idx]
olist = (
ArmRegOper(Rd, va=va),
ArmImmOffsetOper(Rn,(Rs<<4)+Rm, va, pubwl),
)
elif opval&0x0e5000d0==0x005000d0:# ldrsh/b immoffset
idx = (opval>>5)&1
opcode = (IENC_EXTRA_LOAD << 16) + 8 + idx
mnem,iflags = ldrs_mnem[idx]
olist = (
ArmRegOper(Rd, va=va),
ArmImmOffsetOper(Rn, (Rs<<4)+Rm, va, pubwl),
)
elif opval&0x0e5000d0==0x001000d0:# ldrsh/b regoffset
idx = (opval>>5)&1
opcode = (IENC_EXTRA_LOAD << 16) + 10 + idx
mnem,iflags = ldrs_mnem[idx]
olist = (
ArmRegOper(Rd, va=va),
ArmRegOffsetOper(Rn, Rm, va, pubwl),
)
elif opval&0x0e5000d0==0x000000d0:# ldrd/strd regoffset
# 000pu0w0-Rn--Rt-SBZ-1101-Rm- ldrd regoffset
# 0001u1001111-Rt-imm41101imm4 ldrd regoffset (literal, v7+)
idx = (opval>>5)&1
opcode = (IENC_EXTRA_LOAD << 16) + 12 + idx
mnem,iflags = ldrd_mnem[idx]
olist = (
ArmRegOper(Rd, va=va),
ArmRegOffsetOper(Rn, Rm, va, pubwl),
)
elif opval&0x0e5000d0==0x004000d0:# ldrd/strd immoffset
idx = (opval>>5)&1
opcode = (IENC_EXTRA_LOAD << 16) + 14 + idx
mnem,iflags = ldrd_mnem[idx]
olist = (
ArmRegOper(Rd, va=va),
ArmImmOffsetOper(Rn, (Rs<<4)+Rm, va, pubwl),
)
else:
raise envi.InvalidInstruction(
mesg="extra_load_store: invalid instruction",
bytez=struct.pack("<I", opval), va=va)
return (opcode, mnem, olist, iflags)
def p_dp_reg_shift(opval, va):
ocode,sflag,Rn,Rd = dpbase(opval)
Rm = opval & 0xf
shtype = (opval >> 5) & 0x3
Rs = (opval >> 8) & 0xf
if ocode in dp_noRn:# FIXME: FUGLY
olist = (
ArmRegOper(Rd, va=va),
ArmRegShiftRegOper(Rm, shtype, Rs),
)
elif ocode in dp_noRd:
olist = (
ArmRegOper(Rn, va=va),
ArmRegShiftRegOper(Rm, shtype, Rs),
)
else:
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rn, va=va),
ArmRegShiftRegOper(Rm, shtype, Rs),
)
opcode = (IENC_DP_REG_SHIFT << 16) + ocode
if sflag > 0:
iflags = IF_PSR_S
else:
iflags = 0
return (opcode, dp_mnem[ocode], olist, iflags)
multfail = (None, None, None,)
def p_mult(opval, va):
ocode, vals = chopmul(opval)
mnem, opindexes, flags = iencmul_codes.get(ocode, multfail)
if mnem == None:
raise envi.InvalidInstruction(
mesg="p_mult: invalid instruction",
bytez=struct.pack("<I", opval), va=va)
olist = []
for i in opindexes:
olist.append(ArmRegOper(vals[i], va=va))
opcode = (IENC_MULT << 16) + ocode
return (opcode, mnem, olist, flags)
def p_dp_imm(opval, va):
ocode,sflag,Rn,Rd = dpbase(opval)
imm = opval & 0xff
rot = (opval >> 7) & 0x1e # effectively, rot*2
# hack to make add/sub against PC more readable (also legit for ADR instruction)
if Rn == REG_PC and ocode in dp_ADR: # we know PC
if ocode == 2: # and this is a subtraction
ocode = 16
olist = (
ArmRegOper(Rd, va=va),
ArmPcOffsetOper( - shifters[S_ROR](imm, rot), va=va),
)
elif ocode == 4: # this is addition
ocode = 16
olist = (
ArmRegOper(Rd, va=va),
ArmPcOffsetOper( shifters[S_ROR](imm, rot), va=va),
)
# or just normal decode
elif ocode in dp_noRn:# FIXME: FUGLY
olist = (
ArmRegOper(Rd, va=va),
ArmImmOper(imm, rot, S_ROR),
)
elif ocode in dp_noRd:
olist = (
ArmRegOper(Rn, va=va),
ArmImmOper(imm, rot, S_ROR),
)
else:
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rn, va=va),
ArmImmOper(imm, rot, S_ROR),
)
opcode = (IENC_DP_IMM << 16) + ocode
if sflag > 0:
iflags = IF_PSR_S
else:
iflags = 0
return (opcode, dp_mnem[ocode], olist, iflags)
def p_undef(opval, va):
# FIXME: make this an actual opcode with the opval as an imm oper
raise envi.InvalidInstruction(
mesg="p_undef: invalid instruction (by definition in ARM spec)",
bytez=struct.pack("<I", opval), va=va)
opcode = IENC_UNDEF
mnem = "undefined instruction"
olist = (
ArmImmOper(opval),
)
return (opcode, mnem, olist, 0)
hint_mnem = {
0: 'Nop',
1: 'yield',
2: 'wfe',
3: 'wfi',
4: 'sev',
}
def p_mov_imm_stat(opval, va): # only one instruction: "msr"
iflags = 0
imm = opval & 0xff
rot = (opval>>8) & 0xf
r = (opval>>22) & 1
mask = (opval>>16) & 0xf
opcode = (IENC_MOV_IMM_STAT << 16)
if mask == 0:
opcode += 1
# it's a NOP or some hint instruction
        if (imm >> 4) == 0xf:   # dbg: hint immediate is 0b1111xxxx
mnem = 'dbg'
option = opval & 0xf
olist = ( ArmDbgHintOption(option), )
else:
mnem = hint_mnem.get(imm)
if mnem == None:
raise envi.InvalidInstruction(
mesg="MSR/Hint illegal encoding",
bytez=struct.pack("<I", opval), va=va)
olist = tuple()
else:
# it's an MSR <immediate>
mnem = 'msr'
        immed = ((imm >> (2*rot)) | (imm << (32 - 2*rot))) & 0xffffffff   # ARM immediates rotate right by 2*rot
if mask & 3: # USER mode these will be 0
iflags |= IF_SYS_MODE
olist = (
ArmPgmStatRegOper(r, mask),
ArmImmOper(immed),
)
return (opcode, mnem, olist, iflags)
ldr_mnem = ("str", "ldr")
tsizes = (4, 1,)
def p_load_imm_off(opval, va):
pubwl = (opval>>20) & 0x1f
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
imm = opval & 0xfff
if pubwl & 4: # B
iflags = IF_B
if (pubwl & 0x12) == 2:
iflags |= IF_T
else:
iflags = 0
olist = (
ArmRegOper(Rd, va=va),
ArmImmOffsetOper(Rn, imm, va, pubwl=pubwl) # u=-/+, b=word/byte
)
opcode = (IENC_LOAD_IMM_OFF << 16)
return (opcode, ldr_mnem[pubwl&1], olist, iflags)
def p_load_reg_off(opval, va):
pubwl = (opval>>20) & 0x1f
Rd = (opval>>12) & 0xf
Rn = (opval>>16) & 0xf
Rm = opval & 0xf
shtype = (opval>>5) & 0x3
shval = (opval>>7) & 0x1f
if pubwl & 4: # B
iflags = IF_B
if (pubwl & 0x12) == 2:
iflags |= IF_T
else:
iflags = 0
olist = (
ArmRegOper(Rd, va=va),
ArmScaledOffsetOper(Rn, Rm, shtype, shval, va, pubwl), # u=-/+, b=word/byte
)
opcode = (IENC_LOAD_REG_OFF << 16)
return (opcode, ldr_mnem[pubwl&1], olist, iflags)
def p_media(opval, va):
"""
27:20, 7:4
"""
# media is a parent for the following:
# parallel add/sub 01100
# pkh, ssat, ssat16, usat, usat16, sel 01101
# rev, rev16, revsh 01101
# smlad, smlsd, smlald, smusd 01110
# usad8, usada8 01111
definer = (opval>>23) & 0x1f
if definer == 0xc:
return p_media_parallel(opval, va)
elif definer == 0xd:
return p_media_pack_sat_rev_extend(opval, va)
elif definer == 0xe:
return p_mult(opval, va)
        #return p_media_smul(opval, va)    # unreachable: type-3 multiplies are routed through p_mult above
else:
return p_media_usada(opval, va)
#generate mnemonics for parallel instructions (could do manually like last time...)
parallel_mnem = []
par_suffixes = ("add16", "addsubx", "subaddx", "sub16", "add8", "sub8", "", "")
par_prefixes = ("","s","q","sh","","u","uq","uh")
for pre in par_prefixes:
for suf in par_suffixes:
parallel_mnem.append(pre+suf)
parallel_mnem = tuple(parallel_mnem)
def p_media_parallel(opval, va):
opc1 = (opval>>17) & 0x38
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
opc1 += (opval>>5) & 7
Rm = opval & 0xf
mnem = parallel_mnem[opc1]
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rn, va=va),
ArmRegOper(Rm, va=va),
)
opcode = IENC_MEDIA_PARALLEL + opc1
return (opcode, mnem, olist, 0)
xtnd_mnem = []
xtnd_suffixes = ("xtab16","xtab","xtah","xtb16","xtb","xth",)
xtnd_prefixes = ("s","u")
for pre in xtnd_prefixes:
for suf in xtnd_suffixes:
xtnd_mnem.append(pre+suf)
xtnd_mnem = tuple(xtnd_mnem)
pkh_mnem = ('pkhbt', 'pkhtb',)
sat_mnem = ('ssat','usat')
sat16_mnem = ('ssat16','usat16')
rev_mnem = ('rev','rev16',None,'revsh',)
def p_media_pack_sat_rev_extend(opval, va):
## part of p_media
# assume bit 23 == 1
opc1 = (opval>>20) & 7
opc2 = (opval>>4) & 0xf
opc25 = opc2 & 3
opcode = 0
if opc1 == 0 and opc25 == 1: #pkh
idx = (opval>>6)&1
mnem = pkh_mnem[idx]
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
shift_imm = (opval>>7) & 0x1f
Rm = opval & 0xf
opcode = IENC_MEDIA_PACK + idx
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rn, va=va),
ArmRegShiftImmOper(Rm, S_LSL, shift_imm, va),
)
elif (opc1 & 2) and opc25 == 1: #word sat
opidx = (opval>>22)&1
        sat_imm = ((opval>>16) & 0xf) + 1
Rd = (opval>>12) & 0xf
Rm = opval & 0xf
        if opc1 & 0x10: # ?sat16   FIXME: opc1 is only 3 bits wide, so this path can never be taken
mnem = sat16_mnem[opidx]
olist = (
ArmRegOper(Rd, va=va),
ArmImmOper(sat_imm),
ArmRegOper(Rm, va=va),
)
opcode = IENC_MEDIA_SAT + opidx
else:
mnem = sat_mnem[opidx]
shift_imm = (opval>>7) & 0x1f
sh = (opval>>5) & 2
olist = (
ArmRegOper(Rd, va=va),
ArmImmOper(sat_imm),
ArmRegShiftImmOper(Rm, sh, shift_imm, va),
)
opcode = IENC_MEDIA_SAT + 2 + opidx
elif (opc1 & 3) == 2 and opc2 == 3: #parallel half-word sat
# FIXME: implement this instruction!
raise envi.InvalidInstruction(
mesg="WTF! Parallel Half-Word Saturate... what is that instruction?",
bytez=struct.pack("<I", opval), va=va)
elif (opc1 > 0) and (opc2 & 7) == 3: # byte rev word
opidx = ((opval>>21) & 2) + ((opval>>7) & 1)
mnem = rev_mnem[opidx]
if mnem == None:
raise envi.InvalidInstruction(
mesg="p_media_pack_sat_rev_extend: invalid instruction",
bytez=struct.pack("<I", opval), va=va)
Rd = (opval>>12) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rm, va=va),
)
opcode = IENC_MEDIA_REV + opidx
#elif opc1 == 3 and opc2 == 0xb: # byte rev pkt halfword
#elif opc1 == 7 and opc2 == 0xb: # byte rev signed halfword
elif opc1 == 0 and opc2 == 0xb: # select bytes
mnem = "sel"
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
Rm = opval & 0xf
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rn, va=va),
ArmRegOper(Rm, va=va),
)
opcode = IENC_MEDIA_SEL
elif opc2 == 7: # sign extend
mnem = xtnd_mnem[opc1]
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
rot = (opval>>10) & 3
Rm = opval & 0xf
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rn, va=va),
ArmRegShiftImmOper(Rm, S_ROR, rot, va),
)
opcode = IENC_MEDIA_EXTEND + opc1
else:
raise envi.InvalidInstruction(
mesg="p_media_extend: invalid instruction",
bytez=struct.pack("<I", opval), va=va)
return (opcode, mnem, olist, 0)
#smult3_mnem = ('smlad','smlsd',,,'smlald')
def p_media_smul(opval, va):
raise envi.InvalidInstruction(
mesg="Should not reach here. If we reach here, we'll have to implement MEDIA_SMUL extended multiplication (type 3)",
bytez=struct.pack("<I", opval), va=va)
# hmmm, is this already handled?
def p_media_usada(opval, va):
Rd = (opval>>16) & 0xf
Rn = (opval>>12) & 0xf
Rs = (opval>>8) & 0xf
Rm = opval & 0xf
if Rn == 0xf:
mnem = "usad8"
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rm, va=va),
ArmRegOper(Rs, va=va),
)
opcode = IENC_MEDIA_USAD8
else:
mnem = "usada8"
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rm, va=va),
ArmRegOper(Rs, va=va),
ArmRegOper(Rn, va=va),
)
opcode = IENC_MEDIA_USADA8
return (opcode, mnem, olist, 0)
def p_arch_undef(opval, va):
print ("p_arch_undef: invalid instruction (by definition in ARM spec): %.8x:\t%.8x"%(va,opval))
raise envi.InvalidInstruction(
mesg="p_arch_undef: invalid instruction (by definition in ARM spec)",
bytez=struct.pack("<I", opval), va=va)
return (IENC_ARCH_UNDEF, 'arch undefined', (ArmImmOper(opval),), 0)
ldm_mnem = ("stm", "ldm")
def p_load_mult(opval, va):
puswl = (opval>>20) & 0x1f
mnem_idx = puswl & 1
mnem = ldm_mnem[(mnem_idx)]
    # store bits for decoding whether to dec/inc before/after between ldr/str. IF_DA tells the repr to
    # print the DAIB extension after the conditional. the right shift clears the lower three bits and
    # aligns us with IF_DAIB_SHFT.
    flags = ((puswl>>3)<<(IF_DAIB_SHFT)) | IF_DA
Rn = (opval>>16) & 0xf
reg_list = opval & 0xffff
olist = (
ArmRegOper(Rn, va=va),
ArmRegListOper(reg_list, puswl),
)
# If we are a load multi (ldm), and we load PC, we are NOFALL
# (FIXME unless we are conditional... ung...)
if mnem_idx == 1 and reg_list & (1 << REG_PC):
flags |= envi.IF_NOFALL
# If the load is from the stack, call it a "return"
if Rn == REG_SP:
flags |= envi.IF_RET
if puswl & 2: # W (mnemonic: "!")
flags |= IF_W
olist[0].oflags |= OF_W
if puswl & 4: # UM - usermode, or mov current SPSR -> CPSR if r15 included
flags |= IF_UM
olist[1].oflags |= OF_UM
opcode = (IENC_LOAD_MULT << 16)
return (opcode, mnem, olist, flags)
b_mnem = ("b", "bl",)
def p_branch(opval, va): # primary branch encoding. others were added later in the media section
off = e_bits.signed(opval, 3)
off <<= 2
link = (opval>>24) & 1
#FIXME this assumes A1 branch encoding.
olist = ( ArmPcOffsetOper(off, va),)
if link:
flags = envi.IF_CALL
else:
flags = envi.IF_BRANCH
opcode = (IENC_BRANCH << 16) + link
return (opcode, b_mnem[link], olist, flags)
ldc_mnem = ("stc", "ldc",)
def p_coproc_load(opval, va):
punwl = (opval>>20) & 0x1f
Rn = (opval>>16) & 0xf
CRd = (opval>>12) & 0xf
cp_num = (opval>>8) & 0xf
offset = opval & 0xff
if punwl & 4: # L
iflags = IF_L
else:
iflags = 0
olist = (
ArmCoprocOper(cp_num),
ArmCoprocRegOper(CRd),
ArmImmOffsetOper(Rn, offset*4, va, pubwl=punwl),
)
opcode = (IENC_COPROC_LOAD << 16)
return (opcode, ldc_mnem[punwl&1], olist, iflags)
mcrr_mnem = ("mcrr", "mrrc")
def p_coproc_dbl_reg_xfer(opval, va):
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
cp_num = (opval>>8) & 0xf
opcode = (opval>>4) & 0xf
CRm = opval & 0xf
mnem = mcrr_mnem[(opval>>20) & 1]
olist = (
ArmCoprocOper(cp_num),
ArmCoprocOpcodeOper(opcode),
ArmRegOper(Rd, va=va),
ArmRegOper(Rn, va=va),
ArmCoprocRegOper(CRm),
)
opcode = IENC_COPROC_RREG_XFER<<16
return (opcode, mnem, olist, 0)
cdp_mnem = ["cdp" for x in range(15)]
cdp_mnem.append("cdp2")
def p_coproc_dp(opval, va):
opcode1 = (opval>>20) & 0xf
CRn = (opval>>16) & 0xf
CRd = (opval>>12) & 0xf
cp_num = (opval>>8) & 0xf
opcode2 = (opval>>5) & 0x7
CRm = opval & 0xf
mnem = cdp_mnem[opval>>28]
olist = (
ArmCoprocOper(cp_num),
ArmCoprocOpcodeOper(opcode1),
ArmCoprocRegOper(CRd),
ArmCoprocRegOper(CRn),
ArmCoprocRegOper(CRm),
ArmCoprocOpcodeOper(opcode2),
)
opcode = (IENC_COPROC_DP << 16)
return (opcode, mnem, olist, 0) #FIXME: CDP2 (cond = 0b1111) also needs handling.
mcr_mnem = ("mcr", "mrc")
def p_coproc_reg_xfer(opval, va):
opcode1 = (opval>>21) & 0x7
load = (opval>>20) & 1
CRn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
cp_num = (opval>>8) & 0xf
opcode2 = (opval>>5) & 0x7
CRm = opval & 0xf
olist = (
ArmCoprocOper(cp_num),
ArmCoprocOpcodeOper(opcode1),
ArmRegOper(Rd, va=va),
ArmCoprocRegOper(CRn),
ArmCoprocRegOper(CRm),
ArmCoprocOpcodeOper(opcode2),
)
opcode = (IENC_COPROC_REG_XFER << 16)
return (opcode, mcr_mnem[load], olist, 0)
def p_swint(opval, va):
swint = opval & 0xffffff
olist = ( ArmImmOper(swint), )
    opcode = (IENC_SWINT << 16) + 1
return (opcode, "swi", olist, 0)
cps_mnem = ("cps","cps FAIL-bad encoding","cpsie","cpsid")
mcrr2_mnem = ("mcrr2", "mrrc2")
ldc2_mnem = ("stc2", "ldc2",)
mcr2_mnem = ("mcr2", "mrc2")
def p_uncond(opval, va):
if opval & 0x0f000000 == 0x0f000000:
# FIXME THIS IS HORKED
        opcode = (IENC_SWINT << 16) + 2
immval = opval & 0x00ffffff
return (opcode, 'swi', (ArmImmOper(immval),), 0)
optop = ( opval >> 26 ) & 0x3
if optop == 0:
if opval & 0xfff10020 == 0xf1000000:
#cps
imod = (opval>>18)&3
mmod = (opval>>17)&1
aif = (opval>>5)&7
mode = opval&0x1f
mnem = cps_mnem[imod]
if imod & 2:
olist = [
ArmCPSFlagsOper(aif) # if mode is set...
]
else:
olist = []
if mmod:
olist.append(ArmImmOper(mode))
opcode = IENC_UNCOND_CPS + imod
return (opcode, mnem, olist, 0)
elif (opval & 0xffff00f0) == 0xf1010000:
#setend
e = (opval>>9) & 1
mnem = "setend"
olist = ( ArmEndianOper(e), )
opcode = IENC_UNCOND_SETEND
return (opcode, mnem, olist, 0)
else:
raise envi.InvalidInstruction(
mesg="p_uncond (ontop=0): invalid instruction",
bytez=struct.pack("<I", opval), va=va)
elif optop == 1:
if (opval & 0xf570f000) == 0xf550f000:
#cache preload - also known as a nop on most platforms... does nothing except prefetch instructions from cache.
# i'm tempted to cut the parsing of it and just return a canned something.
mnem = "pld"
I = (opval>>25) & 1 # what the freak am i supposed to do with "i"???
Rn = (opval>>16) & 0xf
U = (opval>>23) & 1
opcode = IENC_UNCOND_PLD
if I:
immoffset = opval & 0xfff
olist = (ArmImmOffsetOper(Rn, immoffset, va, U<<3),)
else:
Rm = opval & 0xf
shtype = (opval>>5) & 3
shval = (opval>>7) & 0x1f
                olist = (ArmScaledOffsetOper(Rn, Rm, shtype, shval, va, U<<3), )
return (opcode, mnem, olist, 0)
else:
raise envi.InvalidInstruction(
mesg="p_uncond (ontop=1): invalid instruction",
bytez=struct.pack("<I", opval), va=va)
elif optop == 2:
if (opval & 0xfe5f0f00) == 0xf84d0500:
#save return state (basically, store LR and SPSR to the stack that R13 points to)
pu_w_ = (opval>>20) & 0x1f
mnem = "srs"
flags = ((pu_w_>>3)<<(IF_DAIB_SHFT)) | IF_DA
mode = opval & 0x1f
#reg_list = ( 1<<14 | 1<<SPSR )
if pu_w_ & 2: # base_reg writeback
flags |= IF_W
# base_reg = R13
# reg_list = R14 and SPSR
olist = (
#ArmRegListOper(reg_list, pu_w_),
ArmModeOper(mode, (pu_w_>>1)&1),
)
opcode = IENC_UNCOND_SRS
return (opcode, mnem, olist, flags)
#elif (opval & 0xfe500f00) == 0xf8100a00: # this is too restrictive, although does weed out oddballs. what does "Should Be Zero" really *want* to mean?
elif (opval & 0xfe500000) == 0xf8100000:
#rfe
pu = (opval>>23) & 3
mnem = "rfe"
flags = (pu<<(IF_DAIB_SHFT)) | IF_DA
Rn = (opval>>16) & 0xf
olist = (
ArmRegOper(Rn, va=va),
)
opcode = IENC_UNCOND_RFE
return (opcode, mnem, olist, flags)
elif (opval & 0xfe000000) == 0xfa000000:
#blx
mnem = "blx"
h = (opval>>23) & 2
imm_offset = e_bits.signed(opval, 3) + h
olist = (
ArmPcOffsetOper(imm_offset, va),
)
opcode = INS_BLX #should this be IENC_UNCOND_BLX?
return (opcode, mnem, olist, 0)
else:
raise envi.InvalidInstruction(
mesg="p_uncond (ontop=2): invalid instruction",
bytez=struct.pack("<I", opval), va=va)
else:
if (opval & 0xffe00000) == 0xfc400000:
            #mcrr2/mrrc2
Rn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
cp_num = (opval>>8) & 0xf
opcode = (opval>>4) & 0xf
CRm = opval & 0xf
mnem = mcrr2_mnem[(opval>>20) & 1]
olist = (
ArmCoprocOper(cp_num),
ArmCoprocOpcodeOper(opcode),
ArmRegOper(Rd, va=va),
ArmRegOper(Rn, va=va),
ArmCoprocRegOper(CRm),
)
opcode = IENC_COPROC_RREG_XFER<<16
return (opcode, mnem, olist, 0)
elif (opval & 0xfe000000) == 0xfc000000:
#stc2/ldc2
punwl = (opval>>20) & 0x1f
Rn = (opval>>16) & 0xf
CRd = (opval>>12) & 0xf
cp_num = (opval>>8) & 0xf
offset = opval & 0xff
if punwl & 4: # L
iflags = IF_L
else:
iflags = 0
olist = (
ArmCoprocOper(cp_num),
ArmCoprocRegOper(CRd),
ArmImmOffsetOper(Rn, offset*4, va, pubwl=punwl),
)
opcode = (IENC_COPROC_LOAD << 16)
return (opcode, ldc2_mnem[punwl&1], olist, iflags)
elif (opval & 0xff000010) == 0xfe000000:
#coproc dp (cdp2)
return p_coproc_dp(opval, va)
elif (opval & 0xff000010) == 0xfe000010:
#mcr2/mrc2
opcode1 = (opval>>21) & 0x7
load = (opval>>20) & 1
CRn = (opval>>16) & 0xf
Rd = (opval>>12) & 0xf
cp_num = (opval>>8) & 0xf
opcode2 = (opval>>5) & 0x7
CRm = opval & 0xf
olist = (
ArmCoprocOper(cp_num),
ArmCoprocOpcodeOper(opcode1),
ArmRegOper(Rd, va=va),
ArmCoprocRegOper(CRn),
ArmCoprocRegOper(CRm),
ArmCoprocOpcodeOper(opcode2),
)
opcode = (IENC_COPROC_REG_XFER << 16)
return (opcode, mcr2_mnem[load], olist, 0)
else:
raise envi.InvalidInstruction(
mesg="p_uncond (ontop=3): invalid instruction",
bytez=struct.pack("<I", opval), va=va)
####################################################################
# Table of the parser functions
ienc_parsers_tmp = [None for x in range(21)]
ienc_parsers_tmp[IENC_DP_IMM_SHIFT] = p_dp_imm_shift
ienc_parsers_tmp[IENC_MISC] = p_misc
ienc_parsers_tmp[IENC_MISC1] = p_misc1
ienc_parsers_tmp[IENC_EXTRA_LOAD] = p_extra_load_store
ienc_parsers_tmp[IENC_DP_REG_SHIFT] = p_dp_reg_shift
ienc_parsers_tmp[IENC_MULT] = p_mult
ienc_parsers_tmp[IENC_UNDEF] = p_undef
ienc_parsers_tmp[IENC_MOV_IMM_STAT] = p_mov_imm_stat
ienc_parsers_tmp[IENC_DP_IMM] = p_dp_imm
ienc_parsers_tmp[IENC_LOAD_IMM_OFF] = p_load_imm_off
ienc_parsers_tmp[IENC_LOAD_REG_OFF] = p_load_reg_off
ienc_parsers_tmp[IENC_ARCH_UNDEF] = p_arch_undef
ienc_parsers_tmp[IENC_MEDIA] = p_media
ienc_parsers_tmp[IENC_LOAD_MULT] = p_load_mult
ienc_parsers_tmp[IENC_BRANCH] = p_branch
ienc_parsers_tmp[IENC_COPROC_RREG_XFER] = p_coproc_dbl_reg_xfer
ienc_parsers_tmp[IENC_COPROC_LOAD] = p_coproc_load
ienc_parsers_tmp[IENC_COPROC_DP] = p_coproc_dp
ienc_parsers_tmp[IENC_COPROC_REG_XFER] = p_coproc_reg_xfer
ienc_parsers_tmp[IENC_SWINT] = p_swint
ienc_parsers_tmp[IENC_UNCOND] = p_uncond
ienc_parsers = tuple(ienc_parsers_tmp)
####################################################################
# the primary table is indexed by the 3 bits following the
# conditional and is structured as follows:
# ( ENC, nexttable )
# If ENC != None, those 3 bits were enough for us to know the
# encoding type, otherwise move on to the second table.
# The secondary tables have the format:
# (mask, value, ENC). If the opcode is masked with "mask"
# resulting in "value" we have found the instruction encoding.
# NOTE: All entries in these tables *must* be from most specific
# to least!
# Table for initial 3 bit == 0
s_0_table = (
# Order is critical here...
(0b00000001100100000000000000010000, 0b00000001000000000000000000000000, IENC_MISC),
(0b00000000000000000000000000010000, 0b00000000000000000000000000000000, IENC_DP_IMM_SHIFT),
(0b00000001100100000000000010010000, 0b00000001000000000000000000010000, IENC_MISC1),
(0b00000001000000000000000011110000, 0b00000000000000000000000010010000, IENC_MULT),
(0b00000001001000000000000010010000, 0b00000001001000000000000010010000, IENC_EXTRA_LOAD),
(0b00000000000000000000000010010000, 0b00000000000000000000000010010000, IENC_EXTRA_LOAD),
(0b00000000000000000000000010010000, 0b00000000000000000000000000010000, IENC_DP_REG_SHIFT),
(0,0, IENC_UNDEF), #catch-all
)
s_1_table = (
(0b00001111101100000000000000000000, 0b00000011001000000000000000000000, IENC_MOV_IMM_STAT),
(0b00001111111100000000000000000000, 0b00000011000000000000000000000000, IENC_DP_MOVW),
(0b00001111111100000000000000000000, 0b00000010100000000000000000000000, IENC_DP_MOVT),
(0b00001111111100000000000000000000, 0b00000010010000000000000000000000, IENC_DP_MSR_IMM),
(0b00001111111100000000000000000000, 0b00000010110000000000000000000000, IENC_DP_MSR_IMM),
(0b00001110000000000000000000000000, 0b00000010000000000000000000000000, IENC_DP_IMM),
(0, 0, IENC_UNDEF),
)
s_3_table = (
(0b00000001111100000000000011110000, 0b00000001111100000000000011110000, IENC_ARCH_UNDEF),
(0b00000000000000000000000000010000, 0b00000000000000000000000000010000, IENC_MEDIA),
(0,0, IENC_LOAD_REG_OFF),
)
s_6_table = (
(0b00001111111000000000000000000000, 0b00001100010000000000000000000000, IENC_COPROC_RREG_XFER),
(0b00001110000000000000000000000000, 0b00001100000000000000000000000000, IENC_COPROC_LOAD),
)
s_7_table = (
(0b00000001000000000000000000000000, 0b00000001000000000000000000000000, IENC_SWINT),
(0b00000001000000000000000000010000, 0b00000000000000000000000000010000, IENC_COPROC_REG_XFER),
(0, 0, IENC_COPROC_DP),
)
# Initial 3 (non conditional) primary table
inittable = [
(None, s_0_table),
(None, s_1_table),
(IENC_LOAD_IMM_OFF, None), # Load or store an immediate
(None, s_3_table),
(IENC_LOAD_MULT, None),
(IENC_BRANCH, None),
(None, s_6_table),
(None, s_7_table),
(IENC_UNCOND, None),
]
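# A sketch of how the lookup proceeds (illustrative): for the word 0xe0210392
# ("mla r1, r2, r3, r0"), bits 27-25 are 0b000, so inittable[0] points us at
# s_0_table; scanning it, the first (mask, value) pair that matches is the
# IENC_MULT entry (bits 7-4 == 0b1001 with bit 24 clear), so p_mult() is
# called to finish the decode.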
# FIXME for emulation...
#def s_lsl(val, shval):
#pass
#def s_lsr(val, shval):
#pass
# These are indexed by the 2 bit "shift" value in some DP encodings
#shift_handlers = (
#s_lsl,
#s_lsr,
#s_asr,
#s_ror,
#)
endian_names = ("le","be")
#FIXME IF_NOFALL (and other envi flags)
class ArmOpcode(envi.Opcode):
_def_arch = envi.ARCH_ARMV7
def __hash__(self):
return int(hash(self.mnem) ^ (self.size << 4))
def __len__(self):
return int(self.size)
def getBranches(self, emu=None):
"""
Return a list of tuples. Each tuple contains the target VA of the
branch, and a possible set of flags showing what type of branch it is.
See the BR_FOO types for all the supported envi branch flags....
Example: for bva,bflags in op.getBranches():
"""
ret = []
if not self.iflags & envi.IF_NOFALL:
ret.append((self.va + self.size, envi.BR_FALL | self._def_arch))
# FIXME if this is a move to PC god help us...
flags = 0
if self.prefixes != COND_AL:
flags |= envi.BR_COND
if self.opcode in ( INS_B, INS_BX, INS_BL, INS_BLX, INS_BCC ):
oper = self.opers[0]
# check for location being ODD
operval = oper.getOperValue(self)
if operval == None:
# probably a branch to a register. just return.
return ret
if self.opcode in (INS_BLX, INS_BX):
if operval & 3:
flags |= envi.ARCH_THUMB16
else:
flags |= envi.ARCH_ARM
# if we don't know that it's thumb, default to "ARCH_DEFAULT"
else:
flags |= self._def_arch
#operval &= 0xfffffffe # this has to work for both arm and thumb
if self.iflags & envi.IF_CALL:
flags |= envi.BR_PROC
ret.append((operval, flags))
return ret
def render(self, mcanv):
"""
Render this opcode to the specified memory canvas
"""
mnem = self.mnem + cond_codes.get(self.prefixes)
daib_flags = self.iflags & IF_DAIB_MASK
if self.iflags & IF_L:
mnem += 'l'
elif self.iflags & IF_PSR_S:
mnem += 's'
elif daib_flags > 0:
idx = ((daib_flags)>>(IF_DAIB_SHFT))
mnem += daib[idx]
else:
if self.iflags & IF_S:
mnem += 's'
if self.iflags & IF_D:
mnem += 'd'
if self.iflags & IF_B:
mnem += 'b'
if self.iflags & IF_H:
mnem += 'h'
elif self.iflags & IF_T:
mnem += 't'
#FIXME: Advanced SIMD modifiers (IF_V*)
if self.iflags & IF_THUMB32:
mnem += ".w"
mcanv.addNameText(mnem, typename="mnemonic")
mcanv.addText(" ")
# Allow each of our operands to render
imax = len(self.opers)
lasti = imax - 1
for i in xrange(imax):
oper = self.opers[i]
oper.render(mcanv, self, i)
if i != lasti:
mcanv.addText(",")
#if self.iflags & IF_W: # handled in operand. still keeping flag to indicate this instruction writes back
# mcanc.addText(" !")
def __repr__(self):
mnem = self.mnem + cond_codes.get(self.prefixes)
daib_flags = self.iflags & IF_DAIB_MASK
if self.iflags & IF_L:
mnem += 'l'
elif self.iflags & IF_PSR_S:
mnem += 's'
elif daib_flags > 0:
idx = ((daib_flags)>>(IF_DAIB_SHFT))
mnem += daib[idx]
else:
if self.iflags & IF_S:
mnem += 's'
if self.iflags & IF_D:
mnem += 'd'
if self.iflags & IF_B:
mnem += 'b'
if self.iflags & IF_H:
mnem += 'h'
elif self.iflags & IF_T:
mnem += 't'
if self.iflags & IF_THUMB32:
mnem += ".w"
x = []
for o in self.opers:
x.append(o.repr(self))
#if self.iflags & IF_W: # handled in operand. still keeping flag to indicate this instruction writes back
# x[-1] += " !"
return mnem + " " + ", ".join(x)
class ArmOperand(envi.Operand):
tsize = 4
def involvesPC(self):
return False
def getOperAddr(self, op, emu=None):
return None
class ArmRegOper(ArmOperand):
''' register operand. see "addressing mode 1 - data processing operands - register" '''
def __init__(self, reg, va=0, oflags=0):
self.va = va
self.reg = reg
self.oflags = oflags
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.reg != oper.reg:
return False
if self.oflags != oper.oflags:
return False
return True
def involvesPC(self):
return self.reg == 15
def isDeref(self):
return False
def getOperValue(self, op, emu=None):
if self.reg == REG_PC:
return self.va # FIXME: is this modified? or do we need to att # to this?
if emu == None:
return None
return emu.getRegister(self.reg)
def setOperValue(self, op, emu=None, val=None):
if emu == None:
return None
emu.setRegister(self.reg, val)
def render(self, mcanv, op, idx):
rname = arm_regs[self.reg][0]
mcanv.addNameText(rname, typename='registers')
if self.oflags & OF_W:
mcanv.addText( "!" )
def repr(self, op):
rname = arm_regs[self.reg][0]
if self.oflags & OF_W:
rname += "!"
return rname
class ArmRegShiftRegOper(ArmOperand):
''' register shift operand. see "addressing mode 1 - data processing operands - * shift * by register" '''
def __init__(self, reg, shtype, shreg):
self.reg = reg
self.shtype = shtype
self.shreg = shreg
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.reg != oper.reg:
return False
if self.shtype != oper.shtype:
return False
if self.shreg != oper.shreg:
return False
return True
def involvesPC(self):
return self.reg == 15
def isDeref(self):
return False
def getOperValue(self, op, emu=None):
if emu == None:
return None
        return shifters[self.shtype](emu.getRegister(self.reg), emu.getRegister(self.shreg))
def render(self, mcanv, op, idx):
rname = arm_regs[self.reg][0]
mcanv.addNameText(rname, typename='registers')
mcanv.addText(', ')
mcanv.addNameText(shift_names[self.shtype])
mcanv.addText(' ')
mcanv.addNameText(arm_regs[self.shreg][0], typename='registers')
def repr(self, op):
rname = arm_regs[self.reg][0]+","
return " ".join([rname, shift_names[self.shtype], arm_regs[self.shreg][0]])
class ArmRegShiftImmOper(ArmOperand):
''' register shift immediate operand. see "addressing mode 1 - data processing operands - * shift * by immediate" '''
def __init__(self, reg, shtype, shimm, va):
if shimm == 0:
if shtype == S_ROR:
shtype = S_RRX
elif shtype == S_LSR or shtype == S_ASR:
shimm = 32
self.reg = reg
self.shtype = shtype
self.shimm = shimm
self.va = va
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.reg != oper.reg:
return False
if self.shtype != oper.shtype:
return False
if self.shimm != oper.shimm:
return False
return True
def involvesPC(self):
return self.reg == 15
def isDeref(self):
return False
def getOperValue(self, op, emu=None):
if self.reg == REG_PC:
return shifters[self.shtype](self.va, self.shimm)
if emu == None:
return None
return shifters[self.shtype](emu.getRegister(self.reg), self.shimm)
def render(self, mcanv, op, idx):
rname = arm_regs[self.reg][0]
mcanv.addNameText(rname, typename='registers')
if self.shimm != 0:
mcanv.addText(', ')
mcanv.addNameText(shift_names[self.shtype])
mcanv.addText(' ')
mcanv.addNameText('#%d' % self.shimm)
elif self.shtype == S_RRX:
mcanv.addText(', ')
mcanv.addNameText(shift_names[self.shtype])
def repr(self, op):
rname = arm_regs[self.reg][0]
retval = [ rname ]
if self.shimm != 0:
retval.append(", "+shift_names[self.shtype])
retval.append("#%d"%self.shimm)
elif self.shtype == S_RRX:
retval.append(shift_names[self.shtype])
return " ".join(retval)
class ArmImmOper(ArmOperand):
''' register operand. see "addressing mode 1 - data processing operands - immediate" '''
def __init__(self, val, shval=0, shtype=S_ROR, va=0):
self.val = val
self.shval = shval
self.shtype = shtype
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.getOperValue(None) != oper.getOperValue(None):
return False
return True
def involvesPC(self):
return False
def isDeref(self):
return False
def isDiscrete(self):
return True
def getOperValue(self, op, emu=None):
return shifters[self.shtype](self.val, self.shval)
def render(self, mcanv, op, idx):
val = self.getOperValue(op)
mcanv.addNameText('#0x%.2x' % (val))
def repr(self, op):
val = self.getOperValue(op)
return '#0x%.2x' % (val)
class ArmScaledOffsetOper(ArmOperand):
''' scaled offset operand. see "addressing mode 2 - load and store word or unsigned byte - scaled register *" '''
def __init__(self, base_reg, offset_reg, shtype, shval, va, pubwl=0):
if shval == 0:
if shtype == S_ROR:
shtype = S_RRX
elif shtype == S_LSR or shtype == S_ASR:
shval = 32
self.base_reg = base_reg
self.offset_reg = offset_reg
self.shtype = shtype
self.shval = shval
self.pubwl = pubwl
self.va = va
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.base_reg != oper.base_reg:
return False
if self.offset_reg != oper.offset_reg:
return False
if self.shtype != oper.shtype:
return False
if self.shval != oper.shval:
return False
if self.pubwl != oper.pubwl:
return False
return True
def involvesPC(self):
return self.base_reg == 15
def isDeref(self):
return True
def getOperValue(self, op, emu=None, writeback=False):
if emu == None:
return None
retval = 0
addr = self.getOperAddr( op, emu )
rn = emu.getRegister( self.base_reg )
# FIXME: THIS IS COMPLETELY BORKED AND WRONG!
# if pre-indexed, we incremement/decrement the register before determining the OperAddr
if (self.pubwl & 0x12 == 0x12):
# pre-indexed...
if writeback: emu.setRegister( self.base_reg, addr )
return emu.readMemValue(addr, self.tsize)
elif (self.pubwl & 0x12 == 0):
# post-indexed... still write it but return the original value
if writeback: emu.setRegister( self.base_reg, addr )
return emu.readMemValue(addr, self.tsize)
# plain jane just return the calculated address... no updates are necessary
return addr
def getOperAddr(self, op, emu=None):
if emu == None:
return None
        if self.base_reg == REG_PC:
            addr = self.va
        else:
            addr = emu.getRegister(self.base_reg)
addval = shifters[self.shtype]( emu.getRegister( self.offset_reg ), self.shval )
# if U==0, subtract
addval *= (-1, 1)[(self.pubwl>>3)&1]
return addr + addval
def render(self, mcanv, op, idx):
pom = ('-','')[(self.pubwl>>3)&1]
idxing = self.pubwl & 0x12
basereg = arm_regs[self.base_reg][0]
offreg = arm_regs[self.offset_reg][0]
shname = shift_names[self.shtype]
mcanv.addText('[')
mcanv.addNameText(basereg, typename='registers')
if (idxing&0x10) == 0:
mcanv.addText('], ')
else:
mcanv.addText(', ')
mcanv.addText(pom)
mcanv.addNameText(offreg, typename='registers')
mcanv.addText(' ')
if self.shval != 0:
mcanv.addNameText(shname)
mcanv.addText(' ')
mcanv.addNameText('#%d' % self.shval)
if idxing == 0x10:
mcanv.addText(']')
elif idxing != 0:
mcanv.addText(']!')
def repr(self, op):
pom = ('-','')[(self.pubwl>>3)&1]
idxing = self.pubwl & 0x12
basereg = arm_regs[self.base_reg][0]
offreg = arm_regs[self.offset_reg][0]
shname = shift_names[self.shtype]
if self.shval != 0:
shval = "%s #%d"%(shname,self.shval)
elif self.shtype == S_RRX:
shval = shname
else:
shval = ""
if (idxing&0x10) == 0: # post-indexed
tname = '[%s], %s%s %s' % (basereg, pom, offreg, shval)
elif idxing == 0x10:
tname = '[%s, %s%s %s]' % (basereg, pom, offreg, shval)
else: # pre-indexed
tname = '[%s, %s%s %s]!' % (basereg, pom, offreg, shval)
return tname
class ArmRegOffsetOper(ArmOperand):
''' register offset operand. see "addressing mode 2 - load and store word or unsigned byte - register *"
dereference address mode using the combination of two register values '''
def __init__(self, base_reg, offset_reg, va, pubwl=0):
self.base_reg = base_reg
self.offset_reg = offset_reg
self.pubwl = pubwl
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.base_reg != oper.base_reg:
return False
if self.offset_reg != oper.offset_reg:
return False
if self.pubwl != oper.pubwl:
return False
return True
def involvesPC(self):
return self.base_reg == 15
def isDeref(self):
return True
    def getOperValue(self, op, emu=None, writeback=False):
if emu == None:
return None
rn = emu.getRegister( self.base_reg )
addr = self.getOperAddr( op, emu, rn )
# if pre-indexed, we incremement/decrement the register before determining the OperAddr
if (self.pubwl & 0x12 == 0x12):
# pre-indexed...
if writeback: emu.setRegister( self.base_reg, addr )
return addr
elif (self.pubwl & 0x12 == 0):
# post-indexed... still write it but return the original value
if writeback: emu.setRegister( self.base_reg, addr )
return rn
# plain jane just return the calculated address... no updates are necessary
return addr
def getOperAddr(self, op, emu=None, rn=None):
if emu == None:
return None
if rn == None:
rn = emu.getRegister( self.base_reg )
rm = emu.getRegister( self.offset_reg )
return rn + rm
def render(self, mcanv, op, idx):
pom = ('-','')[(self.pubwl>>3)&1]
idxing = self.pubwl & 0x12
basereg = arm_regs[self.base_reg][0]
offreg = arm_regs[self.offset_reg][0]
mcanv.addText('[')
mcanv.addNameText(basereg, typename='registers')
if (idxing&0x10) == 0:
mcanv.addText('] ')
else:
mcanv.addText(', ')
mcanv.addText(pom)
mcanv.addNameText(offreg, typename='registers')
if idxing == 0x10:
mcanv.addText(']')
elif idxing&0x10 != 0:
mcanv.addText(']!')
def repr(self, op):
pom = ('-','')[(self.pubwl>>3)&1]
idxing = self.pubwl & 0x12
basereg = arm_regs[self.base_reg][0]
offreg = arm_regs[self.offset_reg][0]
if (idxing&0x10) == 0: # post-indexed
tname = '[%s], %s%s' % (basereg, pom, offreg)
elif idxing == 0x10: # offset addressing, not updated
tname = '[%s, %s%s]' % (basereg, pom, offreg)
else: # pre-indexed
tname = '[%s, %s%s]!' % (basereg, pom, offreg)
return tname
class ArmImmOffsetOper(ArmOperand):
''' immediate offset operand. see "addressing mode 2 - load and store word or unsigned byte - immediate *"
[ base_reg, offset ]
possibly with indexing, pre/post for faster rolling through arrays and such
if the base_reg is PC, we'll dig in and hopefully grab the data being referenced.
'''
def __init__(self, base_reg, offset, va, pubwl=8):
self.base_reg = base_reg
self.offset = offset
self.pubwl = pubwl
self.va = va
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.base_reg != oper.base_reg:
return False
if self.offset != oper.offset:
return False
if self.pubwl != oper.pubwl:
return False
return True
def involvesPC(self):
return self.base_reg == REG_PC
def isDeref(self):
return True
def setOperValue(self, op, emu=None, val=None):
# can't survive without an emulator
if emu == None:
return None
pubwl = self.pubwl >> 2
b = pubwl & 1
addr = self.getOperAddr(op, emu)
fmt = ("<I", "B")[b]
emu.writeMemoryFormat(addr, fmt, val)
def getOperValue(self, op, emu=None):
# can't survive without an emulator
if emu == None:
return None
pubwl = self.pubwl >> 2
b = pubwl & 1
addr = self.getOperAddr(op, emu)
fmt = ("<I", "B")[b]
ret, = emu.readMemoryFormat(addr, fmt)
return ret
def getOperAddr(self, op, emu=None):
# there are certain circumstances where we can survive without an emulator
pubwl = self.pubwl >> 3
u = pubwl & 1
# if we don't have an emulator, we must be PC-based since we know it
if self.base_reg == REG_PC:
addr = self.va
elif emu == None:
return None
else:
addr = emu.getRegister(self.base_reg)
if u:
addr += self.offset
else:
addr -= self.offset
return addr
def render(self, mcanv, op, idx):
u = (self.pubwl>>3)&1
idxing = self.pubwl & 0x12
basereg = arm_regs[self.base_reg][0]
if self.base_reg == REG_PC:
mcanv.addText('[')
addr = self.getOperAddr(op, mcanv.mem) # only works without an emulator because we've already verified base_reg is PC
if mcanv.mem.isValidPointer(addr):
name = addrToName(mcanv, addr)
mcanv.addVaText(name, addr)
else:
mcanv.addVaText('#0x%.8x' % addr, addr)
mcanv.addText(']')
value = self.getOperValue(op, mcanv.mem)
if value != None:
mcanv.addText("\t; ")
if mcanv.mem.isValidPointer(value):
name = addrToName(mcanv, value)
mcanv.addVaText(name, value)
else:
mcanv.addNameText("0x%x" % value)
# FIXME: is there any chance of us doing indexing on PC?!?
if idxing != 0x10:
print "OMJ! indexing on the program counter!"
else:
pom = ('-','')[u]
mcanv.addText('[')
mcanv.addNameText(basereg, typename='registers')
if self.offset == 0:
mcanv.addText(']')
else:
if (idxing&0x10) == 0:
mcanv.addText('] ')
else:
mcanv.addText(', ')
mcanv.addNameText('#%s0x%x' % (pom,self.offset))
if idxing == 0x10:
mcanv.addText(']')
elif idxing != 0:
mcanv.addText(']!')
def repr(self, op):
u = (self.pubwl>>3)&1
idxing = (self.pubwl) & 0x12
basereg = arm_regs[self.base_reg][0]
if self.base_reg == REG_PC:
addr = self.getOperAddr(op) # only works without an emulator because we've already verified base_reg is PC
tname = "[#0x%x]" % addr
# FIXME: is there any chance of us doing indexing on PC?!?
if idxing != 0x10:
print "OMJ! indexing on the program counter!"
else:
pom = ('-','')[u]
if self.offset != 0:
offset = ", #%s0x%x"%(pom,self.offset)
else:
offset = ""
if (idxing&0x10) == 0: # post-indexed
tname = '[%s]%s' % (basereg, offset)
else:
if idxing == 0x10: # offset addressing, not updated
tname = '[%s%s]' % (basereg,offset)
else: # pre-indexed
tname = '[%s%s]!' % (basereg,offset)
return tname
class ArmPcOffsetOper(ArmOperand):
'''
PC + imm_offset
ArmImmOper but for Branches, not a dereference. perhaps we can have ArmImmOper do all the things... but for now we have this.
'''
def __init__(self, val, va):
self.val = val # depending on mode, this is reg/imm
self.va = va
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.val != oper.val:
return False
if self.va != oper.va:
return False
return True
def involvesPC(self):
return True
def isDeref(self):
return False
def isDiscrete(self):
return False
def getOperValue(self, op, emu=None):
return self.va + self.val
def render(self, mcanv, op, idx):
value = self.getOperValue(op)
if mcanv.mem.isValidPointer(value):
name = addrToName(mcanv, value)
mcanv.addVaText(name, value)
else:
mcanv.addVaText('0x%.8x' % value, value)
def repr(self, op):
targ = self.getOperValue(op)
tname = "#0x%.8x" % targ
return tname
psrs = ("CPSR", "SPSR", 'inval', 'inval', 'inval', 'inval', 'inval', 'inval',)
fields = (None, 'c', 'x', 'cx', 's', 'cs', 'xs', 'cxs', 'f', 'fc', 'fx', 'fcx', 'fs', 'fcs', 'fxs', 'fcxs')
class ArmPgmStatRegOper(ArmOperand):
def __init__(self, r, val=0, mask=0xffffffff):
self.mask = mask
self.val = val
self.psr = r
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.val != oper.val:
return False
        if self.psr != oper.psr:
return False
return True
def involvesPC(self):
return False
def isDeref(self):
return False
def getOperValue(self, op, emu=None):
if emu == None:
return None
mode = emu.getProcMode()
if self.psr: # SPSR
psr = emu.getSPSR(mode)
else:
psr = emu.getCPSR()
return psr
def setOperValue(self, op, emu=None, val=None):
if emu == None:
return None
mode = emu.getProcMode()
if self.psr: # SPSR
psr = emu.getSPSR(mode)
newpsr = psr & (~self.mask) | (val & self.mask)
emu.setSPSR(mode, newpsr)
else: # CPSR
psr = emu.getCPSR()
newpsr = psr & (~self.mask) | (val & self.mask)
emu.setCPSR(newpsr)
return newpsr
def repr(self, op):
return psrs[self.psr] + '_' + fields[self.val]
class ArmEndianOper(ArmImmOper):
def repr(self, op):
return endian_names[self.val]
def involvesPC(self):
return False
def isDeref(self):
return False
def getOperValue(self, op, emu=None):
return self.val
class ArmRegListOper(ArmOperand):
def __init__(self, val, oflags=0):
self.val = val
self.oflags = oflags
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.val != oper.val:
return False
if self.oflags != oper.oflags:
return False
return True
def involvesPC(self):
        return self.val & 0x8000 == 0x8000   # PC (r15) is bit 15 of the register list
def isDeref(self):
return False
def render(self, mcanv, op, idx):
mcanv.addText('{')
for l in xrange(16):
if self.val & 1<<l:
mcanv.addNameText(arm_regs[l][0], typename='registers')
mcanv.addText(', ')
mcanv.addText('}')
if self.oflags & OF_UM:
mcanv.addText('^')
def getOperValue(self, op, emu=None):
if emu == None:
return None
reglist = []
for regidx in xrange(16):
#FIXME: check processor mode (abort, system, user, etc... use banked registers?)
if self.val & (1<<regidx):
reg = emu.getRegister(regidx)
reglist.append(reg)
return reglist
def repr(self, op):
s = [ "{" ]
for l in xrange(16):
if (self.val & (1<<l)):
s.append(arm_regs[l][0])
s.append('}')
if self.oflags & OF_UM:
s.append('^')
return " ".join(s)
aif_flags = (None, 'f','i','if','a','af','ai','aif')
class ArmPSRFlagsOper(ArmOperand):
def __init__(self, flags):
self.flags = flags
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.flags != oper.flags:
return False
return True
def involvesPC(self):
return False
def isDeref(self):
return False
def getOperValue(self, op, emu=None):
if emu == None:
return None
raise Exception("FIXME: Implement ArmPSRFlagsOper.getOperValue() (does it want to be a bitmask? or the actual value according to the PSR?)")
return None # FIXME
def repr(self, op):
return aif_flags[self.flags]
class ArmCoprocOpcodeOper(ArmOperand):
def __init__(self, val):
self.val = val
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.val != oper.val:
return False
return True
def involvesPC(self):
return False
def isDeref(self):
return False
def getOperValue(self, op, emu=None):
return self.val
def repr(self, op):
return "%d"%self.val
class ArmCoprocOper(ArmOperand):
def __init__(self, val):
self.val = val
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.val != oper.val:
return False
return True
def involvesPC(self):
return False
def isDeref(self):
return False
def getOperValue(self, op, emu=None):
return self.val
def repr(self, op):
return "p%d"%self.val
class ArmCoprocRegOper(ArmOperand):
def __init__(self, val, shtype=None, shval=None):
self.val = val # depending on mode, this is reg/imm
self.shval = shval
self.shtype = shtype
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.val != oper.val:
return False
if self.shval != oper.shval:
return False
if self.shtype != oper.shtype:
return False
return True
def involvesPC(self):
return False
def isDeref(self):
return False
def getOperValue(self, op, emu=None):
if emu == None:
return None
raise Exception("FIXME: Implement ArmCoprocRegOper.getOperValue()")
return None # FIXME
def repr(self, op):
return "c%d"%self.val
class ArmModeOper(ArmOperand):
def __init__(self, mode, writeback=False):
self.mode = mode
self.writeback = writeback
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.mode != oper.mode:
return False
if self.writeback != oper.writeback:
return False
return True
def involvesPC(self):
return False
def isDeref(self):
return False
def getOperValue(self, op, emu=None):
return None
def repr(self, op):
return (proc_modes % self.mode)[PM_SNAME]
class ArmDbgHintOption(ArmOperand):
def __init__(self, option):
self.val = option
def __eq__(self, oper):
if not isinstance(oper, self.__class__):
return False
if self.val != oper.val:
return False
return True
def involvesPC(self):
return False
def isDeref(self):
return False
def getOperValue(self, op, emu=None):
return self.val
def repr(self, op):
return "#%d"%self.val
ENDIAN_LSB = 0
ENDIAN_MSB = 1
class ArmDisasm:
fmt = None
def __init__(self, endian=ENDIAN_LSB):
self.setEndian(endian)
def setEndian(self, endian):
self.fmt = ("<I", ">I")[endian]
def disasm(self, bytez, offset, va):
"""
Parse a sequence of bytes out into an envi.Opcode instance.
"""
opbytes = bytez[offset:offset+4]
opval, = struct.unpack(self.fmt, opbytes)
cond = opval >> 28
# Begin the table lookup sequence with the first 3 non-cond bits
encfam = (opval >> 25) & 0x7
if cond == COND_EXTENDED:
enc = IENC_UNCOND
else:
enc,nexttab = inittable[encfam]
if nexttab != None: # we have to sub-parse...
for mask,val,penc in nexttab:
if (opval & mask) == val:
enc = penc
break
# If we don't know the encoding by here, we never will ;)
if enc == None:
raise envi.InvalidInstruction(mesg="No encoding found!",
bytez=bytez[offset:offset+4], va=va)
opcode, mnem, olist, flags = ienc_parsers[enc](opval, va+8)
# since our flags determine how the instruction is decoded later....
# performance-wise this should be set as the default value instead of 0, but this is cleaner
#flags |= envi.ARCH_ARMV7
# Ok... if we're a non-conditional branch, *or* we manipulate PC unconditionally,
# lets call ourself envi.IF_NOFALL
if cond == COND_AL: # FIXME: this could backfire if COND_EXTENDED...
if opcode in (INS_B, INS_BX):
flags |= envi.IF_NOFALL
elif ( len(olist) and
isinstance(olist[0], ArmRegOper) and
olist[0].involvesPC() and
(opcode & 0xffff) not in no_update_Rd ): # FIXME: only want IF_NOFALL if it *writes* to PC!
showop = True
flags |= envi.IF_NOFALL
else:
flags |= envi.IF_COND
# FIXME conditionals are currently plumbed as "prefixes". Perhaps normalize to that...
op = ArmOpcode(va, opcode, mnem, cond, 4, olist, flags)
op.encoder = enc #FIXME: DEBUG CODE
return op
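# Minimal usage sketch (not executed here; the byte string and addresses are
# made-up examples):
#   d = ArmDisasm()
#   op = d.disasm('\x02\x10\xa0\xe3', 0, 0x8000)   # decodes "mov r1, #2"
#   print repr(op)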
if __name__ == '__main__':
import envi.archs
envi.archs.dismain( ArmDisasm() )
|
joshuahoman/vivisect
|
envi/archs/arm/disasm.py
|
Python
|
apache-2.0
| 74,993
|
[
"xTB"
] |
94e1b8e95a10efccd296999ac8d8e8b6fe75485743e40ffc87ad92d5e3845ac1
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
from pathlib import Path
from monty.serialization import dumpfn, loadfn
from pymatgen.core.periodic_table import Element
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.entries.entry_tools import (
EntrySet,
group_entries_by_composition,
group_entries_by_structure,
)
from pymatgen.util.testing import PymatgenTest
test_dir = Path(__file__).absolute().parent / ".." / ".." / ".." / "test_files"
class FuncTest(unittest.TestCase):
def test_group_entries_by_structure(self):
entries = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR, "TiO2_entries.json"))
groups = group_entries_by_structure(entries)
self.assertEqual(sorted(len(g) for g in groups), [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 4])
self.assertLess(len(groups), len(entries))
# Make sure no entries are left behind
self.assertEqual(sum(len(g) for g in groups), len(entries))
def test_group_entries_by_composition(self):
entries = [
ComputedEntry("Na", -2),
ComputedEntry("Na", -5),
ComputedEntry("Cl", -1),
ComputedEntry("Cl", -10),
ComputedEntry("NaCl", -20),
ComputedEntry("NaCl", -21),
ComputedEntry("Na2Cl2", -50),
]
groups = group_entries_by_composition(entries)
self.assertEqual(sorted(len(g) for g in groups), [2, 2, 3])
self.assertLess(len(groups), len(entries))
# Make sure no entries are left behind
self.assertEqual(sum(len(g) for g in groups), len(entries))
# test sorting by energy
for g in groups:
assert g == sorted(g, key=lambda e: e.energy_per_atom)
class EntrySetTest(unittest.TestCase):
def setUp(self):
entries = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR, "Li-Fe-P-O_entries.json"))
self.entry_set = EntrySet(entries)
def test_chemsys(self):
self.assertEqual(self.entry_set.chemsys, {"Fe", "Li", "O", "P"})
def test_get_subset(self):
entries = self.entry_set.get_subset_in_chemsys(["Li", "O"])
for e in entries:
self.assertTrue({Element.Li, Element.O}.issuperset(e.composition.keys()))
self.assertRaises(ValueError, self.entry_set.get_subset_in_chemsys, ["Fe", "F"])
def test_remove_non_ground_states(self):
l = len(self.entry_set)
self.entry_set.remove_non_ground_states()
self.assertLess(len(self.entry_set), l)
def test_as_dict(self):
dumpfn(self.entry_set, "temp_entry_set.json")
entry_set = loadfn("temp_entry_set.json")
self.assertEqual(len(entry_set), len(self.entry_set))
os.remove("temp_entry_set.json")
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| materialsproject/pymatgen | pymatgen/entries/tests/test_entry_tools.py | Python | mit | 2,920 | ["pymatgen"] | 387a5abd8f298138df41d2f34a8523567ee3995e239dd1a0a0dba0bc5f2ef51d |
# -*- coding: utf-8 -*-
"""
Implements Optimistic Q-Learning for policies in pynfg.iterSemiNFG objects
Created on Fri Mar 22 15:32:33 2013
Copyright (C) 2013 James Bono
GNU Affero General Public License
Author: Dongping Xie
"""
from __future__ import division
import numpy as np
import matplotlib.pylab as plt
from pynfg.utilities.utilities import convert_2_pureCPT, iterated_input_dict
import copy
import warnings
class QLearning(object):
"""
Finds the **uncoordinated** best policy using Q-learning.
:arg Game: The iterated semi-NFG on which to perform the RL
:type Game: iterSemiNFG
    :arg specs: A nested dictionary containing specifications of the
game. See below for details
:type specs: dict
The specs dictionary is a triply nested dictionary. The first
level of keys is player names. For each player there is an entry for
the player's:
Level : int
The player's level
w : float
The learning rate
delta : float
The discount factor
The rest of the entries are basenames. The value of
each basename is a dictionary containing:
N : int
The number of training episodes
r_max : float
        (Optional) a guess of the upper bound of the reward in a single time
step. The default is 0 if no value is specified.
"""
def __init__(self, Game, specs):
self.Game = copy.deepcopy(Game)
self.specs = specs
self.trained_CPTs = {}
self.figs = {}
for player in Game.players:
basenames = set(map(lambda x: x.basename, Game.partition[player]))
for bn in basenames:
self.trained_CPTs[player] = {}
self.trained_CPTs[player][bn] = {}
self.trained_CPTs[player][bn][0] = self._set_L0_CPT()
self.figs[bn] = {}
self.high_level = max(map(lambda x: self.specs[x]['Level'], Game.players))
def _set_L0_CPT(self):
""" Sets the level 0 CPT"""
Game = self.Game
ps = self.specs
for player in ps:
basenames = set(map(lambda x: x.basename, Game.partition[player]))
for bn in basenames:
if ps[player][bn]['L0Dist'] == 'uniform':
return Game.bn_part[bn][0].uniformCPT(setCPT=False)
elif ps[player][bn]['L0Dist'] is None:
warnings.warn("No entry for L0Dist for player %s,\
setting to current CPT" % player)
return Game.bn_part[bn][0].CPT
elif type(ps[player][bn]['L0Dist']) == np.ndarray:
return ps[player][bn]['L0Dist']
def train_node(self, bn, level, setCPT=False):
"""Solve for the optimal policy using Optimistic Q-learning. Optimistic
Q-Learning is an off-policy TD control RL algorithm
:arg bn: The basename of the node with the CPT to be trained
:type bn: str
:arg level: The level at which to train the basename
:type level: int
"""
print 'Training ' + bn + ' at level '+ str(level)
Game = copy.deepcopy(self.Game)
ps = self.specs
player = Game.bn_part[bn][0].player
w, d, N, r_max = ps[player]['w'], ps[player]['delta'], ps[player][bn]['N'], \
ps[player][bn]['r_max']
#Set other CPTs to level-1. Works even if CPTs aren't pointers.
for o_player in Game.players:
bn_list = list(set(map(lambda x: x.basename, Game.partition[o_player])))
for base in bn_list:
if base != bn:
for dn in Game.bn_part[base]:
try:
dn.CPT = \
(self.trained_CPTs[o_player][base][level - 1])
except KeyError:
raise KeyError('Need to train other players at level %s'
% str(level-1))
T0 = Game.starttime #get the start time
T = Game.endtime + 1 #get the end time
shape = Game.bn_part[bn][T0].CPT.shape #the shape of CPT
if d<1:
Q0 = r_max*((1-d**(T-T0))/(1-d)) #the initial q value
else:
Q0 = r_max*(T-T0)
Q = Q0 * np.ones(shape) #the initial q table
visit = np.zeros(shape)
#the number of times each (m,a) pair has been visited.
r_av = 0 #the dynamic (discounted) average reward
rseries = [] #a series of average rewards
for ep in xrange(N):
print ep
#convert Q table to CPT
Game.bn_part[bn][T0].CPT = convert_2_pureCPT(Q)
Game.sample_timesteps(T0,T0) #sample the start time step
malist = Game.bn_part[bn][T0].dict2list_vals(valueinput= \
Game.bn_part[bn][T0].value)
#get the list of (m,a) pair from the iterated semi-NFG
mapair = Game.bn_part[bn][T0].get_CPTindex(malist) #get CPT index
r = Game.reward(player,T0) #get the (discounted) reward
if ep != 0: #to avoid "divided by 0" error
r_av_new = r_av + (r-r_av)/((T-1)*ep) #update the dynamic reward
Qmax = Q[mapair] #get the maximum q value
for t in xrange(T0+1,T):
Game.bn_part[bn][t].CPT = convert_2_pureCPT(Q) #convert Q table to CPT
Game.sample_timesteps(t,t) #sample the current time step
if t!= (T-1): #required by Q-learning
r = d**t*Game.reward(player,t) # get the (discounted) reward
r_av_new = r_av + (r-r_av)/((T-1)*ep+t) #update the reward
malist_new = Game.bn_part[bn][t].dict2list_vals(valueinput= \
Game.bn_part[bn][t].value)
mapair_new = Game.bn_part[bn][t].get_CPTindex(malist_new)
visit[mapair] = visit[mapair] + 1 #update the number of times
alpha = (1/(1+visit[mapair]))**w #the learning rate
Qmax_new = Q[mapair_new] #new maximum q value
Q[mapair] = Qmax + alpha*(r + d*Qmax_new -Qmax) #update q table
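                #i.e. the standard Q-learning update toward the one-step TD
                #target r + d*Q(m',a'), with a polynomially decaying learning
                #rate alpha = (1/(1+visits))**w; optimism comes from Q0 above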
mapair = mapair_new
Qmax = Qmax_new
r_av = r_av_new
rseries.append(r_av)
self.trained_CPTs[player][bn][level] = Game.bn_part[bn][0].CPT
plt.figure()
plt.plot(rseries, label = str(bn + ' Level ' + str(level)))
#plotting rseries to gauge convergence
plt.legend()
fig = plt.gcf()
self.figs[bn][str(level)] = fig
if setCPT:
map(lambda x: _setallCPTs(self.Game,bn, x, Game.bn_part[bn][0].CPT), np.arange(T0, T))
def solve_game(self, setCPT=False):
"""Solves the game sfor specified player levels"""
Game = self.Game
ps = self.specs
for level in np.arange(1, self.high_level):
for player in Game.players:
basenames = set(map(lambda x: x.basename, Game.partition[player]))
for controlled in basenames:
self.train_node(controlled, level, setCPT=setCPT)
for player in Game.players:
basenames = set(map(lambda x: x.basename, Game.partition[player]))
for controlled in basenames:
if ps[player]['Level'] == self.high_level:
self.train_node(controlled, self.high_level, setCPT=setCPT)
def qlearning_dict(Game, Level, w, N, delta, r_max=0, L0Dist=None):
"""
Creates the specs shell for a game to be solved using Q learning.
:arg Game: An iterated SemiNFG
:type Game: iterSemiNFG
.. seealso::
See the Q Learning documentation (above) for details of the optional arguments
"""
    return iterated_input_dict(Game, [('Level', Level), ('delta', delta), ('w', w)],
                               [('L0Dist', L0Dist), ('N', N),
                                ('r_max', r_max)])
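# A hedged usage sketch (the Game object and the parameter values here are
# assumptions for illustration only):
#
#   specs = qlearning_dict(Game, Level=1, w=0.5, N=200, delta=0.9, r_max=10)
#   solver = QLearning(Game, specs)
#   solver.solve_game()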
def _setallCPTs(Game,basename, t, newCPT):
Game.bn_part[basename][t].CPT = newCPT
| jwbono/PyNFG | pynfg/levelksolutions/qlearning.py | Python | agpl-3.0 | 8,130 | ["VisIt"] | 19d699e268b4833df35b21ce66776f325d25474e9ec40e3fd5d0b725e6be633e |
# -*- coding: utf-8 -*-
#
# This file is part of Lalf.
#
# Lalf is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lalf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Lalf. If not, see <http://www.gnu.org/licenses/>.
"""
Module handling the exportation of users (DEPRECATED)
This is the module that previously handled the exportation of
users. Using it can (and probably will) prevent you from having access
to the users list of your administration panel for 24 hours (thus
preventing you from exporting them).
The ocrusers module now handles the exportation, using this users
module to create the entries in the sql file.
"""
import re
import hashlib
import urllib.parse
import base64
from binascii import crc32
from pyquery import PyQuery
from lalf.node import Node
from lalf.util import Counter, pages, random_string, parse_admin_date, clean_url
from lalf.phpbb import BOTS
from lalf import htmltobbcode
EMAIL = base64.b64decode(b'bGFsZkBvcGVubWFpbGJveC5vcmc=\n').decode("utf-8")
PM_SUBJECT = "Félicitations !"
PM_POST = """Félicitations !
Vous avez terminé la première partie de l'importation. Suivez la
partie Resynchronisation du README pour terminer la migration.
Une fois la migration terminée, n'hésitez pas à m'envoyer vos
remerciements à l'adresse {}. Si vous le souhaitez, vous pouvez me
supporter en m'offrant un café ;) , j'accepte les dons en
bitcoins.""".format(EMAIL)
def email_hash(email):
"""
Email hash function used by phpbb
"""
return str(crc32(email.encode("utf-8"))&0xffffffff) + str(len(email))
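# For example, email_hash("john@example.org") returns the unsigned CRC32 of
# the address concatenated with "16" (its length), which is the value phpbb
# stores in the user_email_hash column.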
class MemberPageBlocked(Exception):
"""
Exception raised when the member page is blocked
"""
def __str__(self):
return (
"Vous avez été bloqué par forumactif. Attendez d'être débloqué avant de relancer le "
"script (environ 24h).\n\n"
"Pour savoir si vous êtes bloqué, essayez d'accéder à la deuxième page de la gestion "
"des utilisateurs dans votre panneau d'administration (Utilisateurs & Groupes > "
"Gestion des utilisateurs). Si vous êtes bloqué, vous allez être redirigé vers la page "
"d'accueil de votre panneau d'administration.\n\n"
"Pour ne pas avoir à attendre, utilisez l'otion use_ocr."
)
def md5(string):
"""
Compute the md5 hash of a string
"""
return hashlib.md5(string.encode("utf8")).hexdigest()
class NoUser(object):
def __init__(self):
self.newid = 0
self.name = ""
self.colour = ""
class AnonymousUser(Node):
"""
Node representing the anonymous user
"""
STATE_KEEP = ["newid", "name", "colour"]
def __init__(self):
Node.__init__(self)
self.newid = 1
self.name = ""
self.colour = ""
def _dump_(self, sqlfile):
post_times = [post.time for post in self.root.get_posts() if post.poster.newid == 1]
num_posts = sum(1 for _ in post_times)
if num_posts > 0:
lastpost_time = max(post_times)
else:
lastpost_time = 0
sqlfile.insert("users", {
"user_id" : "1",
"user_type" : "2",
"group_id" : "1",
"username" : "Anonymous",
"username_clean" : "anonymous",
"user_regdate" : self.root.startdate,
"user_lang" : self.config["default_lang"],
"user_style" : "1",
"user_allow_massemail" : "0",
"user_lastpost_time" : lastpost_time,
"user_posts" : num_posts
})
sqlfile.insert("user_group", {
"group_id" : "1",
"user_id" : "1",
"user_pending" : "0"
})
class User(Node):
"""
Node representing a user
Attrs:
oldid (int): The id of the user in the old forum
name (str): His username
mail (str): The email address of the user
posts (int): The number of posts
date (int): subscription date (timestamp)
lastvisit (int): date of last visit (timestamp)
newid (int): The id of the user in the new forum
"""
STATE_KEEP = ["oldid", "newid", "name", "mail", "posts", "date", "lastvisit", "colour", "groups"]
def __init__(self, oldid, name, mail, posts, date, lastvisit, colour=""):
Node.__init__(self)
self.oldid = oldid
self.name = name
self.mail = mail
self.posts = posts
self.date = date
self.lastvisit = lastvisit
self.colour = colour
self.groups = []
self.newid = None
def _export_(self):
if self.newid is None:
if self.name == self.config["admin_name"]:
self.newid = 2
elif self.name == "Anonymous":
self.newid = 1
else:
self.newid = self.users_count.value
self.users_count += 1
self.root.current_users += 1
self.ui.update()
self.users[self.oldid] = self
def confirm_email(self):
"""
Let the user confirm the email address if it could not be
validated (for compatibility with OcrUser)
"""
return
def _dump_(self, sqlfile):
try:
group_id = self.groups[0].newid
        except IndexError:
group_id = 2
if 5 in [group.newid for group in self.groups]:
# The user is an administrator
self.colour = "AA0000"
post_times = [post.time for post in self.root.get_posts() if post.poster == self]
num_posts = sum(1 for _ in post_times)
if num_posts > 0:
lastpost_time = max(post_times)
else:
lastpost_time = 0
user = {
"user_id" : self.newid,
"group_id" : group_id,
"user_regdate" : self.date,
"username" : self.name,
"username_clean" : self.name.lower(),
"user_password" : md5(random_string()),
"user_pass_convert" : "1",
"user_email" : self.mail,
"user_email_hash" : email_hash(self.mail),
"user_lastvisit" : self.lastvisit,
"user_lastpost_time" : lastpost_time,
"user_posts" : num_posts,
"user_lang" : self.config["default_lang"],
"user_style" : "1",
#"user_rank" (TODO)
"user_colour" : self.colour
#"user_avatar" (TODO)
#"user_sig" (TODO)
#"user_from" (TODO)
#"user_website" (TODO) (...)
}
# Check if the user is the administrator
if self.name == self.config["admin_name"]:
user.update({
"user_type": 3,
"user_password" : md5(self.config["admin_password"]),
"user_rank" : 1,
"user_new_privmsg" : 1,
"user_unread_privmsg" : 1,
"user_last_privmsg" : self.root.dump_time
})
if self.name == "Anonymous":
user.update({
"user_type": 2,
"group_id": 1,
"user_allow_massemail" : "0"
})
# Add user to database
sqlfile.insert("users", user)
# Add user to registered group
if self.name == "Anonymous":
sqlfile.insert("user_group", {
"group_id" : "1",
"user_id" : "1",
"user_pending" : "0"
})
else:
sqlfile.insert("user_group", {
"group_id" : 2,
"user_id" : self.newid,
"user_pending" : 0
})
for group in self.groups:
group_leader = 1 if group.leader_name == self.name else 0
sqlfile.insert("user_group", {
"group_id" : group.newid,
"user_id" : self.newid,
"user_pending" : 0,
"group_leader": group_leader
})
# Check if the user is the administrator
if self.name == self.config["admin_name"]:
# Add user to global moderators group
sqlfile.insert("user_group", {
"group_id" : 4,
"user_id" : self.newid,
"user_pending" : 0
})
# Send a private message confirming the import was successful
parser = htmltobbcode.Parser(self.root)
parser.feed(PM_POST)
post = parser.get_post()
sqlfile.insert("privmsgs", {
'msg_id' : 1,
'author_id' : self.newid,
'message_time' : self.root.dump_time,
'message_subject' : PM_SUBJECT,
'message_text' : post.text,
'bbcode_bitfield' : post.bitfield,
'bbcode_uid' : post.uid,
'to_address' : "u_{}".format(self.newid),
'bcc_address' : ""
})
# Add the message in the inbox
sqlfile.insert("privmsgs_to", {
'msg_id' : 1,
'user_id' : self.newid,
'author_id' : self.newid,
'folder_id' : -1
})
# Add the message in the outbox
sqlfile.insert("privmsgs_to", {
'msg_id' : 1,
'user_id' : self.newid,
'author_id' : self.newid,
'folder_id' : 0
})
class UsersPage(Node):
"""
Node representing a page of the list of users
"""
# Attributes to keep
STATE_KEEP = ["page"]
def __init__(self, page):
Node.__init__(self)
self.page = page
def _export_(self):
self.logger.debug('Récupération des membres (page %d)', self.page)
# Get the page of list of users from the administration panel
params = {
"part" : "users_groups",
"sub" : "users",
"start" : self.page
}
response = self.session.get_admin("/admin/index.forum", params=params)
# Check if the page was blocked
query = urllib.parse.urlparse(response.url).query
query = urllib.parse.parse_qs(query)
if "start" not in query:
raise MemberPageBlocked()
document = PyQuery(response.text)
for element in document('tbody tr'):
e = PyQuery(element)
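            # The first link in the row is the member edit link; a
            # hypothetical example would contain "...&u=1234&..." in its
            # query string, so the old user id is extracted from it below.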
oldid = int(re.search(r"&u=(\d+)&", clean_url(e("td a").eq(0).attr("href"))).group(1))
self.logger.info('Récupération du membre %d', oldid)
name = e("td a").eq(0).text()
mail = e("td a").eq(1).text()
posts = int(e("td").eq(2).text())
date = parse_admin_date(e("td").eq(3).text())
lastvisit = parse_admin_date(e("td").eq(4).text())
self.add_child(User(oldid, name, mail, posts, date, lastvisit))
@Node.expose(count="users_count")
class Users(Node):
"""
Node used to export the users (DEPRECATED)
"""
# Attributes to save
STATE_KEEP = ["count"]
def __init__(self):
Node.__init__(self)
# User ids start at one, the first one is the anonymous user,
# and the second one is the administrator
self.count = Counter(len(BOTS) + 3)
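        # Ids 1 (anonymous) and 2 (administrator) are reserved and the bots
        # get ids 3 .. len(BOTS) + 2 in _dump_, so regular users start at
        # len(BOTS) + 3.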
def _export_(self):
self.logger.info('Récupération des membres')
self.add_child(AnonymousUser())
# Get the list of users from the administration panel
params = {
"part" : "users_groups",
"sub" : "users"
}
response = self.session.get_admin("/admin/index.forum", params=params)
for page in pages(response.text):
self.add_child(UsersPage(page))
def _dump_(self, sqlfile):
user_id = 3
# Add bots
for bot in BOTS:
sqlfile.insert("users", {
"user_id" : user_id,
"user_type" : "2",
"group_id" : "6",
"user_regdate" : self.root.startdate,
"username" : bot["name"],
"username_clean" : bot["name"].lower(),
"user_passchg" : self.root.dump_time,
"user_lastmark" : self.root.dump_time,
"user_lang" : self.config["default_lang"],
"user_dateformat" : "D M d, Y g:i a",
"user_style" : "1",
"user_colour" : "9E8DA7",
"user_allow_pm" : "0",
"user_allow_massemail" : "0"})
sqlfile.insert("user_group", {
"group_id" : "6",
"user_id" : user_id,
"user_pending" : "0"})
sqlfile.insert("bots", {
"bot_name" : bot["name"],
"user_id" : user_id,
"bot_agent" : bot["agent"]})
user_id += 1
| Roromis/Lalf-Forumactif | lalf/users.py | Python | gpl-3.0 | 13,345 | ["VisIt"] | e57d5ac6bfb291229302b699920b74107686682ed5fa6049f641d3c9141b924c |
# -*- coding: utf-8 -*-
"""
End-to-end tests for Student's Profile Page.
"""
from contextlib import contextmanager
import unittest
from datetime import datetime
from bok_choy.web_app_test import WebAppTest
from nose.plugins.attrib import attr
from django.utils.timezone import UTC
from ...pages.common.logout import LogoutPage
from ...pages.lms.account_settings import AccountSettingsPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.learner_profile import LearnerProfilePage
from ...pages.lms.dashboard import DashboardPage
from ..helpers import EventsTestMixin
class LearnerProfileTestMixin(EventsTestMixin):
"""
Mixin with helper methods for testing learner profile pages.
"""
PRIVACY_PUBLIC = u'all_users'
PRIVACY_PRIVATE = u'private'
PUBLIC_PROFILE_FIELDS = ['username', 'country', 'language_proficiencies', 'bio']
PRIVATE_PROFILE_FIELDS = ['username']
PUBLIC_PROFILE_EDITABLE_FIELDS = ['country', 'language_proficiencies', 'bio']
USER_SETTINGS_CHANGED_EVENT_NAME = u"edx.user.settings.changed"
def log_in_as_unique_user(self):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(self.browser, username=username).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def set_public_profile_fields_data(self, profile_page):
"""
Fill in the public profile fields of a user.
"""
profile_page.value_for_dropdown_field('language_proficiencies', 'English')
profile_page.value_for_dropdown_field('country', 'United Arab Emirates')
profile_page.set_value_for_textarea_field('bio', 'Nothing Special')
def visit_profile_page(self, username, privacy=None):
"""
Visit a user's profile page and if a privacy is specified and
is different from the displayed value, then set the privacy to that value.
"""
profile_page = LearnerProfilePage(self.browser, username)
# Change the privacy if requested by loading the page and
# changing the drop down
if privacy is not None:
profile_page.visit()
# Change the privacy setting if it is not the desired one already
profile_page.privacy = privacy
# Verify the current setting is as expected
if privacy == self.PRIVACY_PUBLIC:
self.assertEqual(profile_page.privacy, 'all_users')
else:
self.assertEqual(profile_page.privacy, 'private')
if privacy == self.PRIVACY_PUBLIC:
self.set_public_profile_fields_data(profile_page)
# Reset event tracking so that the tests only see events from
# loading the profile page.
self.start_time = datetime.now(UTC()) # pylint: disable=attribute-defined-outside-init
# Load the page
profile_page.visit()
return profile_page
def set_birth_year(self, birth_year):
"""
Set birth year for the current user to the specified value.
"""
account_settings_page = AccountSettingsPage(self.browser)
account_settings_page.visit()
account_settings_page.wait_for_page()
self.assertEqual(
account_settings_page.value_for_dropdown_field('year_of_birth', str(birth_year)),
str(birth_year)
)
def verify_profile_page_is_public(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently public.
"""
self.assertEqual(profile_page.visible_fields, self.PUBLIC_PROFILE_FIELDS)
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.editable_fields, self.PUBLIC_PROFILE_EDITABLE_FIELDS)
else:
self.assertEqual(profile_page.editable_fields, [])
def verify_profile_page_is_private(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently private.
"""
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.visible_fields, self.PRIVATE_PROFILE_FIELDS)
def verify_profile_page_view_event(self, requesting_username, profile_user_id, visibility=None):
"""
Verifies that the correct view event was captured for the profile page.
"""
actual_events = self.wait_for_events(
start_time=self.start_time,
event_filter={'event_type': 'edx.user.settings.viewed', 'username': requesting_username},
number_of_matches=1)
self.assert_events_match(
[
{
'username': requesting_username,
'event': {
'user_id': int(profile_user_id),
'page': 'profile',
'visibility': unicode(visibility)
}
}
],
actual_events
)
@contextmanager
def verify_pref_change_event_during(self, username, user_id, setting, **kwargs):
"""Assert that a single setting changed event is emitted for the user_api_userpreference table."""
expected_event = {
'username': username,
'event': {
'setting': setting,
'user_id': int(user_id),
'table': 'user_api_userpreference',
'truncated': []
}
}
expected_event['event'].update(kwargs)
event_filter = {
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'username': username,
}
with self.assert_events_match_during(event_filter=event_filter, expected_events=[expected_event]):
yield
def initialize_different_user(self, privacy=None, birth_year=None):
"""
Initialize the profile page for a different test user
"""
username, user_id = self.log_in_as_unique_user()
# Set the privacy for the new user
if privacy is None:
privacy = self.PRIVACY_PUBLIC
self.visit_profile_page(username, privacy=privacy)
# Set the user's year of birth
if birth_year:
self.set_birth_year(birth_year)
# Log the user out
LogoutPage(self.browser).visit()
return username, user_id
@attr('shard_4')
class OwnLearnerProfilePageTest(LearnerProfileTestMixin, WebAppTest):
"""
Tests that verify a student's own profile page.
"""
def verify_profile_forced_private_message(self, username, birth_year, message=None):
"""
Verify age limit messages for a user.
"""
if birth_year is None:
birth_year = ""
self.set_birth_year(birth_year=birth_year)
profile_page = self.visit_profile_page(username)
self.assertTrue(profile_page.privacy_field_visible)
if message:
self.assertTrue(profile_page.age_limit_message_present)
else:
self.assertFalse(profile_page.age_limit_message_present)
self.assertIn(message, profile_page.profile_forced_private_message)
def test_profile_defaults_to_public(self):
"""
Scenario: Verify that a new user's profile defaults to public.
Given that I am a new user.
When I go to my profile page.
Then I see that the profile visibility is set to public.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
self.verify_profile_page_is_public(profile_page)
def assert_default_image_has_public_access(self, profile_page):
"""
Assert that profile image has public access.
"""
self.assertTrue(profile_page.profile_has_default_image)
self.assertTrue(profile_page.profile_has_image_with_public_access())
def test_make_profile_public(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my private profile page
And I set the profile visibility to public
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as public
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=self.PRIVACY_PRIVATE, new=self.PRIVACY_PUBLIC
):
profile_page.privacy = self.PRIVACY_PUBLIC
# Reload the page and verify that the profile is now public
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_public(profile_page)
def test_make_profile_private(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my public profile page
And I set the profile visibility to private
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as private
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=None, new=self.PRIVACY_PRIVATE
):
profile_page.privacy = self.PRIVACY_PRIVATE
# Reload the page and verify that the profile is now private
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_private(profile_page)
def test_dashboard_learner_profile_link(self):
"""
Scenario: Verify that my profile link is present on dashboard page and we can navigate to correct page.
Given that I am a registered user.
When I go to Dashboard page.
And I click on username dropdown.
Then I see Profile link in the dropdown menu.
When I click on Profile link.
Then I will be navigated to Profile page.
"""
username, user_id = self.log_in_as_unique_user()
dashboard_page = DashboardPage(self.browser)
dashboard_page.visit()
dashboard_page.click_username_dropdown()
self.assertIn('My Profile', dashboard_page.username_dropdown_link_text)
dashboard_page.click_my_profile_link()
my_profile_page = LearnerProfilePage(self.browser, username)
my_profile_page.wait_for_page()
def test_fields_on_my_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at her own private profile.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to private.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_page_is_private(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_fields_on_my_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at her own public profile.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see all the profile fields are shown.
And `location`, `language` and `about me` fields are editable.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.verify_profile_page_is_public(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PUBLIC)
def _test_dropdown_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a dropdown field.
"""
profile_page.value_for_dropdown_field(field_id, new_value)
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
self.browser.refresh()
profile_page.wait_for_page()
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
def _test_textarea_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a textarea field.
"""
profile_page.set_value_for_textarea_field(field_id, new_value)
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
self.browser.refresh()
profile_page.wait_for_page()
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
def test_country_field(self):
"""
Test behaviour of `Country` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set country value to `Pakistan`.
Then displayed country should be `Pakistan` and country field mode should be `display`
And I reload the page.
Then displayed country should be `Pakistan` and country field mode should be `display`
And I make `country` field editable
Then `country` field mode should be `edit`
And `country` field icon should be visible.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_dropdown_field(profile_page, 'country', 'Pakistan', 'Pakistan', 'display')
profile_page.make_field_editable('country')
self.assertEqual(profile_page.mode_for_field('country'), 'edit')
self.assertTrue(profile_page.field_icon_present('country'))
def test_language_field(self):
"""
Test behaviour of `Language` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set language value to `Urdu`.
Then displayed language should be `Urdu` and language field mode should be `display`
And I reload the page.
Then displayed language should be `Urdu` and language field mode should be `display`
Then I set empty value for language.
Then displayed language should be `Add language` and language field mode should be `placeholder`
And I reload the page.
Then displayed language should be `Add language` and language field mode should be `placeholder`
And I make `language` field editable
Then `language` field mode should be `edit`
And `language` field icon should be visible.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_dropdown_field(profile_page, 'language_proficiencies', 'Urdu', 'Urdu', 'display')
self._test_dropdown_field(profile_page, 'language_proficiencies', '', 'Add language', 'placeholder')
profile_page.make_field_editable('language_proficiencies')
self.assertTrue(profile_page.mode_for_field('language_proficiencies'), 'edit')
self.assertTrue(profile_page.field_icon_present('language_proficiencies'))
def test_about_me_field(self):
"""
Test behaviour of `About Me` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set about me value to `ThisIsIt`.
Then displayed about me should be `ThisIsIt` and about me field mode should be `display`
And I reload the page.
Then displayed about me should be `ThisIsIt` and about me field mode should be `display`
Then I set empty value for about me.
Then displayed about me should be empty character string and about me
field mode should be `placeholder`
And I reload the page.
Then displayed about me should be `Tell other edX learners a little about yourself: where you live,
what your interests are, why you're taking courses on edX, or what you hope to learn.` and about me
field mode should be `placeholder`
And I make `about me` field editable
Then `about me` field mode should be `edit`
"""
placeholder_value = ''
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_textarea_field(profile_page, 'bio', 'ThisIsIt', 'ThisIsIt', 'display')
self._test_textarea_field(profile_page, 'bio', '', placeholder_value, 'placeholder')
profile_page.make_field_editable('bio')
self.assertTrue(profile_page.mode_for_field('bio'), 'edit')
def test_birth_year_not_set(self):
"""
Verify message if birth year is not set.
Given that I am a registered user.
And birth year is not set for the user.
And I visit my profile page.
Then I should see a message that the profile is private until the year of birth is set.
"""
username, user_id = self.log_in_as_unique_user()
message = "You must specify your birth year before you can share your full profile."
self.verify_profile_forced_private_message(username, birth_year=None, message=message)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_is_under_age(self):
"""
Verify message if user is under age.
Given that I am a registered user.
And birth year is set so that age is less than 13.
And I visit my profile page.
Then I should see a message that the profile is private as I am under thirteen.
"""
username, user_id = self.log_in_as_unique_user()
under_age_birth_year = datetime.now().year - 10
self.verify_profile_forced_private_message(
username,
birth_year=under_age_birth_year,
message='You must be over 13 to share a full profile.'
)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_can_only_see_default_image_for_private_profile(self):
"""
Scenario: Default profile image behaves correctly for under age user.
Given that I am on my profile page with private access
And I can see default image
When I move my cursor to the image
        Then I cannot see the upload/remove image text
        And I cannot upload/remove the image.
"""
year_of_birth = datetime.now().year - 5
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_forced_private_message(
username,
year_of_birth,
message='You must be over 13 to share a full profile.'
)
self.assertTrue(profile_page.profile_has_default_image)
self.assertFalse(profile_page.profile_has_image_with_private_access())
def test_user_can_see_default_image_for_public_profile(self):
"""
Scenario: Default profile image behaves correctly for public profile.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        And I am able to upload a new image
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
def test_user_can_upload_the_profile_image_with_success(self):
"""
Scenario: Upload profile image works correctly.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        When I upload a new image via the file uploader
        Then I can see the changed image
        And I can also see the latest image after reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
profile_page.visit()
self.assertTrue(profile_page.image_upload_success)
def test_user_can_see_error_for_exceeding_max_file_size_limit(self):
"""
Scenario: Upload profile image does not work for > 1MB image file.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        When I upload a new image larger than 1 MB via the file uploader
        Then I can see the error message for the file size limit
        And I can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='larger_image.jpg')
self.assertEqual(profile_page.profile_image_message, "The file must be smaller than 1 MB in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_file_size_below_the_min_limit(self):
"""
Scenario: Upload profile image does not work for < 100 Bytes image file.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        When I upload a new image smaller than 100 bytes via the file uploader
        Then I can see the error message for the minimum file size limit
        And I can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='list-icon-visited.png')
self.assertEqual(profile_page.profile_image_message, "The file must be at least 100 bytes in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_wrong_file_type(self):
"""
Scenario: Upload profile image does not work for wrong file types.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        When I upload a new csv file via the file uploader
        Then I can see the error message for a wrong/unsupported file type
        And I can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='generic_csv.csv')
self.assertEqual(
profile_page.profile_image_message,
"The file must be one of the following types: .gif, .png, .jpeg, .jpg."
)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_remove_profile_image(self):
"""
Scenario: Remove profile image works correctly.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        When I click on the remove image link
        Then I can see the default image
        And I can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
self.assertTrue(profile_page.remove_profile_image())
self.assertTrue(profile_page.profile_has_default_image)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
def test_user_cannot_remove_default_image(self):
"""
        Scenario: Remove profile image does not work for default images.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see only the upload image text
        And I cannot see the remove image text
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
self.assertFalse(profile_page.remove_link_present)
def test_eventing_after_multiple_uploads(self):
"""
Scenario: An event is fired when a user with a profile image uploads another image
Given that I am on my profile page with public access
And I upload a new image via file uploader
When I upload another image via the file uploader
Then two upload events have been emitted
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg', wait_for_upload_button=False)
@attr('shard_4')
class DifferentUserLearnerProfilePageTest(LearnerProfileTestMixin, WebAppTest):
"""
Tests that verify viewing the profile page of a different user.
"""
def test_different_user_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's private profile.
Given that I am a registered user.
And I visit a different user's private profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
different_username, different_user_id = self.initialize_different_user(privacy=self.PRIVACY_PRIVATE)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_under_age(self):
"""
Scenario: Verify that an under age user's profile is private to others.
Given that I am a registered user.
And I visit an under age user's profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see that only the private fields are shown.
"""
under_age_birth_year = datetime.now().year - 10
different_username, different_user_id = self.initialize_different_user(
privacy=self.PRIVACY_PUBLIC,
birth_year=under_age_birth_year
)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's public profile.
Given that I am a registered user.
And I visit a different user's public profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then all the profile fields are shown.
Then I shouldn't see the profile visibility selector dropdown.
Also `location`, `language` and `about me` fields are not editable.
"""
different_username, different_user_id = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.wait_for_public_fields()
self.verify_profile_page_is_public(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PUBLIC)
@attr('a11y')
@unittest.skip("Does not check a11y in gacco")
class LearnerProfileA11yTest(LearnerProfileTestMixin, WebAppTest):
"""
Class to test learner profile accessibility.
"""
def test_editable_learner_profile_a11y(self):
"""
Test the accessibility of the editable version of the profile page
(user viewing her own public profile).
"""
username, _ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
profile_page.a11y_audit.config.set_rules({
"ignore": [
'color-contrast', # TODO: AC-232
'skip-link', # TODO: AC-179
'link-href', # TODO: AC-231
],
})
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('language_proficiencies')
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('bio')
profile_page.a11y_audit.check_for_accessibility_errors()
def test_read_only_learner_profile_a11y(self):
"""
Test the accessibility of the read-only version of a public profile page
(user viewing someone else's profile page).
"""
# initialize_different_user should cause country, language, and bio to be filled out (since
# privacy is public). It doesn't appear that this is happening, although the method
# works in regular bokchoy tests. Perhaps a problem with phantomjs? So this test is currently
# only looking at a read-only profile page with a username.
different_username, _ = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.a11y_audit.config.set_rules({
"ignore": [
'skip-link', # TODO: AC-179
'link-href', # TODO: AC-231
],
})
profile_page.a11y_audit.check_for_accessibility_errors()
| nttks/edx-platform | common/test/acceptance/tests/lms/test_learner_profile.py | Python | agpl-3.0 | 34,062 | ["VisIt"] | cf3a9b4a044436b2a9f49897a6bce1988214a2436ae4b32888833d27fc0ad792 |
#!/usr/bin/env python3
"""
refguide_check.py [OPTIONS] [-- ARGS]
- Check for a NumPy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
- Check docstring examples
- Check example blocks in RST files
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings::
$ python refguide_check.py --doctests ma
or in RST-based documentations::
$ python refguide_check.py --rst docs
"""
import copy
import doctest
import inspect
import io
import os
import re
import shutil
import sys
import tempfile
import warnings
import docutils.core
from argparse import ArgumentParser
from contextlib import contextmanager, redirect_stderr
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from docutils.parsers.rst import directives
from pkg_resources import parse_version
import sphinx
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
SKIPBLOCK = doctest.register_optionflag('SKIPBLOCK')
if parse_version(sphinx.__version__) >= parse_version('1.5'):
# Enable specific Sphinx directives
from sphinx.directives.other import SeeAlso, Only
directives.register_directive('seealso', SeeAlso)
directives.register_directive('only', Only)
else:
# Remove sphinx directives that don't run without Sphinx environment.
# Sphinx < 1.5 installs all directives on import...
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "numpy"
PUBLIC_SUBMODULES = [
'core',
'f2py',
'linalg',
'lib',
'lib.recfunctions',
'fft',
'ma',
'polynomial',
'matrixlib',
'random',
'testing',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
# cases where NumPy docstrings import things from SciPy:
'numpy.lib.vectorize',
'numpy.random.standard_gamma',
'numpy.random.gamma',
'numpy.random.vonmises',
'numpy.random.power',
'numpy.random.zipf',
# remote / local file IO with DataSource is problematic in doctest:
'numpy.lib.DataSource',
'numpy.lib.Repository',
])
# Skip non-numpy RST files, historical release notes
# Any single-directory exact match will skip the directory and all subdirs.
# Any exact match (like 'doc/release') will scan subdirs but skip files in
# the matched directory.
# Any filename will skip that file
RST_SKIPLIST = [
'scipy-sphinx-theme',
'sphinxext',
'neps',
'changelog',
'doc/release',
'doc/source/release',
'c-info.ufunc-tutorial.rst',
'c-info.python-as-glue.rst',
'f2py.getting-started.rst',
'arrays.nditer.cython.rst',
# See PR 17222, these should be fixed
'basics.broadcasting.rst',
'basics.byteswapping.rst',
'basics.creation.rst',
'basics.dispatch.rst',
'basics.indexing.rst',
'basics.subclassing.rst',
'basics.types.rst',
'misc.rst',
]
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
# NOTE: should NumPy have a better match between autosummary
# listings and __all__? For now, TR isn't convinced this is a
# priority -- focus on just getting docstrings executed / correct
r'numpy\.*',
]
# deprecated windows in scipy.signal namespace
for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'):
REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
Parameters
----------
path: str or None
cwd: str or None
Returns
-------
str
Relative path or absolute path based on current working directory
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
"""
Finds the occurrences of function names, special directives like data
    and function, and scipy constants in the docstrings of `module`. The
following patterns are searched for:
* 3 spaces followed by function name, and maybe some spaces, some
dashes, and an explanation; only function names listed in
refguide are formatted like this (mostly, there may be some false
      positives)
* special directives, such as data and function
* (scipy.constants only): quoted list
The `names_dict` is updated by reference and accessible in calling method
Parameters
----------
module : ModuleType
The module, whose docstrings is to be searched
names_dict : dict
Dictionary which contains module name as key and a set of found
function names and directives as value
Returns
-------
None
"""
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
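    # e.g. a refguide listing line such as "   vectorize -- vectorize a
    # function" or a directive line such as ".. function:: vectorize" would
    # both be matched by the patterns above.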
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""
Return a copy of the __all__ dict with irrelevant items removed.
Parameters
----------
module : ModuleType
The module whose __all__ dict has to be processed
Returns
-------
deprecated : list
List of callable and deprecated sub modules
not_deprecated : list
List of non callable or non deprecated sub modules
others : list
List of remaining types of sub modules
"""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
if not all_dict:
# Must be a pure documentation module
all_dict.append('__doc__')
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""
Return sets of objects from all_dict.
Will return three sets:
{in module_name.__all__},
{in REFGUIDE*},
and {missing from others}
Parameters
----------
all_dict : list
List of non deprecated sub modules for module_name
others : list
List of sub modules for module_name
names : set
Set of function names or special directives present in
docstring of module_name
    module_name : str
Returns
-------
only_all : set
only_ref : set
missing : set
"""
only_all = set()
for name in all_dict:
if name not in names:
for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:
if re.match(pat, module_name + '.' + name):
break
else:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
"""
    Check whether the callable `f` is deprecated
    Parameters
    ----------
    f : callable
Returns
-------
bool
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except Exception:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
"""
Check that `all_dict` is consistent with the `names` in `module_name`
For instance, that there are no deprecated or extra objects.
Parameters
----------
all_dict : list
names : set
deprecated : list
others : list
    module_name : str
dots : bool
Whether to print a dot for each check
Returns
-------
list
List of [(name, success_flag, output)...]
"""
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = only_ref.intersection(deprecated)
only_ref = only_ref.difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
output += "\nThis issue can be fixed by adding these objects to\n"
output += "the function listing in __init__.py for this module\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
output += "\nThis issue should likely be fixed by removing these objects\n"
output += "from the function listing in __init__.py for this module\n"
output += "or adding them to __all__.\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
"""
Validates the doc string in a snippet of documentation
`text` from file `name`
Parameters
----------
text : str
Docstring text
name : str
File name for which the doc string is to be validated
dots : bool
Whether to print a dot symbol for each check
Returns
-------
(bool, str)
"""
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'doc', 'currentmodule', 'autosummary', 'data', 'attr',
'obj', 'versionadded', 'versionchanged', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor', 'term', 'c:member',
'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
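# Illustrative call (added note; not in the original tool): validate_rst_syntax() can be
# used directly on any docstring text and returns a (success, output) pair, e.g.
#
#     >>> ok, msg = validate_rst_syntax("A *valid* docstring.", "example", dots=False)  # doctest: +SKIP
#     >>> ok
#     True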
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Parameters
----------
module : ModuleType
names : set
Returns
-------
result : list
List of [(module_name, success_flag, output),...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except Exception:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'numpy': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float32': np.float32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,
'StringIO': io.StringIO,
}
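# Hedged illustration (added for clarity; not part of the original module): the point of
# CHECK_NAMESPACE is that printed doctest output such as "array([1., 2.])" or "nan" can be
# eval()'d back into real objects and compared numerically rather than textually:
#
#     >>> eval("array([1., 2.])", dict(CHECK_NAMESPACE))    # doctest: +SKIP
#     array([1., 2.])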
class DTRunner(doctest.DocTestRunner):
"""
The doctest runner
"""
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
"""
Check the docstrings
"""
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary",
"# uninitialized", "#uninitialized"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
'# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
'.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = CHECK_NAMESPACE
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except Exception:
# Maybe we're printing a numpy array? This produces invalid python
# code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
# values. So, reinsert commas and retry.
            # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and
# (2) n-dim arrays with n > 1
s_want = want.strip()
s_got = got.strip()
cond = (s_want.startswith("[") and s_want.endswith("]") and
s_got.startswith("[") and s_got.endswith("]"))
if cond:
s_want = ", ".join(s_want[1:-1].split())
s_got = ", ".join(s_got[1:-1].split())
return self.check_output(s_want, s_got, optionflags)
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = (r'[\w\d_]+\(' +
', '.join([r'[\w\d_]+=(.+)']*num) +
r'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
            # heterogeneous tuple, e.g. (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
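# Hedged illustration of the tolerant comparison above (added note; not part of the
# original module): values that differ within atol/rtol are accepted, and bracketed
# NumPy print output ("[0 1 2]") has commas reinserted before being re-compared:
#
#     >>> checker = Checker()
#     >>> checker._source = ""     # normally set by DTRunner.report_start()
#     >>> checker.check_output("0.333", "0.3333", optionflags=0)    # doctest: +SKIP
#     True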
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""
Run modified doctests for the set of `tests`.
Parameters
----------
    tests : list
    full_name : str
    verbose : bool
    doctest_warnings : bool
Returns
-------
tuple(bool, list)
Tuple of (success, output)
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = io.StringIO(newline='')
success = True
# Redirect stderr to the stdout or output
tmp_stderr = sys.stdout if doctest_warnings else output
@contextmanager
def temp_cwd():
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
os.chdir(tmpdir)
yield tmpdir
finally:
os.chdir(cwd)
shutil.rmtree(tmpdir)
# Run tests, trying to restore global state afterward
cwd = os.getcwd()
with np.errstate(), np.printoptions(), temp_cwd() as tmpdir, \
redirect_stderr(tmp_stderr):
# try to ensure random seed is NOT reproducible
np.random.seed(None)
ns = {}
for t in tests:
# We broke the tests up into chunks to try to avoid PSEUDOCODE
# This has the unfortunate side effect of restarting the global
# namespace for each test chunk, so variables will be "lost" after
# a chunk. Chain the globals to avoid this
t.globs.update(ns)
t.filename = short_path(t.filename, cwd)
# Process our options
if any([SKIPBLOCK in ex.options for ex in t.examples]):
continue
fails, successes = runner.run(t, out=output.write, clear_globs=False)
if fails > 0:
success = False
ns = t.globs
output.seek(0)
return success, output.read()
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""
Check code in docstrings of the module's public symbols.
Parameters
----------
module : ModuleType
Name of module
verbose : bool
Should the result be verbose
ns : dict
Name space of module
dots : bool
doctest_warnings : bool
Returns
-------
results : list
List of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except Exception:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, output))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""
Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery.
(which is borrowed from stdlib's doctest.testfile here,
https://github.com/python-git/python/blob/master/Lib/doctest.py)
Parameters
----------
fname : str
File name
verbose : bool
ns : dict
Name space
dots : bool
doctest_warnings : bool
Returns
-------
list
List of [(item_name, success_flag, output), ...]
Notes
-----
refguide can be signalled to skip testing code by adding
``#doctest: +SKIP`` to the end of the line. If the output varies or is
    random, add ``# may vary`` or ``# random`` to the comment. For example
>>> plt.plot(...) # doctest: +SKIP
>>> random.randint(0,10)
5 # random
We also try to weed out pseudocode:
* We maintain a list of exceptions which signal pseudocode,
* We split the text file into "blocks" of code separated by empty lines
and/or intervening text.
* If a block contains a marker, the whole block is then assumed to be
      pseudocode. It is then not doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
if ns is None:
ns = CHECK_NAMESPACE
results = []
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
with open(fname, encoding='utf-8') as f:
text = f.read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
                      'integrate.nquad(func,'  # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
base_line_no = 0
for part in text.split('\n\n'):
try:
tests = parser.get_doctest(part, ns, fname, fname, base_line_no)
except ValueError as e:
if e.args[0].startswith('line '):
# fix line number since `parser.get_doctest` does not increment
# the reported line number by base_line_no in the error message
parts = e.args[0].split()
parts[1] = str(int(parts[1]) + base_line_no)
e.args = (' '.join(parts),) + e.args[1:]
raise
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
            # `part` looks like good code, let's doctest it
good_parts.append((part, base_line_no))
base_line_no += part.count('\n') + 2
# Reassemble the good bits and doctest them:
tests = []
for good_text, line_no in good_parts:
tests.append(parser.get_doctest(good_text, ns, fname, fname, line_no))
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, output))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def iter_included_files(base_path, verbose=0, suffixes=('.rst',)):
"""
Generator function to walk `base_path` and its subdirectories, skipping
files or directories in RST_SKIPLIST, and yield each file with a suffix in
`suffixes`
Parameters
----------
base_path : str
Base path of the directory to be processed
verbose : int
suffixes : tuple
Yields
------
path
Path of the directory and its sub directories
"""
if os.path.exists(base_path) and os.path.isfile(base_path):
yield base_path
for dir_name, subdirs, files in os.walk(base_path, topdown=True):
if dir_name in RST_SKIPLIST:
if verbose > 0:
sys.stderr.write('skipping files in %s' % dir_name)
files = []
for p in RST_SKIPLIST:
if p in subdirs:
if verbose > 0:
sys.stderr.write('skipping %s and subdirs' % p)
subdirs.remove(p)
for f in files:
if (os.path.splitext(f)[1] in suffixes and
f not in RST_SKIPLIST):
yield os.path.join(dir_name, f)
def check_documentation(base_path, results, args, dots):
"""
Check examples in any *.rst located inside `base_path`.
Add the output to `results`.
See Also
--------
check_doctests_testfile
"""
for filename in iter_included_files(base_path, args.verbose):
if dots:
sys.stderr.write(filename + ' ')
sys.stderr.flush()
tut_results = check_doctests_testfile(
filename,
(args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
# stub out a "module" which is needed when reporting the result
def scratch():
pass
scratch.__name__ = filename
results.append((scratch, tut_results))
if dots:
sys.stderr.write('\n')
sys.stderr.flush()
def init_matplotlib():
"""
Check feasibility of matplotlib initialization.
"""
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
"""
    Validate the docstrings of the pre-decided set of
modules for errors and docstring standards.
"""
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true",
help="Run also doctests on ")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--rst", nargs='?', const='doc', default=None,
help=("Run also examples from *rst files "
"discovered walking the directory(s) specified, "
"defaults to 'doc'"))
args = parser.parse_args(argv)
modules = []
names_dict = {}
if not args.module_names:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in module_names:
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
dots = True
success = True
results = []
errormsgs = []
if args.doctests or args.rst:
init_matplotlib()
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
if args.doctests or not args.rst:
print("Running checks for %d modules:" % (len(modules),))
for module in modules:
if dots:
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others,
module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write('\n')
sys.stderr.flush()
if args.rst:
base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
rst_path = os.path.relpath(os.path.join(base_dir, args.rst))
if os.path.exists(rst_path):
print('\nChecking files in %s:' % rst_path)
check_documentation(rst_path, results, args, dots)
else:
sys.stderr.write(f'\ninvalid --rst argument "{args.rst}"')
errormsgs.append('invalid directory argument to --rst')
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
for module, mod_results in results:
success = all(x[1] for x in mod_results)
if not success:
errormsgs.append(f'failed checking {module.__name__}')
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if len(errormsgs) == 0:
print("\nOK: all checks passed!")
sys.exit(0)
else:
print('\nERROR: ', '\n '.join(errormsgs))
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
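# Hedged example invocations (added note; exact submodule names come from the
# PUBLIC_SUBMODULES list defined earlier in this file), run from the repository root:
#
#     python tools/refguide_check.py                # check __all__ against the refguide
#     python tools/refguide_check.py --doctests     # additionally run docstring doctests
#     python tools/refguide_check.py --rst doc      # also check examples in doc/*.rst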
|
grlee77/numpy
|
tools/refguide_check.py
|
Python
|
bsd-3-clause
| 38,039
|
[
"Gaussian"
] |
b7160198742bd719f33862ab59077a9d9a185643b950644a248283bd3afc1483
|
#!/usr/bin/env python3
"""
Convert ASCII files to NetCDF4 (plain text)
"""
import sys
import argparse
import numpy as np
import netCDF4
args = []
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='convert file from ASCII to netCDF format',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i', '--input', dest='infile', type=str,
default='../data/test_dataHIT_ascii.dat',
help='input ASCII file', metavar='FILE')
parser.add_argument('-o', '--output', dest='outfile', type=str,
default='../data/test_dataHIT_back_converted.nc',
help='output NetCDF file', metavar='FILE')
parser.add_argument('-nx', '--nx', dest='ndimx', type=int,
help='spatial mesh dimension, for the x variable', default=159)
parser.add_argument('-ny', '--ny', dest='ndimy', type=int,
help='spatial mesh dimension, for the y variable', default=134)
args = parser.parse_args()
# Try to write the file
try:
datafile_write = netCDF4.Dataset(args.outfile, 'w', format='NETCDF4')
except IOError:
print('There was an error writing the file!')
sys.exit()
datafile_write.description = 'Experiments conducted at Rouen ...'
ndimx = args.ndimx # spacing
ndimy = args.ndimy # spacing
# dimensions
datafile_write.createDimension('resolution_x', ndimx)
datafile_write.createDimension('resolution_y', ndimy)
datafile_write.createDimension('resolution_z', 1)
# variables
velocity_x = datafile_write.createVariable('velocity_x', 'f4', ('resolution_z',
'resolution_y',
'resolution_x'))
velocity_y = datafile_write.createVariable('velocity_y', 'f4', ('resolution_z',
'resolution_y',
'resolution_x'))
velocity_z = datafile_write.createVariable('velocity_z', 'f4', ('resolution_z',
'resolution_y',
'resolution_x'))
grid_x = datafile_write.createVariable('grid_x', 'f4', 'resolution_x')
grid_y = datafile_write.createVariable('grid_y', 'f4', 'resolution_y')
# data
# velocity_x[:] = np.random.random((1,ndimy,ndimx))/1
# velocity_y[:] = np.random.random((1,ndimy,ndimx))/1
# velocity_z[:] = np.random.random((1,ndimy,ndimx))/1
# grid
    x = np.linspace(0, ndimy, ndimx)  # note: x and y are unused; grid values are read from the input file below
    y = np.linspace(0, ndimy, ndimx)
print("Converting {:s} file to {:s} file".format(args.infile, args.outfile))
# Try to read the file
try:
infile = open(args.infile, 'r')
except IOError:
print('There was an error reading the file!')
sys.exit()
line = infile.readline()
lines = infile.readlines()
for j in range(ndimy):
for i in range(ndimx):
velocity_x[0, j, i] = lines[j * ndimx + i].split()[2]
velocity_y[0, j, i] = lines[j * ndimx + i].split()[3]
if j == 0:
grid_x[i] = lines[i].split()[0]
if i == 0:
grid_y[j] = lines[j * ndimx].split()[1]
datafile_write.close()
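# Hedged read-back sketch (added for illustration; not part of the original script): the
# file written above can be inspected with the same netCDF4 API, e.g.
#
#     with netCDF4.Dataset(args.outfile, 'r') as nc:
#         print(nc.variables['velocity_x'][0].shape)    # expected: (ndimy, ndimx)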
|
guilindner/VortexFitting
|
vortexfitting/convertToNC.py
|
Python
|
mit
| 3,315
|
[
"NetCDF"
] |
4127b5bb467f506a10efc1cee8d237a4ade4cc082e78ae3ababe9a4a7a8d305e
|
# pylint: disable=R0201
# R0201: For testing methods which could be functions are fine.
#
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Test case for the ItemVisitorBase and ItemTreeWalkerBase classes.
"""
import unittest
from datafinder.core.item.base import ItemBase
from datafinder.core.item.collection import ItemRoot, ItemCollection
from datafinder.core.item.leaf import ItemLeaf
from datafinder.core.item.link import ItemLink
from datafinder.core.item.visitor.base import ItemTreeWalkerBase, VisitSlot
from datafinder_test.mocks import SimpleMock
__version__ = "$Revision-Id:$"
class _TestItem(object):
"""
Simple class definition to test failure of visitor class.
"""
def __init__(self):
""" Constructor. """
pass
class _TestItemVisitor(object):
"""
Mock visitor to test L{ItemVisitorBase<datafinder.core.item.visitor.base.ItemVisitorBase>}.
Two visit slots are defined: C{test1} and C{test2} both of which only have a valid implementation
for L{SimpleMock<datafinder_test.mocks.SimpleMock>}.
"""
def test1ReturnsNodeValue(self, node):
"""
Visitor slot implementation for C{SimpleMock}.
@param node: The visited node.
@type node: C{SimpleMock}
@return: The value of the node (C{node.value}).
"""
return node.value
def test1ReturnsFalse(self, _):
"""
        Visitor slot implementation for C{_TestItem}.
@param node: The visited node.
@type node: C{SimpleMock}
@return: C{False}
"""
return False
test1 = VisitSlot((test1ReturnsNodeValue, [SimpleMock]),
(test1ReturnsFalse, [_TestItem]))
def test2ChecksNodeValueForPara(self, node, para):
"""
Visitor slot implementation for C{SimpleMock} with extra parameter which is compared to
the mocks value.
@param node: The visited node.
@type node: C{SimpleMock}
@param para: A parameter to be passed.
@type para: Boolean
@return: Returns whether the node value (C{node.value}) equals the given
parameter.
@rtype: Boolean
"""
return node.value == para
test2 = VisitSlot((test2ChecksNodeValueForPara, [SimpleMock]))
class _DerivedTestItemVisitor(_TestItemVisitor, object):
def test1Overridden(self, _):
return False
test1 = VisitSlot((test1Overridden, (SimpleMock, )), inherits="test1")
def test2Hidden(self, node, para):
return node.value == para
test2 = VisitSlot((test2Hidden, [_TestItem]))
class ItemVisitorBaseTestCase(unittest.TestCase):
"""
Test case for L{ItemVisitorBase<datafinder.core.item.visitor.base.ItemVisitorBase>}.
"""
def setUp(self):
"""
        Unittest setup. Initializes a C{SimpleMock}, a C{_TestItem} and a
C{_TestItemVisitor} for later use in the tests.
"""
self.mockItem = SimpleMock(True)
self.testItem = _TestItem()
self.visitor = _TestItemVisitor()
self.visitor2 = _DerivedTestItemVisitor()
def testAllFine(self):
"""
        These tests simply check whether calling a visitor slot works with and without parameters.
"""
self.assertTrue(self.visitor.test1(self.mockItem))
self.assertTrue(self.visitor.test2(self.mockItem, True))
self.assertFalse(self.visitor.test2(self.mockItem, False))
def testDispatch(self):
"""
This method checks whether the slots really only respond to the data they
are registered for.
"""
self.assertTrue(self.visitor.test1(self.mockItem))
self.assertFalse(self.visitor.test1(self.testItem))
        self.assertRaises(TypeError, self.visitor.test2, self.mockItem) # too few parameters
self.assertTrue(self.visitor.test2(self.mockItem, True))
self.assertRaises(AttributeError, self.visitor.test2, self.testItem) # no valid slot
self.assertFalse(self.visitor2.test1(self.mockItem))
self.assertFalse(self.visitor2.test1(self.testItem))
self.assertRaises(AttributeError, self.visitor2.test2, self.mockItem)
class _TestItemTreeWalker(ItemTreeWalkerBase):
"""
Mock tree walker class to test
L{ItemTreeWalkerBase<datafinder.core.item.visitor.base.ItemTreeWalkerBase>}.
"""
def __init__(self, mode=-1):
"""
Constructor.
"""
ItemTreeWalkerBase.__init__(self, mode=mode)
self.sequence = list()
def reset(self):
"""
Reset the list of walked items.
"""
self.sequence = list()
def handleData(self, node):
"""
        Visitor slot C{handle} for all nodes except links.
"""
self.sequence.append(node.name)
def handleLink(self, node):
"""
Visitor slot C{handle} for link nodes.
@return: False
"""
self.sequence.append("*" + node.name)
handle = VisitSlot((handleData, [ItemBase, ItemRoot, ItemCollection, ItemLeaf]),
(handleLink, [ItemLink]))
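    # Added note (not in the original test module): with this slot table, walking an item
    # tree appends each node's name via handleData() and "*" + name for links via
    # handleLink(), which is exactly what the PREORDER_RESULT/POSTORDER_RESULT constants
    # in ItemTreeWalkerTestCase below encode.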
class _EmptyItemTreeWalker(ItemTreeWalkerBase):
"""
Another mock up tree walker where the C{handle} slot has been disabled.
"""
def __init__(self, mode=-1):
""" Constructor. """
ItemTreeWalkerBase.__init__(self, mode=mode)
class ItemTreeWalkerTestCase(unittest.TestCase):
"""
Test case for L{ItemTreeWalkerBase<datafinder.item.visitor.base.ItemTreeWalkerBase>}.
"""
PREORDER_RESULT = ("root", "collection", "leaf", "base", "*link")
POSTORDER_RESULT = ("leaf", "base", "*link", "collection", "root")
NODEONLY_RESULT = PREORDER_RESULT[1:]
def setUp(self):
# A tree walker that operates Pre-order (mode=-1)
self.preorderWalker = _TestItemTreeWalker()
self.preorderWalker.reset()
# A tree walker that applies Post-order scheme (mode=1)
self.postorderWalker = _TestItemTreeWalker(mode=1)
self.postorderWalker.reset()
# A root for testing
self.testRoot = ItemRoot("root")
self.testRoot._fileStorer = SimpleMock(list())
self.testRoot.itemFactory = SimpleMock(SimpleMock(list()))
self.testRoot.path = ""
# A collection for testing
self.testNode = ItemCollection("collection")
self.testNode._fileStorer = SimpleMock(list())
self.testNode.itemFactory = SimpleMock(SimpleMock(list()))
self.testNode.parent = self.testRoot
# A leaf for testing
self.testLeaf = ItemLeaf("leaf")
self.testLeaf._fileStorer = SimpleMock(list())
self.testLeaf.itemFactory = SimpleMock(SimpleMock(list()))
self.testLeaf.parent = self.testNode
# A base item for testing
self.testBase = ItemBase("base")
self.testBase._fileStorer = SimpleMock(list())
self.testBase.itemFactory = SimpleMock(SimpleMock(list()))
self.testBase.parent = self.testNode
# A link for testing
self.testLink = ItemLink("link")
self.testLink._fileStorer = SimpleMock(list())
self.testLink.itemFactory = SimpleMock(SimpleMock(list()))
self.testLink.parent = self.testNode
def _assertSequencesEqual(self, results, expected):
"""
Assert two sequences equal itemwise.
@param results: The sequence to be tested.
@type results: Any class implementing iterator protocol.
@param expected: The expected results.
@type expected: Any class implementing iterator protocol.
"""
for result, expect in zip(results, expected):
self.assertEqual(result, expect)
def testAllFine(self):
"""
        Simply checks whether the walked sequence is produced as expected.
"""
self.preorderWalker.walk(self.testRoot)
self._assertSequencesEqual(self.preorderWalker.sequence,
ItemTreeWalkerTestCase.PREORDER_RESULT)
self.postorderWalker.walk(self.testRoot)
self._assertSequencesEqual(self.postorderWalker.sequence,
ItemTreeWalkerTestCase.POSTORDER_RESULT)
def testExceptions(self):
"""
Check whether exceptions are raised just as expected.
"""
self.assertRaises(ValueError, _EmptyItemTreeWalker, mode=0)
walker = _EmptyItemTreeWalker()
self.assertRaises(AttributeError, walker.walk, self.testRoot) # No handler slot
_EmptyItemTreeWalker.handle = VisitSlot(inherits="handle")
self.assertRaises(AttributeError, walker.walk, self.testRoot) # No slot for type
def testNodes(self):
"""
Check the performance of the tree walker when started on a collection.
"""
self.preorderWalker.walk(self.testNode)
self._assertSequencesEqual(self.preorderWalker.sequence,
ItemTreeWalkerTestCase.NODEONLY_RESULT)
def testLeafs(self):
"""
Check the performance of the tree walker when started on a leaf or link.
"""
self.preorderWalker.walk(self.testLeaf)
self.assertEqual(self.preorderWalker.sequence[0], "leaf")
self.preorderWalker.reset()
self.preorderWalker.walk(self.testLink)
self.assertEqual(self.preorderWalker.sequence[0], "*link")
|
DLR-SC/DataFinder
|
test/unittest/datafinder_test/core/item/visitor/base_test.py
|
Python
|
bsd-3-clause
| 11,462
|
[
"VisIt"
] |
fc429b2e780cb4e03a56ad1f516f83ac1249fb477f49bc0361c184d89cb459d9
|
# Copyright 2008-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot import model
from robot.conf import RobotSettings
from robot.output import LOGGER, Output, pyloggingconf
from robot.utils import setter
from robot.variables import init_global_variables
from .namespace import IMPORTER
from .randomizer import Randomizer
from .runner import Runner
from .signalhandler import STOP_SIGNAL_MONITOR
# TODO: This module should be turned into a package with submodules.
# No time for that prior to 2.8, but it ought to be safe also in 2.8.x.
# Important to check that references in API docs don't break.
class Keyword(model.Keyword):
"""Running model for single keyword."""
__slots__ = ['assign']
message_class = None # TODO: Remove from base model?
def __init__(self, name='', args=(), assign=(), type='kw'):
model.Keyword.__init__(self, name=name, args=args, type=type)
#: Variables to be assigned.
self.assign = assign
def is_for_loop(self):
return False
def is_comment(self):
return False
@property
def keyword(self):
"""Name of the keyword."""
return self.name
class ForLoop(Keyword):
__slots__ = ['range']
keyword_class = Keyword
def __init__(self, vars, items, range):
Keyword.__init__(self, assign=vars, args=items, type='for')
self.range = range
@property
def vars(self):
return self.assign
@property
def items(self):
return self.args
def is_for_loop(self):
return True
@property
def steps(self):
return self.keywords
class TestCase(model.TestCase):
"""Running model for single test case."""
__slots__ = ['template']
keyword_class = Keyword
def __init__(self, name='', doc='', tags=None, timeout=None, template=None):
model.TestCase.__init__(self, name, doc, tags, timeout)
#: Name of the keyword that has been used as template
        #: when building the test. `None` if no template is used.
self.template = template
@setter
def timeout(self, timeout):
"""Timeout limit of the test case as an instance of
        :class:`~.Timeout`.
"""
return Timeout(*timeout) if timeout else None
class TestSuite(model.TestSuite):
"""Running model for single test suite."""
__slots__ = []
test_class = TestCase
keyword_class = Keyword
def __init__(self, name='', doc='', metadata=None, source=None):
model.TestSuite.__init__(self, name, doc, metadata, source)
#: Imports the suite contains.
self.imports = []
#: User keywords defined in the same file as the suite.
#: **Likely to change or to be removed.**
self.user_keywords = []
#: Variables defined in the same file as the suite.
#: **Likely to change or to be removed.**
self.variables = []
@setter
def imports(self, imports):
return model.Imports(self.source, imports)
@setter
def user_keywords(self, keywords):
return model.ItemList(UserKeyword, items=keywords)
@setter
def variables(self, variables):
return model.ItemList(Variable, {'source': self.source}, items=variables)
def configure(self, randomize_suites=False, randomize_tests=False,
**options):
model.TestSuite.configure(self, **options)
self.randomize(randomize_suites, randomize_tests)
def randomize(self, suites=True, tests=True):
"""Randomizes the order of suites and/or tests, recursively."""
self.visit(Randomizer(suites, tests))
def run(self, settings=None, **options):
        Executes the suite based on the given ``settings`` or ``options``.
:param settings: :class:`~robot.conf.settings.RobotSettings` object
to configure test execution.
:param options: Used to construct new
:class:`~robot.conf.settings.RobotSettings` object if ``settings``
are not given.
:return: :class:`~robot.result.executionresult.Result` object with
information about executed suites and tests.
If ``options`` are used, their names are the same as long command line
options except without hyphens, and they also have the same semantics.
Options that can be given on the command line multiple times can be
passed as lists like ``variable=['VAR1:value1', 'VAR2:value2']``.
If such an option is used only once, it can be given also as a single
string like ``variable='VAR:value'``.
Only options related to the actual test execution have an effect.
For example, options related to selecting test cases or creating
logs and reports are silently ignored. The output XML generated
as part of the execution can be configured, though, including
disabling it with ``output=None``.
Example::
result = suite.run(variable='EXAMPLE:value',
critical='regression',
output='example.xml',
exitonfailure=True,
skipteardownonexit=True)
print result.return_code
To save memory, the returned
        :class:`~robot.result.executionresult.Result` object does not
have any information about the executed keywords. If that information
is needed, the created output XML file needs to be read using the
:class:`~robot.result.resultbuilder.ExecutionResult` factory method.
See the :mod:`package level <robot.running>` documentation for
more examples, including how to construct executable test suites and
how to create logs and reports based on the execution results.
"""
STOP_SIGNAL_MONITOR.start()
IMPORTER.reset()
settings = settings or RobotSettings(options)
pyloggingconf.initialize(settings['LogLevel'])
init_global_variables(settings)
output = Output(settings)
runner = Runner(output, settings)
self.visit(runner)
output.close(runner.result)
return runner.result
class Variable(object):
def __init__(self, name, value, source=None):
# TODO: check name and value
self.name = name
self.value = value
self.source = source
def report_invalid_syntax(self, message, level='ERROR'):
LOGGER.write("Error in file '%s': Setting variable '%s' failed: %s"
% (self.source or '<unknown>', self.name, message), level)
class Timeout(object):
def __init__(self, value, message=None):
self.value = value
self.message = message
def __str__(self):
return self.value
class UserKeyword(object):
# TODO: In 2.9:
# - Teardown should be handled as a keyword like with tests and suites.
# - Timeout should be handled consistently with tests.
# - Also resource files should use these model objects.
def __init__(self, name, args=(), doc='', return_=None, timeout=None,
teardown=None):
self.name = name
self.args = args
self.doc = doc
self.return_ = return_ or ()
self.teardown = None
self.timeout = timeout
self.teardown = teardown
self.keywords = []
@setter
def keywords(self, keywords):
return model.ItemList(Keyword, items=keywords)
# Compatibility with parsing model. Should be removed in 2.9.
@property
def steps(self):
return self.keywords
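# Hedged usage sketch (added for illustration; not part of robotframework itself): the
# running model above can be built programmatically and executed, roughly:
#
#     from robot.running import TestSuite
#     suite = TestSuite(name='Example suite')
#     test = suite.tests.create(name='Example test')
#     test.keywords.create('Log', args=['Hello, world!'])
#     result = suite.run(output='example_output.xml')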
|
yamateh/robotframework
|
src/robot/running/model.py
|
Python
|
apache-2.0
| 8,153
|
[
"VisIt"
] |
67eb96e855859a4295da56709e881d78337c91f5150a8c63e0e990a393fe0900
|
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomTreesEmbedding
import hashfeatures
def get_naive_bayes_models():
gnb = GaussianNB()
mnb = MultinomialNB()
bnb = BernoulliNB()
classifier_list = [gnb,mnb,bnb]
classifier_name_list = ['Gaussian NB','Multinomial NB','Bernoulli NB']
return classifier_list,classifier_name_list
def get_neural_network(hidden_layer_size=50):
mlp = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
return [mlp], ['MultiLayer Perceptron']
def get_ensemble_models():
rf = RandomForestClassifier(n_estimators=51,min_samples_leaf=5,min_samples_split=3)
bagg = BaggingClassifier(n_estimators=71,random_state=42)
extra = ExtraTreesClassifier(n_estimators=57,random_state=42)
ada = AdaBoostClassifier(n_estimators=51,random_state=42)
grad = GradientBoostingClassifier(n_estimators=101,random_state=42)
classifier_list = [rf,bagg,extra,ada,grad]
classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
return classifier_list,classifier_name_list
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
    print('--------- For Model : ', trained_model_name)
    predicted_values = trained_model.predict(X_test)
    print(metrics.classification_report(y_test, predicted_values))
    print("Accuracy Score : ", metrics.accuracy_score(y_test, predicted_values))
    print("---------------------------------------\n")
filename = 'train.csv'
imperial_frame = pd.read_csv(filename)
feature_hash = hashfeatures.FeatureHash(max_feature_num=5000)
insult_features = feature_hash.get_feature_set(list(imperial_frame['Comment'].values))
class_labels = list(imperial_frame['Insult'].values)
rf_embed_features = RandomTreesEmbedding(n_estimators=151,random_state=42)
insult_features = rf_embed_features.fit_transform(insult_features)
X_train,X_test,y_train,y_test = train_test_split(insult_features,class_labels,test_size=0.1,random_state=42)
classifier_list, classifier_name_list = get_ensemble_models()
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
classifier.fit(X_train,y_train)
print_evaluation_metrics(classifier,classifier_name,X_test,y_test)
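# Hedged follow-up sketch (added note; not in the original script): the Naive Bayes and
# MLP factories defined above are never exercised. The MLP accepts the sparse embedded
# features directly and could be evaluated the same way (GaussianNB would first need a
# dense array via .toarray()):
#
#     mlp_list, mlp_names = get_neural_network(hidden_layer_size=100)
#     for clf, clf_name in zip(mlp_list, mlp_names):
#         clf.fit(X_train, y_train)
#         print_evaluation_metrics(clf, clf_name, X_test, y_test)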
|
rupakc/Kaggle-Compendium
|
Imperium - Detecting Insults in Social Commentary/imperium-baseline.py
|
Python
|
mit
| 2,791
|
[
"Gaussian"
] |
8560ce1ec4a480b66755cc4cf3b8389de8a6ce3ecc59513e40cb9960ada7ac1a
|
"""Module containing constants."""
TEI_SOURCE_CBETA_GITHUB = 'cbeta-github'
TEI_SOURCE_CHOICES = [TEI_SOURCE_CBETA_GITHUB]
TOKENIZER_CHOICE_CBETA = 'cbeta'
TOKENIZER_CHOICE_LATIN = 'latin'
TOKENIZER_CHOICE_PAGEL = 'pagel'
TOKENIZER_CHOICES = [TOKENIZER_CHOICE_CBETA, TOKENIZER_CHOICE_LATIN,
TOKENIZER_CHOICE_PAGEL]
# For the CBETA (Chinese) tokenizer, a token is either a workaround
# (anything in square brackets, as a whole), or a single word
# character. Tokens are grouped together (when constituted into
# n-grams) by an empty string.
TOKENIZER_PATTERN_CBETA = r'\[[^]]*\]|\w'
TOKENIZER_JOINER_CBETA = ''
# For the Latin tokenizer, a token is a continuous sequence of word
# characters. Tokens are grouped together (when constituted into
# n-grams) by a space.
TOKENIZER_PATTERN_LATIN = r'\w+'
TOKENIZER_JOINER_LATIN = ' '
# For the Pagel (Tibetan) tokenizer, a token is a continuous sequence of
# word (plus some punctuation) characters. Tokens are grouped together
# (when constituted into n-grams) by a space.
TOKENIZER_PATTERN_PAGEL = r"[\w'\-+?~]+"
TOKENIZER_JOINER_PAGEL = ' '
TOKENIZERS = {
TOKENIZER_CHOICE_CBETA: [TOKENIZER_PATTERN_CBETA, TOKENIZER_JOINER_CBETA],
TOKENIZER_CHOICE_LATIN: [TOKENIZER_PATTERN_LATIN, TOKENIZER_JOINER_LATIN],
TOKENIZER_CHOICE_PAGEL: [TOKENIZER_PATTERN_PAGEL, TOKENIZER_JOINER_PAGEL],
}
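# Illustrative check (added note; not part of tacl): the patterns above can be exercised
# directly with re.findall, e.g.
#
#     >>> import re
#     >>> re.findall(TOKENIZER_PATTERN_CBETA, 'AB[X1]C')
#     ['A', 'B', '[X1]', 'C']
#     >>> re.findall(TOKENIZER_PATTERN_LATIN, 'arma virumque cano')
#     ['arma', 'virumque', 'cano']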
BASE_WITNESS = 'base'
BASE_WITNESS_ID = ''
# XML namespaces.
NAMESPACES = {
'cb': 'http://www.cbeta.org/ns/1.0',
'tacl': 'http://github.com/ajenhl/tacl/ns',
'tei': 'http://www.tei-c.org/ns/1.0',
'xml': 'http://www.w3.org/XML/1998/namespace'
}
XML = '{{{}}}'.format(NAMESPACES['xml'])
# Sequencer scoring values.
IDENTICAL_CHARACTER_SCORE = 1
DIFFERENT_CHARACTER_SCORE = -1
OPEN_GAP_PENALTY = -0.5
EXTEND_GAP_PENALTY = -0.1
# The threshold is the ratio between the alignment score and the
# length of the text being aligned below which the alignment is used
# as is, rather than further expanded.
SCORE_THRESHOLD = 0.75
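# Worked illustration (added note; not part of tacl): with SCORE_THRESHOLD = 0.75, an
# alignment scoring 80 against a 100-token text (ratio 0.8) is expanded further, while
# one scoring 60 (ratio 0.6) is used as is.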
# CSV field names.
COUNT_FIELDNAME = 'count'
COUNT_TOKENS_FIELDNAME = 'matching tokens'
LABEL_FIELDNAME = 'label'
LABEL_COUNT_FIELDNAME = 'label count'
LABEL_WORK_COUNT_FIELDNAME = 'label work count'
NGRAM_FIELDNAME = 'ngram'
NGRAMS_FIELDNAME = 'ngrams'
NORMALISED_FIELDNAME = 'normalised ngram'
NUMBER_FIELDNAME = 'number of n-grams'
PERCENTAGE_FIELDNAME = 'percentage'
SIGLA_FIELDNAME = 'sigla'
SIGLUM_FIELDNAME = 'siglum'
SIZE_FIELDNAME = 'size'
TOTAL_COUNT_FIELDNAME = 'total count'
TOTAL_NGRAMS_FIELDNAME = 'total ngrams'
TOTAL_TOKENS_FIELDNAME = 'total tokens'
UNIQUE_NGRAMS_FIELDNAME = 'unique ngrams'
WITNESSES_FIELDNAME = 'witnesses'
WORK_FIELDNAME = 'work'
WORK_COUNTS_FIELDNAME = 'work counts'
QUERY_FIELDNAMES = (NGRAM_FIELDNAME, SIZE_FIELDNAME, WORK_FIELDNAME,
SIGLUM_FIELDNAME, COUNT_FIELDNAME, LABEL_FIELDNAME)
COUNTS_FIELDNAMES = (WORK_FIELDNAME, SIGLUM_FIELDNAME, SIZE_FIELDNAME,
UNIQUE_NGRAMS_FIELDNAME, TOTAL_NGRAMS_FIELDNAME,
TOTAL_TOKENS_FIELDNAME, LABEL_FIELDNAME)
STATISTICS_FIELDNAMES = (WORK_FIELDNAME, SIGLUM_FIELDNAME,
COUNT_TOKENS_FIELDNAME, TOTAL_TOKENS_FIELDNAME,
PERCENTAGE_FIELDNAME, LABEL_FIELDNAME)
# Those fieldnames whose data in Pandas should be treated as a string
# even if they are numeric.
STRING_FIELDNAMES = (
LABEL_FIELDNAME, NGRAM_FIELDNAME, SIGLA_FIELDNAME, SIGLUM_FIELDNAME,
WORK_FIELDNAME
)
# Command-line documentation strings.
ENCODING_EPILOG = '''\
Due to encoding issues, you may need to set the environment
variable PYTHONIOENCODING to "utf-8".'''
ALIGN_DESCRIPTION = '''\
Generates an HTML report giving tables showing aligned sequences
of text between each witness within each label and all of the
witnesses in the other labels, within a set of results. This
functionality is only appropriate for intersect results.'''
ALIGN_EPILOG = ENCODING_EPILOG + '''\
\n\nThis function requires the Biopython suite of software to be
installed. It is extremely slow and resource hungry when the
overlap between two witnesses is very great.'''
ALIGN_HELP = 'Show aligned sets of matches between two witnesses side by side.'
ALIGN_MINIMUM_SIZE_HELP = 'Minimum size of n-gram to base sequences around.'
ALIGN_OUTPUT_HELP = 'Directory to output alignment files to.'
ASYMMETRIC_HELP = 'Label of sub-corpus to restrict results to.'
CATALOGUE_CATALOGUE_HELP = 'Path to catalogue file.'
CATALOGUE_DESCRIPTION = 'Generate a catalogue file.'
CATALOGUE_EPILOG = '''\
This command is just a convenience for generating a base catalogue
file to then be customised manually.'''
CATALOGUE_HELP = 'Generate a catalogue file.'
CATALOGUE_LABEL_HELP = 'Label to use for all works.'
COUNTS_DESCRIPTION = 'List counts of n-grams in each labelled witness.'
COUNTS_EPILOG = ENCODING_EPILOG
COUNTS_HELP = 'List counts of n-grams in each labelled witness.'
DB_CORPUS_HELP = 'Path to corpus.'
DB_DATABASE_HELP = 'Path to database file.'
DB_MEMORY_HELP = '''\
Use RAM for temporary database storage.
This may cause an out of memory error, in which case run the
command without this switch.'''
DB_RAM_HELP = 'Number of gigabytes of RAM to use.'
DB_TOKENIZER_HELP = '''\
Type of tokenizer to use. The "cbeta" tokenizer is suitable for
the Chinese CBETA corpus (tokens are single characters or
workaround clusters within square brackets). The "pagel" tokenizer
is for use with the transliterated Tibetan corpus (tokens are sets
of word characters plus some punctuation used to transliterate
characters).'''
DIFF_DESCRIPTION = '''\
List n-grams unique to each sub-corpus (as defined by the labels
in the specified catalogue file).'''
DIFF_EPILOG = '''\
Many of the n-grams that are distinct to each sub-corpus are
uninteresting - if a 2-gram is distinct, then so is every gram
larger than 2 that contains that 2-gram. Therefore the results
output by this command are filtered to keep only the most
distinctive n-grams, according to the following rules (which apply
within the context of a given witness):
* If an n-gram is not composed of any (n-1)-grams found in the
results, it is kept.
* If both of the (n-1)-grams that comprise an n-gram are found in
the results, that n-gram is kept.
* Otherwise, the n-gram is removed from the results.
examples:
Make a diff query against a CBETA corpus.
tacl diff cbeta2-10.db corpus/cbeta/ dhr-vs-rest.txt > output.csv
Make an asymmetrical diff query against a CBETA corpus.
tacl diff -a Dhr cbeta2-10.db corpus/cbeta/ dhr-vs-rest.txt > output.csv
Make a diff query against a Pagel corpus.
tacl diff -t pagel pagel1-7.db corpus/pagel/ by-author.txt > output.csv
''' + ENCODING_EPILOG
DIFF_HELP = 'List n-grams unique to each sub-corpus.'
EXCISE_DESCRIPTION = '''
Output witness files for each specified work with all of the
specified n-grams replaced with the supplied replacement text. The
replacement is done for each n-gram in turn, in descending order
of n-gram length.'''
EXCISE_HELP = "Remove specified n-grams from specified works' witnesses."
EXCISE_NGRAMS_HELP = '''
Path to file containing n-grams (one per line) to be replaced.'''
EXCISE_OUTPUT_HELP = 'Path to directory to output transformed files to.'
EXCISE_REPLACEMENT_HELP = '''
Text to replace n-grams with. This should be one or more valid
tokens.'''
EXCISE_WORKS_HELP = 'Work whose witnesses will be transformed.'
HIGHLIGHT_BASE_NAME_HELP = 'Name of work to display.'
HIGHLIGHT_DESCRIPTION = '''\
Output an HTML report for each witness to a work, showing the text
of that witness with supplied n-grams visually highlighted.'''
HIGHLIGHT_EPILOG = '''\
There are two possible outputs available, depending on whether the
-n or -r option is specified.
If n-grams are supplied via the -n/--ngrams option, the resulting
HTML reports show the specified work's witness texts with those
n-grams highlighted. Any n-grams that are specified via the
-m/--minus-ngrams option will have had its constituent tokens
unhighlighted. The -n/--ngrams option may be specified multiple
times; each file's n-grams will be highlighted in a distinct
colour. The -l/--labels option can be used with -n/--ngrams in
order to provide labels for groups of n-grams. There must be as
many instances of -l/--labels as there are of -n/--ngrams. The
order of the labels matches the order of the n-grams files.
If results are supplied via the -r/--results option, the resulting
HTML reports contain an interactive heatmap of the results, allowing the
user to select which witness' matches should be highlighted in the
text. Multiple selections are possible, and the colour of the
highlight of a token reflects how many witnesses have matches
containing that token.
examples:
tacl highlight -r intersect.csv corpus/stripped/ T0001 report_dir
tacl highlight -n author_markers.csv corpus/stripped/ T0001 report_dir
tacl highlight -n Dhr_markers.csv -n ZQ_markers.csv corpus/stripped/ -l Dharmaraksa -l "Zhi Qian" T0474 report_dir'''
HIGHLIGHT_HELP = '''\
Output a witness with specified n-grams visually highlighted.'''
HIGHLIGHT_LABEL_HELP = '''\
Label used to identify the n-grams from a file specified by
-n/--ngrams. This option may be specified multiple times, and
provided as many times as the -n/--ngrams option.'''
HIGHLIGHT_MINUS_NGRAMS_HELP = '''\
Path to file containing n-grams (one per line) to remove
highlighting from. This applies only when -n is used.'''
HIGHLIGHT_NGRAMS_HELP = '''\
Path to file containing n-grams (one per line) to highlight. This
option may be specified multiple times; the n-grams in each file
will be displayed in a distinct colour.'''
HIGHLIGHT_RESULTS_HELP = 'Path to CSV results; creates heatmap highlighting.'
INTERSECT_DESCRIPTION = '''\
List n-grams common to all sub-corpora (as defined by the labels
in the specified catalogue file).'''
INTERSECT_EPILOG = '''\
examples:
Make an intersect query against a CBETA corpus.
tacl intersect cbeta2-10.db corpus/cbeta/ dhr-vs-rest.txt > output.csv
Make an intersect query against a Pagel corpus.
tacl intersect -t pagel pagel1-7.db corpus/pagel/ by-author.txt > output.csv
''' + ENCODING_EPILOG
INTERSECT_HELP = 'List n-grams common to all sub-corpora.'
JOIN_WORKS_CORPUS_HELP = 'Path to corpus of prepared TEI XML texts.'
JOIN_WORKS_DESCRIPTION = '''\
Join multiple TEI XML works split from the same original work into
a new single work.'''
JOIN_WORKS_EPILOG = '''\
Join works is useful when a work has been split into multiple
parts (likely via tacl prepare) and a new work consisting of some
    of those parts joined together is wanted.
The order the works to join are specified determines the order
they are joined.
Works are specified via their name, not file path. For example,
T0006 and not T0006.xml or path/to/corpus/T0006. The same is true
for the output work name.
The joined work is output within the specified corpus that
contains the works being joined.
Due to the way witnesses are handled, joining works split from
different original works will almost certainly result in errors or
incorrect data at later points. Do not do this.'''
JOIN_WORKS_EXISTING_OUTPUT_ERROR = 'Output work {} already exists.'
JOIN_WORKS_HELP = 'Join multiple TEI XML works into a single new work.'
JOIN_WORKS_OUTPUT_HELP = 'Name of work to output the joined works as.'
JOIN_WORKS_WORK_HELP = 'Name of work to join.'
LIFETIME_DESCRIPTION = '''\
Generate a report on the lifetime of n-grams in a results file.'''
LIFETIME_EPILOG = '''\
A lifetime report consists of:
* an HTML table showing the disposition of each n-gram across the
ordered corpora (with texts and count ranges);
* an HTML table showing, for each corpus, the n-grams that first
occurred, only occurred, and last occurred in that corpus; and
* results files for each category (first occurred in, only
    occurred in, last occurred in) for each corpus.
This report may be generated from any results file, but is most
usefully applied to the output of the lifetime script (in the
tacl-extra package).
The focus label is informative only, since often multiple lifetime
reports will be generated, one per corpus, from the same master
results file, but with specific filtering for the corpus in
focus.'''
LIFETIME_HELP = 'Generate a report on the lifetime of n-grams.'
LIFETIME_LABEL_HELP = 'Label to mark as the focus of the report.'
LIFETIME_RESULTS_HELP = 'Path to a results file to report on.'
NGRAMS_CATALOGUE_HELP = '''\
Path to a catalogue file used to restrict which works in the
corpus are added.'''
NGRAMS_DESCRIPTION = 'Generate n-grams from a corpus.'
NGRAMS_EPILOG = '''\
This command can be safely interrupted and subsequently rerun;
witnesses that have already had their n-grams added will be skipped.
If new witnesses need to be added after a database was generated,
this command can be run again. However, the speed at which n-grams
from these new witnesses are added will be much less than to a new
database, due to the existing indices.
If a witness has changed or been deleted since a database was
generated, this command will not update the database. In this
    case, generate a new database or manipulate the existing database
directly to remove the witness and its associated n-grams.
examples:
Create a database of 2 to 10-grams from a CBETA corpus.
tacl ngrams cbeta2-10.db corpus/cbeta/ 2 10
Create a database of 1 to 7-grams from a Pagel corpus.
tacl ngrams -t pagel pagel1-7.db corpus/pagel/ 1 7
Create a database of 1 to 7-grams from a subset of the CBETA corpus.
tacl ngrams -c dhr-texts.txt cbeta-dhr1-7.db corpus/cbeta/ 1 7
'''
NGRAMS_HELP = 'Generate n-grams from a corpus.'
NGRAMS_MAXIMUM_HELP = 'Maximum size of n-gram to generate (integer).'
NGRAMS_MINIMUM_HELP = 'Minimum size of n-gram to generate (integer).'
NORMALISE_CORPUS_HELP = 'Directory containing corpus to be normalised.'
NORMALISE_DESCRIPTION = '''\
Create a copy of a corpus normalised according to a supplied mapping.'''
NORMALISE_EPILOG = '''\
This is a generic normalisation process that is constrained only
by the possibilities of the mapping format. Lemmatisation could be
performed in the same way as normalisation of variant characters
and words.
LIMITATIONS
Because the normalised forms in the mapping may only consist of a
single token, the normalisation and denormalisation processes are
not able to handle context. Eg, it is not possible to reflect
"ABA" -> "ACA", where the surrounding "A"s are themselves able to
be normalised.
FILES
The mapping file follows a simple format of comma-separated
values, with each line having at least two values. The first is
the normalised form, and all subsequent values on the line being
the unnormalised forms. During processing, longer unnormalised
forms are converted first.
The normalised form is mostly used internally, and so may be
arbitrary. It may never consist of more than a single token,
however.'''
NORMALISE_HELP = 'Create a normalised copy of a corpus.'
NORMALISE_MAPPING_HELP = 'Path to mapping file.'
NORMALISE_OUTPUT_HELP = 'Directory to output normalised corpus to.'
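# Hypothetical mapping-file line (added as illustration; not shipped with tacl): a CSV row
# whose first field is the normalised form and whose remaining fields are the unnormalised
# variants it replaces, e.g.
#
#     colour,color,colur
#
# would rewrite both "color" and "colur" to the single-token normalised form "colour".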
PREPARE_DESCRIPTION = '''\
Convert CBETA TEI XML files (which may have multiple files per
work) into XML suitable for processing via the tacl strip
command.'''
PREPARE_EPILOG = '''\
Existing files are not overwritten by this command.
The TEI source options are:
* {}: The CBETA TEI files as distributed on their GitHub repository
at https://github.com/cbeta-org/xml-p5.git.'''.format(
TEI_SOURCE_CBETA_GITHUB)
PREPARE_HELP = '''\
Convert CBETA TEI XML files into an XML form suitable for
stripping.'''
PREPARE_INPUT_HELP = 'Directory containing XML files to prepare.'
PREPARE_OUTPUT_HELP = 'Directory to output prepared files to.'
PREPARE_SOURCE_HELP = 'Source of TEI files.'
QUERY_DESCRIPTION = '''\
Run a query specified in a file using supplied parameters,
outputting the results as CSV.'''
QUERY_HELP = 'Run a query from a file.'
QUERY_PARAMETERS_HELP = 'Parameters to be used in the query.'
QUERY_QUERY_HELP = 'Path to file containing the SQL query to run.'
REPORT_OUTPUT_HELP = 'Directory to output report to.'
RESULTS_ADD_LABEL_COUNT_HELP = '''\
Output the supplied results with an additional column, "{}",
giving the total count for each n-gram within the label. For each
work, the maximum count across all of that work's witnesses is
used in the sum.'''.format(LABEL_COUNT_FIELDNAME)
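# Worked example of the rule above: within a label, if one work's witnesses
# attest an n-gram 3 and 5 times and another work's single witness attests
# it twice, the label count is max(3, 5) + 2 = 7.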
RESULTS_ADD_LABEL_WORK_COUNT_HELP = '''\
Output the supplied results with an additional column, "{}",
giving the total count of works that contain the n-gram within the
label. For each work, any number of positive counts across all of
that work's witnesses is counted as one in the sum.'''.format(
LABEL_WORK_COUNT_FIELDNAME)
RESULTS_BIFURCATED_EXTEND_HELP = '''\
Extend results to bifurcation points. Generates results containing
those n-grams, derived from the original n-grams, that have a
label count higher than their containing (n+1)-grams, or that have
a label count of one and the constituent (n-1)-grams have a higher
label count.'''
RESULTS_BIFURCATED_EXTEND_MAX_HELP = 'Maximum size of n-gram to extend to.'
RESULTS_COLLAPSE_WITNESSES_HELP = '''\
Collapse result rows for multiple witnesses having the same count
for an n-gram. Instead of the "{}" column, all of the witnesses
(per work) with the same n-gram count are listed, comma separated,
in the "{}" column.'''.format(SIGLUM_FIELDNAME, SIGLA_FIELDNAME)
RESULTS_DENORMALISE_CORPUS_HELP = '''\
Path to directory containing the original (unnormalised)
corpus. This option must be given along with --denormalise-mapping
in order for denormalisation to be performed.'''
RESULTS_DENORMALISE_MAPPING_HELP = '''\
Denormalise result n-grams using mapping at the supplied path. The
unnormalised corpus must also be specified in the
--denormalise-corpus option.'''
RESULTS_DESCRIPTION = '''\
Modify a query results file by adding, removing or otherwise
manipulating result rows. Outputs the new set of results.'''
RESULTS_EXCISE_HELP = '''\
Remove all results whose n-gram contains the supplied n-gram
within it.'''
RESULTS_EXTEND_HELP = '''\
Extend the results to list the largest n-grams that also count as
matches, going beyond the maximum size recorded in the
database. This has no effect if the results contain only 1-grams.'''
RESULTS_EPILOG = '''\
If more than one modifier is specified, they are applied in the
following order: --extend, --bifurcated-extend,
--denormalise-corpus, --denormalise-mapping, --reduce,
--reciprocal, --excise, --zero-fill, --ngrams, --min/max-works,
--min/max-size, --min/max-count, --min/max-count-work, --remove,
--relabel, --sort. All of the options that modify the format are
performed at the end, and only one should be specified. The one
exception to this is denormalisation, which adds a column to the
results without disrupting any other operations.
It is important to be careful with the use of --reduce. Coupled
with filters such as --max-size, --min-count, etc, many results
may be discarded without trace (since the reduce occurs
first). Note too that performing "reduce" on a set of results more
than once will make the results inaccurate! Denormalisation should
always be done before reducing results.
The denormalisation options together produce a set of results with
all denormalised forms that occur in each witness presented, along
with an extra column, "{}", giving the normalised form each was
derived from.
--extend applies before --reduce because it may generate results
that are also amenable to reduction.
--extend applies before --remove because it depends on there being
at least two labels in the results in order to give correct
results.
--min-count and --max-count set the range within which the total
count of each n-gram, across all works, must fall. For each work,
its count is taken as the highest count among its witnesses.
--min-works and --max-works count works rather than witnesses.
If both --min-count-work and --max-count-work are specified, only
those n-grams are kept that have at least one witness whose count
falls within that range.
-l/--label causes --min/max-count, --min/max-count-work, and
--min/max-works to have their requirements apply within that
labelled subset of results. All n-grams, both within the subset
and outside it, that meet the criteria are kept, while all other
n-grams are removed. Note that when applied to diff results, no
n-grams outside those in the labelled subset will be kept.
--relabel sets the label for each result row to the label for that
row's work as specified in the supplied catalogue. If the work is
not labelled in the catalogue, the label in the results is not
changed.
Since this command outputs a valid results file (except when using
one of those options listed as changing the format), its output
can be used as input for a subsequent tacl results command. To
chain commands together without creating an intermediate file,
pipe the commands together and use - instead of a filename, as:
tacl results --reciprocal results.csv | tacl results --reduce -
examples:
Extend CBETA results and set a minimum total count.
tacl results -e corpus/cbeta/ --min-count 9 output.csv > mod-output.csv
Zero-fill CBETA results.
tacl results -z corpus/cbeta/ output.csv > mod-output.csv
Reduce Pagel results.
tacl results --reduce -t pagel output.csv > mod-output.csv
'''.format(NORMALISED_FIELDNAME) + ENCODING_EPILOG
RESULTS_GROUP_BY_NGRAM_HELP = '''\
Group results by n-gram, providing summary information of the
works each n-gram appears in. Results are sorted by n-gram and
then order of occurrence of the label in the supplied
catalogue.'''
RESULTS_GROUP_BY_WITNESS_HELP = '''\
Group results by witness, providing summary information of which
n-grams appear in each witness.'''
RESULTS_HELP = 'Modify a query results file.'
RESULTS_LABEL_HELP = 'Label to restrict prune requirements to.'
RESULTS_MINIMUM_COUNT_HELP = 'Minimum total count per n-gram to include.'
RESULTS_MINIMUM_COUNT_WORK_HELP = '''\
Minimum count per n-gram per work to include; if a single witness
meets this criterion for an n-gram, all instances of that n-gram
are kept.'''
RESULTS_MAXIMUM_COUNT_HELP = 'Maximum total count per n-gram to include.'
RESULTS_MAXIMUM_COUNT_WORK_HELP = '''\
Maximum count per n-gram per work to include; if a single witness
meets this criterion for an n-gram, all instances of that n-gram
are kept.'''
RESULTS_MINIMUM_SIZE_HELP = 'Minimum size of n-grams to include.'
RESULTS_MAXIMUM_SIZE_HELP = 'Maximum size of n-grams to include.'
RESULTS_MINIMUM_WORK_HELP = (
'Minimum count of works containing n-gram to include.')
RESULTS_MAXIMUM_WORK_HELP = (
'Maximum count of works containing n-gram to include.')
RESULTS_NGRAMS_HELP = (
'Path to file containing n-grams (one per line) to exclude.')
RESULTS_RECIPROCAL_HELP = '''\
Remove n-grams that are not attested by at least one work in each
labelled set of works. This can be useful after reducing a set of
intersection results.'''
RESULTS_REDUCE_HELP = 'Remove n-grams that are contained in larger n-grams.'
RESULTS_RELABEL_HELP = 'Relabel results according to the supplied catalogue.'
RESULTS_REMOVE_HELP = 'Remove labelled results.'
RESULTS_RESULTS_HELP = 'Path to CSV results; use - for stdin.'
RESULTS_SORT_HELP = 'Sort the results.'
RESULTS_UNSAFE_GROUP_TITLE = 'format changing arguments'
RESULTS_UNSAFE_GROUP_DESCRIPTION = '''\
These arguments change the format of the results, making it
potentially unsafe to apply further operations to them.'''
RESULTS_ZERO_FILL_HELP = '''\
Add rows with a count of 0 for each n-gram in each witness of a
work that has at least one witness bearing that n-gram.'''
SEARCH_DESCRIPTION = '''\
Output results of searching the database for the supplied n-grams
that occur within labelled witnesses.'''
SEARCH_EPILOG = '''\
If multiple paths to files containing n-grams are given, the
combined set of n-grams from all files will be searched for.
If no path is given, the results will include all n-grams found
for all of the labelled witnesses in the catalogue.\n\n''' \
+ ENCODING_EPILOG
SEARCH_HELP = 'List witnesses containing at least one of the supplied n-grams.'
SEARCH_NGRAMS_HELP = '''\
Path to file containing list of n-grams to search for, with one
n-gram per line.'''
SPLIT_CONF_HELP = '''\
XML configuration file defining the contents of each witness split
from the source work.'''
SPLIT_DESCRIPTION = '''\
Split an existing work into multiple works that are subsets of its
content.'''
SPLIT_EPILOG = '''\
Each split configuration file must be named according to the work
that it defines the splits for (eg, T0278.xml is the name of the
configuration file for the work T0278). Its format is a simple XML
structure, as illustrated in the example below:
<splits delete="true">
<work>
<name>T0278-paralleled-earlier</name>
<parts>
<part>
<witnesses>大,宋,元,明,聖</witnesses>
<start>佛在摩竭提國寂滅道場初始得佛普光法</start>
<end>最勝或稱能度如是等稱佛名號其數一萬</end>
</part>
<part>
<witnesses>宮</witnesses>
<start>佛在摩竭提國寂滅道場初始得佛普光法</start>
<end>最勝或稱能度如是稱佛名號其數一萬</end>
</part>
<part>
<witnesses>ALL</witnesses>
<start>爾時世尊從兩足相輪放百億光明遍照</start>
<end>百億色究竟天此世界所有一切悉現</end>
</part>
</parts>
</work>
<work>
<name>T0278-ex-earlier-parallels</name>
<parts>
<part>
<witnesses>ALL</witnesses>
<whole>如此見佛坐蓮華藏師子座上有十佛世界塵數菩薩眷屬圍遶百億閻浮提</whole>
</part>
<part>
<witnesses>ALL</witnesses>
<start>佛子是為菩薩身口意業能得一切勝妙功</start>
<end>善哉善哉真佛子快說是法我隨喜</end>
</part>
</parts>
</work>
<work rename="true">
<name>Renamed T0278</name>
</work>
</splits>
Each split work is created, under the supplied name, in the corpus
directory - an error will be raised if there is already a work
with the same name as the split work. Each of the original work's
witnesses is recreated, using the subset of its content defined
in the parts. The parts are processed in the order listed, and a
witness includes a part only if its siglum is listed in witnesses,
or the keyword ALL is given in witnesses.
Each part defines either a start and end piece of text, or a whole
piece of text. In the former case, the first remaining instance of
the start text, and everything following it until the first
remaining instance of the end text, is copied into each applicable
witness of the new work. In the latter case, the first instance of
the whole provided text is copied. In both cases, after the
specified text is copied, it is removed from consideration in the
future parts of this split work.
The source work can be output in its entirety under a new name, if
a "rename" attribute with the value "true" is added to a work
element, which must contain only a name.
The source work is left unchanged by the splitting process, unless
a "delete" attribute with the value "true" is added to the root
splits element, in which case the work is deleted.'''
SPLIT_HELP = 'Split an existing work into multiple works.'
STATISTICS_DESCRIPTION = '''\
Generate summary statistics for a set of results. This gives, for
each witness, the total number of tokens and the count of matching
tokens, and derived from these the percentage of the witness that
is encompassed by the matches.'''
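# For example, a witness of 1,000 tokens with 250 matching tokens is
# reported as 25% encompassed by the matches.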
STATISTICS_HELP = 'Generate summary statistics for a set of results.'
STATISTICS_RESULTS_HELP = 'Path to CSV results.'
STRIP_DESCRIPTION = '''\
Preprocess a corpus by stripping unwanted material from each
file, creating a plain text file for each attested witness.'''
STRIP_EPILOG = '''\
This command operates on files in an augmented TEI XML format that
is quite close to that used in the CBETA GitHub files.'''
STRIP_HELP = 'Generate files for use with TACL from a corpus of TEI XML.'
STRIP_INPUT_HELP = 'Directory containing files to strip.'
STRIP_OUTPUT_HELP = 'Directory to output stripped files to.'
SUPPLIED_DIFF_DESCRIPTION = '''\
List n-grams unique to each set of results (as defined by the
specified results files).'''
SUPPLIED_DIFF_HELP = 'List n-grams unique to each results file.'
SUPPLIED_EPILOG = '''\
The number of labels supplied must match the number of results
files. The first label is assigned to all results in the first
results file, the second label to all results in the second
results file, etc. The labels specified in the results files are
replaced with the supplied labels in the output.
examples:
tacl {cmd} -d cbeta2-10.db -l A B -s results1.csv results2.csv > output.csv'''
SUPPLIED_DIFF_EPILOG = SUPPLIED_EPILOG.format(cmd='sdiff')
SUPPLIED_INTERSECT_EPILOG = SUPPLIED_EPILOG.format(cmd='sintersect')
SUPPLIED_INTERSECT_DESCRIPTION = '''\
List n-grams common to all sets of results (as defined by the
specified results files).'''
SUPPLIED_INTERSECT_HELP = 'List n-grams common to all results files.'
SUPPLIED_LABELS_HELP = (
'Labels to be assigned in order to the supplied results.')
SUPPLIED_RESULTS_HELP = 'Paths to results files to be used in the query.'
TACL_DESCRIPTION = 'Analyse the text of corpora in various simple ways.'
VERBOSE_HELP = '''\
Display debug information; multiple -v options increase the verbosity.'''
# Error messages.
CATALOGUE_WORK_RELABELLED_ERROR = 'Catalogue file labels "{}" more than once.'
CATALOGUE_WORK_NOT_IN_CORPUS_ERROR = (
'Catalogue references work "{}" that does not exist in the corpus.')
DUPLICATE_VARIANT_MAPPING_FORM_ERROR = (
'Normaliser mapping lists "{}" more than once.')
EMPTY_NORMALISED_FORM_ERROR = (
'Mapping contains an empty normalised form in the row "{}".')
EMPTY_VARIANT_FORM_ERROR = (
'Normaliser mapping contains an empty variant form for "{}".')
EXCISE_OVERWRITE_WORK_WARNING = ('Output work directory "{}" already exists; '
'existing files may be overwritten.')
INSUFFICIENT_LABELS_QUERY_ERROR = (
'Not running query with fewer than two defined labels.')
LABEL_NOT_IN_CATALOGUE_ERROR = (
'Supplied label is not present in the supplied catalogue.')
MISSING_DATA_STORE_ERROR = (
'Data store does not exist or is inaccessible at {}.')
MISSING_REQUIRED_COLUMNS_ERROR = (
'Results file is missing required column(s) {}.')
NGRAM_MINIMUM_SIZE_GREATER_THAN_MAXIMUM_ERROR = (
'Minimum n-gram size must not be greater than maximum n-gram size.')
NGRAM_SIZE_MUST_BE_INTEGER_ERROR = (
'N-gram sizes must be given as positive integers.')
NGRAM_SIZE_TOO_SMALL_ERROR = 'Minimum n-gram size is 1.'
NO_VARIANTS_DEFINED_ERROR = 'No variant forms defined in mapping for "{}".'
NON_UTF8_RESULTS_FILE_ERROR = 'Results file "{}" is not encoded as UTF-8.'
SPLIT_DELETE_FAILED = 'Failed to delete work "{}" as directed: {}'
SPLIT_INVALID_WITNESS = ('Part references witness "{}" that does not exist '
'in work {}.')
SPLIT_MISSING_END_STRING = 'End string "{}" not found in work "{}" "{}".'
SPLIT_MISSING_START_STRING = 'Start string "{}" not found in work "{}" "{}".'
SPLIT_MISSING_WHOLE_STRING = 'Whole string "{}" not found in work "{}" "{}".'
SPLIT_MISSING_WITNESSES = 'No witnesses specified for part in work "{}".'
SPLIT_MIXED_START_END_STRINGS = ('Start string "{}" comes after end string '
'"{}" in work "{}" "{}".')
SPLIT_OUTPUT_DIRECTORY_EXISTS = ('Output directory for split work "{}" in '
'work {} already exists.')
SPLIT_WORK_NOT_IN_CORPUS_ERROR = 'Work {} does not exist in corpus.'
SUPPLIED_ARGS_LENGTH_MISMATCH_ERROR = (
'The number of labels supplied does not match the number of results files.'
)
TOO_LONG_NORMALISED_FORM_ERROR = ('Normalised form "{}" is longer than one '
'token, which is prohibited.')
# SQL statements.
ANALYSE_SQL = 'ANALYZE {}'
CREATE_INDEX_INPUT_RESULTS_SQL = (
'CREATE INDEX IF NOT EXISTS temp.InputResultsLabel '
'ON InputResults (ngram)')
CREATE_INDEX_TEXT_SQL = (
'CREATE INDEX IF NOT EXISTS TextIndexLabel ON Text (label)')
CREATE_INDEX_TEXTHASNGRAM_SQL = (
'CREATE UNIQUE INDEX IF NOT EXISTS TextHasNGramIndex '
'ON TextHasNGram (text, size)')
CREATE_INDEX_TEXTNGRAM_SQL = (
'CREATE INDEX IF NOT EXISTS TextNGramIndexTextNGram '
'ON TextNGram (text, ngram)')
CREATE_TABLE_TEXT_SQL = (
'CREATE TABLE IF NOT EXISTS Text ('
'id INTEGER PRIMARY KEY ASC, '
'work TEXT NOT NULL, '
'siglum TEXT NOT NULL, '
'checksum TEXT NOT NULL, '
'token_count INTEGER NOT NULL, '
'label TEXT NOT NULL, '
'UNIQUE (work, siglum))')
CREATE_TABLE_TEXTNGRAM_SQL = (
'CREATE TABLE IF NOT EXISTS TextNGram ('
'text INTEGER NOT NULL REFERENCES Text (id) ON DELETE CASCADE, '
'ngram TEXT NOT NULL, '
'size INTEGER NOT NULL, '
'count INTEGER NOT NULL)')
CREATE_TABLE_TEXTHASNGRAM_SQL = (
'CREATE TABLE IF NOT EXISTS TextHasNGram ('
'text INTEGER NOT NULL REFERENCES Text (id) ON DELETE CASCADE, '
'size INTEGER NOT NULL, '
'count INTEGER NOT NULL)')
CREATE_TEMPORARY_NGRAMS_TABLE_SQL = (
'CREATE TEMPORARY TABLE InputNGram (ngram TEXT UNIQUE)')
CREATE_TEMPORARY_RESULTS_TABLE_SQL = (
'CREATE TEMPORARY TABLE InputResults ('
'ngram TEXT NOT NULL, '
'size INTEGER NOT NULL, '
'work TEXT NOT NULL, '
'siglum TEXT NOT NULL, '
'count INTEGER NOT NULL, '
'label TEXT NOT NULL)')
DELETE_TEXT_SQL = 'DELETE FROM Text WHERE id = ?'
DELETE_TEXT_HAS_NGRAMS_SQL = 'DELETE FROM TextHasNGram WHERE text = ?'
DELETE_TEXT_NGRAMS_SQL = 'DELETE FROM TextNGram WHERE text = ?'
DROP_TEMPORARY_NGRAMS_TABLE_SQL = 'DROP TABLE IF EXISTS InputNGram'
DROP_TEMPORARY_RESULTS_TABLE_SQL = 'DROP TABLE IF EXISTS InputResults'
DROP_TEXTNGRAM_INDEX_SQL = 'DROP INDEX IF EXISTS TextNGramIndexTextNGram'
INSERT_NGRAM_SQL = (
'INSERT INTO TextNGram (text, ngram, size, count) VALUES (?, ?, ?, ?)')
INSERT_TEXT_HAS_NGRAM_SQL = (
'INSERT INTO TextHasNGram (text, size, count) VALUES (?, ?, ?)')
INSERT_TEXT_SQL = (
'INSERT INTO Text (work, siglum, checksum, token_count, label) '
'VALUES (?, ?, ?, ?, ?)')
INSERT_TEMPORARY_NGRAM_SQL = 'INSERT INTO temp.InputNGram (ngram) VALUES (?)'
INSERT_TEMPORARY_RESULTS_SQL = (
'INSERT INTO temp.InputResults '
'(ngram, size, work, siglum, count, label) '
'VALUES (?, ?, ?, ?, ?, ?)')
PRAGMA_CACHE_SIZE_SQL = 'PRAGMA cache_size={}'
PRAGMA_COUNT_CHANGES_SQL = 'PRAGMA count_changes=OFF'
PRAGMA_FOREIGN_KEYS_SQL = 'PRAGMA foreign_keys=ON'
PRAGMA_LOCKING_MODE_SQL = 'PRAGMA locking_mode=EXCLUSIVE'
PRAGMA_SYNCHRONOUS_SQL = 'PRAGMA synchronous=OFF'
PRAGMA_TEMP_STORE_SQL = 'PRAGMA temp_store=MEMORY'
SELECT_COUNTS_SQL = (
'SELECT Text.work, Text.siglum, '
'TextHasNGram.size, TextHasNGram.count AS "%s", '
'Text.token_count + 1 - TextHasNGram.size AS "%s", '
'Text.token_count AS "%s", Text.label '
'FROM Text, TextHasNGram '
'WHERE Text.id = TextHasNGram.text AND Text.label IN ({}) '
'ORDER BY Text.work, TextHasNGram.size' % (
UNIQUE_NGRAMS_FIELDNAME, TOTAL_NGRAMS_FIELDNAME,
TOTAL_TOKENS_FIELDNAME))
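# Text.token_count + 1 - TextHasNGram.size gives the number of n-grams of
# that size in a witness: a text of T tokens contains T - n + 1 n-grams of
# size n (e.g. 5 tokens yield 4 bigrams).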
SELECT_DIFF_ASYMMETRIC_SQL = (
'SELECT TextNGram.ngram, TextNGram.size, '
'Text.work, Text.siglum, TextNGram.count, Text.label '
'FROM Text, TextNGram '
'WHERE Text.label = ? AND Text.id = TextNGram.text '
'AND TextNGram.ngram IN ('
'SELECT TextNGram.ngram FROM Text, TextNGram '
'WHERE Text.id = TextNGram.text AND Text.label = ? '
'EXCEPT '
'SELECT TextNGram.ngram FROM Text, TextNGram '
'WHERE Text.id = TextNGram.text AND Text.label IN ({}))')
SELECT_DIFF_SQL = (
'SELECT TextNGram.ngram, TextNGram.size, Text.work, Text.siglum, '
'TextNGram.count, Text.label '
'FROM Text, TextNGram '
'WHERE Text.label IN ({}) AND Text.id = TextNGram.text '
'AND TextNGram.ngram IN ('
'SELECT TextNGram.ngram FROM Text, TextNGram '
'WHERE Text.id = TextNGram.text AND Text.label IN ({}) '
'GROUP BY TextNGram.ngram HAVING COUNT(DISTINCT Text.label) = 1)')
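# The HAVING COUNT(DISTINCT Text.label) = 1 subquery keeps only those
# n-grams that occur under exactly one of the specified labels, i.e.
# n-grams unique to a single labelled set of works.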
SELECT_DIFF_SUPPLIED_SQL = (
'SELECT ngram, size, work, siglum, count, label '
'FROM temp.InputResults '
'WHERE ngram IN ('
'SELECT ngram FROM temp.InputResults '
'GROUP BY ngram HAVING COUNT(DISTINCT label) = 1)')
SELECT_HAS_NGRAMS_SQL = (
'SELECT text FROM TextHasNGram WHERE text = ? AND size = ?')
SELECT_INTERSECT_SQL = (
'SELECT TextNGram.ngram, TextNGram.size, '
'Text.work, Text.siglum, TextNGram.count, Text.label '
'FROM Text, TextNGram '
'WHERE Text.label IN ({}) AND Text.id = TextNGram.text '
'AND TextNGram.ngram IN ({})')
SELECT_INTERSECT_SUB_EXTRA_SQL = ' AND TextNGram.ngram IN ({})'
SELECT_INTERSECT_SUB_SQL = (
'SELECT TextNGram.ngram '
'FROM Text, TextNGram '
'WHERE Text.label = ? AND Text.id = TextNGram.text')
SELECT_INTERSECT_SUPPLIED_SQL = (
'SELECT ngram, size, work, siglum, count, label '
'FROM temp.InputResults '
'WHERE ngram IN ('
'SELECT ngram FROM temp.InputResults '
'GROUP BY ngram HAVING COUNT(DISTINCT label) = ?)')
SELECT_SEARCH_SQL = (
'SELECT TextNGram.ngram, TextNGram.size, Text.work, Text.siglum, '
'TextNGram.count, Text.label '
'FROM Text, TextNGram '
'WHERE Text.label IN ({}) AND Text.id = TextNGram.text '
'AND TextNGram.ngram IN (SELECT ngram FROM temp.InputNGram)')
SELECT_SEARCH_ALL_SQL = (
'SELECT TextNGram.ngram, TextNGram.size, Text.work, Text.siglum, '
'TextNGram.count, Text.label '
'FROM Text, TextNGram '
'WHERE Text.label IN ({}) AND Text.id = TextNGram.text')
SELECT_TEXT_TOKEN_COUNT_SQL = (
'SELECT Text.token_count FROM Text WHERE Text.work = ?')
SELECT_TEXT_SQL = 'SELECT id, checksum FROM Text WHERE work = ? AND siglum = ?'
SELECT_TEXTS_SQL = 'SELECT id, work, siglum FROM Text'
SELECT_WORK_TEXTS_SQL = 'SELECT id, work, siglum FROM Text WHERE work = ?'
UPDATE_LABEL_SQL = 'UPDATE Text SET label = ? WHERE work = ?'
UPDATE_LABELS_SQL = 'UPDATE Text SET label = ?'
UPDATE_TEXT_SQL = 'UPDATE Text SET checksum = ?, token_count = ? WHERE id = ?'
VACUUM_SQL = 'VACUUM'
|
ajenhl/tacl
|
tacl/constants.py
|
Python
|
gpl-3.0
| 39,789
|
[
"Biopython"
] |
396d06d7f24dd495ec81b972d27d229c4ea629bdff173ac037ed338fcd06a88e
|
"""
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of squared
distances to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum), with estimated centers stuck
between the ground truth clusters.
The dataset used for evaluation is a 2D grid of widely spaced,
isotropic Gaussian clusters.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import numpy as np
import pylab as pl
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with a randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = pl.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = pl.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
pl.xlabel('n_init')
pl.ylabel('inertia')
pl.legend(plots, legends)
pl.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = pl.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
pl.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
pl.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
pl.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
pl.show()
|
florian-f/sklearn
|
examples/cluster/plot_kmeans_stability_low_dim_dense.py
|
Python
|
bsd-3-clause
| 4,310
|
[
"Gaussian"
] |
9dd4f49a87fe90de95ac51fa0e9c3cde183063d869bb8348c9d57c44f3f87760
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2012-2013 Michal Kalewski <mkalewski at cs.put.poznan.pl>
#
# This file is a part of the Simple Network Simulator (sim2net) project.
# USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO
# THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY
# OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY
# FROM HTTP://WWW.OPENSOURCE.ORG/.
#
# For bug reports, feature and support requests please visit
# <https://github.com/mkalewski/sim2net/issues>.
"""
"""
from abc import ABCMeta, abstractmethod
__docformat__ = 'reStructuredText'
#pylint: disable=R0921
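# Abstract base class for sim2net applications; judging from the method
# signatures, subclasses override these callbacks to react to node
# initialisation, finalisation, node failure and each simulation step.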
class Application(object):
"""
"""
__metaclass__ = ABCMeta
def initialize(self, node_id, shared):
"""
"""
pass
def finalize(self, shared):
"""
"""
pass
def failure(self, time, shared):
"""
"""
pass
def main(self, time, communication, neighbors, shared):
"""
"""
pass
|
mkalewski/sim2net
|
sim2net/application.py
|
Python
|
mit
| 1,080
|
[
"VisIt"
] |
f74d67bd8ecf7ce426c241ac4fd6ceabec1ebf071a9b30a04ce7f1298ec14f1d
|
#!/usr/bin/env python
#
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
DESC_KEY = 'desc'
SAMP_RATE_KEY = 'samp_rate'
LINK_RATE_KEY = 'link_rate'
DAC_RATE_KEY = 'dac_rate'
INTERP_KEY = 'interp'
GAIN_KEY = 'gain'
TX_FREQ_KEY = 'tx_freq'
DDC_FREQ_KEY = 'ddc_freq'
BB_FREQ_KEY = 'bb_freq'
AMPLITUDE_KEY = 'amplitude'
AMPL_RANGE_KEY = 'ampl_range'
WAVEFORM_FREQ_KEY = 'waveform_freq'
WAVEFORM_OFFSET_KEY = 'waveform_offset'
WAVEFORM2_FREQ_KEY = 'waveform2_freq'
FREQ_RANGE_KEY = 'freq_range'
GAIN_RANGE_KEY = 'gain_range'
TYPE_KEY = 'type'
def setter(ps, key, val): ps[key] = val
from gnuradio import gr, eng_notation
from gnuradio.gr.pubsub import pubsub
from gnuradio.eng_option import eng_option
from gnuradio import usrp_options
from optparse import OptionParser
import sys
import math
n2s = eng_notation.num_to_str
waveforms = { gr.GR_SIN_WAVE : "Complex Sinusoid",
gr.GR_CONST_WAVE : "Constant",
gr.GR_GAUSSIAN : "Gaussian Noise",
gr.GR_UNIFORM : "Uniform Noise",
"2tone" : "Two Tone",
"sweep" : "Sweep" }
#
# GUI-unaware GNU Radio flowgraph. This may be used either with command
# line applications or GUI applications.
#
class top_block(gr.top_block, pubsub):
def __init__(self, options, args):
gr.top_block.__init__(self)
pubsub.__init__(self)
self._verbose = options.verbose
#initialize values from options
self._setup_usrpx(options)
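        # Keep derived rates in sync: when the interpolation changes, the
        # sample rate is recomputed as dac_rate / interp; when the sample
        # rate changes, the link rate is recomputed as samp_rate * 32.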
self.subscribe(INTERP_KEY, lambda i: setter(self, SAMP_RATE_KEY, self[DAC_RATE_KEY]/i))
self.subscribe(SAMP_RATE_KEY, lambda e: setter(self, LINK_RATE_KEY, e*32))
self[INTERP_KEY] = options.interp or 16
self[TX_FREQ_KEY] = options.tx_freq
self[AMPLITUDE_KEY] = options.amplitude
self[WAVEFORM_FREQ_KEY] = options.waveform_freq
self[WAVEFORM_OFFSET_KEY] = options.offset
self[WAVEFORM2_FREQ_KEY] = options.waveform2_freq
self[BB_FREQ_KEY] = 0
self[DDC_FREQ_KEY] = 0
#subscribe set methods
self.subscribe(INTERP_KEY, self.set_interp)
self.subscribe(GAIN_KEY, self.set_gain)
self.subscribe(TX_FREQ_KEY, self.set_freq)
self.subscribe(AMPLITUDE_KEY, self.set_amplitude)
self.subscribe(WAVEFORM_FREQ_KEY, self.set_waveform_freq)
self.subscribe(WAVEFORM2_FREQ_KEY, self.set_waveform2_freq)
self.subscribe(TYPE_KEY, self.set_waveform)
#force update on pubsub keys
for key in (INTERP_KEY, GAIN_KEY, TX_FREQ_KEY,
AMPLITUDE_KEY, WAVEFORM_FREQ_KEY, WAVEFORM_OFFSET_KEY, WAVEFORM2_FREQ_KEY):
self[key] = self[key]
self[TYPE_KEY] = options.type #set type last
def _setup_usrpx(self, options):
self._u = usrp_options.create_usrp_sink(options)
self.publish(DESC_KEY, lambda: str(self._u))
self.publish(DAC_RATE_KEY, self._u.dac_rate)
self.publish(FREQ_RANGE_KEY, self._u.freq_range)
self.publish(GAIN_RANGE_KEY, self._u.gain_range)
self.publish(GAIN_KEY, self._u.gain)
if self._verbose: print str(self._u)
def _set_tx_amplitude(self, ampl):
"""
Sets the transmit amplitude sent to the USRP
@param ampl the amplitude or None for automatic
"""
ampl_range = self[AMPL_RANGE_KEY]
if ampl is None: ampl = (ampl_range[1] - ampl_range[0])*0.15 + ampl_range[0]
self[AMPLITUDE_KEY] = max(ampl_range[0], min(ampl, ampl_range[1]))
def set_interp(self, interp):
if not self._u.set_interp(interp):
raise RuntimeError("Failed to set interpolation rate %i" % (interp,))
if self._verbose:
print "USRP interpolation rate:", interp
print "USRP IF bandwidth: %sHz" % (n2s(self[SAMP_RATE_KEY]),)
if self[TYPE_KEY] in (gr.GR_SIN_WAVE, gr.GR_CONST_WAVE):
self._src.set_sampling_freq(self[SAMP_RATE_KEY])
elif self[TYPE_KEY] == "2tone":
self._src1.set_sampling_freq(self[SAMP_RATE_KEY])
self._src2.set_sampling_freq(self[SAMP_RATE_KEY])
elif self[TYPE_KEY] == "sweep":
self._src1.set_sampling_freq(self[SAMP_RATE_KEY])
self._src2.set_sampling_freq(self[WAVEFORM_FREQ_KEY]*2*math.pi/self[SAMP_RATE_KEY])
else:
return True # Waveform not yet set
if self._verbose: print "Set interpolation rate to:", interp
return True
def set_gain(self, gain):
if gain is None:
g = self[GAIN_RANGE_KEY]
gain = float(g[0]+g[1])/2
if self._verbose:
print "Using auto-calculated mid-point TX gain"
self[GAIN_KEY] = gain
return
self._u.set_gain(gain)
if self._verbose:
print "Set TX gain to:", gain
def set_freq(self, target_freq):
if target_freq is None:
f = self[FREQ_RANGE_KEY]
target_freq = float(f[0]+f[1])/2.0
if self._verbose:
print "Using auto-calculated mid-point frequency"
self[TX_FREQ_KEY] = target_freq
return
tr = self._u.set_center_freq(target_freq)
fs = "%sHz" % (n2s(target_freq),)
if tr is not None:
self._freq = target_freq
self[DDC_FREQ_KEY] = tr.dxc_freq
self[BB_FREQ_KEY] = tr.baseband_freq
if self._verbose:
print "Set center frequency to", fs
print "Tx baseband frequency: %sHz" % (n2s(tr.baseband_freq),)
print "Tx DDC frequency: %sHz" % (n2s(tr.dxc_freq),)
print "Tx residual frequency: %sHz" % (n2s(tr.residual_freq),)
elif self._verbose: print "Failed to set freq."
return tr
def set_waveform_freq(self, freq):
if self[TYPE_KEY] == gr.GR_SIN_WAVE:
self._src.set_frequency(freq)
elif self[TYPE_KEY] == "2tone":
self._src1.set_frequency(freq)
elif self[TYPE_KEY] == 'sweep':
#there is no set sensitivity, redo fg
self[TYPE_KEY] = self[TYPE_KEY]
return True
def set_waveform2_freq(self, freq):
if freq is None:
self[WAVEFORM2_FREQ_KEY] = -self[WAVEFORM_FREQ_KEY]
return
if self[TYPE_KEY] == "2tone":
self._src2.set_frequency(freq)
elif self[TYPE_KEY] == "sweep":
self._src1.set_frequency(freq)
return True
def set_waveform(self, type):
self.lock()
self.disconnect_all()
if type == gr.GR_SIN_WAVE or type == gr.GR_CONST_WAVE:
self._src = gr.sig_source_c(self[SAMP_RATE_KEY], # Sample rate
type, # Waveform type
self[WAVEFORM_FREQ_KEY], # Waveform frequency
self[AMPLITUDE_KEY], # Waveform amplitude
self[WAVEFORM_OFFSET_KEY]) # Waveform offset
elif type == gr.GR_GAUSSIAN or type == gr.GR_UNIFORM:
self._src = gr.noise_source_c(type, self[AMPLITUDE_KEY])
elif type == "2tone":
self._src1 = gr.sig_source_c(self[SAMP_RATE_KEY],
gr.GR_SIN_WAVE,
self[WAVEFORM_FREQ_KEY],
self[AMPLITUDE_KEY]/2.0,
0)
if(self[WAVEFORM2_FREQ_KEY] is None):
self[WAVEFORM2_FREQ_KEY] = -self[WAVEFORM_FREQ_KEY]
self._src2 = gr.sig_source_c(self[SAMP_RATE_KEY],
gr.GR_SIN_WAVE,
self[WAVEFORM2_FREQ_KEY],
self[AMPLITUDE_KEY]/2.0,
0)
self._src = gr.add_cc()
self.connect(self._src1,(self._src,0))
self.connect(self._src2,(self._src,1))
elif type == "sweep":
# rf freq is center frequency
# waveform_freq is total swept width
# waveform2_freq is sweep rate
# will sweep from (rf_freq-waveform_freq/2) to (rf_freq+waveform_freq/2)
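            # Illustrative numbers for the comments above: with
            # waveform_freq = 1 MHz the sweep spans rf_freq - 500 kHz to
            # rf_freq + 500 kHz, repeating waveform2_freq times per second.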
if self[WAVEFORM2_FREQ_KEY] is None:
self[WAVEFORM2_FREQ_KEY] = 0.1
self._src1 = gr.sig_source_f(self[SAMP_RATE_KEY],
gr.GR_TRI_WAVE,
self[WAVEFORM2_FREQ_KEY],
1.0,
-0.5)
self._src2 = gr.frequency_modulator_fc(self[WAVEFORM_FREQ_KEY]*2*math.pi/self[SAMP_RATE_KEY])
self._src = gr.multiply_const_cc(self[AMPLITUDE_KEY])
self.connect(self._src1,self._src2,self._src)
else:
raise RuntimeError("Unknown waveform type")
self.connect(self._src, self._u)
self.unlock()
if self._verbose:
print "Set baseband modulation to:", waveforms[type]
if type == gr.GR_SIN_WAVE:
print "Modulation frequency: %sHz" % (n2s(self[WAVEFORM_FREQ_KEY]),)
print "Initial phase:", self[WAVEFORM_OFFSET_KEY]
elif type == "2tone":
print "Tone 1: %sHz" % (n2s(self[WAVEFORM_FREQ_KEY]),)
print "Tone 2: %sHz" % (n2s(self[WAVEFORM2_FREQ_KEY]),)
elif type == "sweep":
print "Sweeping across %sHz to %sHz" % (n2s(-self[WAVEFORM_FREQ_KEY]/2.0),n2s(self[WAVEFORM_FREQ_KEY]/2.0))
print "Sweep rate: %sHz" % (n2s(self[WAVEFORM2_FREQ_KEY]),)
print "TX amplitude:", self[AMPLITUDE_KEY]
def set_amplitude(self, amplitude):
if amplitude < 0.0 or amplitude > 1.0:
if self._verbose: print "Amplitude out of range:", amplitude
return False
if self[TYPE_KEY] in (gr.GR_SIN_WAVE, gr.GR_CONST_WAVE, gr.GR_GAUSSIAN, gr.GR_UNIFORM):
self._src.set_amplitude(amplitude)
elif self[TYPE_KEY] == "2tone":
self._src1.set_amplitude(amplitude/2.0)
self._src2.set_amplitude(amplitude/2.0)
elif self[TYPE_KEY] == "sweep":
self._src.set_k(amplitude)
else:
return True # Waveform not yet set
if self._verbose: print "Set amplitude to:", amplitude
return True
def get_options():
usage="%prog: [options]"
parser = OptionParser(option_class=eng_option, usage=usage)
usrp_options.add_tx_options(parser)
parser.add_option("-f", "--tx-freq", type="eng_float", default=None,
help="Set carrier frequency to FREQ [default=mid-point]", metavar="FREQ")
parser.add_option("-x", "--waveform-freq", type="eng_float", default=0,
help="Set baseband waveform frequency to FREQ [default=%default]")
parser.add_option("-y", "--waveform2-freq", type="eng_float", default=None,
help="Set 2nd waveform frequency to FREQ [default=%default]")
parser.add_option("--sine", dest="type", action="store_const", const=gr.GR_SIN_WAVE,
help="Generate a carrier modulated by a complex sine wave", default=gr.GR_SIN_WAVE)
parser.add_option("--const", dest="type", action="store_const", const=gr.GR_CONST_WAVE,
help="Generate a constant carrier")
parser.add_option("--offset", type="eng_float", default=0,
help="Set waveform phase offset to OFFSET [default=%default]")
parser.add_option("--gaussian", dest="type", action="store_const", const=gr.GR_GAUSSIAN,
help="Generate Gaussian random output")
parser.add_option("--uniform", dest="type", action="store_const", const=gr.GR_UNIFORM,
help="Generate Uniform random output")
parser.add_option("--2tone", dest="type", action="store_const", const="2tone",
help="Generate Two Tone signal for IMD testing")
parser.add_option("--sweep", dest="type", action="store_const", const="sweep",
help="Generate a swept sine wave")
parser.add_option("-A", "--amplitude", type="eng_float", default=0.15,
help="Set output amplitude to AMPL (0.0-1.0) [default=%default]", metavar="AMPL")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="Use verbose console output [default=%default]")
(options, args) = parser.parse_args()
return (options, args)
# If this script is executed, the following runs. If it is imported, the below does not run.
def main():
if gr.enable_realtime_scheduling() != gr.RT_OK:
print "Note: failed to enable realtime scheduling, continuing"
# Grab command line options and create top block
try:
(options, args) = get_options()
tb = top_block(options, args)
except RuntimeError, e:
print e
sys.exit(1)
tb.start()
raw_input('Press Enter to quit: ')
tb.stop()
tb.wait()
# Make sure to create the top block (tb) within a function:
# That code in main will allow tb to go out of scope on return,
# which will call the destructor on the usrp and stop transmitting.
# What's odd is that grc works fine with tb in __main__,
# perhaps it's because of the try/except clauses around tb.
if __name__ == "__main__": main()
|
GREO/GNU-Radio
|
gr-utils/src/python/usrp_siggen.py
|
Python
|
gpl-3.0
| 14,262
|
[
"Gaussian"
] |
8d2ec17d0dd80dcf26605ba371bf93ba193a19b7dacfb0e68b87331f02aefb36
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
"""
Testing code.
Updated BSM March 2016
"""
import unittest
import os
import numpy as np
from itertools import product
from pykrige import kriging_tools as kt
from pykrige import core
from pykrige import variogram_models
from pykrige.ok import OrdinaryKriging
from pykrige.uk import UniversalKriging
from pykrige.ok3d import OrdinaryKriging3D
from pykrige.uk3d import UniversalKriging3D
from pykrige.compat import SKLEARN_INSTALLED
class TestPyKrige(unittest.TestCase):
def setUp(self):
self.test_data = np.genfromtxt(os.path.join(os.getcwd(), 'test_data/test_data.txt'))
self.ok_test_answer, self.ok_test_gridx, self.ok_test_gridy, cellsize, no_data = \
kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/test1_answer.asc'), footer=2)
self.uk_test_answer, self.uk_test_gridx, self.uk_test_gridy, cellsize, no_data = \
kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/test2_answer.asc'), footer=2)
self.simple_data = np.array([[0.3, 1.2, 0.47],
[1.9, 0.6, 0.56],
[1.1, 3.2, 0.74],
[3.3, 4.4, 1.47],
[4.7, 3.8, 1.74]])
self.simple_gridx = np.arange(0.0, 6.0, 1.0)
self.simple_gridx_2 = np.arange(0.0, 5.5, 0.5)
self.simple_gridy = np.arange(0.0, 5.5, 0.5)
xi, yi = np.meshgrid(self.simple_gridx, self.simple_gridy)
self.mask = np.array(xi == yi)
self.simple_data_3d = np.array([[0.1, 0.1, 0.3, 0.9],
[0.2, 0.1, 0.4, 0.8],
[0.1, 0.3, 0.1, 0.9],
[0.5, 0.4, 0.4, 0.5],
[0.3, 0.3, 0.2, 0.7]])
self.simple_gridx_3d = np.arange(0.0, 0.6, 0.05)
self.simple_gridy_3d = np.arange(0.0, 0.6, 0.01)
self.simple_gridz_3d = np.arange(0.0, 0.6, 0.1)
zi, yi, xi = np.meshgrid(self.simple_gridz_3d, self.simple_gridy_3d, self.simple_gridx_3d, indexing='ij')
self.mask_3d = np.array((xi == yi) & (yi == zi))
def test_core_adjust_for_anisotropy(self):
X = np.array([[1.0, 0.0, -1.0, 0.0],
[0.0, 1.0, 0.0, -1.0]]).T
X_adj = core._adjust_for_anisotropy(X, [0.0, 0.0], [2.0], [90.0])
self.assertTrue(np.allclose(X_adj[:, 0], np.array([0.0, 1.0, 0.0, -1.0])))
self.assertTrue(np.allclose(X_adj[:, 1], np.array([-2.0, 0.0, 2.0, 0.0])))
def test_core_adjust_for_anisotropy_3d(self):
        # this is a bad example, as the X matrix is symmetric
# and insensitive to transpositions
X = np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]).T
X_adj = core._adjust_for_anisotropy(X, [0., 0., 0.], [2., 2.], [90., 0., 0.])
self.assertTrue(np.allclose(X_adj[:, 0], np.array([1., 0., 0.])))
self.assertTrue(np.allclose(X_adj[:, 1], np.array([0., 0., 2.])))
self.assertTrue(np.allclose(X_adj[:, 2], np.array([0., -2., 0.])))
X_adj = core._adjust_for_anisotropy(X, [0., 0., 0.], [2., 2.], [0., 90., 0.])
self.assertTrue(np.allclose(X_adj[:, 0], np.array([0., 0., -1.])))
self.assertTrue(np.allclose(X_adj[:, 1], np.array([0., 2., 0.])))
self.assertTrue(np.allclose(X_adj[:, 2], np.array([2., 0., 0.])))
X_adj = core._adjust_for_anisotropy(X, [0., 0., 0.], [2., 2.], [0., 0., 90.])
self.assertTrue(np.allclose(X_adj[:, 0], np.array([0., 1., 0.])))
self.assertTrue(np.allclose(X_adj[:, 1], np.array([-2., 0., 0.])))
self.assertTrue(np.allclose(X_adj[:, 2], np.array([0., 0., 2.])))
def test_core_initialize_variogram_model(self):
# Note the variogram_function argument is not a string in real life...
self.assertRaises(ValueError, core.initialize_variogram_model, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], 'linear', [0.0], 'linear', 6, False,
'euclidean')
self.assertRaises(ValueError, core.initialize_variogram_model, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], 'spherical', [0.0], 'spherical', 6, False,
'euclidean')
x = np.array([1.0 + n/np.sqrt(2) for n in range(4)])
y = np.array([1.0 + n/np.sqrt(2) for n in range(4)])
z = np.arange(1.0, 5.0, 1.0)
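        # Points spaced 1.0 apart along a 45-degree line with z values 1-4:
        # pairs at lag 1 differ by 1, at lag 2 by 2, at lag 3 by 3, so the
        # semivariances (half the mean squared difference) are 0.5, 2.0, 4.5.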
lags, semivariance, variogram_model_parameters = core.initialize_variogram_model(x, y, z, 'linear',
[0.0, 0.0], 'linear',
6, False, 'euclidean')
self.assertTrue(np.allclose(lags, np.array([1.0, 2.0, 3.0])))
self.assertTrue(np.allclose(semivariance, np.array([0.5, 2.0, 4.5])))
def test_core_initialize_variogram_model_3d(self):
# Note the variogram_function argument is not a string in real life...
self.assertRaises(ValueError, core.initialize_variogram_model_3d, self.simple_data_3d[:, 0],
self.simple_data_3d[:, 1], self.simple_data_3d[:, 2], self.simple_data_3d[:, 3],
'linear', [0.0], 'linear', 6, False)
self.assertRaises(ValueError, core.initialize_variogram_model_3d, self.simple_data_3d[:, 0],
self.simple_data_3d[:, 1], self.simple_data_3d[:, 2], self.simple_data_3d[:, 3],
'spherical', [0.0], 'spherical', 6, False)
lags, semivariance, variogram_model_parameters = core.initialize_variogram_model_3d(np.array([1., 2., 3., 4.]),
np.array([1., 2., 3., 4.]),
np.array([1., 2., 3., 4.]),
np.array([1., 2., 3., 4.]),
'linear', [0.0, 0.0],
'linear', 3, False)
self.assertTrue(np.allclose(lags, np.array([np.sqrt(3.), 2.*np.sqrt(3.), 3.*np.sqrt(3.)])))
self.assertTrue(np.allclose(semivariance, np.array([0.5, 2.0, 4.5])))
def test_core_calculate_variogram_model(self):
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([2.05, 2.95, 4.05, 4.95]),
'linear', variogram_models.linear_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([0.98, 1.05]), 0.01, 0.01))
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([2.05, 2.95, 4.05, 4.95]),
'linear', variogram_models.linear_variogram_model, True)
self.assertTrue(np.allclose(res, np.array([0.98, 1.05]), 0.01, 0.01))
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([1.0, 2.8284, 5.1962, 8.0]),
'power', variogram_models.power_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([1.0, 1.5, 0.0])))
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([1.0, 1.4142, 1.7321, 2.0]),
'power', variogram_models.power_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([1.0, 0.5, 0.0])))
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([1.2642, 1.7293, 1.9004, 1.9634]),
'exponential', variogram_models.exponential_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([2.0, 3.0, 0.0]), 0.001, 0.001))
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([0.5769, 1.4872, 1.9065, 1.9914]),
'gaussian', variogram_models.gaussian_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([2.0, 3.0, 0.0]), 0.001, 0.001))
def test_core_krige(self):
# Example 3.2 from Kitanidis
data = np.array([[9.7, 47.6, 1.22],
[43.8, 24.6, 2.822]])
z, ss = core.krige(data[:, 0], data[:, 1], data[:, 2], (18.8, 67.9),
variogram_models.linear_variogram_model, [0.006, 0.1],
'euclidean')
self.assertAlmostEqual(z, 1.6364, 4)
self.assertAlmostEqual(ss, 0.4201, 4)
z, ss = core.krige(data[:, 0], data[:, 1], data[:, 2], (43.8, 24.6),
variogram_models.linear_variogram_model, [0.006, 0.1],
'euclidean')
self.assertAlmostEqual(z, 2.822, 3)
self.assertAlmostEqual(ss, 0.0, 3)
def test_core_krige_3d(self):
# Adapted from example 3.2 from Kitanidis
data = np.array([[9.7, 47.6, 1.0, 1.22],
[43.8, 24.6, 1.0, 2.822]])
z, ss = core.krige_3d(data[:, 0], data[:, 1], data[:, 2], data[:, 3], (18.8, 67.9, 1.0),
variogram_models.linear_variogram_model, [0.006, 0.1])
self.assertAlmostEqual(z, 1.6364, 4)
self.assertAlmostEqual(ss, 0.4201, 4)
z, ss = core.krige_3d(data[:, 0], data[:, 1], data[:, 2], data[:, 3], (43.8, 24.6, 1.0),
variogram_models.linear_variogram_model, [0.006, 0.1])
self.assertAlmostEqual(z, 2.822, 3)
self.assertAlmostEqual(ss, 0.0, 3)
def test_ok(self):
# Test to compare OK results to those obtained using KT3D_H2O.
# (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater, vol. 47, no. 4, 580-586.)
ok = OrdinaryKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='exponential', variogram_parameters=[500.0, 3000.0, 0.0])
z, ss = ok.execute('grid', self.ok_test_gridx, self.ok_test_gridy, backend='vectorized')
self.assertTrue(np.allclose(z, self.ok_test_answer))
z, ss = ok.execute('grid', self.ok_test_gridx, self.ok_test_gridy, backend='loop')
self.assertTrue(np.allclose(z, self.ok_test_answer))
def test_ok_update_variogram_model(self):
self.assertRaises(ValueError, OrdinaryKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], variogram_model='blurg')
ok = OrdinaryKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2])
variogram_model = ok.variogram_model
variogram_parameters = ok.variogram_model_parameters
anisotropy_scaling = ok.anisotropy_scaling
anisotropy_angle = ok.anisotropy_angle
self.assertRaises(ValueError, ok.update_variogram_model, 'blurg')
ok.update_variogram_model('power', anisotropy_scaling=3.0, anisotropy_angle=45.0)
self.assertFalse(variogram_model == ok.variogram_model)
self.assertFalse(variogram_parameters == ok.variogram_model_parameters)
self.assertFalse(anisotropy_scaling == ok.anisotropy_scaling)
self.assertFalse(anisotropy_angle == ok.anisotropy_angle)
def test_ok_execute(self):
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2])
self.assertRaises(ValueError, ok.execute, 'blurg', self.simple_gridx, self.simple_gridy)
z, ss = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='vectorized')
shape = (self.simple_gridy.size, self.simple_gridx.size)
self.assertEqual(z.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(z), np.amin(z))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(z))
z, ss = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop')
shape = (self.simple_gridy.size, self.simple_gridx.size)
self.assertEqual(z.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(z), np.amin(z))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(z))
self.assertRaises(IOError, ok.execute, 'masked', self.simple_gridx, self.simple_gridy, backend='vectorized')
mask = np.array([True, False])
self.assertRaises(ValueError, ok.execute, 'masked', self.simple_gridx, self.simple_gridy, mask=mask,
backend='vectorized')
z, ss = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
z, ss = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask.T, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
self.assertRaises(IOError, ok.execute, 'masked', self.simple_gridx, self.simple_gridy, backend='loop')
mask = np.array([True, False])
self.assertRaises(ValueError, ok.execute, 'masked', self.simple_gridx, self.simple_gridy, mask=mask,
backend='loop')
z, ss = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
z, ss = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask.T, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
self.assertRaises(ValueError, ok.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
backend='vectorized')
z, ss = ok.execute('points', self.simple_gridx[0], self.simple_gridy[0], backend='vectorized')
self.assertEqual(z.shape, (1,))
self.assertEqual(ss.shape, (1,))
self.assertRaises(ValueError, ok.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
backend='loop')
z, ss = ok.execute('points', self.simple_gridx[0], self.simple_gridy[0], backend='loop')
self.assertEqual(z.shape, (1,))
self.assertEqual(ss.shape, (1,))
def test_cython_ok(self):
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2])
z1, ss1 = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop')
z2, ss2 = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='C')
self.assertTrue(np.allclose(z1, z2))
self.assertTrue(np.allclose(ss1, ss2))
closest_points = 4
z1, ss1 = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop',
n_closest_points=closest_points)
z2, ss2 = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='C',
n_closest_points=closest_points)
self.assertTrue(np.allclose(z1, z2))
self.assertTrue(np.allclose(ss1, ss2))
def test_uk(self):
# Test to compare UK with linear drift to results from KT3D_H2O.
# (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater, vol. 47, no. 4, 580-586.)
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='exponential', variogram_parameters=[500.0, 3000.0, 0.0],
drift_terms=['regional_linear'])
z, ss = uk.execute('grid', self.uk_test_gridx, self.uk_test_gridy, backend='vectorized')
self.assertTrue(np.allclose(z, self.uk_test_answer))
z, ss = uk.execute('grid', self.uk_test_gridx, self.uk_test_gridy, backend='loop')
self.assertTrue(np.allclose(z, self.uk_test_answer))
def test_uk_update_variogram_model(self):
self.assertRaises(ValueError, UniversalKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], variogram_model='blurg')
self.assertRaises(ValueError, UniversalKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], drift_terms=['external_Z'])
self.assertRaises(ValueError, UniversalKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], drift_terms=['external_Z'], external_drift=np.array([0]))
self.assertRaises(ValueError, UniversalKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], drift_terms=['point_log'])
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2])
variogram_model = uk.variogram_model
variogram_parameters = uk.variogram_model_parameters
anisotropy_scaling = uk.anisotropy_scaling
anisotropy_angle = uk.anisotropy_angle
self.assertRaises(ValueError, uk.update_variogram_model, 'blurg')
uk.update_variogram_model('power', anisotropy_scaling=3.0, anisotropy_angle=45.0)
self.assertFalse(variogram_model == uk.variogram_model)
self.assertFalse(variogram_parameters == uk.variogram_model_parameters)
self.assertFalse(anisotropy_scaling == uk.anisotropy_scaling)
self.assertFalse(anisotropy_angle == uk.anisotropy_angle)
def test_uk_calculate_data_point_zscalars(self):
dem = np.arange(0.0, 5.1, 0.1)
dem = np.repeat(dem[np.newaxis, :], 6, axis=0)
dem_x = np.arange(0.0, 5.1, 0.1)
dem_y = np.arange(0.0, 6.0, 1.0)
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', variogram_parameters=[1.0, 0.0],
drift_terms=['external_Z'])
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', variogram_parameters=[1.0, 0.0],
drift_terms=['external_Z'], external_drift=dem)
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', variogram_parameters=[1.0, 0.0],
drift_terms=['external_Z'], external_drift=dem, external_drift_x=dem_x,
external_drift_y=np.arange(0.0, 5.0, 1.0))
uk = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', variogram_parameters=[1.0, 0.0],
drift_terms=['external_Z'], external_drift=dem, external_drift_x=dem_x,
external_drift_y=dem_y)
self.assertTrue(np.allclose(uk.z_scalars, self.simple_data[:, 0]))
xi, yi = np.meshgrid(np.arange(0.0, 5.3, 0.1), self.simple_gridy)
self.assertRaises(ValueError, uk._calculate_data_point_zscalars, xi, yi)
xi, yi = np.meshgrid(np.arange(0.0, 5.0, 0.1), self.simple_gridy)
z_scalars = uk._calculate_data_point_zscalars(xi, yi)
self.assertTrue(np.allclose(z_scalars[0, :], np.arange(0.0, 5.0, 0.1)))
def test_uk_execute_single_point(self):
# Test data and answer from lecture notes by Nicolas Christou, UCLA Stats
data = np.array([[61.0, 139.0, 477.0],
[63.0, 140.0, 696.0],
[64.0, 129.0, 227.0],
[68.0, 128.0, 646.0],
[71.0, 140.0, 606.0],
[73.0, 141.0, 791.0],
[75.0, 128.0, 783.0]])
point = (65.0, 137.0)
z_answer = 567.54
ss_answer = 9.044
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model='exponential',
variogram_parameters=[10.0, 9.99, 0.0], drift_terms=['regional_linear'])
z, ss = uk.execute('points', np.array([point[0]]), np.array([point[1]]), backend='vectorized')
self.assertAlmostEqual(z_answer, z[0], places=0)
self.assertAlmostEqual(ss_answer, ss[0], places=0)
z, ss = uk.execute('points', np.array([61.0]), np.array([139.0]), backend='vectorized')
self.assertAlmostEqual(z[0], 477.0, 3)
self.assertAlmostEqual(ss[0], 0.0, 3)
z, ss = uk.execute('points', np.array([61.0]), np.array([139.0]), backend='loop')
self.assertAlmostEqual(z[0], 477.0, 3)
self.assertAlmostEqual(ss[0], 0.0, 3)
def test_uk_execute(self):
uk = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear'])
self.assertRaises(ValueError, uk.execute, 'blurg', self.simple_gridx, self.simple_gridy)
self.assertRaises(ValueError, uk.execute, 'grid', self.simple_gridx, self.simple_gridy, backend='mrow')
z, ss = uk.execute('grid', self.simple_gridx, self.simple_gridy, backend='vectorized')
shape = (self.simple_gridy.size, self.simple_gridx.size)
self.assertEqual(z.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(z), np.amin(z))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(z))
z, ss = uk.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop')
shape = (self.simple_gridy.size, self.simple_gridx.size)
self.assertEqual(z.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(z), np.amin(z))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(z))
self.assertRaises(IOError, uk.execute, 'masked', self.simple_gridx, self.simple_gridy, backend='vectorized')
mask = np.array([True, False])
self.assertRaises(ValueError, uk.execute, 'masked', self.simple_gridx, self.simple_gridy, mask=mask,
backend='vectorized')
z, ss = uk.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
z, ss = uk.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask.T, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
self.assertRaises(IOError, uk.execute, 'masked', self.simple_gridx, self.simple_gridy, backend='loop')
mask = np.array([True, False])
self.assertRaises(ValueError, uk.execute, 'masked', self.simple_gridx, self.simple_gridy, mask=mask,
backend='loop')
z, ss = uk.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
z, ss = uk.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask.T, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
self.assertRaises(ValueError, uk.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
backend='vectorized')
z, ss = uk.execute('points', self.simple_gridx[0], self.simple_gridy[0], backend='vectorized')
self.assertEqual(z.shape, (1,))
self.assertEqual(ss.shape, (1,))
self.assertRaises(ValueError, uk.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
backend='loop')
z, ss = uk.execute('points', self.simple_gridx[0], self.simple_gridy[0], backend='loop')
self.assertEqual(z.shape, (1,))
self.assertEqual(ss.shape, (1,))
def test_ok_uk_produce_same_result(self):
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
ok = OrdinaryKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='linear', verbose=False, enable_plotting=False)
z_ok, ss_ok = ok.execute('grid', gridx, gridy, backend='vectorized')
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='linear', verbose=False, enable_plotting=False)
z_uk, ss_uk = uk.execute('grid', gridx, gridy, backend='vectorized')
self.assertTrue(np.allclose(z_ok, z_uk))
self.assertTrue(np.allclose(ss_ok, ss_uk))
z_ok, ss_ok = ok.execute('grid', gridx, gridy, backend='loop')
z_uk, ss_uk = uk.execute('grid', gridx, gridy, backend='loop')
self.assertTrue(np.allclose(z_ok, z_uk))
self.assertTrue(np.allclose(ss_ok, ss_uk))
def test_ok_backends_produce_same_result(self):
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
ok = OrdinaryKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='linear', verbose=False, enable_plotting=False)
z_ok_v, ss_ok_v = ok.execute('grid', gridx, gridy, backend='vectorized')
z_ok_l, ss_ok_l = ok.execute('grid', gridx, gridy, backend='loop')
self.assertTrue(np.allclose(z_ok_v, z_ok_l))
self.assertTrue(np.allclose(ss_ok_v, ss_ok_l))
def test_uk_backends_produce_same_result(self):
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='linear', verbose=False, enable_plotting=False)
z_uk_v, ss_uk_v = uk.execute('grid', gridx, gridy, backend='vectorized')
z_uk_l, ss_uk_l = uk.execute('grid', gridx, gridy, backend='loop')
self.assertTrue(np.allclose(z_uk_v, z_uk_l))
self.assertTrue(np.allclose(ss_uk_v, ss_uk_l))
def test_kriging_tools(self):
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2])
z_write, ss_write = ok.execute('grid', self.simple_gridx, self.simple_gridy)
kt.write_asc_grid(self.simple_gridx, self.simple_gridy, z_write,
filename=os.path.join(os.getcwd(), 'test_data/temp.asc'), style=1)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/temp.asc'))
self.assertTrue(np.allclose(z_write, z_read, 0.01, 0.01))
self.assertTrue(np.allclose(self.simple_gridx, x_read))
self.assertTrue(np.allclose(self.simple_gridy, y_read))
z_write, ss_write = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask)
kt.write_asc_grid(self.simple_gridx, self.simple_gridy, z_write,
filename=os.path.join(os.getcwd(), 'test_data/temp.asc'), style=1)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/temp.asc'))
self.assertTrue(np.ma.allclose(z_write, np.ma.masked_where(z_read == no_data, z_read),
masked_equal=True, rtol=0.01, atol=0.01))
self.assertTrue(np.allclose(self.simple_gridx, x_read))
self.assertTrue(np.allclose(self.simple_gridy, y_read))
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2])
z_write, ss_write = ok.execute('grid', self.simple_gridx_2, self.simple_gridy)
kt.write_asc_grid(self.simple_gridx_2, self.simple_gridy, z_write,
filename=os.path.join(os.getcwd(), 'test_data/temp.asc'), style=2)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/temp.asc'))
self.assertTrue(np.allclose(z_write, z_read, 0.01, 0.01))
self.assertTrue(np.allclose(self.simple_gridx_2, x_read))
self.assertTrue(np.allclose(self.simple_gridy, y_read))
os.remove(os.path.join(os.getcwd(), 'test_data/temp.asc'))
def test_uk_three_primary_drifts(self):
well = np.array([[1.1, 1.1, -1.0]])
dem = np.arange(0.0, 5.1, 0.1)
dem = np.repeat(dem[np.newaxis, :], 6, axis=0)
dem_x = np.arange(0.0, 5.1, 0.1)
dem_y = np.arange(0.0, 6.0, 1.0)
uk = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear', 'external_Z', 'point_log'],
point_drift=well, external_drift=dem, external_drift_x=dem_x, external_drift_y=dem_y)
z, ss = uk.execute('grid', self.simple_gridx, self.simple_gridy, backend='vectorized')
self.assertEqual(z.shape, (self.simple_gridy.shape[0], self.simple_gridx.shape[0]))
self.assertEqual(ss.shape, (self.simple_gridy.shape[0], self.simple_gridx.shape[0]))
self.assertTrue(np.all(np.isfinite(z)))
self.assertFalse(np.all(np.isnan(z)))
self.assertTrue(np.all(np.isfinite(ss)))
self.assertFalse(np.all(np.isnan(ss)))
z, ss = uk.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop')
self.assertEqual(z.shape, (self.simple_gridy.shape[0], self.simple_gridx.shape[0]))
self.assertEqual(ss.shape, (self.simple_gridy.shape[0], self.simple_gridx.shape[0]))
self.assertTrue(np.all(np.isfinite(z)))
self.assertFalse(np.all(np.isnan(z)))
self.assertTrue(np.all(np.isfinite(ss)))
self.assertFalse(np.all(np.isnan(ss)))
def test_uk_specified_drift(self):
xg, yg = np.meshgrid(self.simple_gridx, self.simple_gridy)
well = np.array([[1.1, 1.1, -1.0]])
point_log = well[0, 2] * np.log(np.sqrt((xg - well[0, 0])**2. + (yg - well[0, 1])**2.)) * -1.
if np.any(np.isinf(point_log)):
point_log[np.isinf(point_log)] = -100. * well[0, 2] * -1.
point_log_data = well[0, 2] * np.log(np.sqrt((self.simple_data[:, 0] - well[0, 0])**2. +
(self.simple_data[:, 1] - well[0, 1])**2.)) * -1.
if np.any(np.isinf(point_log_data)):
point_log_data[np.isinf(point_log_data)] = -100. * well[0, 2] * -1.
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['specified'])
self.assertRaises(TypeError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['specified'],
specified_drift=self.simple_data[:, 0])
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['specified'],
specified_drift=[self.simple_data[:2, 0]])
uk_spec = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['specified'],
specified_drift=[self.simple_data[:, 0], self.simple_data[:, 1]])
self.assertRaises(ValueError, uk_spec.execute, 'grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=[self.simple_gridx, self.simple_gridy])
self.assertRaises(TypeError, uk_spec.execute, 'grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=self.simple_gridx)
self.assertRaises(ValueError, uk_spec.execute, 'grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=[xg])
z_spec, ss_spec = uk_spec.execute('grid', self.simple_gridx, self.simple_gridy, specified_drift_arrays=[xg, yg])
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear'])
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_spec, z_lin))
self.assertTrue(np.allclose(ss_spec, ss_lin))
uk_spec = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['specified'],
specified_drift=[point_log_data])
z_spec, ss_spec = uk_spec.execute('grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=[point_log])
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['point_log'], point_drift=well)
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_spec, z_lin))
self.assertTrue(np.allclose(ss_spec, ss_lin))
uk_spec = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['specified'],
specified_drift=[self.simple_data[:, 0], self.simple_data[:, 1], point_log_data])
z_spec, ss_spec = uk_spec.execute('grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=[xg, yg, point_log])
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear', 'point_log'],
point_drift=well)
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_spec, z_lin))
self.assertTrue(np.allclose(ss_spec, ss_lin))
def test_uk_functional_drift(self):
well = np.array([[1.1, 1.1, -1.0]])
func_x = lambda x, y: x
func_y = lambda x, y: y
func_well = lambda x, y: - well[0, 2] * np.log(np.sqrt((x - well[0, 0])**2. + (y - well[0, 1])**2.))
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['functional'])
self.assertRaises(TypeError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['functional'],
functional_drift=func_x)
uk_func = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['functional'],
functional_drift=[func_x, func_y])
z_func, ss_func = uk_func.execute('grid', self.simple_gridx, self.simple_gridy)
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear'])
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_func, z_lin))
self.assertTrue(np.allclose(ss_func, ss_lin))
uk_func = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['functional'], functional_drift=[func_well])
z_func, ss_func = uk_func.execute('grid', self.simple_gridx, self.simple_gridy)
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['point_log'], point_drift=well)
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_func, z_lin))
self.assertTrue(np.allclose(ss_func, ss_lin))
uk_func = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['functional'],
functional_drift=[func_x, func_y, func_well])
z_func, ss_func = uk_func.execute('grid', self.simple_gridx, self.simple_gridy)
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear', 'point_log'],
point_drift=well)
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_func, z_lin))
self.assertTrue(np.allclose(ss_func, ss_lin))
def test_uk_with_external_drift(self):
dem, demx, demy, cellsize, no_data = \
kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/test3_dem.asc'))
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='spherical',
variogram_parameters=[500.0, 3000.0, 0.0],
anisotropy_scaling=1.0, anisotropy_angle=0.0,
drift_terms=['external_Z'], external_drift=dem,
external_drift_x=demx, external_drift_y=demy,
verbose=False)
answer, gridx, gridy, cellsize, no_data = \
kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/test3_answer.asc'))
z, ss = uk.execute('grid', gridx, gridy, backend='vectorized')
self.assertTrue(np.allclose(z, answer))
z, ss = uk.execute('grid', gridx, gridy, backend='loop')
self.assertTrue(np.allclose(z, answer))
def test_force_exact(self):
data = np.array([[1., 1., 2.],
[2., 2., 1.5],
[3., 3., 1.]])
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2],
variogram_model='linear', variogram_parameters=[1.0, 1.0])
z, ss = ok.execute('grid', [1., 2., 3.], [1., 2., 3.], backend='vectorized')
self.assertAlmostEqual(z[0, 0], 2.0)
self.assertAlmostEqual(ss[0, 0], 0.0)
self.assertAlmostEqual(z[1, 1], 1.5)
self.assertAlmostEqual(ss[1, 1], 0.0)
self.assertAlmostEqual(z[2, 2], 1.0)
self.assertAlmostEqual(ss[2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 0], 0.0)
z, ss = ok.execute('points', [1., 2., 3., 3.], [2., 1., 1., 3.], backend='vectorized')
self.assertNotAlmostEqual(ss[0], 0.0)
self.assertNotAlmostEqual(ss[1], 0.0)
self.assertNotAlmostEqual(ss[2], 0.0)
self.assertAlmostEqual(z[3], 1.0)
self.assertAlmostEqual(ss[3], 0.0)
z, ss = ok.execute('grid', np.arange(0., 4., 0.1), np.arange(0., 4., 0.1), backend='vectorized')
self.assertAlmostEqual(z[10, 10], 2.)
self.assertAlmostEqual(ss[10, 10], 0.)
self.assertAlmostEqual(z[20, 20], 1.5)
self.assertAlmostEqual(ss[20, 20], 0.)
self.assertAlmostEqual(z[30, 30], 1.0)
self.assertAlmostEqual(ss[30, 30], 0.)
self.assertNotAlmostEqual(ss[0, 0], 0.0)
self.assertNotAlmostEqual(ss[15, 15], 0.0)
self.assertNotAlmostEqual(ss[10, 0], 0.0)
self.assertNotAlmostEqual(ss[0, 10], 0.0)
self.assertNotAlmostEqual(ss[20, 10], 0.0)
self.assertNotAlmostEqual(ss[10, 20], 0.0)
self.assertNotAlmostEqual(ss[30, 20], 0.0)
self.assertNotAlmostEqual(ss[20, 30], 0.0)
z, ss = ok.execute('grid', np.arange(0., 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend='vectorized')
self.assertTrue(np.any(ss <= 1e-15))
self.assertFalse(np.any(ss[:9, :30] <= 1e-15))
self.assertFalse(np.allclose(z[:9, :30], 0.))
z, ss = ok.execute('grid', np.arange(0., 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend='vectorized')
self.assertFalse(np.any(ss <= 1e-15))
z, ss = ok.execute('masked', np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25), backend='vectorized',
mask=np.asarray(np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.))
self.assertTrue(ss[2, 5] <= 1e-15)
self.assertFalse(np.allclose(ss, 0.))
z, ss = ok.execute('grid', [1., 2., 3.], [1., 2., 3.], backend='loop')
self.assertAlmostEqual(z[0, 0], 2.0)
self.assertAlmostEqual(ss[0, 0], 0.0)
self.assertAlmostEqual(z[1, 1], 1.5)
self.assertAlmostEqual(ss[1, 1], 0.0)
self.assertAlmostEqual(z[2, 2], 1.0)
self.assertAlmostEqual(ss[2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 0], 0.0)
z, ss = ok.execute('points', [1., 2., 3., 3.], [2., 1., 1., 3.], backend='loop')
self.assertNotAlmostEqual(ss[0], 0.0)
self.assertNotAlmostEqual(ss[1], 0.0)
self.assertNotAlmostEqual(ss[2], 0.0)
self.assertAlmostEqual(z[3], 1.0)
self.assertAlmostEqual(ss[3], 0.0)
z, ss = ok.execute('grid', np.arange(0., 4., 0.1), np.arange(0., 4., 0.1), backend='loop')
self.assertAlmostEqual(z[10, 10], 2.)
self.assertAlmostEqual(ss[10, 10], 0.)
self.assertAlmostEqual(z[20, 20], 1.5)
self.assertAlmostEqual(ss[20, 20], 0.)
self.assertAlmostEqual(z[30, 30], 1.0)
self.assertAlmostEqual(ss[30, 30], 0.)
self.assertNotAlmostEqual(ss[0, 0], 0.0)
self.assertNotAlmostEqual(ss[15, 15], 0.0)
self.assertNotAlmostEqual(ss[10, 0], 0.0)
self.assertNotAlmostEqual(ss[0, 10], 0.0)
self.assertNotAlmostEqual(ss[20, 10], 0.0)
self.assertNotAlmostEqual(ss[10, 20], 0.0)
self.assertNotAlmostEqual(ss[30, 20], 0.0)
self.assertNotAlmostEqual(ss[20, 30], 0.0)
z, ss = ok.execute('grid', np.arange(0., 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend='loop')
self.assertTrue(np.any(ss <= 1e-15))
self.assertFalse(np.any(ss[:9, :30] <= 1e-15))
self.assertFalse(np.allclose(z[:9, :30], 0.))
z, ss = ok.execute('grid', np.arange(0., 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend='loop')
self.assertFalse(np.any(ss <= 1e-15))
z, ss = ok.execute('masked', np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25), backend='loop',
mask=np.asarray(np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.))
self.assertTrue(ss[2, 5] <= 1e-15)
self.assertFalse(np.allclose(ss, 0.))
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2])
z, ss = uk.execute('grid', [1., 2., 3.], [1., 2., 3.], backend='vectorized')
self.assertAlmostEqual(z[0, 0], 2.0)
self.assertAlmostEqual(ss[0, 0], 0.0)
self.assertAlmostEqual(z[1, 1], 1.5)
self.assertAlmostEqual(ss[1, 1], 0.0)
self.assertAlmostEqual(z[2, 2], 1.0)
self.assertAlmostEqual(ss[2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 0], 0.0)
z, ss = uk.execute('points', [1., 2., 3., 3.], [2., 1., 1., 3.], backend='vectorized')
self.assertNotAlmostEqual(ss[0], 0.0)
self.assertNotAlmostEqual(ss[1], 0.0)
self.assertNotAlmostEqual(ss[2], 0.0)
self.assertAlmostEqual(z[3], 1.0)
self.assertAlmostEqual(ss[3], 0.0)
z, ss = uk.execute('grid', np.arange(0., 4., 0.1), np.arange(0., 4., 0.1), backend='vectorized')
self.assertAlmostEqual(z[10, 10], 2.)
self.assertAlmostEqual(ss[10, 10], 0.)
self.assertAlmostEqual(z[20, 20], 1.5)
self.assertAlmostEqual(ss[20, 20], 0.)
self.assertAlmostEqual(z[30, 30], 1.0)
self.assertAlmostEqual(ss[30, 30], 0.)
self.assertNotAlmostEqual(ss[0, 0], 0.0)
self.assertNotAlmostEqual(ss[15, 15], 0.0)
self.assertNotAlmostEqual(ss[10, 0], 0.0)
self.assertNotAlmostEqual(ss[0, 10], 0.0)
self.assertNotAlmostEqual(ss[20, 10], 0.0)
self.assertNotAlmostEqual(ss[10, 20], 0.0)
self.assertNotAlmostEqual(ss[30, 20], 0.0)
self.assertNotAlmostEqual(ss[20, 30], 0.0)
z, ss = uk.execute('grid', np.arange(0., 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend='vectorized')
self.assertTrue(np.any(ss <= 1e-15))
self.assertFalse(np.any(ss[:9, :30] <= 1e-15))
self.assertFalse(np.allclose(z[:9, :30], 0.))
z, ss = uk.execute('grid', np.arange(0., 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend='vectorized')
self.assertFalse(np.any(ss <= 1e-15))
z, ss = uk.execute('masked', np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25), backend='vectorized',
mask=np.asarray(np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.))
self.assertTrue(ss[2, 5] <= 1e-15)
self.assertFalse(np.allclose(ss, 0.))
z, ss = uk.execute('grid', [1., 2., 3.], [1., 2., 3.], backend='loop')
self.assertAlmostEqual(z[0, 0], 2.0)
self.assertAlmostEqual(ss[0, 0], 0.0)
self.assertAlmostEqual(z[1, 1], 1.5)
self.assertAlmostEqual(ss[1, 1], 0.0)
self.assertAlmostEqual(z[2, 2], 1.0)
self.assertAlmostEqual(ss[2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 0], 0.0)
z, ss = uk.execute('points', [1., 2., 3., 3.], [2., 1., 1., 3.], backend='loop')
self.assertNotAlmostEqual(ss[0], 0.0)
self.assertNotAlmostEqual(ss[1], 0.0)
self.assertNotAlmostEqual(ss[2], 0.0)
self.assertAlmostEqual(z[3], 1.0)
self.assertAlmostEqual(ss[3], 0.0)
z, ss = uk.execute('grid', np.arange(0., 4., 0.1), np.arange(0., 4., 0.1), backend='loop')
self.assertAlmostEqual(z[10, 10], 2.)
self.assertAlmostEqual(ss[10, 10], 0.)
self.assertAlmostEqual(z[20, 20], 1.5)
self.assertAlmostEqual(ss[20, 20], 0.)
self.assertAlmostEqual(z[30, 30], 1.0)
self.assertAlmostEqual(ss[30, 30], 0.)
self.assertNotAlmostEqual(ss[0, 0], 0.0)
self.assertNotAlmostEqual(ss[15, 15], 0.0)
self.assertNotAlmostEqual(ss[10, 0], 0.0)
self.assertNotAlmostEqual(ss[0, 10], 0.0)
self.assertNotAlmostEqual(ss[20, 10], 0.0)
self.assertNotAlmostEqual(ss[10, 20], 0.0)
self.assertNotAlmostEqual(ss[30, 20], 0.0)
self.assertNotAlmostEqual(ss[20, 30], 0.0)
z, ss = uk.execute('grid', np.arange(0., 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend='loop')
self.assertTrue(np.any(ss <= 1e-15))
self.assertFalse(np.any(ss[:9, :30] <= 1e-15))
self.assertFalse(np.allclose(z[:9, :30], 0.))
z, ss = uk.execute('grid', np.arange(0., 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend='loop')
self.assertFalse(np.any(ss <= 1e-15))
z, ss = uk.execute('masked', np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25), backend='loop',
mask=np.asarray(np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.))
self.assertTrue(ss[2, 5] <= 1e-15)
self.assertFalse(np.allclose(ss, 0.))
z, ss = core.krige(data[:, 0], data[:, 1], data[:, 2], (1., 1.),
variogram_models.linear_variogram_model, [1.0, 1.0],
'euclidean')
self.assertAlmostEqual(z, 2.)
self.assertAlmostEqual(ss, 0.)
z, ss = core.krige(data[:, 0], data[:, 1], data[:, 2], (1., 2.),
variogram_models.linear_variogram_model, [1.0, 1.0],
'euclidean')
self.assertNotAlmostEqual(ss, 0.)
data = np.zeros((50, 3))
x, y = np.meshgrid(np.arange(0., 10., 1.), np.arange(0., 10., 2.))
data[:, 0] = np.ravel(x)
data[:, 1] = np.ravel(y)
data[:, 2] = np.ravel(x) * np.ravel(y)
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2],
variogram_model='linear', variogram_parameters=[100.0, 1.0])
z, ss = ok.execute('grid', np.arange(0., 10., 1.), np.arange(0., 10., 2.), backend='vectorized')
self.assertTrue(np.allclose(np.ravel(z), data[:, 2]))
self.assertTrue(np.allclose(ss, 0.))
z, ss = ok.execute('grid', np.arange(0.5, 10., 1.), np.arange(0.5, 10., 2.), backend='vectorized')
self.assertFalse(np.allclose(np.ravel(z), data[:, 2]))
self.assertFalse(np.allclose(ss, 0.))
z, ss = ok.execute('grid', np.arange(0., 10., 1.), np.arange(0., 10., 2.), backend='loop')
self.assertTrue(np.allclose(np.ravel(z), data[:, 2]))
self.assertTrue(np.allclose(ss, 0.))
z, ss = ok.execute('grid', np.arange(0.5, 10., 1.), np.arange(0.5, 10., 2.), backend='loop')
self.assertFalse(np.allclose(np.ravel(z), data[:, 2]))
self.assertFalse(np.allclose(ss, 0.))
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2],
variogram_model='linear', variogram_parameters=[100.0, 1.0])
z, ss = uk.execute('grid', np.arange(0., 10., 1.), np.arange(0., 10., 2.), backend='vectorized')
self.assertTrue(np.allclose(np.ravel(z), data[:, 2]))
self.assertTrue(np.allclose(ss, 0.))
z, ss = uk.execute('grid', np.arange(0.5, 10., 1.), np.arange(0.5, 10., 2.), backend='vectorized')
self.assertFalse(np.allclose(np.ravel(z), data[:, 2]))
self.assertFalse(np.allclose(ss, 0.))
z, ss = uk.execute('grid', np.arange(0., 10., 1.), np.arange(0., 10., 2.), backend='loop')
self.assertTrue(np.allclose(np.ravel(z), data[:, 2]))
self.assertTrue(np.allclose(ss, 0.))
z, ss = uk.execute('grid', np.arange(0.5, 10., 1.), np.arange(0.5, 10., 2.), backend='loop')
self.assertFalse(np.allclose(np.ravel(z), data[:, 2]))
self.assertFalse(np.allclose(ss, 0.))
def test_custom_variogram(self):
func = lambda params, dist: params[0] * np.log10(dist + params[1]) + params[2]
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='mrow')
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='custom')
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='custom', variogram_function=0)
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='custom', variogram_function=func)
uk = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='custom', variogram_parameters=[1., 1., 1.], variogram_function=func)
self.assertAlmostEqual(uk.variogram_function([1., 1., 1.], 1.), 1.3010, 4)
uk = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear')
uk.update_variogram_model('custom', variogram_parameters=[1., 1., 1.], variogram_function=func)
self.assertAlmostEqual(uk.variogram_function([1., 1., 1.], 1.), 1.3010, 4)
self.assertRaises(ValueError, OrdinaryKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='mrow')
self.assertRaises(ValueError, OrdinaryKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='custom')
self.assertRaises(ValueError, OrdinaryKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='custom', variogram_function=0)
self.assertRaises(ValueError, OrdinaryKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='custom', variogram_function=func)
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='custom', variogram_parameters=[1., 1., 1.], variogram_function=func)
self.assertAlmostEqual(ok.variogram_function([1., 1., 1.], 1.), 1.3010, 4)
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear')
ok.update_variogram_model('custom', variogram_parameters=[1., 1., 1.], variogram_function=func)
self.assertAlmostEqual(ok.variogram_function([1., 1., 1.], 1.), 1.3010, 4)
def test_ok3d(self):
# Test to compare K3D results to those obtained using KT3D_H2O.
# (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater, vol. 47, no. 4, 580-586.)
k3d = OrdinaryKriging3D(self.test_data[:, 0], self.test_data[:, 1], np.zeros(self.test_data[:, 1].shape),
self.test_data[:, 2], variogram_model='exponential',
variogram_parameters=[500.0, 3000.0, 0.0])
k, ss = k3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy, np.array([0.]), backend='vectorized')
self.assertTrue(np.allclose(k, self.ok_test_answer))
k, ss = k3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy, np.array([0.]), backend='loop')
self.assertTrue(np.allclose(k, self.ok_test_answer))
# Test to compare K3D results to those obtained using KT3D.
data = np.genfromtxt('./test_data/test3d_data.txt', skip_header=1)
ans = np.genfromtxt('./test_data/test3d_answer.txt')
ans_z = ans[:, 0].reshape((10, 10, 10))
ans_ss = ans[:, 1].reshape((10, 10, 10))
k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3],
variogram_model='linear', variogram_parameters=[1., 0.1])
k, ss = k3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.), backend='vectorized')
self.assertTrue(np.allclose(k, ans_z, rtol=1e-3))
self.assertTrue(np.allclose(ss, ans_ss, rtol=1e-3))
k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3],
variogram_model='linear', variogram_parameters=[1., 0.1])
k, ss = k3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.), backend='loop')
self.assertTrue(np.allclose(k, ans_z, rtol=1e-3))
self.assertTrue(np.allclose(ss, ans_ss, rtol=1e-3))
def test_ok3d_moving_window(self):
# Test to compare K3D results to those obtained using KT3D.
data = np.genfromtxt('./test_data/test3d_data.txt', skip_header=1)
ans = np.genfromtxt('./test_data/test3d_answer.txt')
ans_z = ans[:, 0].reshape((10, 10, 10))
ans_ss = ans[:, 1].reshape((10, 10, 10))
k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3],
variogram_model='linear', variogram_parameters=[1., 0.1])
k, ss = k3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.), backend='loop', n_closest_points=10)
self.assertTrue(np.allclose(k, ans_z, rtol=1e-3))
self.assertTrue(np.allclose(ss, ans_ss, rtol=1e-3))
def test_ok3d_uk3d_and_backends_produce_same_results(self):
ok3d = OrdinaryKriging3D(self.test_data[:, 0], self.test_data[:, 1], np.zeros(self.test_data[:, 1].shape),
self.test_data[:, 2], variogram_model='exponential',
variogram_parameters=[500.0, 3000.0, 0.0])
ok_v, oss_v = ok3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy, np.array([0.]), backend='vectorized')
ok_l, oss_l = ok3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy, np.array([0.]), backend='loop')
uk3d = UniversalKriging3D(self.test_data[:, 0], self.test_data[:, 1], np.zeros(self.test_data[:, 1].shape),
self.test_data[:, 2], variogram_model='exponential',
variogram_parameters=[500., 3000., 0.])
uk_v, uss_v = uk3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy, np.array([0.]), backend='vectorized')
self.assertTrue(np.allclose(uk_v, ok_v))
uk_l, uss_l = uk3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy, np.array([0.]), backend='loop')
self.assertTrue(np.allclose(uk_l, ok_l))
self.assertTrue(np.allclose(uk_l, uk_v))
self.assertTrue(np.allclose(uss_l, uss_v))
data = np.genfromtxt('./test_data/test3d_data.txt', skip_header=1)
ok3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3],
variogram_model='linear', variogram_parameters=[1., 0.1])
ok_v, oss_v = ok3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.), backend='vectorized')
ok_l, oss_l = ok3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.), backend='loop')
uk3d = UniversalKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3],
variogram_model='linear', variogram_parameters=[1., 0.1])
uk_v, uss_v = uk3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.), backend='vectorized')
self.assertTrue(np.allclose(uk_v, ok_v))
self.assertTrue(np.allclose(uss_v, oss_v))
uk_l, uss_l = uk3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.), backend='loop')
self.assertTrue(np.allclose(uk_l, ok_l))
self.assertTrue(np.allclose(uss_l, oss_l))
self.assertTrue(np.allclose(uk_l, uk_v))
self.assertTrue(np.allclose(uss_l, uss_v))
def test_ok3d_update_variogram_model(self):
self.assertRaises(ValueError, OrdinaryKriging3D, self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3], variogram_model='blurg')
k3d = OrdinaryKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3])
variogram_model = k3d.variogram_model
variogram_parameters = k3d.variogram_model_parameters
anisotropy_scaling_y = k3d.anisotropy_scaling_y
anisotropy_scaling_z = k3d.anisotropy_scaling_z
anisotropy_angle_x = k3d.anisotropy_angle_x
anisotropy_angle_y = k3d.anisotropy_angle_y
anisotropy_angle_z = k3d.anisotropy_angle_z
self.assertRaises(ValueError, k3d.update_variogram_model, 'blurg')
k3d.update_variogram_model('power', anisotropy_scaling_y=3.0, anisotropy_scaling_z=3.0,
anisotropy_angle_x=45.0, anisotropy_angle_y=45.0, anisotropy_angle_z=45.0)
self.assertFalse(variogram_model == k3d.variogram_model)
self.assertFalse(variogram_parameters == k3d.variogram_model_parameters)
self.assertFalse(anisotropy_scaling_y == k3d.anisotropy_scaling_y)
self.assertFalse(anisotropy_scaling_z == k3d.anisotropy_scaling_z)
self.assertFalse(anisotropy_angle_x == k3d.anisotropy_angle_x)
self.assertFalse(anisotropy_angle_y == k3d.anisotropy_angle_y)
self.assertFalse(anisotropy_angle_z == k3d.anisotropy_angle_z)
def test_uk3d_update_variogram_model(self):
self.assertRaises(ValueError, UniversalKriging3D, self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3], variogram_model='blurg')
uk3d = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3])
variogram_model = uk3d.variogram_model
variogram_parameters = uk3d.variogram_model_parameters
anisotropy_scaling_y = uk3d.anisotropy_scaling_y
anisotropy_scaling_z = uk3d.anisotropy_scaling_z
anisotropy_angle_x = uk3d.anisotropy_angle_x
anisotropy_angle_y = uk3d.anisotropy_angle_y
anisotropy_angle_z = uk3d.anisotropy_angle_z
self.assertRaises(ValueError, uk3d.update_variogram_model, 'blurg')
uk3d.update_variogram_model('power', anisotropy_scaling_y=3.0, anisotropy_scaling_z=3.0,
anisotropy_angle_x=45.0, anisotropy_angle_y=45.0, anisotropy_angle_z=45.0)
self.assertFalse(variogram_model == uk3d.variogram_model)
self.assertFalse(variogram_parameters == uk3d.variogram_model_parameters)
self.assertFalse(anisotropy_scaling_y == uk3d.anisotropy_scaling_y)
self.assertFalse(anisotropy_scaling_z == uk3d.anisotropy_scaling_z)
self.assertFalse(anisotropy_angle_x == uk3d.anisotropy_angle_x)
self.assertFalse(anisotropy_angle_y == uk3d.anisotropy_angle_y)
self.assertFalse(anisotropy_angle_z == uk3d.anisotropy_angle_z)
def test_ok3d_backends_produce_same_result(self):
k3d = OrdinaryKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
self.simple_data_3d[:, 3], variogram_model='linear')
k_k3d_v, ss_k3d_v = k3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
backend='vectorized')
k_k3d_l, ss_k3d_l = k3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
backend='loop')
self.assertTrue(np.allclose(k_k3d_v, k_k3d_l))
self.assertTrue(np.allclose(ss_k3d_v, ss_k3d_l))
def test_ok3d_execute(self):
k3d = OrdinaryKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3])
self.assertRaises(ValueError, k3d.execute, 'blurg', self.simple_gridx_3d,
self.simple_gridy_3d, self.simple_gridz_3d)
k, ss = k3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='vectorized')
shape = (self.simple_gridz_3d.size, self.simple_gridy_3d.size, self.simple_gridx_3d.size)
self.assertEqual(k.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(k), np.amin(k))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(k))
k, ss = k3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='loop')
shape = (self.simple_gridz_3d.size, self.simple_gridy_3d.size, self.simple_gridx_3d.size)
self.assertEqual(k.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(k), np.amin(k))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(k))
self.assertRaises(IOError, k3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='vectorized')
mask = np.array([True, False])
self.assertRaises(ValueError, k3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, mask=mask, backend='vectorized')
k, ss = k3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d, backend='vectorized')
self.assertTrue(np.ma.is_masked(k))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(k[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
z, ss = k3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d.T, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
self.assertRaises(IOError, k3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='loop')
mask = np.array([True, False])
self.assertRaises(ValueError, k3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, mask=mask, backend='loop')
k, ss = k3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d, backend='loop')
self.assertTrue(np.ma.is_masked(k))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(k[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
z, ss = k3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d.T, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
self.assertRaises(ValueError, k3d.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
np.array([1.0]), backend='vectorized')
k, ss = k3d.execute('points', self.simple_gridx_3d[0], self.simple_gridy_3d[0],
self.simple_gridz_3d[0], backend='vectorized')
self.assertEqual(k.shape, (1,))
self.assertEqual(ss.shape, (1,))
self.assertRaises(ValueError, k3d.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
np.array([1.0]), backend='loop')
k, ss = k3d.execute('points', self.simple_gridx_3d[0], self.simple_gridy_3d[0],
self.simple_gridz_3d[0], backend='loop')
self.assertEqual(k.shape, (1,))
self.assertEqual(ss.shape, (1,))
data = np.zeros((125, 4))
z, y, x = np.meshgrid(np.arange(0., 5., 1.), np.arange(0., 5., 1.), np.arange(0., 5., 1.))
data[:, 0] = np.ravel(x)
data[:, 1] = np.ravel(y)
data[:, 2] = np.ravel(z)
data[:, 3] = np.ravel(z)
k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model='linear')
k, ss = k3d.execute('grid', np.arange(2., 3., 0.1), np.arange(2., 3., 0.1),
np.arange(0., 4., 1.), backend='vectorized')
self.assertTrue(np.allclose(k[0, :, :], 0., atol=0.01))
self.assertTrue(np.allclose(k[1, :, :], 1., rtol=1.e-2))
self.assertTrue(np.allclose(k[2, :, :], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[3, :, :], 3., rtol=1.e-2))
k, ss = k3d.execute('grid', np.arange(2., 3., 0.1), np.arange(2., 3., 0.1),
np.arange(0., 4., 1.), backend='loop')
self.assertTrue(np.allclose(k[0, :, :], 0., atol=0.01))
self.assertTrue(np.allclose(k[1, :, :], 1., rtol=1.e-2))
self.assertTrue(np.allclose(k[2, :, :], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[3, :, :], 3., rtol=1.e-2))
k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model='linear')
k, ss = k3d.execute('points', [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1., 2., 3.], backend='vectorized')
self.assertTrue(np.allclose(k[0], 1., atol=0.01))
self.assertTrue(np.allclose(k[1], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[2], 3., rtol=1.e-2))
k, ss = k3d.execute('points', [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1., 2., 3.], backend='loop')
self.assertTrue(np.allclose(k[0], 1., atol=0.01))
self.assertTrue(np.allclose(k[1], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[2], 3., rtol=1.e-2))
def test_uk3d_execute(self):
uk3d = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3])
self.assertRaises(ValueError, uk3d.execute, 'blurg', self.simple_gridx_3d,
self.simple_gridy_3d, self.simple_gridz_3d)
k, ss = uk3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='vectorized')
shape = (self.simple_gridz_3d.size, self.simple_gridy_3d.size, self.simple_gridx_3d.size)
self.assertEqual(k.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(k), np.amin(k))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(k))
k, ss = uk3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='loop')
shape = (self.simple_gridz_3d.size, self.simple_gridy_3d.size, self.simple_gridx_3d.size)
self.assertEqual(k.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(k), np.amin(k))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(k))
self.assertRaises(IOError, uk3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='vectorized')
mask = np.array([True, False])
self.assertRaises(ValueError, uk3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, mask=mask, backend='vectorized')
k, ss = uk3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d, backend='vectorized')
self.assertTrue(np.ma.is_masked(k))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(k[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
z, ss = uk3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d.T, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
self.assertRaises(IOError, uk3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, backend='loop')
mask = np.array([True, False])
self.assertRaises(ValueError, uk3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, mask=mask, backend='loop')
k, ss = uk3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d, backend='loop')
self.assertTrue(np.ma.is_masked(k))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(k[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
z, ss = uk3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
mask=self.mask_3d.T, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0, 0], np.ma.masked)
self.assertIs(ss[0, 0, 0], np.ma.masked)
self.assertRaises(ValueError, uk3d.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
np.array([1.0]), backend='vectorized')
k, ss = uk3d.execute('points', self.simple_gridx_3d[0], self.simple_gridy_3d[0],
self.simple_gridz_3d[0], backend='vectorized')
self.assertEqual(k.shape, (1,))
self.assertEqual(ss.shape, (1,))
self.assertRaises(ValueError, uk3d.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
np.array([1.0]), backend='loop')
k, ss = uk3d.execute('points', self.simple_gridx_3d[0], self.simple_gridy_3d[0],
self.simple_gridz_3d[0], backend='loop')
self.assertEqual(k.shape, (1,))
self.assertEqual(ss.shape, (1,))
data = np.zeros((125, 4))
z, y, x = np.meshgrid(np.arange(0., 5., 1.), np.arange(0., 5., 1.), np.arange(0., 5., 1.))
data[:, 0] = np.ravel(x)
data[:, 1] = np.ravel(y)
data[:, 2] = np.ravel(z)
data[:, 3] = np.ravel(z)
k3d = UniversalKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model='linear')
k, ss = k3d.execute('grid', np.arange(2., 3., 0.1), np.arange(2., 3., 0.1),
np.arange(0., 4., 1.), backend='vectorized')
self.assertTrue(np.allclose(k[0, :, :], 0., atol=0.01))
self.assertTrue(np.allclose(k[1, :, :], 1., rtol=1.e-2))
self.assertTrue(np.allclose(k[2, :, :], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[3, :, :], 3., rtol=1.e-2))
k, ss = k3d.execute('grid', np.arange(2., 3., 0.1), np.arange(2., 3., 0.1),
np.arange(0., 4., 1.), backend='loop')
self.assertTrue(np.allclose(k[0, :, :], 0., atol=0.01))
self.assertTrue(np.allclose(k[1, :, :], 1., rtol=1.e-2))
self.assertTrue(np.allclose(k[2, :, :], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[3, :, :], 3., rtol=1.e-2))
k3d = UniversalKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model='linear')
k, ss = k3d.execute('points', [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1., 2., 3.], backend='vectorized')
self.assertTrue(np.allclose(k[0], 1., atol=0.01))
self.assertTrue(np.allclose(k[1], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[2], 3., rtol=1.e-2))
k, ss = k3d.execute('points', [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1., 2., 3.], backend='loop')
self.assertTrue(np.allclose(k[0], 1., atol=0.01))
self.assertTrue(np.allclose(k[1], 2., rtol=1.e-2))
self.assertTrue(np.allclose(k[2], 3., rtol=1.e-2))
def test_force_exact_3d(self):
k3d = OrdinaryKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
self.simple_data_3d[:, 3], variogram_model='linear')
k, ss = k3d.execute('grid', [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend='vectorized')
self.assertAlmostEqual(k[2, 0, 0], 0.9)
self.assertAlmostEqual(ss[2, 0, 0], 0.0)
self.assertAlmostEqual(k[0, 2, 0], 0.9)
self.assertAlmostEqual(ss[0, 2, 0], 0.0)
self.assertAlmostEqual(k[1, 2, 2], 0.7)
self.assertAlmostEqual(ss[1, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 0, 0], 0.0)
k, ss = k3d.execute('grid', [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend='loop')
self.assertAlmostEqual(k[2, 0, 0], 0.9)
self.assertAlmostEqual(ss[2, 0, 0], 0.0)
self.assertAlmostEqual(k[0, 2, 0], 0.9)
self.assertAlmostEqual(ss[0, 2, 0], 0.0)
self.assertAlmostEqual(k[1, 2, 2], 0.7)
self.assertAlmostEqual(ss[1, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 0, 0], 0.0)
k3d = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
self.simple_data_3d[:, 3], variogram_model='linear')
k, ss = k3d.execute('grid', [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend='vectorized')
self.assertAlmostEqual(k[2, 0, 0], 0.9)
self.assertAlmostEqual(ss[2, 0, 0], 0.0)
self.assertAlmostEqual(k[0, 2, 0], 0.9)
self.assertAlmostEqual(ss[0, 2, 0], 0.0)
self.assertAlmostEqual(k[1, 2, 2], 0.7)
self.assertAlmostEqual(ss[1, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 0, 0], 0.0)
k, ss = k3d.execute('grid', [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend='loop')
self.assertAlmostEqual(k[2, 0, 0], 0.9)
self.assertAlmostEqual(ss[2, 0, 0], 0.0)
self.assertAlmostEqual(k[0, 2, 0], 0.9)
self.assertAlmostEqual(ss[0, 2, 0], 0.0)
self.assertAlmostEqual(k[1, 2, 2], 0.7)
self.assertAlmostEqual(ss[1, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 0, 0], 0.0)
def test_uk3d_specified_drift(self):
zg, yg, xg = np.meshgrid(self.simple_gridz_3d, self.simple_gridy_3d, self.simple_gridx_3d, indexing='ij')
self.assertRaises(ValueError, UniversalKriging3D, self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3],
variogram_model='linear', drift_terms=['specified'])
self.assertRaises(TypeError, UniversalKriging3D, self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3], variogram_model='linear',
drift_terms=['specified'], specified_drift=self.simple_data_3d[:, 0])
self.assertRaises(ValueError, UniversalKriging3D, self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3], variogram_model='linear',
drift_terms=['specified'], specified_drift=[self.simple_data_3d[:2, 0]])
uk_spec = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
self.simple_data_3d[:, 3], variogram_model='linear', drift_terms=['specified'],
specified_drift=[self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2]])
self.assertRaises(ValueError, uk_spec.execute, 'grid', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, specified_drift_arrays=[self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d])
self.assertRaises(TypeError, uk_spec.execute, 'grid', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, specified_drift_arrays=self.simple_gridx_3d)
self.assertRaises(ValueError, uk_spec.execute, 'grid', self.simple_gridx_3d, self.simple_gridy_3d,
self.simple_gridz_3d, specified_drift_arrays=[zg])
z_spec, ss_spec = uk_spec.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
specified_drift_arrays=[xg, yg, zg])
uk_lin = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
self.simple_data_3d[:, 3], variogram_model='linear',
drift_terms=['regional_linear'])
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d)
self.assertTrue(np.allclose(z_spec, z_lin))
self.assertTrue(np.allclose(ss_spec, ss_lin))
def test_uk3d_functional_drift(self):
func_x = lambda x, y, z: x
func_y = lambda x, y, z: y
func_z = lambda x, y, z: z
self.assertRaises(ValueError, UniversalKriging3D, self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3],
variogram_model='linear', drift_terms=['functional'])
self.assertRaises(TypeError, UniversalKriging3D, self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
self.simple_data_3d[:, 2], self.simple_data_3d[:, 3], variogram_model='linear',
drift_terms=['functional'], functional_drift=func_x)
uk_func = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
self.simple_data_3d[:, 3], variogram_model='linear', drift_terms=['functional'],
functional_drift=[func_x, func_y, func_z])
z_func, ss_func = uk_func.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d)
uk_lin = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
self.simple_data_3d[:, 3], variogram_model='linear',
drift_terms=['regional_linear'])
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d)
self.assertTrue(np.allclose(z_func, z_lin))
self.assertTrue(np.allclose(ss_func, ss_lin))
def test_geometric_code(self):
# Create selected points distributed across the sphere:
N = 4
lon = np.array([7.0, 7.0, 187.0, 73.231])
lat = np.array([13.23, 13.2301, -13.23, -79.3])
# For these reference points, the distance matrix
# has been calculated using geopy (v. 1.11.0) as follows:
# >>> from geopy.distance import great_circle
# >>> g = great_circle(radius=1.0)
# >>> d = np.zeros((N,N), dtype=float)
# >>> for i in range(N):
# >>> for j in range(N):
# >>> d[i,j] = g.measure((lat[i],lon[i]),(lat[j],lon[j]))
# >>> d *= 180.0/np.pi
# From that distance matrix, the reference values have been obtained.
d_ref = np.array(
[[0.0, 1e-4, 180.0, 98.744848317171801],
[1e-4, 0.0, 179.9999, 98.744946828324345],
[180.0, 179.9999, 0.0, 81.255151682828213],
[98.744848317171801, 98.744946828324345, 81.255151682828213, 0.0]]
)
# Calculate distance matrix using the PyKrige code:
d = np.zeros((N, N))
for i in range(N):
for j in range(N):
d[i, j] = core.great_circle_distance(lon[i], lat[i], lon[j], lat[j])
# Test against reference values:
np.testing.assert_allclose(d, d_ref)
# Test general features:
np.testing.assert_allclose(d[np.eye(N, dtype=bool)], 0.0)
np.testing.assert_equal(d >= 0.0, np.ones((N, N), dtype=bool))
np.testing.assert_allclose(d, d.T)
np.testing.assert_equal(d <= 180.0, np.ones((N, N), dtype=bool))
# Test great_circle_distance and euclid3_to_great_circle against each other:
lon_ref = lon
lat_ref = lat
for i in range(len(lon_ref)):
lon, lat = np.meshgrid(np.linspace(0, 360.0, 20),
np.linspace(-90.0, 90.0, 20))
dx = np.cos(np.pi/180.0*lon)*np.cos(np.pi/180.0*lat)- \
np.cos(np.pi/180.0*lon_ref[i])*np.cos(np.pi/180.0*lat_ref[i])
dy = np.sin(np.pi/180.0*lon)*np.cos(np.pi/180.0*lat)- \
np.sin(np.pi/180.0*lon_ref[i])*np.cos(np.pi/180.0*lat_ref[i])
dz = np.sin(np.pi/180.0*lat) - np.sin(np.pi/180.0*lat_ref[i])
np.testing.assert_allclose(core.great_circle_distance(lon_ref[i], lat_ref[i], lon, lat),
core.euclid3_to_great_circle(np.sqrt(dx**2+dy**2+dz**2)), rtol=1e-5)
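# The consistency check above rests on a standard spherical identity: for two
# points on the unit sphere, the Euclidean chord length c and the central angle
# theta (the great-circle distance) satisfy c = 2*sin(theta/2), i.e.
# theta = 2*arcsin(c/2). A minimal illustrative sketch of that relation (plain
# math, not a call into PyKrige itself):
# theta_deg = 40.0
# chord = 2.0 * np.sin(np.radians(theta_deg) / 2.0)
# recovered = np.degrees(2.0 * np.arcsin(chord / 2.0))  # recovers 40.0 degrees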
def test_ok_geometric(self):
# Generate random data:
np.random.seed(89239413)
lon = 360.0*np.random.rand(50, 1)
lat = 180.0*np.random.rand(50, 1) - 90.0
z = np.random.rand(50, 1)
#data = np.concatenate((lon, lat, z), 1)
# Generate grid:
grid_lon = 360.0*np.random.rand(120, 1)
grid_lat = 180.0*np.random.rand(120, 1) - 90.0
# Create ordinary kriging object:
OK = OrdinaryKriging(lon, lat, z, variogram_model='linear', verbose=False,
enable_plotting=False, coordinates_type='geographic')
# Execute on grid:
z, ss = OK.execute('grid', grid_lon, grid_lat)
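# Note: the geographic-coordinates test above only verifies that execution
# completes without raising. A minimal extra sanity check (an illustrative
# sketch, not part of the original test suite) could be:
# self.assertEqual(z.shape, ss.shape)
# self.assertTrue(np.all(np.isfinite(z)))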
@unittest.skipUnless(SKLEARN_INSTALLED, "scikit-learn not installed")
class TestKrige(unittest.TestCase):
@staticmethod
def method_and_variogram():
method = ['ordinary', 'universal', 'ordinary3d', 'universal3d']
variogram_model = ['linear', 'power', 'gaussian', 'spherical',
'exponential']
return product(method, variogram_model)
def test_krige(self):
from pykrige.rk import Krige
from pykrige.rk import threed_krige
from pykrige.compat import GridSearchCV
# dummy data
np.random.seed(1)
X = np.random.randint(0, 400, size=(20, 3)).astype(float)
y = 5 * np.random.rand(20)
for m, v in self.method_and_variogram():
param_dict = {'method': [m], 'variogram_model': [v]}
estimator = GridSearchCV(Krige(),
param_dict,
n_jobs=-1,
iid=False,
pre_dispatch='2*n_jobs',
verbose=False,
cv=5,
)
# run the gridsearch
if m in ['ordinary', 'universal']:
estimator.fit(X=X[:, :2], y=y)
else:
estimator.fit(X=X, y=y)
if hasattr(estimator, 'best_score_'):
if m in threed_krige:
assert estimator.best_score_ > -10.0
else:
assert estimator.best_score_ > -3.0
if hasattr(estimator, 'cv_results_'):
assert estimator.cv_results_['mean_train_score'] > 0
@unittest.skipUnless(SKLEARN_INSTALLED, "scikit-learn not installed")
class TestRegressionKrige(unittest.TestCase):
@staticmethod
def methods():
from sklearn.svm import SVR
from sklearn.linear_model import ElasticNet, Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
krige_methods = ['ordinary', 'universal']
ml_methods = [SVR(C=0.01),
RandomForestRegressor(min_samples_split=5,
n_estimators=50),
LinearRegression(),
Lasso(),
ElasticNet()
]
return product(ml_methods, krige_methods)
def test_krige(self):
from pykrige.rk import RegressionKriging
from pykrige.compat import train_test_split
from itertools import product
np.random.seed(1)
x = np.linspace(-1., 1., 100)
# create a feature matrix with 5 features
X = np.tile(x, reps=(5, 1)).T
y = 1 + 5*X[:, 0] - 2*X[:, 1] - 2*X[:, 2] + 3*X[:, 3] + 4*X[:, 4] + \
2*(np.random.rand(100) - 0.5)
# create lat/lon array
lon = np.linspace(-180., 180.0, 10)
lat = np.linspace(-90., 90., 10)
lon_lat = np.array(list(product(lon, lat)))
X_train, X_test, y_train, y_test, lon_lat_train, lon_lat_test = \
train_test_split(X, y, lon_lat, train_size=0.7, random_state=10)
for ml_model, krige_method in self.methods():
reg_kr_model = RegressionKriging(regression_model=ml_model,
method=krige_method,
n_closest_points=2)
reg_kr_model.fit(X_train, lon_lat_train, y_train)
assert reg_kr_model.score(X_test, lon_lat_test, y_test) > 0.25
def test_krige_housing(self):
from pykrige.rk import RegressionKriging
from pykrige.compat import train_test_split
from sklearn.datasets import fetch_california_housing
housing = fetch_california_housing()
# take only first 1000
p = housing['data'][:1000, :-2]
x = housing['data'][:1000, -2:]
target = housing['target'][:1000]
p_train, p_test, y_train, y_test, x_train, x_test = \
train_test_split(p, target, x, train_size=0.7,
random_state=10)
for ml_model, krige_method in self.methods():
reg_kr_model = RegressionKriging(regression_model=ml_model,
method=krige_method,
n_closest_points=2)
reg_kr_model.fit(p_train, x_train, y_train)
if krige_method == 'ordinary':
assert reg_kr_model.score(p_test, x_test, y_test) > 0.5
else:
assert reg_kr_model.score(p_test, x_test, y_test) > 0.0
if __name__ == '__main__':
unittest.main()
| basaks/PyKrige | pykrige/test.py | Python | bsd-3-clause | 91,554 | ["Gaussian"] | e2f92c283a90e45d1ebc80efbb618353c1f1ed7637b845ea2ced788726def630 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*****************
espressopp.Real3D
*****************
.. function:: espressopp.__Real3D(\*args)
:param \*args:
:type \*args:
.. function:: espressopp.__Real3D.x(v)
:param v:
:type v:
:rtype:
.. function:: espressopp.__Real3D.y(v)
:param v:
:type v:
:rtype:
.. function:: espressopp.__Real3D.z(v)
:param v:
:type v:
:rtype:
.. function:: espressopp.toReal3DFromVector(\*args)
:param \*args:
:type \*args:
.. function:: espressopp.toReal3D(\*args)
:param \*args:
:type \*args:
"""
from _espressopp import Real3D
from espressopp import esutil
# This injects additional methods into the Real3D class and pulls it
# into this module
class __Real3D(Real3D) :
__metaclass__ = esutil.ExtendBaseClass
__originit = Real3D.__init__
def __init__(self, *args):
if len(args) == 0:
x = y = z = 0.0
elif len(args) == 1:
arg0 = args[0]
if isinstance(arg0, Real3D):
x = arg0.x
y = arg0.y
z = arg0.z
# test whether the argument is iterable and has 3 elements
elif hasattr(arg0, '__iter__') and len(arg0) == 3:
x, y, z = arg0
elif isinstance(arg0, float) or isinstance(arg0, int):
x = y = z = arg0
else :
raise TypeError("Cannot initialize Real3D from %s" % (args))
elif len(args) == 3 :
x, y, z = args
else :
raise TypeError("Cannot initialize Real3D from %s" % (args))
return self.__originit(x, y, z)
# create setters and getters
@property
def x(self): return self[0]
@x.setter
def x(self, v): self[0] = v
@property
def y(self) : return self[1]
@y.setter
def y(self, v) : self[1] = v
@property
def z(self) : return self[2]
@z.setter
def z(self, v) : self[2] = v
# string conversion
def __str__(self) :
return str((self[0], self[1], self[2]))
def __repr__(self) :
return 'Real3D' + str(self)
def toReal3DFromVector(*args):
"""Try to convert the arguments to a Real3D.
This function will only convert to a Real3D if x, y and z are
specified."""
if len(args) == 1:
arg0 = args[0]
if isinstance(arg0, Real3D):
return arg0
elif hasattr(arg0, '__iter__') and len(arg0) == 3:
return Real3D(*args)
elif len(args) == 3:
return Real3D(*args)
raise TypeError("Specify x, y and z.")
def toReal3D(*args):
"""Try to convert the arguments to a Real3D, returns the argument,
if it is already a Real3D."""
if len(args) == 1 and isinstance(args[0], Real3D):
return args[0]
else:
return Real3D(*args)
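# Illustrative sketch, not part of the original module: a minimal use of the
# extended Real3D class and the conversion helpers defined above. It assumes
# the compiled _espressopp extension imported at the top is available.
if __name__ == "__main__":
    v = Real3D(1.0, 2.0, 3.0)
    print("v = %s, v.x = %s" % (v, v.x))    # -> v = (1.0, 2.0, 3.0), v.x = 1.0
    w = toReal3D((4.0, 5.0, 6.0))           # converts any length-3 iterable
    print("w = %r" % (w,))                  # -> w = Real3D(4.0, 5.0, 6.0)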
|
kkreis/espressopp
|
src/Real3D.py
|
Python
|
gpl-3.0
| 3,765
|
[
"ESPResSo"
] |
c24b98c9b671c142934c441ad6f291261fbb140b190a3c01e1acf2121cbe6407
|
from django.core.exceptions import ValidationError
from django.apps import apps
# from edc_base.audit_trail import AuditTrail
from edc_base.model.models import BaseUuidModel
from edc_consent.models import RequiresConsentMixin
from edc_constants.constants import (
YES, POS, NEG, FAILED_ELIGIBILITY)
from edc_export.models import ExportTrackingFieldsMixin
from edc_offstudy.models import OffStudyMixin
from edc_sync.models import SyncModelMixin
from edc_visit_tracking.constants import VISIT_REASON_NO_FOLLOW_UP_CHOICES, COMPLETED_PROTOCOL_VISIT, LOST_VISIT
from edc_visit_tracking.models import VisitModelMixin, PreviousVisitMixin, CaretakerFieldsMixin
from tshilo_dikotla.choices import VISIT_REASON
from .maternal_consent import MaternalConsent
from .antenatal_enrollment import AntenatalEnrollment
from .maternal_visit_crf_meta_data_mixin import MaternalVisitCrfMetaDataMixin
class MaternalVisit(OffStudyMixin, SyncModelMixin, PreviousVisitMixin, MaternalVisitCrfMetaDataMixin,
RequiresConsentMixin, CaretakerFieldsMixin, VisitModelMixin,
ExportTrackingFieldsMixin, BaseUuidModel):
""" Maternal visit form that links all antenatal/ postnatal follow-up forms """
consent_model = MaternalConsent
off_study_model = ('td_maternal', 'MaternalOffStudy')
death_report_model = ('td_maternal', 'MaternalDeathReport')
# history = AuditTrail()
def __str__(self):
return '{} {} {}'.format(self.appointment.registered_subject.subject_identifier,
self.appointment.registered_subject.first_name,
self.appointment.visit_definition.code)
def save(self, *args, **kwargs):
self.subject_identifier = self.appointment.registered_subject.subject_identifier
if not self.is_eligible():
self.reason = FAILED_ELIGIBILITY
self.subject_failed_eligibility()
super(MaternalVisit, self).save(*args, **kwargs)
def get_visit_reason_choices(self):
return VISIT_REASON
def is_eligible(self):
"""Returns True if participant is either eligible antenataly."""
eligible = False
try:
eligible = self.antenatal_enrollment.is_eligible or self.antenatal_enrollment.pending_ultrasound
except AttributeError:
pass
return eligible
def subject_failed_eligibility(self, exception_cls=None):
exception_cls = exception_cls or ValidationError
if self.is_eligible() and self.reason == FAILED_ELIGIBILITY:
raise exception_cls(
"Subject is eligible. Visit reason cannot be 'Failed Eligibility'")
def get_visit_reason_no_follow_up_choices(self):
""" Returns the visit reasons that do not imply any data
collection; that is, the subject is not available. """
dct = {}
for item in VISIT_REASON_NO_FOLLOW_UP_CHOICES:
if item not in [COMPLETED_PROTOCOL_VISIT, LOST_VISIT]:
dct.update({item: item})
return dct
# @property
# def scheduled_rapid_test(self):
# """Returns the value of the \'result\' field of the RapidTestResult.
#
# This is a scheduled maternal form for on-study participants."""
# RapidTestResult = apps.get_model('td_maternal', 'rapidtestresult')
# try:
# obj = RapidTestResult.objects.filter(
# maternal_visit__appointment__registered_subject=self.appointment.registered_subject,
# rapid_test_done=YES,
# result__in=[POS, NEG]).order_by('created').last()
# scheduled_rapid_test = obj.result
# except AttributeError as e:
# if 'result' not in str(e):
# raise AttributeError(str(e))
# scheduled_rapid_test = None
# return scheduled_rapid_test
@property
def enrollment_hiv_status(self):
enrollment_hiv_status = None
try:
enrollment_hiv_status = self.antenatal_enrollment.enrollment_hiv_status
except AttributeError:
pass
return enrollment_hiv_status
@property
def antenatal_enrollment(self):
try:
return AntenatalEnrollment.objects.get(
registered_subject=self.appointment.registered_subject)
except AntenatalEnrollment.DoesNotExist:
return None
def get_subject_identifier(self):
return self.appointment.registered_subject.subject_identifier
# @property
# def postnatal_enrollment(self):
# try:
# return PostnatalEnrollment.objects.get(
# registered_subject=self.appointment.registered_subject)
# except PostnatalEnrollment.DoesNotExist:
# return None
class Meta:
app_label = 'td_maternal'
verbose_name = 'Maternal Visit'
|
TshepangRas/tshilo-dikotla
|
td_maternal/models/maternal_visit.py
|
Python
|
gpl-2.0
| 4,885
|
[
"VisIt"
] |
adc70a6060b61a68311c35941691fe68648046ba910647f58da5f831e798e9a9
|
#
# Copyright (C) 2014-2022 S[&]T, The Netherlands.
#
from __future__ import absolute_import, division, print_function
from muninn._compat import string_types as basestring
import copy
import datetime
import re
import uuid
import muninn.geometry as geometry
from muninn.enum import Enum
from muninn.exceptions import *
from muninn.function import Prototype, FunctionTable
from muninn.schema import *
from muninn.visitor import Visitor
#
# Table of all supported operators and functions
#
type_map = {
UUID: Boolean,
}
function_table = FunctionTable(type_map=type_map)
#
# Logical operators
#
function_table.add(Prototype("not", (Boolean,), Boolean))
function_table.add(Prototype("and", (Boolean, Boolean), Boolean))
function_table.add(Prototype("or", (Boolean, Boolean), Boolean))
#
# Membership operators
#
function_table.add(Prototype("in", (Integer, Sequence), Boolean))
function_table.add(Prototype("in", (Long, Sequence), Boolean))
function_table.add(Prototype("in", (Real, Sequence), Boolean))
function_table.add(Prototype("in", (Text, Sequence), Boolean))
function_table.add(Prototype("not in", (Integer, Sequence), Boolean))
function_table.add(Prototype("not in", (Long, Sequence), Boolean))
function_table.add(Prototype("not in", (Real, Sequence), Boolean))
function_table.add(Prototype("not in", (Text, Sequence), Boolean))
#
# Comparison operators
#
function_table.add(Prototype("==", (Long, Long), Boolean))
function_table.add(Prototype("==", (Long, Integer), Boolean))
function_table.add(Prototype("==", (Integer, Long), Boolean))
function_table.add(Prototype("==", (Integer, Integer), Boolean))
function_table.add(Prototype("==", (Real, Real), Boolean))
function_table.add(Prototype("==", (Real, Long), Boolean))
function_table.add(Prototype("==", (Long, Real), Boolean))
function_table.add(Prototype("==", (Real, Integer), Boolean))
function_table.add(Prototype("==", (Integer, Real), Boolean))
function_table.add(Prototype("==", (Boolean, Boolean), Boolean))
function_table.add(Prototype("==", (Text, Text), Boolean))
function_table.add(Prototype("==", (Timestamp, Timestamp), Boolean))
function_table.add(Prototype("==", (UUID, UUID), Boolean))
function_table.add(Prototype("!=", (Long, Long), Boolean))
function_table.add(Prototype("!=", (Long, Integer), Boolean))
function_table.add(Prototype("!=", (Integer, Long), Boolean))
function_table.add(Prototype("!=", (Integer, Integer), Boolean))
function_table.add(Prototype("!=", (Real, Real), Boolean))
function_table.add(Prototype("!=", (Real, Long), Boolean))
function_table.add(Prototype("!=", (Long, Real), Boolean))
function_table.add(Prototype("!=", (Real, Integer), Boolean))
function_table.add(Prototype("!=", (Integer, Real), Boolean))
function_table.add(Prototype("!=", (Boolean, Boolean), Boolean))
function_table.add(Prototype("!=", (Text, Text), Boolean))
function_table.add(Prototype("!=", (Timestamp, Timestamp), Boolean))
function_table.add(Prototype("!=", (UUID, UUID), Boolean))
function_table.add(Prototype("<", (Long, Long), Boolean))
function_table.add(Prototype("<", (Long, Integer), Boolean))
function_table.add(Prototype("<", (Integer, Long), Boolean))
function_table.add(Prototype("<", (Integer, Integer), Boolean))
function_table.add(Prototype("<", (Real, Real), Boolean))
function_table.add(Prototype("<", (Real, Long), Boolean))
function_table.add(Prototype("<", (Long, Real), Boolean))
function_table.add(Prototype("<", (Real, Integer), Boolean))
function_table.add(Prototype("<", (Integer, Real), Boolean))
function_table.add(Prototype("<", (Text, Text), Boolean))
function_table.add(Prototype("<", (Timestamp, Timestamp), Boolean))
function_table.add(Prototype(">", (Long, Long), Boolean))
function_table.add(Prototype(">", (Long, Integer), Boolean))
function_table.add(Prototype(">", (Integer, Long), Boolean))
function_table.add(Prototype(">", (Integer, Integer), Boolean))
function_table.add(Prototype(">", (Real, Real), Boolean))
function_table.add(Prototype(">", (Real, Long), Boolean))
function_table.add(Prototype(">", (Long, Real), Boolean))
function_table.add(Prototype(">", (Real, Integer), Boolean))
function_table.add(Prototype(">", (Integer, Real), Boolean))
function_table.add(Prototype(">", (Text, Text), Boolean))
function_table.add(Prototype(">", (Timestamp, Timestamp), Boolean))
function_table.add(Prototype("<=", (Long, Long), Boolean))
function_table.add(Prototype("<=", (Long, Integer), Boolean))
function_table.add(Prototype("<=", (Integer, Long), Boolean))
function_table.add(Prototype("<=", (Integer, Integer), Boolean))
function_table.add(Prototype("<=", (Real, Real), Boolean))
function_table.add(Prototype("<=", (Real, Long), Boolean))
function_table.add(Prototype("<=", (Long, Real), Boolean))
function_table.add(Prototype("<=", (Real, Integer), Boolean))
function_table.add(Prototype("<=", (Integer, Real), Boolean))
function_table.add(Prototype("<=", (Text, Text), Boolean))
function_table.add(Prototype("<=", (Timestamp, Timestamp), Boolean))
function_table.add(Prototype(">=", (Long, Long), Boolean))
function_table.add(Prototype(">=", (Long, Integer), Boolean))
function_table.add(Prototype(">=", (Integer, Long), Boolean))
function_table.add(Prototype(">=", (Integer, Integer), Boolean))
function_table.add(Prototype(">=", (Real, Real), Boolean))
function_table.add(Prototype(">=", (Real, Long), Boolean))
function_table.add(Prototype(">=", (Long, Real), Boolean))
function_table.add(Prototype(">=", (Real, Integer), Boolean))
function_table.add(Prototype(">=", (Integer, Real), Boolean))
function_table.add(Prototype(">=", (Text, Text), Boolean))
function_table.add(Prototype(">=", (Timestamp, Timestamp), Boolean))
function_table.add(Prototype("~=", (Text, Text), Boolean))
function_table.add(Prototype("+", (Long,), Long))
function_table.add(Prototype("+", (Integer,), Integer))
function_table.add(Prototype("+", (Real,), Real))
function_table.add(Prototype("-", (Long,), Long))
function_table.add(Prototype("-", (Integer,), Integer))
function_table.add(Prototype("-", (Real,), Real))
function_table.add(Prototype("+", (Long, Long), Long))
function_table.add(Prototype("+", (Long, Integer), Long))
function_table.add(Prototype("+", (Integer, Long), Long))
function_table.add(Prototype("+", (Integer, Integer), Integer))
function_table.add(Prototype("+", (Real, Real), Real))
function_table.add(Prototype("+", (Real, Long), Real))
function_table.add(Prototype("+", (Long, Real), Real))
function_table.add(Prototype("+", (Real, Integer), Real))
function_table.add(Prototype("+", (Integer, Real), Real))
function_table.add(Prototype("-", (Long, Long), Long))
function_table.add(Prototype("-", (Long, Integer), Long))
function_table.add(Prototype("-", (Integer, Long), Long))
function_table.add(Prototype("-", (Integer, Integer), Integer))
function_table.add(Prototype("-", (Real, Real), Real))
function_table.add(Prototype("-", (Real, Long), Real))
function_table.add(Prototype("-", (Long, Real), Real))
function_table.add(Prototype("-", (Real, Integer), Real))
function_table.add(Prototype("-", (Integer, Real), Real))
function_table.add(Prototype("*", (Long, Long), Long))
function_table.add(Prototype("*", (Long, Integer), Long))
function_table.add(Prototype("*", (Integer, Long), Long))
function_table.add(Prototype("*", (Integer, Integer), Integer))
function_table.add(Prototype("*", (Real, Real), Real))
function_table.add(Prototype("*", (Real, Long), Real))
function_table.add(Prototype("*", (Long, Real), Real))
function_table.add(Prototype("*", (Real, Integer), Real))
function_table.add(Prototype("*", (Integer, Real), Real))
function_table.add(Prototype("/", (Long, Long), Long))
function_table.add(Prototype("/", (Long, Integer), Long))
function_table.add(Prototype("/", (Integer, Long), Long))
function_table.add(Prototype("/", (Integer, Integer), Integer))
function_table.add(Prototype("/", (Real, Real), Real))
function_table.add(Prototype("/", (Real, Long), Real))
function_table.add(Prototype("/", (Long, Real), Real))
function_table.add(Prototype("/", (Real, Integer), Real))
function_table.add(Prototype("/", (Integer, Real), Real))
function_table.add(Prototype("-", (Timestamp, Timestamp), Real))
#
# Functions.
#
function_table.add(Prototype("covers", (Geometry, Geometry), Boolean))
function_table.add(Prototype("covers", (Timestamp, Timestamp, Timestamp, Timestamp), Boolean))
function_table.add(Prototype("intersects", (Geometry, Geometry), Boolean))
function_table.add(Prototype("intersects", (Timestamp, Timestamp, Timestamp, Timestamp), Boolean))
function_table.add(Prototype("is_defined", (Long,), Boolean))
function_table.add(Prototype("is_defined", (Integer,), Boolean))
function_table.add(Prototype("is_defined", (Real,), Boolean))
function_table.add(Prototype("is_defined", (Boolean,), Boolean))
function_table.add(Prototype("is_defined", (Text,), Boolean))
function_table.add(Prototype("is_defined", (Namespace,), Boolean))
function_table.add(Prototype("is_defined", (Timestamp,), Boolean))
function_table.add(Prototype("is_defined", (UUID,), Boolean))
function_table.add(Prototype("is_defined", (Geometry,), Boolean))
function_table.add(Prototype("is_source_of", (UUID,), Boolean))
function_table.add(Prototype("is_source_of", (Boolean,), Boolean))
function_table.add(Prototype("is_derived_from", (UUID,), Boolean))
function_table.add(Prototype("is_derived_from", (Boolean,), Boolean))
function_table.add(Prototype("has_tag", (Text,), Boolean))
function_table.add(Prototype("now", (), Timestamp))
class TokenType(Enum):
_items = ("TEXT", "UUID", "TIMESTAMP", "REAL", "INTEGER", "BOOLEAN", "NAME", "OPERATOR", "END")
class Token(object):
def __init__(self, type_, value=None):
self.type_ = type_
self.value = value
def __repr__(self):
return "Token(type_ = TokenType.%s, value = %r)" % (TokenType.to_string(self.type_), self.value)
class TokenStream(object):
_sub_patterns = \
(
r"""\"(?:[^\\"]|\\.)*\"""", # Text literals
r"""\d{4}-\d{2}-\d{2}(?:T\d{2}:\d{2}:\d{2}(?:\.\d{0,6})?)?""", # Timestamp literals
r"""[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}""", # UUID literals
r"""\d+(?:\.\d*(?:[eE][+-]?\d+)?|[eE][+-]?\d+)""", # Real literals
r"""\d+""", # Integer literals
r"""<=|>=|==|!=|~=|not in|[*<>@()\[\],.+-/]""", # Operators and delimiters
r"""[a-zA-Z]\w*""", # Names (incl. true, false, in)
)
_pattern = r"""(?:%s)""" % ("|".join(["(%s)" % sub_pattern for sub_pattern in _sub_patterns]))
_re_token = re.compile(_pattern)
_re_datemin = re.compile(r"0000-00-00(?:T00:00:00(?:\.0{0,6})?)?$")
_re_datemax = re.compile(r"9999-99-99(?:T99:99:99(?:\.9{0,6})?)?$")
def __init__(self, text):
self.text = text
self.at_end = not self.text
self.token_start_position, self.token_end_position = 0, 0
self.next()
def next(self):
if self.at_end:
raise Error("char %d: unexpected end of input" % (self.token_start_position + 1))
self.token = self._next_token()
return self.token
def test(self, types, values=None):
return False if not self._test_token_types(types) else (values is None or self._test_token_values(values))
def accept(self, types, values=None):
if not self.test(types, values):
return False
self.next()
return True
def expect(self, types, values=None):
if not self.test(types, values):
if self.token.type_ == TokenType.END:
raise Error("char %d: unexpected end of input" % (self.token_start_position + 1))
else:
if self.token.value is None:
token_str = TokenType.to_string(self.token.type_)
else:
token_str = "\"%s\"" % self.token.value
expected_str = self._types_to_string(types) if values is None else self._values_to_string(values)
raise Error("char %d: expected %s, got %s" % (self.token_start_position + 1, expected_str, token_str))
token = self.token
self.next()
return token
def _types_to_string(self, types):
try:
strings = map(TokenType.to_string, types)
except TypeError:
return TokenType.to_string(types)
return "%s%s" % ("" if len(strings) == 1 else "one of: ", ", ".join(strings))
def _values_to_string(self, values):
if isinstance(values, basestring):
return "\"%s\"" % values
try:
strings = ["\"%s\"" % value for value in values]
except TypeError:
return "\"%s\"" % values
return "%s%s" % ("" if len(strings) == 1 else "one of: ", ", ".join(strings))
def _test_token_types(self, types):
try:
return self.token.type_ in types
except TypeError:
return self.token.type_ == types
def _test_token_values(self, values):
if isinstance(values, basestring):
return self.token.value == values
try:
return self.token.value in values
except TypeError:
return self.token.value == values
def _next_token(self):
self.token_start_position = self._skip_white_space(self.token_end_position)
if self.token_start_position == len(self.text):
self.at_end = True
return Token(TokenType.END)
match_object = self._re_token.match(self.text, self.token_start_position)
if match_object is None:
raise Error("char %d: syntax error: \"%s\"" % (self.token_start_position + 1,
self.text[self.token_start_position:]))
self.token_start_position, self.token_end_position = match_object.span()
text, timestamp, uuid_, real, integer, operator, name = match_object.groups()
if text is not None:
return Token(TokenType.TEXT, string_unescape(text[1:-1]))
if timestamp is not None:
return Token(TokenType.TIMESTAMP, self._parse_timestamp(timestamp))
if uuid_ is not None:
return Token(TokenType.UUID, uuid.UUID(uuid_))
if real is not None:
return Token(TokenType.REAL, float(real))
if integer is not None:
return Token(TokenType.INTEGER, int(integer))
if operator is not None:
return Token(TokenType.OPERATOR, operator)
if name is not None:
if name in ["true", "false"]:
return Token(TokenType.BOOLEAN, name == "true")
elif name == "in":
return Token(TokenType.OPERATOR, name)
else:
return Token(TokenType.NAME, name)
raise Error("char %d: syntax error: \"%s\"" % (self.token_start_position + 1, match_object.group()))
def _skip_white_space(self, start):
while start < len(self.text) and self.text[start].isspace():
start += 1
return start
def _parse_timestamp(self, timestamp):
if self._re_datemin.match(timestamp) is not None:
return datetime.datetime.min
if self._re_datemax.match(timestamp) is not None:
return datetime.datetime.max
for format_string in ("%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%S.%f"):
try:
return datetime.datetime.strptime(timestamp, format_string)
except ValueError:
pass
raise Error("char %d: invalid timestamp: \"%s\"" % (self.token_start_position + 1, timestamp))
class AbstractSyntaxTreeNode(object):
pass
class Literal(AbstractSyntaxTreeNode):
def __init__(self, value):
self.value = value
def __str__(self):
return "(%s %s)" % (type(self).__name__, self.value)
class Name(AbstractSyntaxTreeNode):
def __init__(self, value):
self.value = value
def __str__(self):
return "(%s %s)" % (type(self).__name__, self.value)
class List(AbstractSyntaxTreeNode):
def __init__(self, values):
self.values = values
def __str__(self):
return "(%s %s)" % (type(self).__name__, self.value)
class ParameterReference(AbstractSyntaxTreeNode):
def __init__(self, name):
self.name = name
def __str__(self):
return "(%s %s)" % (type(self).__name__, self.name)
class FunctionCall(AbstractSyntaxTreeNode):
def __init__(self, name, *args):
self.name = name
self.arguments = list(args)
def __str__(self):
if not self.arguments:
return "(%s %s)" % (type(self).__name__, self.name)
return "(%s %s %s)" % (type(self).__name__, self.name, " ".join(map(str, self.arguments)))
def parse_sequence(stream, parse_item_function, start='(', end=')'):
stream.expect(TokenType.OPERATOR, start)
if stream.accept(TokenType.OPERATOR, end):
return []
sequence = [parse_item_function(stream)]
while stream.accept(TokenType.OPERATOR, ","):
sequence.append(parse_item_function(stream))
stream.expect(TokenType.OPERATOR, end)
return sequence
def parse_geometry_sequence(stream, parse_item_function):
if stream.accept(TokenType.NAME, "EMPTY"):
return []
stream.expect(TokenType.OPERATOR, "(")
sequence = [parse_item_function(stream)]
while stream.accept(TokenType.OPERATOR, ","):
sequence.append(parse_item_function(stream))
stream.expect(TokenType.OPERATOR, ")")
return sequence
def parse_signed_coordinate(stream):
if stream.accept(TokenType.OPERATOR, "-"):
token = stream.expect((TokenType.INTEGER, TokenType.REAL))
return -float(token.value)
stream.accept(TokenType.OPERATOR, "+")
token = stream.expect((TokenType.INTEGER, TokenType.REAL))
return float(token.value)
def parse_point_raw(stream):
return geometry.Point(parse_signed_coordinate(stream),
parse_signed_coordinate(stream))
def parse_point(stream):
stream.expect(TokenType.OPERATOR, "(")
point = parse_point_raw(stream)
stream.expect(TokenType.OPERATOR, ")")
return point
def parse_line_string(stream):
return geometry.LineString(parse_geometry_sequence(stream, parse_point_raw))
def parse_linear_ring(stream):
points = parse_geometry_sequence(stream, parse_point_raw)
if len(points) == 0:
return geometry.LinearRing()
if len(points) < 4:
raise Error("char %d: linear ring should be empty or should contain >= 4 points" % stream.token_start_position)
if points[-1] != points[0]:
raise Error("char %d: linear ring should be closed" % stream.token_start_position)
return geometry.LinearRing(points[:-1])
def parse_polygon(stream):
return geometry.Polygon(parse_geometry_sequence(stream, parse_linear_ring))
def parse_multi_point(stream):
return geometry.MultiPoint(parse_geometry_sequence(stream, parse_point))
def parse_multi_line_string(stream):
return geometry.MultiLineString(parse_geometry_sequence(stream, parse_line_string))
def parse_multi_polygon(stream):
return geometry.MultiPolygon(parse_geometry_sequence(stream, parse_polygon))
def parse_atom(stream):
# Sub-expression.
if stream.accept(TokenType.OPERATOR, "("):
sub_expression = parse_expression(stream)
stream.expect(TokenType.OPERATOR, ")")
return sub_expression
# Parameter reference.
if stream.accept(TokenType.OPERATOR, "@"):
name_token = stream.expect(TokenType.NAME)
return ParameterReference(name_token.value)
# Geometry literal, function call, or name.
if stream.test(TokenType.NAME):
name_token = stream.expect(TokenType.NAME)
# Geometry literals.
if name_token.value == "POINT":
return Literal(parse_point(stream))
elif name_token.value == "LINESTRING":
return Literal(parse_line_string(stream))
elif name_token.value == "POLYGON":
return Literal(parse_polygon(stream))
elif name_token.value == "MULTIPOINT":
return Literal(parse_multi_point(stream))
elif name_token.value == "MULTILINESTRING":
return Literal(parse_multi_line_string(stream))
elif name_token.value == "MULTIPOLYGON":
return Literal(parse_multi_polygon(stream))
# Function call.
if stream.test(TokenType.OPERATOR, "("):
return FunctionCall(name_token.value, *parse_sequence(stream, parse_expression))
# Name (possibly qualified).
parts = [name_token.value]
while stream.accept(TokenType.OPERATOR, "."):
name_token = stream.expect(TokenType.NAME)
parts.append(name_token.value)
return Name(".".join(parts))
if stream.test(TokenType.OPERATOR, "["):
return List(parse_sequence(stream, parse_expression, "[", "]"))
# Literal.
token = stream.expect((TokenType.TEXT, TokenType.TIMESTAMP, TokenType.UUID, TokenType.REAL, TokenType.INTEGER,
TokenType.BOOLEAN))
return Literal(token.value)
def parse_term(stream):
if stream.test(TokenType.OPERATOR, ("+", "-")):
operator_token = stream.expect(TokenType.OPERATOR, ("+", "-"))
return FunctionCall(operator_token.value, parse_term(stream))
return parse_atom(stream)
def parse_arithmetic_expression(stream):
lhs = parse_term(stream)
if stream.test(TokenType.OPERATOR, ("+", "-", "*", "/")):
operator_token = stream.expect(TokenType.OPERATOR, ("+", "-", "*", "/"))
return FunctionCall(operator_token.value, lhs, parse_arithmetic_expression(stream))
return lhs
def parse_comparison(stream):
lhs = parse_arithmetic_expression(stream)
if stream.test(TokenType.OPERATOR, ("<", ">", "==", ">=", "<=", "!=", "~=", "in", "not in")):
operator_token = stream.expect(TokenType.OPERATOR, ("<", ">", "==", ">=", "<=", "!=", "~=", "in", "not in"))
return FunctionCall(operator_token.value, lhs, parse_comparison(stream))
return lhs
def parse_not_expression(stream):
if stream.accept(TokenType.NAME, "not"):
return FunctionCall("not", parse_not_expression(stream))
return parse_comparison(stream)
def parse_and_expression(stream):
lhs = parse_not_expression(stream)
if stream.accept(TokenType.NAME, "and"):
return FunctionCall("and", lhs, parse_and_expression(stream))
return lhs
def parse_or_expression(stream):
lhs = parse_and_expression(stream)
if stream.accept(TokenType.NAME, "or"):
return FunctionCall("or", lhs, parse_or_expression(stream))
return lhs
def parse_expression(stream):
return parse_or_expression(stream)
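# Illustrative sketch, not part of the original source: the chain of parse_*
# functions above encodes operator precedence from loosest to tightest binding:
# "or", "and", "not", comparisons, arithmetic "+ - * /", unary sign. The helper
# name and query text are hypothetical.
def _demo_parse(text='not core.size == 0 or core.size < 10 and has_tag("done")'):
    return parse_expression(TokenStream(text))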
def _literal_type(literal):
for type in (Text, Timestamp, UUID, Boolean, Integer, Long, Real, Geometry):
try:
type.validate(literal)
except ValueError:
pass
else:
return type
raise Error("unable to determine type of literal value: %r" % literal)
class SemanticAnalysis(Visitor):
def __init__(self, namespace_schemas, parameters):
super(SemanticAnalysis, self).__init__()
self._namespace_schemas = namespace_schemas
self._parameters = parameters
def visit_Literal(self, visitable):
visitable.type = _literal_type(visitable.value)
def visit_Name(self, visitable):
split_name = visitable.value.split(".")
# namespace/implicit core property
if len(split_name) == 1:
if split_name[0] in self._namespace_schemas:
namespace = split_name[0]
name = None
else:
namespace, name = "core", split_name[0]
# namespace.property
elif len(split_name) == 2:
namespace, name = split_name
else:
raise Error("invalid property name: \"%s\"" % visitable.value)
# check that namespace exists
try:
schema = self._namespace_schemas[namespace]
except KeyError:
raise Error("undefined namespace: \"%s\"" % namespace)
# namespace
if name is None:
visitable.value = split_name[0]
visitable.type = Namespace
# namespace.property
else:
try:
type_ = schema[name]
except KeyError:
if len(split_name) == 2:
raise Error("undefined property: \"%s\"" % visitable.value)
else:
raise Error("undefined name: \"%s\"" % name)
visitable.value = "%s.%s" % (namespace, name)
visitable.type = type_
def visit_List(self, visitable): # TODO check same literal type
values = []
for value in visitable.values:
if not isinstance(value, Literal):
raise Error("list contains non-literal")
values.append(value.value)
visitable.value = values
visitable.type = Sequence
def visit_ParameterReference(self, visitable):
try:
value = self._parameters[visitable.name]
except KeyError:
raise Error("no value for parameter: \"%s\"" % visitable.name)
visitable.value = value
visitable.type = _literal_type(value)
def visit_FunctionCall(self, visitable):
# Resolve the type of the function arguments.
for argument in visitable.arguments:
self.visit(argument)
prototype = Prototype(visitable.name, [argument.type for argument in visitable.arguments])
try:
prototypes = function_table.resolve(prototype)
except KeyError:
prototypes = []
if not prototypes:
raise Error("undefined function: \"%s\"" % prototype)
if len(prototypes) > 1:
raise InternalError("cannot uniquely resolve function: \"%s\"" % prototype)
prototype = prototypes[0]
visitable.prototype = prototype
visitable.type = prototype.return_type
def visit_AbstractSyntaxTreeNode(self, visitable):
if not hasattr(visitable, "type"):
raise InternalError("encountered abstract syntax tree node without type attribute: %s" %
type(visitable).__name__)
def default(self, visitable):
raise InternalError("unsupported abstract syntax tree node type: %s" % type(visitable).__name__)
def parse(text):
stream = TokenStream(text)
abstract_syntax_tree = parse_expression(stream)
if not stream.test(TokenType.END):
raise Error("char %d: extra characters after expression: \"%s\"" % (stream.token_start_position + 1,
text[stream.token_start_position:]))
return abstract_syntax_tree
def analyze(abstract_syntax_tree, namespace_schemas={}, parameters={}):
annotated_syntax_tree = copy.deepcopy(abstract_syntax_tree)
SemanticAnalysis(namespace_schemas, parameters).visit(annotated_syntax_tree)
return annotated_syntax_tree
def parse_and_analyze(text, namespace_schemas={}, parameters={}):
return analyze(parse(text), namespace_schemas, parameters)
def string_unescape(text):
'''
Unescape special characters in a string.
Python2 and 3 compatible, uses the native string type.
In python2, the same effect can also be achieved with `string.decode("string-escape")`
'''
text = str(text) # make sure we are using the native string type
regex = re.compile('\\\\(\\\\|[\'"abfnrtv])')
translator = {
'\\': '\\',
"'": "'",
'"': '"',
'a': '\a',
'b': '\b',
'f': '\f',
'n': '\n',
'r': '\r',
't': '\t',
'v': '\v',
}
def _replace(m):
c = m.group(1)
return translator[c]
result = regex.sub(_replace, text)
return result
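# Illustrative sketch, not part of the original module: building an
# (unannotated) abstract syntax tree with the top-level parse() helper above.
# Full semantic analysis via analyze()/parse_and_analyze() additionally needs
# the archive's namespace schemas, which are not available here.
def _demo_parse_query():
    tree = parse('core.size > 1000 and has_tag("radiometry")')
    return str(tree)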
|
stcorp/muninn
|
muninn/language.py
|
Python
|
bsd-3-clause
| 28,166
|
[
"VisIt"
] |
3983548b0a2b0032ee920af45f933077198e22396e083bf26de8a526ee1c5d4f
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.component.component Contains the ModelingComponent class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
from abc import ABCMeta
# Import astronomical modules
from astropy.units import Unit
# Import the relevant PTS classes and modules
from ...core.basics.configurable import Configurable
from ...core.tools import introspection
from ...core.tools import filesystem as fs
from ...core.filter.broad import BroadBandFilter
from ...core.basics.configuration import Configuration
from ..core.history import ModelingHistory
from ..core.commands import ModelingCommands
from ..core.environment import GalaxyModelingEnvironment, SEDModelingEnvironment, ImagesModelingEnvironment
from ...core.tools.utils import lazyproperty
from ...core.tools import parsing
# -----------------------------------------------------------------
class ModelingComponent(Configurable):
"""
    Abstract base class for all modeling components: it loads the modeling
    configuration, sets up the modeling environment and exposes commonly used
    paths, filters and data as properties.
"""
__metaclass__ = ABCMeta
# -----------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
:return:
"""
# Call the constructor of the base class
super(ModelingComponent, self).__init__(*args, **kwargs)
# The modeling configuration file
self.config_file_path = None
        # The modeling environment
self.environment = None
# PTS directories
self.kernels_path = None
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:return:
"""
# Call the setup function of the base class
super(ModelingComponent, self).setup(**kwargs)
# Determine the path to the modeling configuration file
self.config_file_path = fs.join(self.config.path, "modeling.cfg")
# Check for the presence of the configuration file
if not fs.is_file(self.config_file_path): raise ValueError("The current working directory (" + self.config.path + ") is not a radiative transfer modeling directory (the configuration file is missing)")
# Determine the path to the kernels user directory
self.kernels_path = fs.join(introspection.pts_user_dir, "kernels")
# Create the modeling environment
if self.is_galaxy_modeling: self.environment = GalaxyModelingEnvironment(self.config.path)
elif self.is_sed_modeling: self.environment = SEDModelingEnvironment(self.config.path)
elif self.is_images_modeling: self.environment = ImagesModelingEnvironment(self.config.path)
# -----------------------------------------------------------------
@property
def history_file_path(self):
return self.environment.history_file_path
# -----------------------------------------------------------------
@property
def commands_file_path(self):
return self.environment.commands_file_path
# -----------------------------------------------------------------
@property
def fit_path(self):
return self.environment.fit_path
# -----------------------------------------------------------------
@property
def analysis_path(self):
return self.environment.analysis_path
# -----------------------------------------------------------------
@property
def reports_path(self):
return self.environment.reports_path
# -----------------------------------------------------------------
@property
def visualisation_path(self):
return self.environment.visualisation_path
# -----------------------------------------------------------------
@property
def plot_path(self):
return self.environment.plot_path
# -----------------------------------------------------------------
@property
def log_path(self):
return self.environment.log_path
# -----------------------------------------------------------------
@property
def config_path(self):
return self.environment.config_path
# -----------------------------------------------------------------
@property
def show_path(self):
return self.environment.show_path
# -----------------------------------------------------------------
@property
def build_path(self):
return self.environment.build_path
# -----------------------------------------------------------------
@property
def html_path(self):
return self.environment.html_path
# -----------------------------------------------------------------
@property
def object_name(self):
return self.modeling_configuration.name
# -----------------------------------------------------------------
@lazyproperty
def observed_sed(self):
# Return the observed SED
if self.is_galaxy_modeling: return self.environment.observed_sed
elif self.is_sed_modeling: return self.environment.observed_sed
else: raise ValueError("Observed SED is not defined for modeling types other than 'galaxy' or 'sed'")
# -----------------------------------------------------------------
@lazyproperty
def truncated_sed(self):
        if not self.is_galaxy_modeling: raise RuntimeError("The truncated SED is only defined for galaxy modeling")
return self.environment.truncated_sed
# -----------------------------------------------------------------
@lazyproperty
def observed_sed_path(self):
# Return the correct path
if self.is_galaxy_modeling: return self.environment.observed_sed_path
elif self.is_sed_modeling: return self.environment.sed_path
else: raise ValueError("Observed SED not defined for modeling types other than 'galaxy' or 'sed'")
# -----------------------------------------------------------------
@lazyproperty
def truncated_sed_path(self):
        if not self.is_galaxy_modeling: raise RuntimeError("The truncated SED path is only defined for galaxy modeling")
return self.environment.truncated_sed_path
# -----------------------------------------------------------------
def observed_flux(self, fltr, unit=None, add_unit=True):
"""
This function ...
:param fltr:
:param unit:
:param add_unit:
:return:
"""
return self.observed_sed.photometry_for_filter(fltr, unit=unit, add_unit=add_unit)
# -----------------------------------------------------------------
@lazyproperty
def observed_filters(self):
return self.observed_sed.filters()
# -----------------------------------------------------------------
@lazyproperty
def observed_filter_names(self):
return [str(fltr) for fltr in self.observed_filters]
# -----------------------------------------------------------------
@lazyproperty
def observed_filter_wavelengths(self):
return [fltr.wavelength for fltr in self.observed_filters]
# -----------------------------------------------------------------
@lazyproperty
def sed_filters(self):
return self.observed_sed.filters()
# -----------------------------------------------------------------
@lazyproperty
def sed_filter_names(self):
return [str(fltr) for fltr in self.sed_filters]
# -----------------------------------------------------------------
@lazyproperty
def sed_filter_wavelengths(self):
return [fltr.pivot for fltr in self.sed_filters]
# -----------------------------------------------------------------
@lazyproperty
def modeling_configuration(self):
"""
This function ...
:return:
"""
# Load the configuration
config = Configuration.from_file(self.config_file_path)
# Return the configuration
return config
# -----------------------------------------------------------------
@lazyproperty
def modeling_type(self):
return self.modeling_configuration.modeling_type
# -----------------------------------------------------------------
@property
def is_galaxy_modeling(self):
return self.modeling_configuration.modeling_type == "galaxy"
# -----------------------------------------------------------------
@property
def is_sed_modeling(self):
return self.modeling_configuration.modeling_type == "sed"
# -----------------------------------------------------------------
@property
def is_images_modeling(self):
return self.modeling_configuration.modeling_type == "images"
# -----------------------------------------------------------------
@lazyproperty
def history(self):
return self.environment.history
# -----------------------------------------------------------------
@lazyproperty
def status(self):
return self.environment.status
# -----------------------------------------------------------------
@lazyproperty
def nuv_filter(self):
return BroadBandFilter("GALEX NUV")
# -----------------------------------------------------------------
@lazyproperty
def nuv_wavelength(self):
return self.nuv_filter.wavelength
# -----------------------------------------------------------------
@lazyproperty
def fuv_filter(self):
return BroadBandFilter("GALEX FUV")
# -----------------------------------------------------------------
@lazyproperty
def fuv_wavelength(self):
return self.fuv_filter.wavelength
# -----------------------------------------------------------------
@lazyproperty
def i1_filter(self):
return BroadBandFilter("IRAC I1")
# -----------------------------------------------------------------
@lazyproperty
def i1_wavelength(self):
return self.i1_filter.wavelength
# -----------------------------------------------------------------
@lazyproperty
def i2_filter(self):
return BroadBandFilter("IRAC I2")
# -----------------------------------------------------------------
@lazyproperty
def jhk_filters(self):
return parsing.lazy_broad_band_filter_list("2MASS")
# -----------------------------------------------------------------
@lazyproperty
def pacs_red_filter(self):
return BroadBandFilter("Pacs 160")
# -----------------------------------------------------------------
@lazyproperty
def spire_psw_filter(self):
return BroadBandFilter("SPIRE PSW")
# -----------------------------------------------------------------
@lazyproperty
def spire_pmw_filter(self):
return BroadBandFilter("SPIRE PMW")
# -----------------------------------------------------------------
@lazyproperty
def spire_plw_filter(self):
return BroadBandFilter("SPIRE PLW")
# -----------------------------------------------------------------
@lazyproperty
def spire_filters(self):
return parsing.lazy_broad_band_filter_list("SPIRE")
# -----------------------------------------------------------------
@lazyproperty
def planck_filters(self):
return parsing.lazy_broad_band_filter_list("Planck")
# -----------------------------------------------------------------
@lazyproperty
def hfi_filters(self):
return parsing.lazy_broad_band_filter_list("HFI")
# -----------------------------------------------------------------
@lazyproperty
def lfi_filters(self):
return parsing.lazy_broad_band_filter_list("LFI")
# -----------------------------------------------------------------
@lazyproperty
def iras_filters(self):
return parsing.lazy_broad_band_filter_list("IRAS")
# -----------------------------------------------------------------
@lazyproperty
def iras_and_planck_filters(self):
return self.planck_filters + self.iras_filters
# -----------------------------------------------------------------
@lazyproperty
def ignore_sed_plot_filters(self):
return self.planck_filters + self.iras_filters
# -----------------------------------------------------------------
@lazyproperty
def observed_filters_no_iras(self):
return [fltr for fltr in self.observed_filters if fltr not in self.iras_filters]
# -----------------------------------------------------------------
@lazyproperty
def observed_filter_names_no_iras(self):
return [str(fltr) for fltr in self.observed_filters_no_iras]
# -----------------------------------------------------------------
@lazyproperty
def observed_filters_no_iras_planck(self):
# Get the filters
return [fltr for fltr in self.observed_filters if fltr not in self.iras_and_planck_filters]
# -----------------------------------------------------------------
@lazyproperty
def observed_filter_names_no_iras_planck(self):
return [str(fltr) for fltr in self.observed_filters_no_iras_planck]
# -----------------------------------------------------------------
@lazyproperty
def observed_filter_wavelengths_no_iras_planck(self):
return [fltr.wavelength for fltr in self.observed_filters_no_iras_planck]
# -----------------------------------------------------------------
@lazyproperty
def v_band_wavelength(self):
return 0.55 * Unit("micron")
# -----------------------------------------------------------------
@property
def maps_collection(self):
return self.environment.maps_collection
# -----------------------------------------------------------------
@property
def static_maps_collection(self):
return self.environment.static_maps_collection
# -----------------------------------------------------------------
@property
def maps_selection(self):
return self.environment.maps_selection
# -----------------------------------------------------------------
@property
def static_maps_selection(self):
return self.environment.static_maps_selection
# -----------------------------------------------------------------
@property
def model_suite(self):
return self.environment.model_suite
# -----------------------------------------------------------------
@property
def static_model_suite(self):
return self.environment.static_model_suite
# -----------------------------------------------------------------
@property
def fitting_context(self):
return self.environment.fitting_context
# -----------------------------------------------------------------
@property
def fitting_runs(self):
return self.environment.fitting_runs
# -----------------------------------------------------------------
def get_config_file_path(modeling_path):
"""
This function ...
:param modeling_path:
:return:
"""
# Determine the path to the configuration file
path = fs.join(modeling_path, "modeling.cfg")
# Return the path
return path
# -----------------------------------------------------------------
def load_modeling_configuration(modeling_path):
"""
This function ...
:param modeling_path:
:return:
"""
# Determine the path to the modeling configuration file
path = get_config_file_path(modeling_path)
# Open the configuration and return it
return Configuration.from_file(path)
# -----------------------------------------------------------------
def get_modeling_type(modeling_path):
"""
This function ...
:param modeling_path:
:return:
"""
configuration = load_modeling_configuration(modeling_path)
return configuration.modeling_type
# -----------------------------------------------------------------
def get_default_fitting_method(modeling_path):
"""
This function ...
:param modeling_path:
:return:
"""
configuration = load_modeling_configuration(modeling_path)
return configuration.fitting_method
# -----------------------------------------------------------------
def get_cache_host_id(modeling_path):
"""
This function ...
:param modeling_path:
:return:
"""
configuration = load_modeling_configuration(modeling_path)
return configuration.cache_host_id
# -----------------------------------------------------------------
def load_modeling_history(modeling_path):
"""
This function ...
:param modeling_path:
:return:
"""
# Determine history file path
history_file_path = fs.join(modeling_path, "history.dat")
# Create new history
if not fs.is_file(history_file_path):
history = ModelingHistory()
history.saveto(history_file_path)
else:
history = ModelingHistory.from_file(history_file_path)
history.clean()
# Return the history
return history
# -----------------------------------------------------------------
def load_modeling_commands(modeling_path):
"""
This function ...
:param modeling_path:
:return:
"""
# Determine the commands file path
commands_file_path = fs.join(modeling_path, "commands.txt")
# Create new commands file
if not fs.is_file(commands_file_path):
commands = ModelingCommands()
commands.saveto(commands_file_path)
else: commands = ModelingCommands.from_file(commands_file_path)
# Return the commands
return commands
# -----------------------------------------------------------------
def get_configuration_file_paths(modeling_path):
"""
This function ...
:param modeling_path:
:return:
"""
# Determine the config path
config_path = fs.join(modeling_path, "config")
# Return the file paths
return fs.files_in_path(config_path, extension="cfg")
# -----------------------------------------------------------------
def get_log_file_paths(modeling_path):
"""
This function ...
:param modeling_path:
:return:
"""
# Determine the log path
log_path = fs.join(modeling_path, "log")
# Return the file paths
return fs.files_in_path(log_path, extension="txt")
# -----------------------------------------------------------------
def load_modeling_status(modeling_path):
"""
This function ...
:param modeling_path:
:return:
"""
from ..core.status import ModelingStatus
return ModelingStatus(modeling_path)
# -----------------------------------------------------------------
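# Illustrative sketch, not part of the original module: the stand-alone helper
# functions above are typically combined like this; the modeling_path argument
# is a placeholder for an existing radiative transfer modeling directory.
def _demo_inspect_modeling_directory(modeling_path):
    configuration = load_modeling_configuration(modeling_path)
    history = load_modeling_history(modeling_path)
    status = load_modeling_status(modeling_path)
    return configuration.modeling_type, history, status
# -----------------------------------------------------------------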
|
SKIRT/PTS
|
modeling/component/component.py
|
Python
|
agpl-3.0
| 19,107
|
[
"Galaxy"
] |
e7c88510c6e10815db700ad3ba9b5d1d5f6431ad5d20c792a850c6cf542b6aab
|
#!/usr/bin/python
#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2007-2012 Brian Langenberger
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from audiotools.bitstream import BitstreamWriter
from audiotools.bitstream import BitstreamRecorder
from audiotools.bitstream import format_size
from audiotools import BufferedPCMReader
from hashlib import md5
#sub block IDs
WV_WAVE_HEADER = 0x1
WV_WAVE_FOOTER = 0x2
WV_TERMS = 0x2
WV_WEIGHTS = 0x3
WV_SAMPLES = 0x4
WV_ENTROPY = 0x5
WV_SAMPLE_RATE = 0x7
WV_INT32_INFO = 0x9
WV_BITSTREAM = 0xA
WV_CHANNEL_INFO = 0xD
WV_MD5 = 0x6
class EncoderContext:
def __init__(self, pcmreader, block_parameters,
wave_header=None, wave_footer=None):
self.pcmreader = pcmreader
self.block_parameters = block_parameters
self.total_frames = 0
self.block_offsets = []
self.md5sum = md5()
self.first_block_written = False
self.wave_header = wave_header
self.wave_footer = wave_footer
def write_wave_header(writer, pcmreader, total_frames, wave_footer_len):
avg_bytes_per_second = (pcmreader.sample_rate *
pcmreader.channels *
(pcmreader.bits_per_sample / 8))
block_align = (pcmreader.channels *
(pcmreader.bits_per_sample / 8))
total_size = 4 * 3 # 'RIFF' + size + 'WAVE'
total_size += 4 * 2 # 'fmt ' + size
if ((pcmreader.channels <= 2) and (pcmreader.bits_per_sample <= 16)):
#classic fmt chunk
fmt = "16u 16u 32u 32u 16u 16u"
fmt_fields = (1, # compression code
pcmreader.channels,
pcmreader.sample_rate,
avg_bytes_per_second,
block_align,
pcmreader.bits_per_sample)
else:
#extended fmt chunk
fmt = "16u 16u 32u 32u 16u 16u" + "16u 16u 32u 16b"
fmt_fields = (0xFFFE, # compression code
pcmreader.channels,
pcmreader.sample_rate,
avg_bytes_per_second,
block_align,
pcmreader.bits_per_sample,
22, # CB size
pcmreader.bits_per_sample,
pcmreader.channel_mask,
'\x01\x00\x00\x00\x00\x00\x10\x00' +
'\x80\x00\x00\xaa\x00\x38\x9b\x71' # sub format
)
total_size += format_size(fmt) / 8
total_size += 4 * 2 # 'data' + size
data_size = (total_frames *
pcmreader.channels *
(pcmreader.bits_per_sample / 8))
total_size += data_size
total_size += wave_footer_len
writer.build("4b 32u 4b 4b 32u" + fmt + "4b 32u",
(('RIFF', total_size - 8, 'WAVE',
'fmt ', format_size(fmt) / 8) + fmt_fields +
('data', data_size)))
class CorrelationParameters:
"""the parameters for a single correlation pass"""
def __init__(self, term, delta, weights, samples):
"""term is a signed integer
delta is an unsigned integer
weights[c] is a weight value per channel c
samples[c][s] is sample "s" for channel "c"
"""
#FIXME - sanity check these
self.term = term
self.delta = delta
self.weights = weights
self.samples = samples
def __repr__(self):
return "CorrelationParameters(%s, %s, %s, %s)" % \
(self.term, self.delta, self.weights, self.samples)
def update_weights(self, weights):
"""given a weights[c] list of weight values per channel c
round-trips and sets this parameter's weights"""
assert(len(weights) == len(self.weights))
self.weights = [restore_weight(store_weight(w))
for w in weights]
def update_samples(self, samples):
"""given a samples[c][s] list of sample lists
round-trips and sets this parameter's samples"""
        assert(len(samples) == len(self.samples))
self.samples = [[wv_exp2(wv_log2(s)) for s in c]
for c in samples]
class EncodingParameters:
"""the encoding parameters for a single 1-2 channel block
multi-channel audio may have more than one set of these
"""
def __init__(self, channel_count, correlation_passes):
"""channel_count is 1 or 2
correlation_passes is in [0,1,2,5,10,16]
"""
assert((channel_count == 1) or (channel_count == 2))
assert(correlation_passes in (0, 1, 2, 5, 10, 16))
self.channel_count = channel_count
self.correlation_passes = correlation_passes
self.entropy_variables = [[0, 0, 0], [0, 0, 0]]
self.__parameters_channel_count__ = 0
self.__correlation_parameters__ = None
def __repr__(self):
return "EncodingParameters(%s, %s, %s)" % \
(self.channel_count,
self.correlation_passes,
self.entropy_variables)
def correlation_parameters(self, false_stereo):
"""given a "false_stereo" boolean
yields a CorrelationParameters object per correlation pass to be run
this may be less than the object's "correlation_passes" count
if "channel_count" is 1 or "false_stereo" is True
"""
if ((self.channel_count == 2) and (not false_stereo)):
channel_count = 2
else:
channel_count = 1
if (channel_count != self.__parameters_channel_count__):
if (channel_count == 1):
if (self.correlation_passes == 0):
self.__correlation_parameters__ = []
elif (self.correlation_passes == 1):
self.__correlation_parameters__ = [
CorrelationParameters(18, 2, [0], [[0] * 2])]
elif (self.correlation_passes == 2):
self.__correlation_parameters__ = [
CorrelationParameters(17, 2, [0], [[0] * 2]),
CorrelationParameters(18, 2, [0], [[0] * 2])]
elif (self.correlation_passes in (5, 10, 16)):
self.__correlation_parameters__ = [
CorrelationParameters(3, 2, [0], [[0] * 3]),
CorrelationParameters(17, 2, [0], [[0] * 2]),
CorrelationParameters(2, 2, [0], [[0] * 2]),
CorrelationParameters(18, 2, [0], [[0] * 2]),
CorrelationParameters(18, 2, [0], [[0] * 2])]
else:
raise ValueError("invalid correlation pass count")
elif (channel_count == 2):
if (self.correlation_passes == 0):
self.__correlation_parameters__ = []
elif (self.correlation_passes == 1):
self.__correlation_parameters__ = [
CorrelationParameters(18, 2, [0, 0], [[0] * 2,
[0] * 2])]
elif (self.correlation_passes == 2):
self.__correlation_parameters__ = [
CorrelationParameters(17, 2, [0, 0], [[0] * 2,
[0] * 2]),
CorrelationParameters(18, 2, [0, 0], [[0] * 2,
[0] * 2])]
elif (self.correlation_passes == 5):
self.__correlation_parameters__ = [
CorrelationParameters(3, 2, [0, 0], [[0] * 3,
[0] * 3]),
CorrelationParameters(17, 2, [0, 0], [[0] * 2,
[0] * 2]),
CorrelationParameters(2, 2, [0, 0], [[0] * 2,
[0] * 2]),
CorrelationParameters(18, 2, [0, 0], [[0] * 2,
[0] * 2]),
CorrelationParameters(18, 2, [0, 0], [[0] * 2,
[0] * 2])]
elif (self.correlation_passes == 10):
self.__correlation_parameters__ = [
CorrelationParameters(4, 2, [0, 0], [[0] * 4,
[0] * 4]),
CorrelationParameters(17, 2, [0, 0], [[0] * 2,
[0] * 2]),
CorrelationParameters(-1, 2, [0, 0], [[0] * 1,
[0] * 1]),
CorrelationParameters(5, 2, [0, 0], [[0] * 5,
[0] * 5]),
CorrelationParameters(3, 2, [0, 0], [[0] * 3,
[0] * 3]),
CorrelationParameters(2, 2, [0, 0], [[0] * 2,
[0] * 2]),
CorrelationParameters(-2, 2, [0, 0], [[0] * 1,
[0] * 1]),
CorrelationParameters(18, 2, [0, 0], [[0] * 2,
[0] * 2]),
CorrelationParameters(18, 2, [0, 0], [[0] * 2,
[0] * 2]),
CorrelationParameters(18, 2, [0, 0], [[0] * 2,
[0] * 2])]
elif (self.correlation_passes == 16):
self.__correlation_parameters__ = [
CorrelationParameters(2, 2, [0, 0], [[0] * 2,
[0] * 2]),
CorrelationParameters(18, 2, [0, 0], [[0] * 2,
[0] * 2]),
CorrelationParameters(-1, 2, [0, 0], [[0] * 1,
[0] * 1]),
CorrelationParameters(8, 2, [0, 0], [[0] * 8,
[0] * 8]),
CorrelationParameters(6, 2, [0, 0], [[0] * 6,
[0] * 6]),
CorrelationParameters(3, 2, [0, 0], [[0] * 3,
[0] * 3]),
CorrelationParameters(5, 2, [0, 0], [[0] * 5,
[0] * 5]),
CorrelationParameters(7, 2, [0, 0], [[0] * 7,
[0] * 7]),
CorrelationParameters(4, 2, [0, 0], [[0] * 4,
[0] * 4]),
CorrelationParameters(2, 2, [0, 0], [[0] * 2,
[0] * 2]),
CorrelationParameters(18, 2, [0, 0], [[0] * 2,
[0] * 2]),
CorrelationParameters(-2, 2, [0, 0], [[0] * 1,
[0] * 1]),
CorrelationParameters(3, 2, [0, 0], [[0] * 3,
[0] * 3]),
CorrelationParameters(2, 2, [0, 0], [[0] * 2,
[0] * 2]),
CorrelationParameters(18, 2, [0, 0], [[0] * 2,
[0] * 2]),
CorrelationParameters(18, 2, [0, 0], [[0] * 2,
[0] * 2])]
else:
raise ValueError("invalid correlation pass count")
for parameters in self.__correlation_parameters__:
yield parameters
def block_parameters(channel_count, channel_mask, correlation_passes):
if (channel_count == 1):
return [EncodingParameters(1, correlation_passes)]
elif (channel_count == 2):
return [EncodingParameters(2, correlation_passes)]
elif ((channel_count == 3) and (channel_mask == 0x7)):
#front left, front right, front center
return [EncodingParameters(2, correlation_passes),
EncodingParameters(1, correlation_passes)]
elif ((channel_count == 4) and (channel_mask == 0x33)):
#front left, front right, back left, back right
return [EncodingParameters(2, correlation_passes),
EncodingParameters(2, correlation_passes)]
elif ((channel_count == 4) and (channel_mask == 0x107)):
#front left, front right, front center, back center
return [EncodingParameters(2, correlation_passes),
EncodingParameters(1, correlation_passes),
EncodingParameters(1, correlation_passes)]
elif ((channel_count == 5) and (channel_mask == 0x37)):
#front left, front right, front center, back left, back right
return [EncodingParameters(2, correlation_passes),
EncodingParameters(1, correlation_passes),
EncodingParameters(2, correlation_passes)]
elif ((channel_count == 6) and (channel_mask == 0x3F)):
#front left, front right, front center, LFE, back left, back right
return [EncodingParameters(2, correlation_passes),
EncodingParameters(1, correlation_passes),
EncodingParameters(1, correlation_passes),
EncodingParameters(2, correlation_passes)]
else:
return [EncodingParameters(1, correlation_passes)
for c in xrange(channel_count)]
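# A small illustration of the mapping above (hedged, based only on the cases
# listed): a standard 5.1 layout (channel_count=6, channel_mask=0x3F) is split
# into four blocks with channel counts [2, 1, 1, 2] -- the front left/right
# pair, front center, LFE, then the back left/right pair.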
def encode_wavpack(filename,
pcmreader,
block_size,
false_stereo=False,
wasted_bits=False,
joint_stereo=False,
correlation_passes=0,
wave_header=None,
wave_footer=None):
pcmreader = BufferedPCMReader(pcmreader)
output_file = open(filename, "wb")
writer = BitstreamWriter(output_file, 1)
context = EncoderContext(pcmreader,
block_parameters(pcmreader.channels,
pcmreader.channel_mask,
correlation_passes),
wave_header,
wave_footer)
block_index = 0
#walk through PCM reader's FrameLists
frame = pcmreader.read(block_size)
while (len(frame) > 0):
context.total_frames += frame.frames
context.md5sum.update(
frame.to_bytes(False, pcmreader.bits_per_sample >= 16))
c = 0
for parameters in context.block_parameters:
if (parameters.channel_count == 1):
channel_data = [list(frame.channel(c))]
else:
channel_data = [list(frame.channel(c)),
list(frame.channel(c + 1))]
first_block = parameters is context.block_parameters[0]
last_block = parameters is context.block_parameters[-1]
context.block_offsets.append(output_file.tell())
write_block(writer, context, channel_data, block_index,
first_block, last_block, parameters)
c += parameters.channel_count
block_index += frame.frames
frame = pcmreader.read(block_size)
#write MD5 sum and optional Wave footer in final block
sub_blocks = BitstreamRecorder(1)
sub_block = BitstreamRecorder(1)
sub_block.reset()
sub_block.write_bytes(context.md5sum.digest())
write_sub_block(sub_blocks, WV_MD5, 1, sub_block)
#write Wave footer in final block, if present
if (context.wave_footer is not None):
sub_block.reset()
sub_block.write_bytes(context.wave_footer)
write_sub_block(sub_blocks, WV_WAVE_FOOTER, 1, sub_block)
write_block_header(
writer,
sub_blocks.bytes(),
0xFFFFFFFF,
0,
pcmreader.bits_per_sample,
1,
0,
0,
0,
1,
1,
0,
pcmreader.sample_rate,
0,
0xFFFFFFFF)
sub_blocks.copy(writer)
#update Wave header's "data" chunk size, if generated
if (context.wave_header is None):
output_file.seek(32 + 2)
if (context.wave_footer is None):
write_wave_header(writer, context.pcmreader,
context.total_frames, 0)
else:
write_wave_header(writer, context.pcmreader,
context.total_frames, len(context.wave_footer))
#go back and populate block headers with total samples
for block_offset in context.block_offsets:
output_file.seek(block_offset + 12, 0)
writer.write(32, block_index)
writer.close()
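# A hedged usage sketch for encode_wavpack(); BufferedPCMReader, BitstreamWriter
# and the WV_* sub-block constants are assumed to be defined elsewhere in this
# module/package, and audiotools.open(...).to_pcm() is assumed to yield a
# suitable PCMReader:
#
#   import audiotools
#   pcm = audiotools.open("input.wav").to_pcm()
#   encode_wavpack("output.wv", pcm, block_size=22050, correlation_passes=5)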
def write_block(writer, context, channels, block_index,
first_block, last_block, parameters):
"""writer is a BitstreamWriter-compatible object
context is an EncoderContext object
channels[c][s] is sample "s" in channel "c"
block_index is an integer of the block's offset in PCM frames
first_block and last_block are flags indicating the block's sequence
parameters is an EncodingParameters object
"""
assert((len(channels) == 1) or (len(channels) == 2))
if ((len(channels) == 1) or (channels[0] == channels[1])):
#1 channel block or equivalent
if (len(channels) == 1):
false_stereo = 0
else:
false_stereo = 1
#calculate maximum magnitude of channel_0
magnitude = max(map(bits, channels[0]))
#determine wasted bits
wasted = min(map(wasted_bps, channels[0]))
if (wasted == INFINITY):
#all samples are 0
wasted = 0
#if wasted bits, remove them from channel_0
if ((wasted > 0) and (wasted != INFINITY)):
shifted = [[s >> wasted for s in channels[0]]]
else:
shifted = [channels[0]]
#calculate CRC of shifted_0
crc = calculate_crc(shifted)
else:
#2 channel block
false_stereo = 0
#calculate maximum magnitude of channel_0/channel_1
magnitude = max(max(map(bits, channels[0])),
max(map(bits, channels[1])))
#determine wasted bits
wasted = min(min(map(wasted_bps, channels[0])),
min(map(wasted_bps, channels[1])))
if (wasted == INFINITY):
#all samples are 0
wasted = 0
#if wasted bits, remove them from channel_0/channel_1
if (wasted > 0):
shifted = [[s >> wasted for s in channels[0]],
[s >> wasted for s in channels[1]]]
else:
shifted = channels
#calculate CRC of shifted_0/shifted_1
crc = calculate_crc(shifted)
#joint stereo conversion of shifted_0/shifted_1 to mid/side channels
mid_side = joint_stereo(shifted[0], shifted[1])
sub_blocks = BitstreamRecorder(1)
sub_block = BitstreamRecorder(1)
#if first block in file, write Wave header
if (not context.first_block_written):
sub_block.reset()
if (context.wave_header is None):
if (context.wave_footer is None):
write_wave_header(sub_block, context.pcmreader, 0, 0)
else:
write_wave_header(sub_block, context.pcmreader, 0,
len(context.wave_footer))
else:
sub_block.write_bytes(context.wave_header)
write_sub_block(sub_blocks, WV_WAVE_HEADER, 1, sub_block)
context.first_block_written = True
#if correlation passes, write three sub blocks of pass data
if (parameters.correlation_passes > 0):
sub_block.reset()
write_correlation_terms(
sub_block,
[p.term for p in
parameters.correlation_parameters(false_stereo)],
[p.delta for p in
parameters.correlation_parameters(false_stereo)])
write_sub_block(sub_blocks, WV_TERMS, 0, sub_block)
sub_block.reset()
write_correlation_weights(
sub_block,
[p.weights for p in
parameters.correlation_parameters(false_stereo)])
write_sub_block(sub_blocks, WV_WEIGHTS, 0, sub_block)
sub_block.reset()
write_correlation_samples(
sub_block,
[p.term for p in
parameters.correlation_parameters(false_stereo)],
[p.samples for p in
parameters.correlation_parameters(false_stereo)],
2 if ((len(channels) == 2) and (not false_stereo)) else 1)
write_sub_block(sub_blocks, WV_SAMPLES, 0, sub_block)
#if wasted bits, write extended integers sub block
if (wasted > 0):
sub_block.reset()
write_extended_integers(sub_block, 0, wasted, 0, 0)
write_sub_block(sub_blocks, WV_INT32_INFO, 0, sub_block)
#if channel count > 2, write channel info sub block
if (context.pcmreader.channels > 2):
sub_block.reset()
sub_block.write(8, context.pcmreader.channels)
sub_block.write(32, context.pcmreader.channel_mask)
write_sub_block(sub_blocks, WV_CHANNEL_INFO, 0, sub_block)
#if nonstandard sample rate, write sample rate sub block
if (context.pcmreader.sample_rate not in
(6000, 8000, 9600, 11025, 12000, 16000, 22050, 24000,
32000, 44100, 48000, 64000, 88200, 96000, 192000)):
sub_block.reset()
sub_block.write(32, context.pcmreader.sample_rate)
write_sub_block(sub_blocks, WV_SAMPLE_RATE, 1, sub_block)
if ((len(channels) == 1) or (false_stereo)):
#1 channel block
#correlate shifted_0 with terms/deltas/weights/samples
if (parameters.correlation_passes > 0):
assert(len(shifted) == 1)
correlated = correlate_channels(
shifted,
parameters.correlation_parameters(false_stereo),
1)
else:
correlated = shifted
else:
#2 channel block
#correlate shifted_0/shifted_1 with terms/deltas/weights/samples
if (parameters.correlation_passes > 0):
assert(len(mid_side) == 2)
correlated = correlate_channels(
mid_side,
parameters.correlation_parameters(false_stereo),
2)
else:
correlated = mid_side
#write entropy variables sub block
sub_block.reset()
write_entropy_variables(sub_block, correlated,
parameters.entropy_variables)
write_sub_block(sub_blocks, WV_ENTROPY, 0, sub_block)
#write bitstream sub block
sub_block.reset()
write_bitstream(sub_block, correlated,
parameters.entropy_variables)
write_sub_block(sub_blocks, WV_BITSTREAM, 0, sub_block)
#write block header with size of all sub blocks
write_block_header(
writer,
sub_blocks.bytes(),
block_index,
len(channels[0]),
context.pcmreader.bits_per_sample,
len(channels),
(len(channels) == 2) and (false_stereo == 0),
len(set([-1, -2, -3]) &
set([p.term for p in
parameters.correlation_parameters(false_stereo)])) > 0,
wasted,
first_block,
last_block,
magnitude,
context.pcmreader.sample_rate,
false_stereo,
crc)
#write sub block data to stream
sub_blocks.copy(writer)
#round-trip entropy variables
parameters.entropy_variables = [
[wv_exp2(wv_log2(p)) for p in parameters.entropy_variables[0]],
[wv_exp2(wv_log2(p)) for p in parameters.entropy_variables[1]]]
def bits(sample):
sample = abs(sample)
total = 0
while (sample > 0):
total += 1
sample >>= 1
return total
INFINITY = 2 ** 32
def wasted_bps(sample):
if (sample == 0):
return INFINITY
else:
total = 0
while ((sample % 2) == 0):
total += 1
sample /= 2
return total
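# Quick examples of the helpers above: bits(5) == 3 (binary 101 needs three
# bits) and wasted_bps(8) == 3 (three trailing zero bits); an all-zero sample
# reports INFINITY so callers can treat "no information" specially.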
def calculate_crc(samples):
crc = 0xFFFFFFFF
for frame in zip(*samples):
for s in frame:
crc = 3 * crc + s
if (crc >= 0):
return crc % 0x100000000
else:
return (2 ** 32 - (-crc)) % 0x100000000
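# calculate_crc() folds every sample of every channel (interleaved per PCM
# frame) into a running value via crc = 3 * crc + sample, then reduces the
# final result modulo 2**32.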
def joint_stereo(left, right):
from itertools import izip
assert(len(left) == len(right))
mid = []
side = []
for (l, r) in izip(left, right):
mid.append(l - r)
side.append((l + r) / 2)
return [mid, side]
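# Worked example of the transform above (Python 2 integer division):
# joint_stereo([5], [1]) == [[4], [3]] -- the first list holds the left/right
# difference and the second the truncated average.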
def write_block_header(writer,
sub_blocks_size,
block_index,
block_samples,
bits_per_sample,
channel_count,
joint_stereo,
cross_channel_decorrelation,
wasted_bps,
initial_block_in_sequence,
final_block_in_sequence,
maximum_magnitude,
sample_rate,
false_stereo,
CRC):
writer.write_bytes("wvpk") # block ID
writer.write(32, sub_blocks_size + 24) # block size
writer.write(16, 0x0410) # version
writer.write(8, 0) # track number
writer.write(8, 0) # index number
writer.write(32, 0xFFFFFFFF) # total samples
writer.write(32, block_index)
writer.write(32, block_samples)
writer.write(2, (bits_per_sample / 8) - 1)
writer.write(1, 2 - channel_count)
writer.write(1, 0) # hybrid mode
writer.write(1, joint_stereo)
writer.write(1, cross_channel_decorrelation)
writer.write(1, 0) # hybrid noise shaping
writer.write(1, 0) # floating point data
if (wasted_bps > 0): # extended size integers
writer.write(1, 1)
else:
writer.write(1, 0)
writer.write(1, 0) # hybrid controls bitrate
writer.write(1, 0) # hybrid noise balanced
writer.write(1, initial_block_in_sequence)
writer.write(1, final_block_in_sequence)
writer.write(5, 0) # left shift data
writer.write(5, maximum_magnitude)
writer.write(4, {6000: 0,
8000: 1,
9600: 2,
11025: 3,
12000: 4,
16000: 5,
22050: 6,
24000: 7,
32000: 8,
44100: 9,
48000: 10,
64000: 11,
88200: 12,
96000: 13,
192000: 14}.get(sample_rate, 15))
writer.write(2, 0) # reserved
writer.write(1, 0) # use IIR
writer.write(1, false_stereo)
writer.write(1, 0) # reserved
writer.write(32, CRC)
def write_sub_block(writer, function, nondecoder_data, recorder):
recorder.byte_align()
actual_size_1_less = recorder.bytes() % 2
writer.build("5u 1u 1u",
(function,
nondecoder_data,
actual_size_1_less))
if (recorder.bytes() > (255 * 2)):
writer.write(1, 1)
writer.write(24, (recorder.bytes() / 2) + actual_size_1_less)
else:
writer.write(1, 0)
writer.write(8, (recorder.bytes() / 2) + actual_size_1_less)
recorder.copy(writer)
if (actual_size_1_less):
writer.write(8, 0)
def write_correlation_terms(writer, correlation_terms, correlation_deltas):
"""correlation_terms[p] and correlation_deltas[p]
are ints for each correlation pass, in descending order
writes the terms and deltas to sub block data in the proper order/format"""
assert(len(correlation_terms) == len(correlation_deltas))
for (term, delta) in zip(correlation_terms, correlation_deltas):
writer.write(5, term + 5)
writer.write(3, delta)
def write_correlation_weights(writer, correlation_weights):
"""correlation_weights[p][c]
are lists of correlation weight ints for each pass and channel
in descending order
writes the weights to sub block data in the proper order/format"""
for weights in correlation_weights:
for weight in weights:
writer.write(8, store_weight(weight))
def store_weight(w):
w = min(max(w, -1024), 1024)
if (w > 0):
return (w - ((w + 2 ** 6) / 2 ** 7) + 4) / (2 ** 3)
elif (w == 0):
return 0
elif (w < 0):
return (w + 4) / (2 ** 3)
def restore_weight(v):
if (v > 0):
return ((v * 2 ** 3) + ((v * 2 ** 3 + 2 ** 6) / 2 ** 7))
elif(v == 0):
return 0
else:
return v * (2 ** 3)
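# store_weight()/restore_weight() pack a correlation weight into 8 bits and
# back; the round trip is lossy, e.g. with these Python 2 integer divisions
# store_weight(512) == 64 and restore_weight(64) == 516.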
def write_correlation_samples(writer, correlation_terms, correlation_samples,
channel_count):
"""correlation_terms[p] are correlation term ints for each pass
correlation_samples[p][c][s] are lists of correlation sample ints
for each pass and channel in descending order
writes the samples to sub block data in the proper order/format"""
assert(len(correlation_terms) == len(correlation_samples))
if (channel_count == 2):
for (term, samples) in zip(correlation_terms, correlation_samples):
if ((17 <= term) and (term <= 18)):
writer.write_signed(16, wv_log2(samples[0][0]))
writer.write_signed(16, wv_log2(samples[0][1]))
writer.write_signed(16, wv_log2(samples[1][0]))
writer.write_signed(16, wv_log2(samples[1][1]))
elif ((1 <= term) and (term <= 8)):
for s in xrange(term):
writer.write_signed(16, wv_log2(samples[0][s]))
writer.write_signed(16, wv_log2(samples[1][s]))
elif ((-3 <= term) and (term <= -1)):
writer.write_signed(16, wv_log2(samples[0][0]))
writer.write_signed(16, wv_log2(samples[1][0]))
else:
raise ValueError("invalid correlation term")
elif (channel_count == 1):
for (term, samples) in zip(correlation_terms, correlation_samples):
if ((17 <= term) and (term <= 18)):
writer.write_signed(16, wv_log2(samples[0][0]))
writer.write_signed(16, wv_log2(samples[0][1]))
elif ((1 <= term) and (term <= 8)):
for s in xrange(term):
writer.write_signed(16, wv_log2(samples[0][s]))
else:
raise ValueError("invalid correlation term")
else:
print "invalid channel count"
def wv_log2(value):
from math import log
a = abs(value) + (abs(value) / 2 ** 9)
if (a != 0):
c = int(log(a) / log(2)) + 1
else:
c = 0
if (value > 0):
if ((0 <= a) and (a < 256)):
return (c * 2 ** 8) + WLOG[(a * 2 ** (9 - c)) % 256]
else:
return (c * 2 ** 8) + WLOG[(a / 2 ** (c - 9)) % 256]
else:
if ((0 <= a) and (a < 256)):
return -((c * 2 ** 8) + WLOG[(a * 2 ** (9 - c)) % 256])
else:
return -((c * 2 ** 8) + WLOG[(a / 2 ** (c - 9)) % 256])
WLOG = [0x00, 0x01, 0x03, 0x04, 0x06, 0x07, 0x09, 0x0a,
0x0b, 0x0d, 0x0e, 0x10, 0x11, 0x12, 0x14, 0x15,
0x16, 0x18, 0x19, 0x1a, 0x1c, 0x1d, 0x1e, 0x20,
0x21, 0x22, 0x24, 0x25, 0x26, 0x28, 0x29, 0x2a,
0x2c, 0x2d, 0x2e, 0x2f, 0x31, 0x32, 0x33, 0x34,
0x36, 0x37, 0x38, 0x39, 0x3b, 0x3c, 0x3d, 0x3e,
0x3f, 0x41, 0x42, 0x43, 0x44, 0x45, 0x47, 0x48,
0x49, 0x4a, 0x4b, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
0x52, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a,
0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63,
0x64, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c,
0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x74, 0x75,
0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d,
0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85,
0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d,
0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95,
0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9b, 0x9c,
0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4,
0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xa9, 0xaa, 0xab,
0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb2,
0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xb9,
0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc0,
0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc6, 0xc7,
0xc8, 0xc9, 0xca, 0xcb, 0xcb, 0xcc, 0xcd, 0xce,
0xcf, 0xd0, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd4,
0xd5, 0xd6, 0xd7, 0xd8, 0xd8, 0xd9, 0xda, 0xdb,
0xdc, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe0, 0xe1,
0xe2, 0xe3, 0xe4, 0xe4, 0xe5, 0xe6, 0xe7, 0xe7,
0xe8, 0xe9, 0xea, 0xea, 0xeb, 0xec, 0xed, 0xee,
0xee, 0xef, 0xf0, 0xf1, 0xf1, 0xf2, 0xf3, 0xf4,
0xf4, 0xf5, 0xf6, 0xf7, 0xf7, 0xf8, 0xf9, 0xf9,
0xfa, 0xfb, 0xfc, 0xfc, 0xfd, 0xfe, 0xff, 0xff]
def wv_exp2(value):
if ((-32768 <= value) and (value < -2304)):
return -(WEXP[-value & 0xFF] << ((-value >> 8) - 9))
elif ((-2304 <= value) and (value < 0)):
return -(WEXP[-value & 0xFF] >> (9 - (-value >> 8)))
elif ((0 <= value) and (value <= 2304)):
return WEXP[value & 0xFF] >> (9 - (value >> 8))
elif ((2304 < value) and (value <= 32767)):
return WEXP[value & 0xFF] << ((value >> 8) - 9)
WEXP = [0x100, 0x101, 0x101, 0x102, 0x103, 0x103, 0x104, 0x105,
0x106, 0x106, 0x107, 0x108, 0x108, 0x109, 0x10a, 0x10b,
0x10b, 0x10c, 0x10d, 0x10e, 0x10e, 0x10f, 0x110, 0x110,
0x111, 0x112, 0x113, 0x113, 0x114, 0x115, 0x116, 0x116,
0x117, 0x118, 0x119, 0x119, 0x11a, 0x11b, 0x11c, 0x11d,
0x11d, 0x11e, 0x11f, 0x120, 0x120, 0x121, 0x122, 0x123,
0x124, 0x124, 0x125, 0x126, 0x127, 0x128, 0x128, 0x129,
0x12a, 0x12b, 0x12c, 0x12c, 0x12d, 0x12e, 0x12f, 0x130,
0x130, 0x131, 0x132, 0x133, 0x134, 0x135, 0x135, 0x136,
0x137, 0x138, 0x139, 0x13a, 0x13a, 0x13b, 0x13c, 0x13d,
0x13e, 0x13f, 0x140, 0x141, 0x141, 0x142, 0x143, 0x144,
0x145, 0x146, 0x147, 0x148, 0x148, 0x149, 0x14a, 0x14b,
0x14c, 0x14d, 0x14e, 0x14f, 0x150, 0x151, 0x151, 0x152,
0x153, 0x154, 0x155, 0x156, 0x157, 0x158, 0x159, 0x15a,
0x15b, 0x15c, 0x15d, 0x15e, 0x15e, 0x15f, 0x160, 0x161,
0x162, 0x163, 0x164, 0x165, 0x166, 0x167, 0x168, 0x169,
0x16a, 0x16b, 0x16c, 0x16d, 0x16e, 0x16f, 0x170, 0x171,
0x172, 0x173, 0x174, 0x175, 0x176, 0x177, 0x178, 0x179,
0x17a, 0x17b, 0x17c, 0x17d, 0x17e, 0x17f, 0x180, 0x181,
0x182, 0x183, 0x184, 0x185, 0x187, 0x188, 0x189, 0x18a,
0x18b, 0x18c, 0x18d, 0x18e, 0x18f, 0x190, 0x191, 0x192,
0x193, 0x195, 0x196, 0x197, 0x198, 0x199, 0x19a, 0x19b,
0x19c, 0x19d, 0x19f, 0x1a0, 0x1a1, 0x1a2, 0x1a3, 0x1a4,
0x1a5, 0x1a6, 0x1a8, 0x1a9, 0x1aa, 0x1ab, 0x1ac, 0x1ad,
0x1af, 0x1b0, 0x1b1, 0x1b2, 0x1b3, 0x1b4, 0x1b6, 0x1b7,
0x1b8, 0x1b9, 0x1ba, 0x1bc, 0x1bd, 0x1be, 0x1bf, 0x1c0,
0x1c2, 0x1c3, 0x1c4, 0x1c5, 0x1c6, 0x1c8, 0x1c9, 0x1ca,
0x1cb, 0x1cd, 0x1ce, 0x1cf, 0x1d0, 0x1d2, 0x1d3, 0x1d4,
0x1d6, 0x1d7, 0x1d8, 0x1d9, 0x1db, 0x1dc, 0x1dd, 0x1de,
0x1e0, 0x1e1, 0x1e2, 0x1e4, 0x1e5, 0x1e6, 0x1e8, 0x1e9,
0x1ea, 0x1ec, 0x1ed, 0x1ee, 0x1f0, 0x1f1, 0x1f2, 0x1f4,
0x1f5, 0x1f6, 0x1f8, 0x1f9, 0x1fa, 0x1fc, 0x1fd, 0x1ff]
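# wv_log2()/wv_exp2() form an approximate fixed-point log2/exp2 pair (exponent
# in the upper bits, an 8-bit table-driven mantissa in the lower bits);
# write_block() uses wv_exp2(wv_log2(x)) to "round-trip" its entropy variables,
# so the result is close to, but not necessarily equal to, the original x.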
def correlate_channels(uncorrelated_samples,
correlation_parameters,
channel_count):
"""uncorrelated_samples[c][s] is sample 's' for channel 'c'
correlation_parameters is a list of CorrelationParameters objects
which are updated by each pass
returns correlated_samples[c][s] with sample 's' for channel 'c'
"""
if (channel_count == 1):
latest_pass = uncorrelated_samples[0]
for p in correlation_parameters:
(latest_pass,
weight,
samples) = correlation_pass_1ch(latest_pass,
p.term,
p.delta,
p.weights[0],
p.samples[0])
p.update_weights([weight])
p.update_samples([samples])
return [latest_pass]
else:
latest_pass = uncorrelated_samples
for p in correlation_parameters:
(latest_pass,
weights,
samples) = correlation_pass_2ch(latest_pass,
p.term,
p.delta,
p.weights,
p.samples)
p.update_weights(weights)
p.update_samples(samples)
return latest_pass
def correlation_pass_1ch(uncorrelated_samples,
term, delta, weight, correlation_samples):
"""given a list of uncorrelated_samples[s]
term, delta and weight ints
and a list of correlation_samples[s] ints
returns a (correlated[s], weight, samples[s]) tuple
containing correlated samples and updated weight/samples"""
if (term == 18):
assert(len(correlation_samples) == 2)
uncorrelated = ([correlation_samples[1],
correlation_samples[0]] +
uncorrelated_samples)
correlated = []
for i in xrange(2, len(uncorrelated)):
temp = (3 * uncorrelated[i - 1] - uncorrelated[i - 2]) / 2
correlated.append(uncorrelated[i] - apply_weight(weight, temp))
weight += update_weight(temp, correlated[i - 2], delta)
return (correlated, weight, list(reversed(correlated[-2:])))
elif (term == 17):
assert(len(correlation_samples) == 2)
uncorrelated = ([correlation_samples[1],
correlation_samples[0]] +
uncorrelated_samples)
correlated = []
for i in xrange(2, len(uncorrelated)):
temp = 2 * uncorrelated[i - 1] - uncorrelated[i - 2]
correlated.append(uncorrelated[i] - apply_weight(weight, temp))
weight += update_weight(temp, correlated[i - 2], delta)
return (correlated, weight, list(reversed(correlated[-2:])))
elif ((1 <= term) and (term <= 8)):
assert(len(correlation_samples) == term)
uncorrelated = correlation_samples[:] + uncorrelated_samples
correlated = []
for i in xrange(term, len(uncorrelated)):
correlated.append(uncorrelated[i] -
apply_weight(weight, uncorrelated[i - term]))
weight += update_weight(uncorrelated[i - term],
correlated[i - term], delta)
return (correlated, weight, correlated[-term:])
else:
raise ValueError("unsupported term")
def correlation_pass_2ch(uncorrelated_samples,
term, delta, weights, correlation_samples):
"""given a list of uncorrelated_samples[c][s] lists
term and delta ints
a list of weight[c] ints
and a list of correlation_samples[c][s] lists
returns (correlated[c][s], weights[c], samples[c][s]) tuple
containing correlated samples and updated weights/samples"""
assert(len(uncorrelated_samples) == 2)
assert(len(uncorrelated_samples[0]) == len(uncorrelated_samples[1]))
assert(len(weights) == 2)
if (((17 <= term) and (term <= 18)) or ((1 <= term) and (term <= 8))):
(uncorrelated1,
weight1,
samples1) = correlation_pass_1ch(uncorrelated_samples[0],
term, delta, weights[0],
correlation_samples[0])
(uncorrelated2,
weight2,
samples2) = correlation_pass_1ch(uncorrelated_samples[1],
term, delta, weights[1],
correlation_samples[1])
return ([uncorrelated1, uncorrelated2],
[weight1, weight2],
[samples1, samples2])
elif ((-3 <= term) and (term <= -1)):
assert(len(correlation_samples[0]) == 1)
assert(len(correlation_samples[1]) == 1)
uncorrelated = (correlation_samples[1] + uncorrelated_samples[0],
correlation_samples[0] + uncorrelated_samples[1])
correlated = [[], []]
weights = list(weights)
if (term == -1):
for i in xrange(1, len(uncorrelated[0])):
correlated[0].append(uncorrelated[0][i] -
apply_weight(weights[0],
uncorrelated[1][i - 1]))
correlated[1].append(uncorrelated[1][i] -
apply_weight(weights[1],
uncorrelated[0][i]))
weights[0] += update_weight(uncorrelated[1][i - 1],
correlated[0][-1],
delta)
weights[1] += update_weight(uncorrelated[0][i],
correlated[1][-1],
delta)
weights[0] = max(min(weights[0], 1024), -1024)
weights[1] = max(min(weights[1], 1024), -1024)
elif (term == -2):
for i in xrange(1, len(uncorrelated[0])):
correlated[0].append(uncorrelated[0][i] -
apply_weight(weights[0],
uncorrelated[1][i]))
correlated[1].append(uncorrelated[1][i] -
apply_weight(weights[1],
uncorrelated[0][i - 1]))
weights[0] += update_weight(uncorrelated[1][i],
correlated[0][-1],
delta)
weights[1] += update_weight(uncorrelated[0][i - 1],
correlated[1][-1],
delta)
weights[0] = max(min(weights[0], 1024), -1024)
weights[1] = max(min(weights[1], 1024), -1024)
elif (term == -3):
for i in xrange(1, len(uncorrelated[0])):
correlated[0].append(uncorrelated[0][i] -
apply_weight(weights[0],
uncorrelated[1][i - 1]))
correlated[1].append(uncorrelated[1][i] -
apply_weight(weights[1],
uncorrelated[0][i - 1]))
weights[0] += update_weight(uncorrelated[1][i - 1],
correlated[0][-1],
delta)
weights[1] += update_weight(uncorrelated[0][i - 1],
correlated[1][-1],
delta)
weights[0] = max(min(weights[0], 1024), -1024)
weights[1] = max(min(weights[1], 1024), -1024)
#FIXME - use proper end-of-stream correlation samples
return (correlated, weights, correlation_samples)
else:
raise ValueError("unsupported term")
def apply_weight(weight, sample):
return ((weight * sample) + 512) >> 10
def update_weight(source, result, delta):
if ((source == 0) or (result == 0)):
return 0
elif ((source ^ result) >= 0):
return delta
else:
return -delta
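# apply_weight() scales a sample by weight/1024 with rounding, e.g.
# apply_weight(1024, 100) == ((1024 * 100) + 512) >> 10 == 100, so a weight of
# 1024 acts as (roughly) unity gain; update_weight() nudges the weight by
# +/-delta depending on whether source and result share the same sign.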
def write_entropy_variables(writer, channels, entropies):
if (len(channels) == 2):
for e in entropies[0]:
writer.write(16, wv_log2(e))
for e in entropies[1]:
writer.write(16, wv_log2(e))
else:
for e in entropies[0]:
writer.write(16, wv_log2(e))
class Residual:
def __init__(self, zeroes, m, offset, add, sign):
self.zeroes = zeroes
self.m = m
self.offset = offset
self.add = add
self.sign = sign
def __repr__(self):
return "Residual(%s, %s, %s, %s, %s)" % \
(repr(self.zeroes),
repr(self.m),
repr(self.offset),
repr(self.add),
repr(self.sign))
@classmethod
def encode(cls, residual, entropy):
"""given a residual integer and list of three entropies
returns a Residual object and updates the entropies"""
#figure out unsigned from signed
if (residual >= 0):
unsigned = residual
sign = 0
else:
unsigned = -residual - 1
sign = 1
medians = [e / 2 ** 4 + 1 for e in entropy]
#figure out m, offset, add and update channel's entropies
if (unsigned < medians[0]):
m = 0
offset = unsigned
add = medians[0] - 1
entropy[0] -= ((entropy[0] + 126) / 128) * 2
elif ((unsigned - medians[0]) < medians[1]):
m = 1
offset = unsigned - medians[0]
add = medians[1] - 1
entropy[0] += ((entropy[0] + 128) / 128) * 5
entropy[1] -= ((entropy[1] + 62) / 64) * 2
elif ((unsigned - (medians[0] + medians[1])) < medians[2]):
m = 2
offset = unsigned - (medians[0] + medians[1])
add = medians[2] - 1
entropy[0] += ((entropy[0] + 128) / 128) * 5
entropy[1] += ((entropy[1] + 64) / 64) * 5
entropy[2] -= ((entropy[2] + 30) / 32) * 2
else:
m = (((unsigned - (medians[0] + medians[1])) / medians[2]) + 2)
offset = (unsigned -
(medians[0] + medians[1] + ((m - 2) * medians[2])))
add = medians[2] - 1
entropy[0] += ((entropy[0] + 128) / 128) * 5
entropy[1] += ((entropy[1] + 64) / 64) * 5
entropy[2] += ((entropy[2] + 32) / 32) * 5
#zeroes will be populated later
return cls(zeroes=None, m=m, offset=offset, add=add, sign=sign)
def flush(self, writer, u_i_2, m_i):
"""given a BitstreamWriter, u_{i - 2} and m_{i},
encodes residual_{i - 1}'s values to disk"""
from math import log
if (self.zeroes is not None):
write_egc(writer, self.zeroes)
if (self.m is not None):
#calculate unary_{i - 1} based on m_{i}
if ((self.m > 0) and (m_i > 0)):
#positive m to positive m
if ((u_i_2 is None) or (u_i_2 % 2 == 0)):
u_i_1 = (self.m * 2) + 1
else:
#passing 1 from previous u
u_i_1 = (self.m * 2) - 1
elif ((self.m == 0) and (m_i > 0)):
#zero m to positive m
if ((u_i_2 is None) or (u_i_2 % 2 == 1)):
u_i_1 = 1
else:
#passing 0 from previous u
u_i_1 = None
elif ((self.m > 0) and (m_i == 0)):
#positive m to zero m
if ((u_i_2 is None) or (u_i_2 % 2 == 0)):
u_i_1 = self.m * 2
else:
#passing 1 from previous u
u_i_1 = (self.m - 1) * 2
elif ((self.m == 0) and (m_i == 0)):
#zero m to zero m
if ((u_i_2 is None) or (u_i_2 % 2 == 1)):
u_i_1 = 0
else:
#passing 0 from previous u
u_i_1 = None
else:
raise ValueError("invalid m")
#write residual_{i - 1} to disk based on unary_{i - 1}
if (u_i_1 is not None):
if (u_i_1 < 16):
writer.unary(0, u_i_1)
else:
writer.unary(0, 16)
write_egc(writer, u_i_1 - 16)
if (self.add > 0):
p = int(log(self.add) / log(2))
e = 2 ** (p + 1) - self.add - 1
if (self.offset < e):
writer.write(p, self.offset)
else:
writer.write(p, (self.offset + e) / 2)
writer.write(1, (self.offset + e) % 2)
writer.write(1, self.sign)
else:
u_i_1 = None
return u_i_1
def write_bitstream(writer, channels, entropies):
#residual_{-1}
r_i_1 = Residual(zeroes=None, m=None, offset=None, add=None, sign=None)
#u_{-2}
u_i_2 = None
i = 0
while (i < (len(channels) * len(channels[0]))):
r = channels[i % len(channels)][i / len(channels)]
if (((entropies[0][0] < 2) and (entropies[1][0] < 2) and
unary_undefined(u_i_2, r_i_1.m))):
if ((r_i_1.zeroes is not None) and (r_i_1.m is None)):
#in a block of zeroes
if (r == 0):
#continue block of zeroes
r_i_1.zeroes += 1
else:
#end block of zeroes
r_i = Residual.encode(r, entropies[i % len(channels)])
r_i.zeroes = r_i_1.zeroes
r_i_1 = r_i
else:
#start a new block of zeroes
if (r == 0):
r_i = Residual(zeroes=1,
m=None, offset=None, add=None, sign=None)
u_i_2 = r_i_1.flush(writer, u_i_2, 0)
entropies[0][0] = entropies[0][1] = entropies[0][2] = 0
entropies[1][0] = entropies[1][1] = entropies[1][2] = 0
r_i_1 = r_i
else:
#false alarm block of zeroes
r_i = Residual.encode(r, entropies[i % len(channels)])
r_i.zeroes = 0
u_i_2 = r_i_1.flush(writer, u_i_2, r_i.m)
r_i_1 = r_i
else:
#encode regular residual
r_i = Residual.encode(r, entropies[i % len(channels)])
r_i.zeroes = None
u_i_2 = r_i_1.flush(writer, u_i_2, r_i.m)
r_i_1 = r_i
i += 1
#flush final residual
u_i_2 = r_i_1.flush(writer, u_i_2, 0)
def unary_undefined(prev_u, m):
"""given u_{i - 1} and m_{i},
returns True if u_{i} is undefined,
False if defined"""
if (m is None):
return True
if ((m == 0) and (prev_u is not None) and (prev_u % 2 == 0)):
return True
else:
return False
def write_egc(writer, value):
from math import log
assert(value >= 0)
if (value > 1):
t = int(log(value) / log(2)) + 1
writer.unary(0, t)
writer.write(t - 1, value % (2 ** (t - 1)))
else:
writer.unary(0, value)
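# Hedged illustration of write_egc(): for value 5, t = int(log2(5)) + 1 == 3,
# so a unary 3 is written followed by the low 2 bits of 5 (binary 01); the
# values 0 and 1 are written as a bare unary code.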
def write_residual(writer, u, offset, add, sign):
"""given u_{i}, offset_{i}, add_{i} and sign_{i}
writes residual data to the given BitstreamWriter
    u_{i} may be None, indicating an undefined unary value"""
def write_extended_integers(writer,
sent_bits, zero_bits, one_bits, duplicate_bits):
writer.build("8u 8u 8u 8u",
(sent_bits, zero_bits, one_bits, duplicate_bits))
if (__name__ == '__main__'):
write_bitstream(None,
[[1, 2, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -2, -3, -2, -1]],
[[0, 0, 0], [0, 0, 0]])
|
R-a-dio/python-audio-tools
|
audiotools/py_encoders/wavpack.py
|
Python
|
gpl-2.0
| 53,576
|
[
"Brian"
] |
623bd9261313a1fff65ac789c7a76e96814b387dc230517240af9f38bccad2d0
|
#!/usr/bin/env python
########################################################################
# $Header: $
########################################################################
"""
Remove the given file or a list of files from the File Catalog
Example:
$ dirac-dms-remove-catalog-files /formation/user/v/vhamar/1/1134/StdOut
Successfully removed 1 catalog files.
"""
from DIRAC.Core.Base.Script import Script
from DIRAC import exit as dexit
from DIRAC import gLogger
@Script()
def main():
# Registering arguments will automatically add their description to the help menu
Script.registerArgument(("LocalFile: Path to local file containing LFNs", "LFN: Logical File Names"))
Script.parseCommandLine()
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
allowUsers = Operations().getValue("DataManagement/AllowUserReplicaManagement", False)
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
res = getProxyInfo()
if not res["OK"]:
gLogger.fatal("Can't get proxy info", res["Message"])
dexit(1)
properties = res["Value"].get("groupProperties", [])
if not allowUsers:
if "FileCatalogManagement" not in properties:
gLogger.error("You need to use a proxy from a group with FileCatalogManagement")
dexit(5)
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
fc = FileCatalog()
import os
    # parseCommandLine shows help when mandatory arguments are missing or an argument is invalid
args = Script.getPositionalArgs()
inputFileName = args[0]
if os.path.exists(inputFileName):
inputFile = open(inputFileName, "r")
string = inputFile.read()
lfns = [lfn.strip() for lfn in string.splitlines()]
inputFile.close()
else:
lfns = [inputFileName]
res = fc.removeFile(lfns)
if not res["OK"]:
print("Error:", res["Message"])
dexit(1)
for lfn in sorted(res["Value"]["Failed"].keys()):
message = res["Value"]["Failed"][lfn]
print("Error: failed to remove %s: %s" % (lfn, message))
print("Successfully removed %d catalog files." % (len(res["Value"]["Successful"])))
if __name__ == "__main__":
main()
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/scripts/dirac_dms_remove_catalog_files.py
|
Python
|
gpl-3.0
| 2,256
|
[
"DIRAC"
] |
2284b905873232c4d59b59d22d352651c825f13f6d3889c41343306bc2e8402d
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2015 Diamond Light Source <stuart.fisher@diamond.ac.uk>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implementation of Jon Diprose's ImageUploader in Python for
# formulatrix plate imagers, takes outputted xml / image files and
# puts them in the correct location, and adds an entry to SynchWeb
import json
import time
import glob
import re
import os
import sys
import atexit
import signal
import errno
import subprocess
import logging
import logging.handlers
import MySQLdb
from PIL import Image
from shutil import copyfile
import xml.etree.ElementTree as ET
import shutil
import getopt
class MySQL:
def __init__(self, user, pw, db, host='127.0.0.1', port=3306):
self._conn = MySQLdb.connect(host=host, user=user, passwd=pw, db=db, port=port)
self._conn.autocommit(1)
self._conn.ping(True)
self._cur = self._conn.cursor(MySQLdb.cursors.DictCursor)
def __del__(self):
if self._cur is not None:
self._cur.close()
if self._conn is not None:
self._conn.close()
def pq(self, query, args=[]):
res = self._cur.execute(query, args)
rows = []
for r in self._cur:
rows.append(r)
return rows if rows else []
def id(self):
return self._cur.connection.insert_id()
class FormulatrixUploader:
_running = True
def __init__(self, db=None, config=None):
self.db = db
self.config = config
for d in ['processed', 'nosample']:
if not os.path.exists(config['holding_dir']+'/'+d):
os.mkdir(config['holding_dir']+'/'+d)
def _move_dir(self, src_dir, target_dir):
"""This will overwrite any existing files with the same names.
Make sure files are completed (written, closed) before moving them."""
logging.getLogger().debug("trying to glob.glob('%s/*')" % src_dir)
files = glob.glob("%s/*" % src_dir)
for f in files:
st = os.stat(f)
if time.time() - st.st_mtime > 10 and st.st_size > 0:
new_f = os.path.join(target_dir, os.path.basename(f))
logging.getLogger().debug('copy: %s to %s' % (f, new_f))
try:
shutil.copyfile(f, new_f)
try:
os.unlink(f)
except IOError as e:
logging.getLogger().error('Error deleting image file %s' % f)
except IOError as e:
logging.getLogger().error('Error copying image file %s to %s' % (f, new_f))
else:
                logging.getLogger().debug('Not moving file %s yet as there may still be a handle on it' % f)
# Remove the src_dir if empty
self._rmdir(src_dir)
def _rmdir(self, dir):
"""rmdir the dir (only works if it's empty)"""
try:
os.rmdir(dir)
except OSError as e:
pass
def _get_most_recent_container_dirs(self, dirs):
"""Generate a dict of all containers with their most recent z-slice directories (dates)"""
containers = dict()
for dir in dirs:
dir_containers = glob.glob(dir+"/*/")
for dir_container in dir_containers:
barcode = os.path.basename(os.path.abspath(dir_container))
containers[barcode] = dir
return containers
def _get_visit_dir(self, container):
visit = container['visit']
proposal = visit[ : visit.index('-')]
new_root = '{root}/{proposal}/{visit}'.format(root=self.config['upload_dir'], proposal=proposal, visit=visit)
old_root = '{root}/{year}/{visit}'.format(root=self.config['upload_dir_old'], year=container['year'], visit=visit)
the_root = None
if os.path.exists(new_root):
the_root = new_root
elif os.path.exists(old_root):
the_root = old_root
else:
            logging.getLogger().error("Visit directory for visit doesn't exist, tried %s and %s" % (new_root, old_root))
return None
return the_root
def _make_dirs(self, path):
if not os.path.exists(path):
try:
os.makedirs(path)
if config['web_user']:
subprocess.call(['/usr/bin/setfacl', '-R', '-m', 'u:'+config['web_user']+':rwx', path]);
except OSError as exc:
                if exc.errno == errno.EEXIST and os.path.isdir(path):
                    pass
                elif exc.errno == errno.EACCES:
                    logging.getLogger().error("%s - %s" % (exc.strerror, path))
return False
else:
raise
return True
def handle_zslice_images(self):
"""Move the z-slice images from the configured 'archive_dir' to their target_dir which is a folder
named by the container barcode in the tmp folder in the container's visit dir."""
date_dirs = glob.glob(self.config['archive_dir']+"/*/")
container_dict = self._get_most_recent_container_dirs(date_dirs)
# Move the files in the most recent container imaging dirs within the archive_dir
for barcode in container_dict:
container = self._get_container_by_barcode(barcode)
if container['visit'] is None:
logging.getLogger().error('Container barcode %s has no session' % (str(barcode)) )
continue
# Determine the container's target directory
visit_dir = self._get_visit_dir(container)
if visit_dir is None:
continue
target_dir = os.path.join(visit_dir, "tmp", barcode)
if not self._make_dirs(target_dir):
continue
# Move all the files (overwrite any existing files) in the barcode dir to the target_dir
src_dir = os.path.join(container_dict[barcode], barcode)
self._move_dir(src_dir, target_dir)
# Delete all non-recent container imaging dirs within the archive_dir
recent_container_dirs = []
for barcode in container_dict:
recent_container_dirs.append(os.path.join(container_dict[barcode], barcode))
all_container_dirs = glob.glob(self.config['archive_dir']+"/*/*/")
for dir in all_container_dirs:
# Remove the last character ("/") from the dir when comparing
if dir[:-1] not in recent_container_dirs:
try:
logging.getLogger().debug("trying to rmtree(%s)" % (dir))
shutil.rmtree(dir)
except OSError as oe:
logging.getLogger().error("OSError in shutil.rmtree('%s')" % dir)
# Remove date folders if empty
for dir in date_dirs:
self._rmdir(dir)
def handle_ef_images(self):
"""Move extended focus (EF) images from the configuration holding_dir to
imaging/{containerid}/{inspectionid} within the container's visit dir.
Also create thumbnail images."""
files = glob.glob(self.config['holding_dir']+"/*EF*.xml")
for xml in files:
logging.getLogger().debug(xml)
st = os.stat(xml)
image = xml.replace('.xml', '.jpg')
if not os.path.exists(image):
logging.getLogger().error('Corresponding image not found for %s expected %s' % (str(xml), str(image)) )
continue
if time.time() - st.st_mtime > 10 and st.st_size > 0:
tree = ET.parse(xml)
root = tree.getroot()
# deal with xml namespace
ns = root.tag.split('}')[0].strip('{')
nss = { 'oppf': ns }
inspectionid = re.sub('\-.*', '', root.find('oppf:ImagingId', nss).text)
logging.getLogger().debug('inspection: %s' % str(inspectionid))
container = self._get_container(inspectionid)
if container is None:
continue
# Check if the visit dir exists yet
the_root = self._get_visit_dir(container)
if the_root is None:
continue
# Keep images in visit/imaging/containerid/inspectionid
new_path = '{the_root}/imaging/{containerid}/{inspectionid}'.format(the_root=the_root, containerid=container['containerid'], inspectionid=inspectionid)
if not self._make_dirs(new_path):
continue
position = self._get_position(root.find('oppf:Drop', nss).text, container['containertype'])
if position is None:
logging.getLogger().error('Could not match drop: %s to position: %s' % (root.find('oppf:Drop', nss).text, container['containertype']) )
continue
logging.getLogger().debug('Drop: %s position: %s' % (root.find('oppf:Drop', nss).text, position))
sampleid = self._get_sampleid(position, container['containerid'])
if sampleid is None:
self._move_files(image, xml, 'nosample')
continue
mppx = float(root.find('oppf:SizeInMicrons', nss).find('oppf:Width', nss).text) / float(root.find('oppf:SizeInPixels', nss).find('oppf:Width', nss).text)
mppy = float(root.find('oppf:SizeInMicrons', nss).find('oppf:Height', nss).text) / float(root.find('oppf:SizeInPixels', nss).find('oppf:Height', nss).text)
db.pq("""INSERT INTO BLSampleImage (blsampleid, micronsperpixelx, micronsperpixely, containerinspectionid)
VALUES (%s,%s,%s,%s)""", [sampleid, mppx, mppy, inspectionid])
logging.getLogger().debug("INSERT INTO BLSampleImage "\
"(blsampleid, micronsperpixelx, micronsperpixely, containerinspectionid) "\
"VALUES (%s,%s,%s,%s)" % (str(sampleid), str(mppx), str(mppy), str(inspectionid)))
iid = db.id()
# Use blsampleimageid as file name as we are sure this is unique
new_file = '{path}/{iid}.jpg'.format(path=new_path, iid=iid)
db.pq("""UPDATE BLSampleImage set imagefullpath=%s WHERE blsampleimageid=%s""", [new_file, iid])
logging.getLogger().debug("UPDATE BLSampleImage set imagefullpath=%s WHERE blsampleimageid=%s" % (new_file, str(iid)))
# move image
logging.getLogger().debug('copy: %s to %s' % (image, new_file))
try:
copyfile(image, new_file)
# create a thumbnail
file, ext = os.path.splitext(new_file)
try:
im = Image.open(new_file)
im.thumbnail((config['thumb_width'], config['thumb_height']))
try:
im.save(file+'th'+ext)
except IOError as e:
logging.getLogger().error('Error saving image file %s' % file+'th'+ext)
# clear up - should be in a try ... except?
#self._move_files(image, xml, 'processed')
try:
os.unlink(image)
except IOError as e:
logging.getLogger().error('Error deleting image file %s' % image)
try:
os.unlink(xml)
except IOError as e:
logging.getLogger().error('Error deleting XML file %s' % xml)
except IOError as e:
logging.getLogger().error('Error opening image file %s' % new_file)
except IOError as e:
logging.getLogger().error('Error copying image file %s to %s' % (image, new_file))
def _move_files(self, image, xml, path):
for f in [image, xml]:
os.rename(f, f.replace(self.config['holding_dir'], self.config['holding_dir']+'/'+path))
logging.getLogger().debug('move %s %s' % (f, f.replace(self.config['holding_dir'], self.config['holding_dir']+'/'+path)))
def _get_container_by_barcode(self, barcode):
container = self.db.pq("""SELECT c.containertype, c.containerid, c.sessionid, CONCAT(p.proposalcode, p.proposalnumber, '-', bs.visit_number) as visit, DATE_FORMAT(c.bltimestamp, '%%Y') as year
FROM Container c
LEFT OUTER JOIN BLSession bs ON bs.sessionid = c.sessionid
LEFT OUTER JOIN Proposal p ON p.proposalid = bs.proposalid
WHERE c.barcode=%s
LIMIT 1""", [barcode])
if not len(container):
logging.getLogger().error('Couldn\'t find container in database for barcode %s' % str(barcode))
return None
logging.getLogger().debug(str(container[0]['visit']))
return container[0]
def _get_container(self, inspectionid):
container = self.db.pq("""SELECT c.containertype, c.containerid, c.sessionid, CONCAT(p.proposalcode, p.proposalnumber, '-', bs.visit_number) as visit, DATE_FORMAT(c.bltimestamp, '%%Y') as year
FROM Container c
INNER JOIN ContainerInspection ci ON ci.containerid = c.containerid
INNER JOIN Dewar d ON d.dewarid = c.dewarid
INNER JOIN Shipping s ON s.shippingid = d.shippingid
INNER JOIN Proposal p ON p.proposalid = s.proposalid
LEFT OUTER JOIN BLSession bs ON bs.sessionid = c.sessionid
WHERE ci.containerinspectionid=%s
LIMIT 1""", [inspectionid])
if not len(container):
logging.getLogger().error('Couldn\'t find container for inspectionid %s' % str(inspectionid))
return
logging.getLogger().debug(str(container))
if not container[0]['sessionid']:
logging.getLogger().error('Container %s has no sessionid. inspectionid is %s ' % (str(container[0]['containerid']), str(inspectionid)))
return
return container[0]
def _get_position(self, text_position, platetype):
well, drop = text_position.split('.')
drop = int(drop)
row = ord(well[0])-65
col = int(well[1:])-1
# Need to know what type of plate this is to know how many columns its got
# This should be in the database, currently in json format embedded in this collection:
# http://ispyb.diamond.ac.uk/beta/client/js/modules/shipment/collections/platetypes.js
if not platetype in self.config['types']:
logging.getLogger().error('Unknown plate type: %s' % platetype)
return
ty = self.config['types'][platetype]
# Position is a linear sequence left to right across the plate
return (ty['well_per_row']*row*ty['drops_per_well']) + (col*ty['drops_per_well']) + (drop-1) + 1
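    # Worked example for _get_position() (hypothetical plate type with
    # well_per_row=12 and drops_per_well=3): text_position "A1.2" gives
    # well="A1", drop=2, row=0, col=0, so the returned position is
    # (12*0*3) + (0*3) + (2-1) + 1 == 2.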
# Return a blsampleid from a position and containerid
def _get_sampleid(self, position, containerid):
sample = self.db.pq("""SELECT s.blsampleid, s.name, s.location
FROM BLSample s
INNER JOIN Container c ON c.containerid = s.containerid
WHERE s.location = %s AND c.containerid = %s
LIMIT 1""", [position, containerid])
if not len(sample):
logging.getLogger().error('Couldn\'t find a blsample for containerid: %s, position: %s', str(containerid), str(position))
return
logging.getLogger().debug(str(sample[0]))
return sample[0]['blsampleid']
def kill_handler(sig,frame):
hostname = os.uname()[1]
logging.getLogger().warning("%s: got SIGTERM on %s :-O" % (sys.argv[0], hostname))
logging.shutdown()
os._exit(-1)
def set_logging(logs):
levels_dict = {"debug" : logging.DEBUG, "info" : logging.INFO, "warning" : logging.WARNING, "error" : logging.ERROR, "critical" : logging.CRITICAL}
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
for log_name in logs:
handler = None
if log_name == "syslog":
handler = logging.handlers.SysLogHandler(address=(logs[log_name]['host'], logs[log_name]['port']))
elif log_name == "rotating_file":
handler = logging.handlers.RotatingFileHandler(filename=logs[log_name]['filename'], maxBytes=logs[log_name]['max_bytes'], backupCount=logs[log_name]['no_files'])
else:
sys.exit("Invalid logging mechanism defined in config: %s. (Valid options are syslog and rotating_file.)" % log_name)
handler.setFormatter(logging.Formatter(logs[log_name]['format']))
level = logs[log_name]['level']
if levels_dict[level]:
handler.setLevel(levels_dict[level])
else:
handler.setLevel(logging.WARNING)
logger.addHandler(handler)
def clean_up():
global pid_file
os.unlink(pid_file)
logging.getLogger().info("%s: exiting python interpreter :-(" % sys.argv[0])
logging.shutdown()
def print_usage():
usage = """Script for uploading image files from Rock Imager into the correct session directories.
Syntax: %s -c <configuration file>
Arguments:
-h|--help : display this help
-c|--conf <conf file> : use the given configuration file, default is config_ef.json""" % sys.argv[0]
print usage
global pid_file
pid_file = None
conf_file = 'config_ef.json'
log_file = None
# Get command-line arguments
try:
    opts, args = getopt.gnu_getopt(sys.argv[1:], "hc:", ["help", "conf="])
except getopt.GetoptError:
print_usage()
sys.exit(2)
for o,a in opts:
if o in ("-h", "--help"):
print_usage()
sys.exit()
elif o in ("-c", "--conf"):
conf_file = a
cf = open(conf_file)
config = json.load(cf)
cf.close()
if config['task'] not in ('EF', 'Z'):
print_usage()
sys.exit()
set_logging(config['logging'])
signal.signal(signal.SIGTERM, kill_handler)
# Create a pid file
pid_file = config['pid_file']
if os.path.isfile(pid_file):
logging.getLogger().error("%s already exists, exiting" % pid_file)
sys.exit()
if pid_file != None:
try:
f = open(pid_file, 'w')
f.write(str(os.getpid()))
f.close()
except:
logging.getLogger().error("Unable to write to pid file %s" % pid_file)
atexit.register(clean_up) # Remove pid file when exiting
atexit.register(logging.shutdown)
db = MySQL(user=config['user'], pw=config['pw'], db=config['db'], host=config['host'], port=int(config['port']))
uploader = FormulatrixUploader(db=db, config=config)
if config['task'] == 'EF':
uploader.handle_ef_images()
elif config['task'] == 'Z':
uploader.handle_zslice_images()
|
DiamondLightSource/SynchWeb
|
api/formulatrix/uploader/formulatrix_uploader.py
|
Python
|
apache-2.0
| 19,426
|
[
"VisIt"
] |
301756e18af03cdc969acdacc05e98bb00911d755f86d43ea587b90814440eb8
|
'''
This Python script is intended to be run in ParaView to process the
z-stacks generated by the ImageJ macro StackGen.ijm.
Please note that the combination of the Decimate and Smooth filters
will produce a surface with a reduced number of cells that keeps
more resolution in important parts of the mesh, e.g. close to the
cristae.
The coordinates of mesh points are in units of nanometers.
Matheus Viana and Swee Lim, 28.05.2016
Parameters:
-----------
DecSurface.TargetReduction = 0.9
- will reduce the number of cells by a factor of 90%
SmoothSurface.NumberofIterations = 50
- number of iteration of the smooth filter
'''
import os
import vtk
import numpy
import paraview.servermanager
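# Note (hedged): this script relies on ParaView's high-level Python API
# (GetActiveViewOrCreate, FindSource, Contour, Transform, Decimate, Smooth,
# Triangulate, Show, Hide, SaveData, Delete) already being in scope, as it is
# in ParaView's built-in Python shell; run standalone, a prior
# `from paraview.simple import *` would typically be required.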
# Active Render
renderView1 = GetActiveViewOrCreate('RenderView')
# Same pixel size in nanometers used in StackGen.ijm
pixel_size = 2.5
# Stacks names
Stacks = ['IM.tif','OM.tif']
# Looping over stacks
for stack in Stacks:
# Selecting source
Source = FindSource(stack)
# Source path and filename
FileName = os.path.basename(Source.FileNames[0])
FileName = os.path.splitext(FileName)[0]
Path = os.path.dirname(Source.FileNames[0])
Hide(Source, renderView1)
# Creating surface
Surface = Contour(Input=Source)
Surface.ContourBy = ['POINTS', 'Tiff Scalars']
Surface.Isosurfaces = [32767.5]
Hide(Surface, renderView1)
# Manual scaling the surface
ScaleSurface = Transform(Input=Surface)
ScaleSurface.Transform = 'Transform'
ScaleSurface.Transform.Scale = [pixel_size,pixel_size,pixel_size]
Hide(ScaleSurface, renderView1)
# Reducing number of triangles
DecSurface = Decimate(Input=ScaleSurface)
DecSurface.TargetReduction = 0.9
Hide(DecSurface, renderView1)
# Smooth the resulting mesh
SmoothSurface = Smooth(Input=DecSurface)
SmoothSurface.NumberofIterations = 50
# Make sure we only have triangles
TriSurface = Triangulate(SmoothSurface)
Show(TriSurface, renderView1)
# Exporting as VTK
SaveData(Path+'/'+FileName+'.vtk', proxy=TriSurface)
# Exporting as XML
PolyData = servermanager.Fetch(TriSurface)
Writer = vtk.vtkXMLPolyDataWriter()
Writer.SetInputData(PolyData)
Writer.SetFileName(Path+'/'+FileName+'.xml')
Writer.Write()
# Cleaning memory
Delete(TriSurface)
del TriSurface
Delete(SmoothSurface)
del SmoothSurface
Delete(DecSurface)
del DecSurface
Delete(ScaleSurface)
del ScaleSurface
Delete(Surface)
del Surface
|
vianamp/MitoMembranes
|
xmlgenerator/SurfaceGen.py
|
Python
|
apache-2.0
| 2,586
|
[
"ParaView",
"VTK"
] |
b9a7cc41af8c025bc0da0ca6ab464cae8abc3c36e59764b3c3a282fb6c73721b
|
"""Equal Groups K-Means clustering utlizing the scikit-learn api and related
utilities.
BSD 3-clause "New" or "Revised" License
version 0.17.1
"""
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster import k_means_
from sklearn.cluster import _k_means
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.utils.extmath import row_norms, squared_norm
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.utils import check_array
from sklearn.utils import check_random_state
from sklearn.utils import as_float_array
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.validation import FLOAT_DTYPES
class EqualGroupsKMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""Equal Groups K-Means clustering
    90 percent of this is the K-Means implementation, with the equal-groups logic
    located in `_labels_inertia_precompute_dense()`, which follows the steps laid
    out in the Elki Same-size k-Means Variation tutorial.
    https://elki-project.github.io/tutorial/same-size_k_means
    Please note that this implementation only works with scikit-learn 0.17.x, as later
    versions have breaking changes that affect this implementation.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
    'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Notes
------
The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
clustering algorithms available), but it falls in local minima. That's why
it can be useful to restart it several times.
See also
--------
MiniBatchKMeans:
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
    probably much faster than the default batch implementation.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,
tol=1e-4, precompute_distances='auto',
verbose=0, random_state=None, copy_x=True, n_jobs=1):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if X.shape[0] < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
X.shape[0], self.n_clusters))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES,
warn_on_dtype=True)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
return X
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
k_means(
X, n_clusters=self.n_clusters, init=self.init,
n_init=self.n_init, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
X = self._check_fit_data(X)
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
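# --- Illustrative usage (added sketch, not part of the original source) ---
# A minimal example of how this scikit-learn-style estimator is typically used.
# The class name `EqualGroupsKMeans` is an assumption (the class definition
# precedes this excerpt); the fit/predict/transform interface is the one
# defined above.
#
#     import numpy as np
#     X = np.random.RandomState(0).rand(12, 2)
#     clf = EqualGroupsKMeans(n_clusters=3, random_state=0)
#     clf.fit(X)
#     clf.labels_            # cluster index per sample; groups of at most ceil(12/3)=4 members
#     clf.predict(X[:2])     # nearest-centroid assignment for new data
#     clf.transform(X[:2])   # distances from each sample to the 3 centers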
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
return_n_iter=False, sample_weight=None):
"""K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
If a callable is passed, it should take arguments X, k and
a random state and return an initialization.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter: int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
if n_init <= 0:
raise ValueError("Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError('Number of iterations should be a positive number,'
' got %d instead' % max_iter)
best_inertia = np.infty
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
# If the distances are precomputed every job will create a matrix of shape
# (n_clusters, n_samples). To stop KMeans from eating up memory we only
# activate this if the created matrix is guaranteed to be under 100MB. 12
# million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto':
n_samples = X.shape[0]
precompute_distances = (n_clusters * n_samples) < 12e6
elif isinstance(precompute_distances, bool):
pass
else:
raise ValueError("precompute_distances should be 'auto' or True/False"
", but a value of %r was passed" %
precompute_distances)
# subtract mean of X for more accurate distance computations
if not sp.issparse(X) or hasattr(init, '__array__'):
X_mean = X.mean(axis=0)
if not sp.issparse(X):
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init = check_array(init, dtype=np.float64, copy=True)
_validate_center_shape(X, n_clusters, init)
init -= X_mean
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in k-means instead of n_init=%d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
best_labels, best_inertia, best_centers = None, None, None
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = _kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state, sample_weight=sample_weight)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter,
init=init, verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia
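# Illustrative call of the functional interface above (added sketch, not part
# of the original source). X is assumed to be a 2-D numpy array of samples.
#
#     centers, labels, inertia, n_iter = k_means(
#         X, n_clusters=3, random_state=0, return_n_iter=True)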
def _kmeans_single(X, n_clusters, x_squared_norms, max_iter=300,
init='k-means++', verbose=False, random_state=None,
tol=1e-4, precompute_distances=True, sample_weight=None):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X: array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters: int
The number of clusters to form as well as the number of
centroids to generate.
max_iter: int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init: {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
If a callable is passed, it should take arguments X, k and
a random state and return an initialization.
tol: float, optional
The relative increment in the results before declaring convergence.
verbose: boolean, optional
Verbosity mode
x_squared_norms: array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
centroid: float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label: integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia: float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
if sample_weight is None:
sample_weight = np.ones(X.shape[0])
random_state = check_random_state(random_state)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = k_means_._init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
# closer center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# sample_weight already defaults to uniform weights at function entry when None
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, sample_weight, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, sample_weight, labels, n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
shift = squared_norm(centers_old - centers)
if shift <= tol:
if verbose:
print("Converged at iteration %d" % i)
break
if shift > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = \
_labels_inertia(X, x_squared_norms, best_centers,
precompute_distances=precompute_distances,
distances=distances)
return best_labels, best_inertia, best_centers, i + 1
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
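# Worked illustration (added note): for a dense X whose per-feature variances
# are [4.0, 1.0] and tol=1e-4, _tolerance returns mean([4.0, 1.0]) * 1e-4
# = 2.5e-4, i.e. the convergence threshold scales with the spread of the data.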
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms: array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers: float64 array, shape (k, n_features)
The cluster centers.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
distances: float64 array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
labels: int array of shape(n)
The resulting assignment
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
# set the default value of labels to -1 to be able to detect any anomaly
# easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=np.float64)
# distances will be changed in-place
if sp.issparse(X):
inertia = k_means_._k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers, distances)
inertia = k_means_._k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
X : numpy array, shape (n_sample, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
Sum of distances of samples to their closest cluster center.
"""
centers = np.nan_to_num(centers)
n_samples = X.shape[0]
k = centers.shape[0]
all_distances = euclidean_distances(centers, X, x_squared_norms,
squared=True)
labels = np.empty(n_samples, dtype=np.int32)
labels.fill(-1)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
n_samples = X.shape[0]
k = centers.shape[0]
max_cluster_size = get_clusters_size(n_samples, k)
labels, mindist = initial_assignment(labels, mindist, n_samples, all_distances, max_cluster_size)
all_points = np.arange(n_samples)
for point in all_points:
for point_dist in get_best_point_distances(point, all_distances):
cluster_id, point_dist = point_dist
# initial assignment
if not is_cluster_full(cluster_id, max_cluster_size, labels):
labels[point] = cluster_id
mindist[point] = point_dist
break
# refinement of clustering
transfer_list = []
best_mindist = mindist.copy()
best_labels = labels.copy()
# sort all of the points from largest distance to smallest
points_by_high_distance = np.argsort(mindist)[::-1]
for point in points_by_high_distance:
point_cluster = labels[point]
# see if there is an opening on the best cluster for this point
cluster_id, point_dist = get_best_cluster_for_point(point, all_distances)
if not is_cluster_full(cluster_id, max_cluster_size, labels) and point_cluster != cluster_id:
labels[point] = cluster_id
mindist[point] = point_dist
best_labels = labels.copy()
best_mindist = mindist.copy()
continue # on to the next point
for swap_candidate in transfer_list:
cand_cluster = labels[swap_candidate]
if point_cluster != cand_cluster:
# get the current dist of swap candidate
cand_distance = mindist[swap_candidate]
# get the potential dist of point
point_distance = all_distances[cand_cluster, point]
# compare
if point_distance < cand_distance:
labels[point] = cand_cluster
mindist[point] = all_distances[cand_cluster, point]
labels[swap_candidate] = point_cluster
mindist[swap_candidate] = all_distances[point_cluster, swap_candidate]
if np.absolute(mindist).sum() < np.absolute(best_mindist).sum():
# update the labels since the transfer was a success
best_labels = labels.copy()
best_mindist = mindist.copy()
break
else:
# reset since the transfer was not a success
labels = best_labels.copy()
mindist = best_mindist.copy()
transfer_list.append(point)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = best_mindist.sum()
return best_labels, inertia
def get_best_cluster_for_point(point, all_distances):
"""Gets the best cluster by distance for a point
Argument
--------
point : int
the point index
Returns
--------
tuple
(cluster_id, distance_from_cluster_center)
"""
sorted_distances = get_best_point_distances(point, all_distances)
cluster_id, point_dist = sorted_distances[0]
return cluster_id, point_dist
def get_best_point_distances(point, all_distances):
"""Gets a sorted by best distance of clusters
Argument
--------
point : int
the point index
Returns
--------
list of tuples sorted by point_dist
example: [(cluster_id, point_dist), (cluster_id, point_dist)]
"""
points_distances = all_distances[:, point]
sorted_points = sort_adjust_row(points_distances)
return sorted_points
def sort_adjust_row(points_distances):
"Sorts the points row from smallest distance to lowest distance"
return sorted([(cluster_id, point_dist) for cluster_id, point_dist in enumerate(points_distances)], key=lambda x: x[1])
def is_cluster_full(cluster_id, max_cluster_size, labels):
"""Determies in a cluster is full"""
cluster_count = len(np.where(labels==cluster_id)[0])
is_full = cluster_count >= max_cluster_size
return is_full
def get_clusters_size(n_samples, n_clusters):
"""Gets the number of members per cluster for equal groups kmeans"""
return (n_samples + n_clusters - 1) // n_clusters
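# Worked example (added note): get_clusters_size(10, 3) == (10 + 3 - 1) // 3 == 4,
# so with 10 samples and 3 clusters each cluster may hold at most 4 members.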
def initial_assignment(labels, mindist, n_samples, all_distances, max_cluster_size):
"""Initial assignment of labels and mindist"""
all_points = np.arange(n_samples)
for point in all_points:
for point_dist in get_best_point_distances(point, all_distances):
cluster_id, point_dist = point_dist
# initial assignment
if not is_cluster_full(cluster_id, max_cluster_size, labels):
labels[point] = cluster_id
mindist[point] = point_dist
break
return labels, mindist
|
ndanielsen/Same-Size-K-Means
|
clustering/equal_groups.py
|
Python
|
bsd-3-clause
| 31,478
|
[
"Gaussian"
] |
fc5d0508ffb08fd142121e8842ae34a53308afd1a4bf5b407868866fef592485
|
__author__ = 'fabian'
from collections import deque
import time
import requests
# Constants
BRAZIL = 'br'
EUROPE_NORDIC_EAST = 'eune'
EUROPE_WEST = 'euw'
KOREA = 'kr'
LATIN_AMERICA_NORTH = 'lan'
LATIN_AMERICA_SOUTH = 'las'
NORTH_AMERICA = 'na'
OCEANIA = 'oce'
RUSSIA = 'ru'
TURKEY = 'tr'
JAPAN = 'jp'
regions = ['br', 'eune', 'euw', 'kr', 'lan', 'las', 'na', 'oce', 'ru', 'tr', 'jp']
# Platforms
platforms = {
BRAZIL: 'BR1',
EUROPE_NORDIC_EAST: 'EUN1',
EUROPE_WEST: 'EUW1',
KOREA: 'KR',
LATIN_AMERICA_NORTH: 'LA1',
LATIN_AMERICA_SOUTH: 'LA2',
NORTH_AMERICA: 'NA1',
OCEANIA: 'OC1',
RUSSIA: 'RU',
TURKEY: 'TR1',
JAPAN: 'JP1'
}
queue_types = [
'CUSTOM', # Custom games
'NORMAL_5x5_BLIND', # Normal 5v5 blind pick
'BOT_5x5', # Historical Summoners Rift coop vs AI games
'BOT_5x5_INTRO', # Summoners Rift Intro bots
'BOT_5x5_BEGINNER', # Summoner's Rift Coop vs AI Beginner Bot games
'BOT_5x5_INTERMEDIATE', # Historical Summoner's Rift Coop vs AI Intermediate Bot games
'NORMAL_3x3', # Normal 3v3 games
'NORMAL_5x5_DRAFT', # Normal 5v5 Draft Pick games
'ODIN_5x5_BLIND', # Dominion 5v5 Blind Pick games
'ODIN_5x5_DRAFT', # Dominion 5v5 Draft Pick games
'BOT_ODIN_5x5', # Dominion Coop vs AI games
'RANKED_SOLO_5x5', # Ranked Solo 5v5 games
'RANKED_PREMADE_3x3', # Ranked Premade 3v3 games
'RANKED_PREMADE_5x5', # Ranked Premade 5v5 games
'RANKED_TEAM_3x3', # Ranked Team 3v3 games
'RANKED_TEAM_5x5', # Ranked Team 5v5 games
'BOT_TT_3x3', # Twisted Treeline Coop vs AI games
'GROUP_FINDER_5x5', # Team Builder games
'ARAM_5x5', # ARAM games
'ONEFORALL_5x5', # One for All games
'FIRSTBLOOD_1x1', # Snowdown Showdown 1v1 games
'FIRSTBLOOD_2x2', # Snowdown Showdown 2v2 games
'SR_6x6', # Hexakill games
'URF_5x5', # Ultra Rapid Fire games
'BOT_URF_5x5', # Ultra Rapid Fire games played against AI games
'NIGHTMARE_BOT_5x5_RANK1', # Doom Bots Rank 1 games
'NIGHTMARE_BOT_5x5_RANK2', # Doom Bots Rank 2 games
'NIGHTMARE_BOT_5x5_RANK5', # Doom Bots Rank 5 games
'ASCENSION_5x5', # Ascension games
'HEXAKILL', # 6v6 games on twisted treeline
'KING_PORO_5x5', # King Poro game games
'COUNTER_PICK', # Nemesis games,
'BILGEWATER_5x5', # Black Market Brawlers games
]
game_maps = [
{'map_id': 1, 'name': "Summoner's Rift", 'notes': "Summer Variant"},
{'map_id': 2, 'name': "Summoner's Rift", 'notes': "Autumn Variant"},
{'map_id': 3, 'name': "The Proving Grounds", 'notes': "Tutorial Map"},
{'map_id': 4, 'name': "Twisted Treeline", 'notes': "Original Version"},
{'map_id': 8, 'name': "The Crystal Scar", 'notes': "Dominion Map"},
{'map_id': 10, 'name': "Twisted Treeline", 'notes': "Current Version"},
{'map_id': 11, 'name': "Summoner's Rift", 'notes': "Current Version"},
{'map_id': 12, 'name': "Howling Abyss", 'notes': "ARAM Map"},
{'map_id': 14, 'name': "Butcher's Bridge", 'notes': "ARAM Map"},
]
game_modes = [
'CLASSIC', # Classic Summoner's Rift and Twisted Treeline games
'ODIN', # Dominion/Crystal Scar games
'ARAM', # ARAM games
'TUTORIAL', # Tutorial games
'ONEFORALL', # One for All games
'ASCENSION', # Ascension games
'FIRSTBLOOD', # Snowdown Showdown games
'KINGPORO', # King Poro games
]
game_types = [
'CUSTOM_GAME', # Custom games
'TUTORIAL_GAME', # Tutorial games
'MATCHED_GAME', # All other games
]
sub_types = [
'NONE', # Custom games
'NORMAL', # Summoner's Rift unranked games
'NORMAL_3x3', # Twisted Treeline unranked games
'ODIN_UNRANKED', # Dominion/Crystal Scar games
'ARAM_UNRANKED_5v5', # ARAM / Howling Abyss games
'BOT', # Summoner's Rift and Crystal Scar games played against AI
'BOT_3x3', # Twisted Treeline games played against AI
'RANKED_SOLO_5x5', # Summoner's Rift ranked solo queue games
'RANKED_TEAM_3x3', # Twisted Treeline ranked team games
'RANKED_TEAM_5x5', # Summoner's Rift ranked team games
'ONEFORALL_5x5', # One for All games
'FIRSTBLOOD_1x1', # Snowdown Showdown 1x1 games
'FIRSTBLOOD_2x2', # Snowdown Showdown 2x2 games
'SR_6x6', # Hexakill games
'CAP_5x5', # Team Builder games
'URF', # Ultra Rapid Fire games
'URF_BOT', # Ultra Rapid Fire games against AI
'NIGHTMARE_BOT', # Nightmare bots
'ASCENSION', # Ascension games
'HEXAKILL', # Twisted Treeline 6x6 Hexakill
'KING_PORO', # King Poro games
'COUNTER_PICK', # Nemesis games
'BILGEWATER', # Black Market Brawlers games
]
player_stat_summary_types = [
'Unranked', # Summoner's Rift unranked games
'Unranked3x3', # Twisted Treeline unranked games
'OdinUnranked', # Dominion/Crystal Scar games
'AramUnranked5x5', # ARAM / Howling Abyss games
'CoopVsAI', # Summoner's Rift and Crystal Scar games played against AI
'CoopVsAI3x3', # Twisted Treeline games played against AI
'RankedSolo5x5', # Summoner's Rift ranked solo queue games
'RankedTeams3x3', # Twisted Treeline ranked team games
'RankedTeams5x5', # Summoner's Rift ranked team games
'OneForAll5x5', # One for All games
'FirstBlood1x1', # Snowdown Showdown 1x1 games
'FirstBlood2x2', # Snowdown Showdown 2x2 games
'SummonersRift6x6', # Hexakill games
'CAP5x5', # Team Builder games
'URF', # Ultra Rapid Fire games
'URFBots', # Ultra Rapid Fire games played against AI
'NightmareBot', # Summoner's Rift games played against Nightmare AI
'Hexakill', # Twisted Treeline 6x6 Hexakill games
'KingPoro', # King Poro games
'CounterPick', # Nemesis games
'Bilgewater', # Black Market Brawlers games
]
solo_queue, ranked_5s, ranked_3s = 'RANKED_SOLO_5x5', 'RANKED_TEAM_5x5', 'RANKED_TEAM_3x3'
preseason_3, season_3, preseason_2014, season_2014, preseason_2015, season_2015, preseason_2016, season_2016 = [
'PRESEASON3', 'SEASON3',
'PRESEASON2014', 'SEASON2014',
'PRESEASON2015', 'SEASON2015',
'PRESEASON2016', 'SEASON2016',
]
api_versions = {
'champion': 1.2,
'current-game': 1.0,
'featured-games': 1.0,
'game': 1.3,
'league': 2.5,
'lol-static-data': 1.2,
'lol-status': 1.0,
'match': 2.2,
'matchlist': 2.2,
'stats': 1.3,
'summoner': 1.4,
'team': 2.4
}
class LoLException(Exception):
def __init__(self, error, response):
self.error = error
self.headers = response.headers
def __str__(self):
return self.error
def __eq__(self, other):
if isinstance(other, "".__class__):
return self.error == other
elif isinstance(other, self.__class__):
return self.error == other.error and self.headers == other.headers
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return super(LoLException, self).__hash__()
error_400 = "Bad request"
error_401 = "Unauthorized"
error_403 = "Blacklisted key"
error_404 = "Game data not found"
error_429 = "Too many requests"
error_500 = "Internal server error"
error_503 = "Service unavailable"
error_504 = 'Gateway timeout'
def raise_status(response):
if response.status_code == 400:
raise LoLException(error_400, response)
elif response.status_code == 401:
raise LoLException(error_401, response)
elif response.status_code == 403:
raise LoLException(error_403, response)
elif response.status_code == 404:
raise LoLException(error_404, response)
elif response.status_code == 429:
raise LoLException(error_429, response)
elif response.status_code == 500:
raise LoLException(error_500, response)
elif response.status_code == 503:
raise LoLException(error_503, response)
elif response.status_code == 504:
raise LoLException(error_504, response)
else:
response.raise_for_status()
class RateLimit:
def __init__(self, allowed_requests, seconds):
self.allowed_requests = allowed_requests
self.seconds = seconds
self.made_requests = deque()
def __reload(self):
t = time.time()
while len(self.made_requests) > 0 and self.made_requests[0] < t:
self.made_requests.popleft()
def add_request(self):
self.made_requests.append(time.time() + self.seconds)
def request_available(self):
self.__reload()
return len(self.made_requests) < self.allowed_requests
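# Illustrative usage of the sliding-window limiter above (added sketch, not
# part of the original source): allow at most 10 requests per 10 seconds.
#
#     limiter = RateLimit(10, 10)
#     if limiter.request_available():
#         limiter.add_request()   # record this request's expiry time
#         # ... perform the actual HTTP call here ...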
class RiotWatcher:
def __init__(self, key, default_region=NORTH_AMERICA, limits=(RateLimit(10, 10), RateLimit(500, 600), )):
self.key = key # If you have a production key, use limits=(RateLimit(3000,10), RateLimit(180000,600),)
self.default_region = default_region
self.limits = limits
def can_make_request(self):
for lim in self.limits:
if not lim.request_available():
return False
return True
def get_champion_mastery(self, sum_id, region=None):
if region is None:
region = self.default_region
args = {'api_key': self.key}
print(args)
r = requests.get(
'https://{proxy}.api.pvp.net/championmastery/location/{platformId}/player/{summonerId}/champions'.format(
proxy=region,
platformId=platforms[region],
summonerId=sum_id
),
params=args
)
for lim in self.limits:
lim.add_request()
raise_status(r)
return r.json()
def base_request(self, url, region, static=False, **kwargs):
if region is None:
region = self.default_region
args = {'api_key': self.key}
for k in kwargs:
if kwargs[k] is not None:
args[k] = kwargs[k]
r = requests.get(
'https://{proxy}.api.pvp.net/api/lol/{static}{region}/{url}'.format(
proxy='global' if static else region,
static='static-data/' if static else '',
region=region,
url=url
),
params=args
)
if not static:
for lim in self.limits:
lim.add_request()
raise_status(r)
return r.json()
def _observer_mode_request(self, url, proxy=None, **kwargs):
if proxy is None:
proxy = self.default_region
args = {'api_key': self.key}
for k in kwargs:
if kwargs[k] is not None:
args[k] = kwargs[k]
r = requests.get(
'https://{proxy}.api.pvp.net/observer-mode/rest/{url}'.format(
proxy=proxy,
url=url
),
params=args
)
for lim in self.limits:
lim.add_request()
raise_status(r)
return r.json()
@staticmethod
def sanitized_name(name):
return name.replace(' ', '').lower()
# champion-v1.2
def _champion_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/champion/{end_url}'.format(
version=api_versions['champion'],
end_url=end_url
),
region,
**kwargs
)
def get_all_champions(self, region=None, free_to_play=False):
return self._champion_request('', region, freeToPlay=free_to_play)
def get_champion(self, champion_id, region=None):
return self._champion_request('{id}'.format(id=champion_id), region)
# current-game-v1.0
def get_current_game(self, summoner_id, platform_id=None, region=None):
if platform_id is None:
platform_id = platforms[self.default_region]
return self._observer_mode_request(
'consumer/getSpectatorGameInfo/{platform}/{summoner_id}'.format(
platform=platform_id,
summoner_id=summoner_id
),
region
)
# featured-game-v1.0
def get_featured_games(self, proxy=None):
return self._observer_mode_request('featured', proxy)
# game-v1.3
def _game_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/game/{end_url}'.format(
version=api_versions['game'],
end_url=end_url
),
region,
**kwargs
)
def get_recent_games(self, summoner_id, region=None):
return self._game_request('by-summoner/{summoner_id}/recent'.format(summoner_id=summoner_id), region)
# league-v2.5
def _league_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/league/{end_url}'.format(
version=api_versions['league'],
end_url=end_url
),
region,
**kwargs
)
def get_league(self, summoner_ids=None, team_ids=None, region=None):
"""summoner_ids and team_ids arguments must be iterable, only one should be specified, not both"""
if (summoner_ids is None) != (team_ids is None):
if summoner_ids is not None:
return self._league_request(
'by-summoner/{summoner_ids}'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
else:
return self._league_request(
'by-team/{team_ids}'.format(team_ids=','.join([str(t) for t in team_ids])),
region
)
def get_league_entry(self, summoner_ids=None, team_ids=None, region=None):
"""summoner_ids and team_ids arguments must be iterable, only one should be specified, not both"""
if (summoner_ids is None) != (team_ids is None):
if summoner_ids is not None:
return self._league_request(
'by-summoner/{summoner_ids}/entry'.format(
summoner_ids=','.join([str(s) for s in summoner_ids])
),
region
)
else:
return self._league_request(
'by-team/{team_ids}/entry'.format(team_ids=','.join([str(t) for t in team_ids])),
region
)
def get_challenger(self, region=None, queue=solo_queue):
return self._league_request('challenger', region, type=queue)
def get_master(self, region=None, queue=solo_queue):
return self._league_request('master', region, type=queue)
# lol-static-data-v1.2
def _static_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/{end_url}'.format(
version=api_versions['lol-static-data'],
end_url=end_url
),
region,
static=True,
**kwargs
)
def static_get_champion_list(self, region=None, locale=None, version=None, data_by_id=None, champ_data=None):
return self._static_request(
'champion',
region,
locale=locale,
version=version,
dataById=data_by_id,
champData=champ_data
)
def static_get_champion(self, champ_id, region=None, locale=None, version=None, champ_data=None):
return self._static_request(
'champion/{id}'.format(id=champ_id),
region,
locale=locale,
version=version,
champData=champ_data
)
def static_get_item_list(self, region=None, locale=None, version=None, item_list_data=None):
return self._static_request('item', region, locale=locale, version=version, itemListData=item_list_data)
def static_get_item(self, item_id, region=None, locale=None, version=None, item_data=None):
return self._static_request(
'item/{id}'.format(id=item_id),
region,
locale=locale,
version=version,
itemData=item_data
)
def static_get_mastery_list(self, region=None, locale=None, version=None, mastery_list_data=None):
return self._static_request(
'mastery',
region,
locale=locale,
version=version,
masteryListData=mastery_list_data
)
def static_get_mastery(self, mastery_id, region=None, locale=None, version=None, mastery_data=None):
return self._static_request(
'mastery/{id}'.format(id=mastery_id),
region,
locale=locale,
version=version,
masteryData=mastery_data
)
def static_get_realm(self, region=None):
return self._static_request('realm', region)
def static_get_rune_list(self, region=None, locale=None, version=None, rune_list_data=None):
return self._static_request('rune', region, locale=locale, version=version, runeListData=rune_list_data)
def static_get_rune(self, rune_id, region=None, locale=None, version=None, rune_data=None):
return self._static_request(
'rune/{id}'.format(id=rune_id),
region,
locale=locale,
version=version,
runeData=rune_data
)
def static_get_summoner_spell_list(self, region=None, locale=None, version=None, data_by_id=None, spell_data=None):
return self._static_request(
'summoner-spell',
region,
locale=locale,
version=version,
dataById=data_by_id,
spellData=spell_data
)
def static_get_summoner_spell(self, spell_id, region=None, locale=None, version=None, spell_data=None):
return self._static_request(
'summoner-spell/{id}'.format(id=spell_id),
region,
locale=locale,
version=version,
spellData=spell_data
)
def static_get_versions(self, region=None):
return self._static_request('versions', region)
# match-v2.2
def _match_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/match/{end_url}'.format(
version=api_versions['match'],
end_url=end_url
),
region,
**kwargs
)
def get_match(self, match_id, region=None, include_timeline=False):
return self._match_request(
'{match_id}'.format(match_id=match_id),
region,
includeTimeline=include_timeline
)
# lol-status-v1.0
@staticmethod
def get_server_status(region=None):
if region is None:
url = 'shards'
else:
url = 'shards/{region}'.format(region=region)
r = requests.get('http://status.leagueoflegends.com/{url}'.format(url=url))
raise_status(r)
return r.json()
# match list-v2.2
def _match_list_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/matchlist/by-summoner/{end_url}'.format(
version=api_versions['matchlist'],
end_url=end_url,
),
region,
**kwargs
)
def get_match_list(self, summoner_id, region=None, champion_ids=None, ranked_queues=None, season=None,
begin_time=None, end_time=None, begin_index=None, end_index=None):
if ranked_queues is not None and not isinstance(ranked_queues, str):
ranked_queues = ','.join(ranked_queues)
if season is not None and not isinstance(season, str):
season = ','.join(season)
return self._match_list_request(
'{summoner_id}'.format(summoner_id=summoner_id),
region,
championIds=champion_ids,
rankedQueues=ranked_queues,
seasons=season,
beginTime=begin_time,
endTime=end_time,
beginIndex=begin_index,
endIndex=end_index
)
# stats-v1.3
def _stats_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/stats/{end_url}'.format(
version=api_versions['stats'],
end_url=end_url
),
region,
**kwargs
)
def get_stat_summary(self, summoner_id, region=None, season=None):
return self._stats_request(
'by-summoner/{summoner_id}/summary'.format(summoner_id=summoner_id),
region,
season='SEASON{}'.format(season) if season is not None else None)
def get_ranked_stats(self, summoner_id, region=None, season=None):
return self._stats_request(
'by-summoner/{summoner_id}/ranked'.format(summoner_id=summoner_id),
region,
season='SEASON{}'.format(season) if season is not None else None
)
# summoner-v1.4
def _summoner_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/summoner/{end_url}'.format(
version=api_versions['summoner'],
end_url=end_url
),
region,
**kwargs
)
def get_mastery_pages(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/masteries'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
def get_rune_pages(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/runes'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
def get_summoners(self, names=None, ids=None, region=None):
if (names is None) != (ids is None):
return self._summoner_request(
'by-name/{summoner_names}'.format(
summoner_names=','.join([self.sanitized_name(n) for n in names])) if names is not None
else '{summoner_ids}'.format(summoner_ids=','.join([str(i) for i in ids])),
region
)
else:
return None
def get_summoner(self, name=None, _id=None, region=None):
if (name is None) != (_id is None):
if name is not None:
name = self.sanitized_name(name)
key, summoner = self.get_summoners(names=[name, ], region=region).popitem()
return summoner
else:
return self.get_summoners(ids=[_id, ], region=region)[str(_id)]
return None
def get_summoner_name(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/name'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
# team-v2.4
def _team_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/team/{end_url}'.format(
version=api_versions['team'],
end_url=end_url
),
region,
**kwargs
)
def get_teams_for_summoner(self, summoner_id, region=None):
return self.get_teams_for_summoners([summoner_id, ], region=region)[str(summoner_id)]
def get_teams_for_summoners(self, summoner_ids, region=None):
return self._team_request(
'by-summoner/{summoner_id}'.format(summoner_id=','.join([str(s) for s in summoner_ids])),
region
)
def get_team(self, team_id, region=None):
return self.get_teams([team_id, ], region=region)[str(team_id)]
def get_teams(self, team_ids, region=None):
return self._team_request('{team_ids}'.format(team_ids=','.join(str(t) for t in team_ids)), region)
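# Illustrative usage of RiotWatcher (added sketch, not part of the original
# source). '<api-key>' and the summoner name are placeholders; the 'id' field
# is assumed from the summoner-v1.4 response format this wrapper targets.
#
#     watcher = RiotWatcher('<api-key>', default_region=NORTH_AMERICA)
#     if watcher.can_make_request():
#         summoner = watcher.get_summoner(name='some summoner name')
#         recent_games = watcher.get_recent_games(summoner['id'])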
if __name__ == "__main__":
pass
|
fab0l1n/lolXtension
|
RIOTWatcher.py
|
Python
|
gpl-3.0
| 23,937
|
[
"CRYSTAL"
] |
0c85e1ddcff4ea8158e43e2c22bb020a59debbdb5e7a85f51c05c6b76e0f1ef2
|
# proxy module
from __future__ import absolute_import
from mayavi.filters.delaunay3d import *
|
enthought/etsproxy
|
enthought/mayavi/filters/delaunay3d.py
|
Python
|
bsd-3-clause
| 94
|
[
"Mayavi"
] |
53db3c665cf001c037f79ca09cc1cdd7141fc727a0779d0575977011aa5f9d76
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
import six
from telemetry import decorators
from telemetry.core import util
from core import find_dependencies
class FindDependenciesTest(unittest.TestCase):
def getErroneousDependencies(self):
# For some reason, several erroneous dependencies are reported, but only
# when running under Python 3. The output from the discovery process does
# not seem to indicate that anything is actually depending on these, nor
# do the files themselves import anything other than built-ins and files
# within dependency_test_dir, so it is unclear why this is happening.
if six.PY2:
return set()
else:
chromium_src_dir = os.path.realpath(
os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..',
'..'))
return {
os.path.join(chromium_src_dir, '-'),
os.path.join(chromium_src_dir, 'build', 'android', 'java'),
os.path.join(chromium_src_dir, 'build', 'android', 'test'),
os.path.join(chromium_src_dir, 'third_party', 'catapult',
'third_party', 'coverage', '__main__.py'),
}
@decorators.Disabled('chromeos') # crbug.com/818230
def testFindPythonDependencies(self):
try:
dog_object_path = os.path.join(
util.GetUnittestDataDir(),
'dependency_test_dir', 'dog', 'dog', 'dog_object.py')
cat_module_path = os.path.join(
util.GetUnittestDataDir(),
'dependency_test_dir', 'other_animals', 'cat', 'cat')
cat_module_init_path = os.path.join(cat_module_path, '__init__.py')
cat_object_path = os.path.join(cat_module_path, 'cat_object.py')
dependencies = set(
p for p in find_dependencies.FindPythonDependencies(dog_object_path))
dependencies -= self.getErroneousDependencies()
self.assertEquals(dependencies, {
dog_object_path, cat_module_path, cat_module_init_path,
cat_object_path
})
except ImportError: # crbug.com/559527
pass
@decorators.Disabled('chromeos') # crbug.com/818230
def testFindPythonDependenciesWithNestedImport(self):
try:
moose_module_path = os.path.join(
util.GetUnittestDataDir(),
'dependency_test_dir', 'other_animals', 'moose', 'moose')
moose_object_path = os.path.join(moose_module_path, 'moose_object.py')
horn_module_path = os.path.join(moose_module_path, 'horn')
horn_module_init_path = os.path.join(horn_module_path, '__init__.py')
horn_object_path = os.path.join(horn_module_path, 'horn_object.py')
self.assertEquals(
set(p for p in
find_dependencies.FindPythonDependencies(moose_object_path)),
{moose_object_path,
horn_module_path, horn_module_init_path, horn_object_path})
except ImportError: # crbug.com/559527
pass
|
nwjs/chromium.src
|
tools/perf/core/find_dependencies_unittest.py
|
Python
|
bsd-3-clause
| 3,040
|
[
"MOOSE"
] |
d0bba683e388fcea632060eb03ce0d0e87943176818737e66b26002bc79dab3d
|
import argparse
import re
import Bio
import Bio.Phylo
from datetime import datetime
import gzip
import os, json, sys
import pandas as pd
import subprocess
import shlex
from contextlib import contextmanager
from treetime.utils import numeric_date
from collections import defaultdict
from pkg_resources import resource_stream
from io import TextIOWrapper
from .__version__ import __version__
from augur.io import open_file
from augur.util_support.color_parser import ColorParser
from augur.util_support.date_disambiguator import DateDisambiguator
from augur.util_support.metadata_file import MetadataFile
from augur.util_support.node_data_reader import NodeDataReader
from augur.util_support.shell_command_runner import ShellCommandRunner
class AugurException(Exception):
pass
def is_vcf(filename):
"""Convenience method to check if a file is a vcf file.
>>> is_vcf(None)
False
>>> is_vcf("./foo")
False
>>> is_vcf("./foo.vcf")
True
>>> is_vcf("./foo.vcf.GZ")
True
"""
return bool(filename) and any(filename.lower().endswith(x) for x in ('.vcf', '.vcf.gz'))
def read_vcf(filename):
if filename.lower().endswith(".gz"):
import gzip
file = gzip.open(filename, mode="rt", encoding='utf-8')
else:
file = open(filename, encoding='utf-8')
chrom_line = next(line for line in file if line.startswith("#C"))
file.close()
headers = chrom_line.strip().split("\t")
sequences = headers[headers.index("FORMAT") + 1:]
# because we need 'seqs to remove' for VCF
return sequences, sequences.copy()
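# Illustration of the header line read_vcf() looks for (added note): the first
# line starting with "#C" is split on tabs and the sample names are everything
# after the FORMAT column, e.g. (fields tab-separated):
#
#     #CHROM  POS  ID  REF  ALT  QUAL  FILTER  INFO  FORMAT  sampleA  sampleB
#
# read_vcf("example.vcf") would then return
# (['sampleA', 'sampleB'], ['sampleA', 'sampleB']).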
def myopen(fname, mode):
if fname.endswith('.gz'):
import gzip
return gzip.open(fname, mode, encoding='utf-8')
else:
return open(fname, mode, encoding='utf-8')
def get_json_name(args, default=None):
if args.output_node_data:
return args.output_node_data
else:
if default:
print("WARNING: no name for the output file was specified. Writing results to %s."%default, file=sys.stderr)
return default
else:
raise ValueError("Please specify a name for the JSON file containing the results.")
def ambiguous_date_to_date_range(uncertain_date, fmt, min_max_year=None):
return DateDisambiguator(uncertain_date, fmt=fmt, min_max_year=min_max_year).range()
def read_metadata(fname, query=None, as_data_frame=False):
return MetadataFile(fname, query, as_data_frame).read()
def is_date_ambiguous(date, ambiguous_by="any"):
"""
Returns whether a given date string in the format of YYYY-MM-DD is ambiguous by a given part of the date (e.g., day, month, year, or any parts).
Parameters
----------
date : str
Date string in the format of YYYY-MM-DD
ambiguous_by : str
Field of the date string to test for ambiguity ("day", "month", "year", "any")
"""
date_components = date.split('-', 2)
if len(date_components) == 3:
year, month, day = date_components
elif len(date_components) == 2:
year, month = date_components
day = "XX"
else:
year = date_components[0]
month = "XX"
day = "XX"
# Determine ambiguity hierarchically such that, for example, an ambiguous
# month implicates an ambiguous day even when day information is available.
return any((
"X" in year,
"X" in month and ambiguous_by in ("any", "month", "day"),
"X" in day and ambiguous_by in ("any", "day")
))
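# Example behaviour of is_date_ambiguous() (added note; values follow from the
# logic above):
#
#     is_date_ambiguous("2020-01-15")           -> False
#     is_date_ambiguous("2020-XX-15", "month")  -> True
#     is_date_ambiguous("2020-01-XX", "month")  -> False  (only the day is masked)
#     is_date_ambiguous("2020-XX-15", "day")    -> True   (ambiguous month implies ambiguous day)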
def get_numerical_date_from_value(value, fmt=None, min_max_year=None):
value = str(value)
if re.match(r'^-*\d+\.\d+$', value):
# numeric date which can be negative
return float(value)
if value.isnumeric():
# year-only date is ambiguous
value = fmt.replace('%Y', value).replace('%m', 'XX').replace('%d', 'XX')
if 'XX' in value:
ambig_date = ambiguous_date_to_date_range(value, fmt, min_max_year)
if ambig_date is None or None in ambig_date:
return [None, None] #don't send to numeric_date or will be set to today
return [numeric_date(d) for d in ambig_date]
try:
return numeric_date(datetime.strptime(value, fmt))
except:
return None
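# Example behaviour of get_numerical_date_from_value() (added note), assuming
# fmt="%Y-%m-%d": a value like "2020.5" is returned as the float 2020.5;
# "2020-03-11" is converted via numeric_date() to a single decimal year; a
# year-only value such as "2020" is expanded to "2020-XX-XX" and returned as a
# [min, max] numeric-date range.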
def get_numerical_dates(meta_dict, name_col = None, date_col='date', fmt=None, min_max_year=None):
if fmt:
numerical_dates = {}
if isinstance(meta_dict, dict):
for k,m in meta_dict.items():
v = m[date_col]
numerical_dates[k] = get_numerical_date_from_value(
v,
fmt,
min_max_year
)
elif isinstance(meta_dict, pd.DataFrame):
strains = meta_dict.index.values
dates = meta_dict[date_col].apply(
lambda date: get_numerical_date_from_value(
date,
fmt,
min_max_year
)
).values
numerical_dates = dict(zip(strains, dates))
else:
if isinstance(meta_dict, dict):
numerical_dates = {k:float(v) for k,v in meta_dict.items()}
elif isinstance(meta_dict, pd.DataFrame):
strains = meta_dict.index.values
dates = meta_dict[date_col].astype(float)
numerical_dates = dict(zip(strains, dates))
return numerical_dates
class InvalidTreeError(Exception):
"""Represents an error loading a phylogenetic tree from a filename.
"""
pass
def read_tree(fname, min_terminals=3):
"""Safely load a tree from a given filename or raise an error if the file does
not contain a valid tree.
Parameters
----------
fname : str
name of a file containing a phylogenetic tree
min_terminals : int
minimum number of terminals required for the parsed tree as a sanity
check on the tree
Raises
------
InvalidTreeError
If the given file exists but does not seem to contain a valid tree format.
Returns
-------
Bio.Phylo :
BioPython tree instance
"""
T = None
supported_tree_formats = ["newick", "nexus"]
for fmt in supported_tree_formats:
try:
T = Bio.Phylo.read(fname, fmt)
# Check the sanity of the parsed tree to handle cases when non-tree
# data are still successfully parsed by BioPython. Too few terminals
# in a tree indicates that the input is not valid.
if T.count_terminals() < min_terminals:
T = None
else:
break
except ValueError:
# We cannot open the tree in the current format, so we will try
# another.
pass
# If the tree cannot be loaded, raise an error to that effect.
if T is None:
raise InvalidTreeError(
"Could not read the given tree %s using the following supported formats: %s" % (fname, ", ".join(supported_tree_formats))
)
return T
def read_node_data(fnames, tree=None):
return NodeDataReader(fnames, tree).read()
def write_json(data, file_name, indent=(None if os.environ.get("AUGUR_MINIFY_JSON") else 2), include_version=True):
"""
Write ``data`` as JSON to the given ``file_name``, creating parent directories
if necessary. The augur version is included as a top-level key "augur_version".
Parameters
----------
data : dict
data to write out to JSON
file_name : str
file name to write to
indent : int or None, optional
JSON indentation level. Default is `None` if the environment variable `AUGUR_MINIFY_JSON`
is truthy, else 2
include_version : bool, optional
Include the augur version. Default: `True`.
Raises
------
OSError
"""
#in case parent folder does not exist yet
parent_directory = os.path.dirname(file_name)
if parent_directory and not os.path.exists(parent_directory):
try:
os.makedirs(parent_directory)
except OSError: #Guard against race condition
if not os.path.isdir(parent_directory):
raise
if include_version:
data["generated_by"] = {"program": "augur", "version": get_augur_version()}
with open(file_name, 'w', encoding='utf-8') as handle:
json.dump(data, handle, indent=indent, sort_keys=True)
def load_features(reference, feature_names=None):
#read in appropriately whether GFF or Genbank
#checks explicitly for GFF otherwise assumes Genbank
if not os.path.isfile(reference):
print("ERROR: reference sequence not found. looking for", reference)
return None
features = {}
if '.gff' in reference.lower():
#looks for 'gene' and 'locus_tag' as best for TB
try:
from BCBio import GFF #Package name is confusing - tell user exactly what they need!
except ImportError:
print("ERROR: Package BCBio.GFF not found! Please install using \'pip install bcbio-gff\' before re-running.")
return None
limit_info = dict( gff_type = ['gene'] )
with open(reference, encoding='utf-8') as in_handle:
for rec in GFF.parse(in_handle, limit_info=limit_info):
for feat in rec.features:
if feature_names is not None: #check both tags; user may have used either
if "gene" in feat.qualifiers and feat.qualifiers["gene"][0] in feature_names:
fname = feat.qualifiers["gene"][0]
elif "locus_tag" in feat.qualifiers and feat.qualifiers["locus_tag"][0] in feature_names:
fname = feat.qualifiers["locus_tag"][0]
else:
fname = None
else:
if "gene" in feat.qualifiers:
fname = feat.qualifiers["gene"][0]
else:
fname = feat.qualifiers["locus_tag"][0]
if fname:
features[fname] = feat
if feature_names is not None:
for fe in feature_names:
if fe not in features:
print("Couldn't find gene {} in GFF or GenBank file".format(fe))
else:
from Bio import SeqIO
for feat in SeqIO.read(reference, 'genbank').features:
if feat.type=='CDS':
if "locus_tag" in feat.qualifiers:
fname = feat.qualifiers["locus_tag"][0]
if feature_names is None or fname in feature_names:
features[fname] = feat
elif "gene" in feat.qualifiers:
fname = feat.qualifiers["gene"][0]
if feature_names is None or fname in feature_names:
features[fname] = feat
elif feat.type=='source': #read 'nuc' as well for annotations - need start/end of whole!
features['nuc'] = feat
return features
def read_config(fname):
if not (fname and os.path.isfile(fname)):
print("ERROR: config file %s not found."%fname)
return defaultdict(dict)
try:
with open(fname, 'rb') as ifile:
config = json.load(ifile)
except json.decoder.JSONDecodeError as err:
print("FATAL ERROR:")
print("\tCouldn't parse the JSON file {}".format(fname))
print("\tError message: '{}'".format(err.msg))
print("\tLine number: '{}'".format(err.lineno))
print("\tColumn number: '{}'".format(err.colno))
print("\tYou must correct this file in order to proceed.")
sys.exit(2)
return config
def read_lat_longs(overrides=None, use_defaults=True):
coordinates = {}
# TODO: make parsing of tsv files more robust while allow for whitespace delimiting for backwards compatibility
def add_line_to_coordinates(line):
if line.startswith('#') or line.strip() == "":
return
fields = line.strip().split() if not '\t' in line else line.strip().split('\t')
if len(fields) == 4:
geo_field, loc = fields[0].lower(), fields[1].lower()
lat, long = float(fields[2]), float(fields[3])
coordinates[(geo_field, loc)] = {
"latitude": lat,
"longitude": long
}
else:
print("WARNING: geo-coordinate file contains invalid line. Please make sure not to mix tabs and spaces as delimiters (use only tabs):",line)
if use_defaults:
with resource_stream(__package__, "data/lat_longs.tsv") as stream:
with TextIOWrapper(stream, "utf-8") as defaults:
for line in defaults:
add_line_to_coordinates(line)
if overrides:
if os.path.isfile(overrides):
with open(overrides, encoding='utf-8') as ifile:
for line in ifile:
add_line_to_coordinates(line)
else:
print("WARNING: input lat/long file %s not found." % overrides)
return coordinates
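# Illustration of the four tab-separated fields add_line_to_coordinates()
# expects (added note; the values are made up): geo field, location, latitude,
# longitude, e.g.
#
#     country<TAB>brazil<TAB>-10.333333<TAB>-53.2
#
# which is stored as coordinates[("country", "brazil")] =
# {"latitude": -10.333333, "longitude": -53.2}.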
def read_colors(overrides=None, use_defaults=True):
return ColorParser(mapping_filename=overrides, use_defaults=use_defaults).mapping
def write_VCF_translation(prot_dict, vcf_file_name, ref_file_name):
"""
Writes out a VCF-style file (which seems to be minimally handleable
by vcftools and pyvcf) of the AA differences between sequences and the reference.
This is a similar format created/used by read_in_vcf except that there is one
of these dicts (with sequences, reference, positions) for EACH gene.
Also writes out a fasta of the reference alignment.
EBH 12 Dec 2017
"""
import numpy as np
#for the header
seqNames = list(prot_dict[list(prot_dict.keys())[0]]['sequences'].keys())
#prepare the header of the VCF & write out
header=["#CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT"]+seqNames
with open(vcf_file_name, 'w', encoding='utf-8') as the_file:
the_file.write( "##fileformat=VCFv4.2\n"+
"##source=NextStrain_Protein_Translation\n"+
"##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\n")
the_file.write("\t".join(header)+"\n")
refWrite = []
vcfWrite = []
#go through for every gene/protein
for fname, prot in prot_dict.items():
sequences = prot['sequences']
ref = prot['reference']
positions = prot['positions']
#write out the reference fasta
refWrite.append(">"+fname)
refWrite.append(ref)
#go through every variable position
#There are no deletions here, so it's simpler than for VCF nuc sequences!
for pi in positions:
pos = pi+1 #change numbering to match VCF not python
refb = ref[pi] #reference base at this position
#try/except is (much) faster than list comprehension!
pattern = []
for k,v in sequences.items():
try:
pattern.append(sequences[k][pi])
except KeyError:
pattern.append('.')
pattern = np.array(pattern)
#get the list of ALTs - minus any '.'!
uniques = np.unique(pattern)
uniques = uniques[np.where(uniques!='.')]
#Convert bases to the number that matches the ALT
j=1
for u in uniques:
pattern[np.where(pattern==u)[0]] = str(j)
j+=1
#Now convert these calls to #/# (VCF format)
calls = [ j+"/"+j if j!='.' else '.' for j in pattern ]
if len(uniques)==0:
print("UNEXPECTED ERROR WHILE CONVERTING TO VCF AT POSITION {}".format(str(pi)))
break
#put it all together and write it out
output = [fname, str(pos), ".", refb, ",".join(uniques), ".", "PASS", ".", "GT"] + calls
vcfWrite.append("\t".join(output))
#write it all out
with open(ref_file_name, 'w', encoding='utf-8') as the_file:
the_file.write("\n".join(refWrite))
with open(vcf_file_name, 'a', encoding='utf-8') as the_file:
the_file.write("\n".join(vcfWrite))
if vcf_file_name.lower().endswith('.gz'):
import os
#must temporarily remove .gz ending, or gzip won't zip it!
os.rename(vcf_file_name, vcf_file_name[:-3])
call = ["gzip", vcf_file_name[:-3]]
run_shell_command(" ".join(call), raise_errors = True)
shquote = shlex.quote
def run_shell_command(cmd, raise_errors=False, extra_env=None):
"""
Run the given command string via Bash with error checking.
Returns True if the command exits normally. Returns False if the command
exits with failure and "raise_errors" is False (the default). When
"raise_errors" is True, exceptions are rethrown.
If an *extra_env* mapping is passed, the provided keys and values are
overlayed onto the default subprocess environment.
"""
return ShellCommandRunner(cmd, raise_errors=raise_errors, extra_env=extra_env).run()
def first_line(text):
"""
Returns the first line of the given text, ignoring leading and trailing
whitespace.
"""
return text.strip().splitlines()[0]
def available_cpu_cores(fallback: int = 1) -> int:
"""
Returns the number (an int) of CPU cores available to this **process**, if
determinable, otherwise the number of CPU cores available to the
**computer**, if determinable, otherwise the *fallback* number (which
defaults to 1).
"""
try:
# Note that this is the correct function to use, not os.cpu_count(), as
# described in the latter's documentation.
#
# The reason, which the documentation does not detail, is that
# processes may be pinned or restricted to certain CPUs by setting
# their "affinity". This is not typical except in high-performance
# computing environments, but if it is done, then a computer with say
# 24 total cores may only allow our process to use 12. If we tried to
# naively use all 24, we'd end up with two threads across the 12 cores.
# This would degrade performance rather than improve it!
return len(os.sched_getaffinity(0))
except:
# cpu_count() returns None if the value is indeterminable.
return os.cpu_count() or fallback
def nthreads_value(value):
"""
Argument value validation and casting function for --nthreads.
"""
if value.lower() == 'auto':
return available_cpu_cores()
try:
return int(value)
except ValueError:
raise argparse.ArgumentTypeError("'%s' is not an integer or the word 'auto'" % value) from None
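# Illustrative wiring of nthreads_value into argparse (added sketch, not part
# of the original source): "--nthreads auto" resolves to the number of
# available CPU cores.
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument("--nthreads", type=nthreads_value, default=1)
#     args = parser.parse_args(["--nthreads", "auto"])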
def get_parent_name_by_child_name_for_tree(tree):
'''
Return dictionary mapping child node names to parent node names
'''
parents = {}
for clade in tree.find_clades(order='level'):
for child in clade:
parents[child.name] = clade.name
return parents
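# Example (illustrative newick string, not a test fixture): for the tree
# "(A,(B,C)inner)root;", the returned mapping would be
# {"A": "root", "inner": "root", "B": "inner", "C": "inner"}.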
def annotate_parents_for_tree(tree):
"""Annotate each node in the given tree with its parent.
>>> import io
>>> tree = Bio.Phylo.read(io.StringIO("(A, (B, C))"), "newick")
>>> not any([hasattr(node, "parent") for node in tree.find_clades()])
True
>>> tree = annotate_parents_for_tree(tree)
>>> tree.root.parent is None
True
>>> all([hasattr(node, "parent") for node in tree.find_clades()])
True
"""
tree.root.parent = None
for node in tree.find_clades(order="level"):
for child in node.clades:
child.parent = node
# Return the tree.
return tree
def json_to_tree(json_dict, root=True):
"""Returns a Bio.Phylo tree corresponding to the given JSON dictionary exported
by `tree_to_json`.
Assigns links back to parent nodes for the root of the tree.
Test opening a JSON from augur export v1.
>>> import json
>>> json_fh = open("tests/data/json_tree_to_nexus/flu_h3n2_ha_3y_tree.json", "r")
>>> json_dict = json.load(json_fh)
>>> tree = json_to_tree(json_dict)
>>> tree.name
'NODE_0002020'
>>> len(tree.clades)
2
>>> tree.clades[0].name
'NODE_0001489'
>>> hasattr(tree, "attr")
True
>>> "dTiter" in tree.attr
True
>>> tree.clades[0].parent.name
'NODE_0002020'
>>> tree.clades[0].branch_length > 0
True
Test opening a JSON from augur export v2.
>>> json_fh = open("tests/data/zika.json", "r")
>>> json_dict = json.load(json_fh)
>>> tree = json_to_tree(json_dict)
>>> hasattr(tree, "name")
True
>>> len(tree.clades) > 0
True
>>> tree.clades[0].branch_length > 0
True
"""
# Check for v2 JSON which has combined metadata and tree data.
if root and "meta" in json_dict and "tree" in json_dict:
json_dict = json_dict["tree"]
node = Bio.Phylo.Newick.Clade()
# v1 and v2 JSONs use different keys for strain names.
if "name" in json_dict:
node.name = json_dict["name"]
else:
node.name = json_dict["strain"]
if "children" in json_dict:
# Recursively add children to the current node.
node.clades = [json_to_tree(child, root=False) for child in json_dict["children"]]
# Assign all non-children attributes.
for attr, value in json_dict.items():
if attr != "children":
setattr(node, attr, value)
# Only v1 JSONs support a single `attr` attribute.
if hasattr(node, "attr"):
node.numdate = node.attr.get("num_date")
node.branch_length = node.attr.get("div")
if "translations" in node.attr:
node.translations = node.attr["translations"]
elif hasattr(node, "node_attrs"):
node.branch_length = node.node_attrs.get("div")
if root:
node = annotate_parents_for_tree(node)
return node
def get_augur_version():
"""
Returns a string of the current augur version.
"""
return __version__
def read_bed_file(bed_file):
"""Read a BED file and return a list of excluded sites.
Note: This function assumes the given file is a BED file. On parsing
failures, it will attempt to skip the first line and retry, but no
other error checking is attempted. Incorrectly formatted files will
raise errors.
Parameters
----------
bed_file : str
Path to the BED file
Returns
-------
list[int]
Sorted list of unique zero-indexed sites
"""
mask_sites = []
try:
bed = pd.read_csv(bed_file, sep='\t', header=None, usecols=[1,2],
dtype={1:int,2:int})
except ValueError:
# Check if we have a header row. Otherwise, just fail.
bed = pd.read_csv(bed_file, sep='\t', header=None, usecols=[1,2],
dtype={1:int,2:int}, skiprows=1)
print("Skipped row 1 of %s, assuming it is a header." % bed_file)
for _, row in bed.iterrows():
mask_sites.extend(range(row[1], row[2]))
return sorted(set(mask_sites))
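# Example (assumed BED content, illustrative only): BED intervals are half-open
# and zero-indexed, so a row with start=5 and end=8 contributes sites 5, 6 and 7
# via range(5, 8); overlapping rows are de-duplicated by the final sorted(set(...)).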
def read_mask_file(mask_file):
"""Read a masking file and return a list of excluded sites.
Masking files have a single masking site per line, either alone
or as the second column of a tab-separated file. These sites
are assumed to be one-indexed, NOT zero-indexed. Incorrectly
formatted lines will be skipped.
Parameters
----------
mask_file : str
Path to the masking file
Returns
-------
list[int]
Sorted list of unique zero-indexed sites
"""
mask_sites = []
with open(mask_file, encoding='utf-8') as mf:
for idx, line in enumerate(l.strip() for l in mf.readlines()):
if "\t" in line:
line = line.split("\t")[1]
try:
mask_sites.append(int(line) - 1)
except ValueError as err:
print("Could not read line %s of %s: '%s' - %s" %
(idx, mask_file, line, err), file=sys.stderr)
raise
return sorted(set(mask_sites))
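# Example (assumed mask-file content, illustrative only): a line containing
# "100" (or "chrom\t100" in the two-column form) is one-indexed and therefore
# contributes the zero-indexed site 99 to the returned list.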
def load_mask_sites(mask_file):
"""Load masking sites from either a BED file or a masking file.
Parameters
----------
mask_file: str
Path to the BED or masking file
Returns
-------
list[int]
Sorted list of unique zero-indexed sites
"""
if mask_file.lower().endswith(".bed"):
mask_sites = read_bed_file(mask_file)
else:
mask_sites = read_mask_file(mask_file)
print("%d masking sites read from %s" % (len(mask_sites), mask_file))
return mask_sites
VALID_NUCLEOTIDES = { # http://reverse-complement.com/ambiguity.html
"A", "G", "C", "T", "U", "N", "R", "Y", "S", "W", "K", "M", "B", "V", "D", "H", "-",
"a", "g", "c", "t", "u", "n", "r", "y", "s", "w", "k", "m", "b", "v", "d", "h", "-"
}
def read_strains(*files, comment_char="#"):
"""Reads strain names from one or more plain text files and returns the
set of distinct strains.
Strain names can be commented with full-line or inline comments. For
example, the following is a valid strain names file:
# this is a comment at the top of the file
strain1 # exclude strain1 because it isn't sequenced properly
strain2
# this is an empty line that will be ignored.
Parameters
----------
files : one or more str
one or more names of text files with one strain name per line
Returns
-------
set :
strain names from the given input files
"""
strains = set()
for input_file in files:
with open_file(input_file, 'r') as ifile:
for line in ifile:
# Allow comments anywhere in a given line.
strain_name = line.split(comment_char)[0].strip()
if len(strain_name) > 0:
strains.add(strain_name)
return strains
|
nextstrain/augur
|
augur/utils.py
|
Python
|
agpl-3.0
| 26,113
|
[
"Biopython"
] |
f56e3292b0362e7ea573bc856b1784d7fbe7f68a5572a54d40d49c6ae1cfb025
|
"""
provide a generic structure to support window functions,
similar to how we have a Groupby object
"""
from __future__ import division
import warnings
import numpy as np
from collections import defaultdict
import pandas as pd
from pandas.lib import isscalar
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
import pandas.algos as algos
from pandas import compat
from pandas.util.decorators import Substitution, Appender
from textwrap import dedent
_shared_docs = dict()
_doc_template = """
Returns
-------
same type as input
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
"""
class _Window(PandasObject, SelectionMixin):
_attributes = ['window', 'min_periods', 'freq', 'center', 'win_type',
'axis']
exclusions = set()
def __init__(self, obj, window=None, min_periods=None, freq=None,
center=False, win_type=None, axis=0):
if freq is not None:
warnings.warn("The freq kw is deprecated and will be removed in a "
"future version. You can resample prior to passing "
"to a window function", FutureWarning, stacklevel=3)
self.blocks = []
self.obj = obj
self.window = window
self.min_periods = min_periods
self.freq = freq
self.center = center
self.win_type = win_type
self.axis = axis
self._setup()
@property
def _constructor(self):
return Window
def _setup(self):
pass
def _convert_freq(self, how=None):
""" resample according to the how, return a new object """
obj = self._selected_obj
if (self.freq is not None and
isinstance(obj, (com.ABCSeries, com.ABCDataFrame))):
if how is not None:
warnings.warn("The how kw argument is deprecated and removed "
"in a future version. You can resample prior "
"to passing to a window function", FutureWarning,
stacklevel=6)
obj = obj.resample(self.freq).aggregate(how or 'asfreq')
return obj
def _create_blocks(self, how):
""" split data into blocks & return conformed data """
obj = self._convert_freq(how)
return obj.as_blocks(copy=False).values(), obj
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if isscalar(key) and key in subset or com.is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self):
return self.__class__.__name__
def __unicode__(self):
""" provide a nice str repr of our rolling object """
attrs = ["{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self._window_type,
attrs=','.join(attrs))
def _shallow_copy(self, obj=None, **kwargs):
""" return a new object with the replacement attributes """
if obj is None:
obj = self._selected_obj.copy()
if isinstance(obj, self.__class__):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = getattr(self, attr)
return self._constructor(obj, **kwargs)
def _prep_values(self, values=None, kill_inf=True, how=None):
if values is None:
values = getattr(self._selected_obj, 'values', self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if com.is_float_dtype(values.dtype):
values = com._ensure_float64(values)
elif com.is_integer_dtype(values.dtype):
values = com._ensure_float64(values)
elif com.needs_i8_conversion(values.dtype):
raise NotImplementedError("ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(
action=self._window_type,
dtype=values.dtype))
else:
try:
values = com._ensure_float64(values)
except (ValueError, TypeError):
raise TypeError("cannot handle this type -> {0}"
"".format(values.dtype))
if kill_inf:
values = values.copy()
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None):
""" wrap a single result """
if obj is None:
obj = self._selected_obj
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if com.is_timedelta64_dtype(block.values.dtype):
result = pd.to_timedelta(
result.ravel(), unit='ns').values.reshape(result.shape)
if result.ndim == 1:
from pandas import Series
return Series(result, obj.index, name=obj.name)
return type(obj)(result, index=obj.index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj):
"""
wrap the results
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
"""
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
if not len(final):
return obj.astype('float64')
return pd.concat(final, axis=1).reindex(columns=obj.columns)
def _center_window(self, result, window):
""" center the result in the window """
if self.axis > result.ndim - 1:
raise ValueError("Requested axis is larger then no. of argument "
"dimensions")
from pandas import Series, DataFrame
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (Series, DataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
return self.apply(arg, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs['sum'] = dedent("""
%(name)s sum
Parameters
----------
how : string, default None (DEPRECATED)
Method for down- or re-sampling""")
_shared_docs['mean'] = dedent("""
%(name)s mean
Parameters
----------
how : string, default None (DEPRECATED)
Method for down- or re-sampling""")
class Window(_Window):
"""
Provides rolling transformations.
.. versionadded:: 0.18.0
Parameters
----------
window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None) (DEPRECATED)
Frequency to conform the data to before computing the statistic.
Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type, see the notes below
axis : int, default 0
Returns
-------
a Window sub-classed for the particular operation
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
The recognized window types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width).
"""
def _prep_window(self, **kwargs):
""" provide validation for our window type, return the window """
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com._asarray_tuplesafe(window).astype(float)
elif com.is_integer(window):
try:
import scipy.signal as sig
except ImportError:
raise ImportError('Please install scipy to generate window '
'weight')
# the below may pop from kwargs
win_type = _validate_win_type(self.win_type, kwargs)
return sig.get_window(win_type, window).astype(float)
raise ValueError('Invalid window %s' % str(window))
def _apply_window(self, mean=True, how=None, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : boolean, default True
If True computes weighted mean, else weighted sum
how : string, default to None (DEPRECATED)
how to resample
Returns
-------
y : type of input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return algos.roll_window(np.concatenate((arg, additional_nans))
if center else arg, window, minp,
avg=mean)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
@Substitution(name='rolling')
@Appender(SelectionMixin._see_also_template)
@Appender(SelectionMixin._agg_doc)
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, **kwargs):
return self._apply_window(mean=False, **kwargs)
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, **kwargs):
return self._apply_window(mean=True, **kwargs)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(self, func, window=None, center=None, check_minp=None, how=None,
**kwargs):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
window : int/array, default to _get_window()
center : boolean, default to self.center
check_minp : function, default to _use_window
how : string, default to None (DEPRECATED)
how to resample
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
if not hasattr(algos, func):
raise ValueError("we do not support this function "
"algos.{0}".format(func))
cfunc = getattr(algos, func)
def func(arg, window, min_periods=None):
minp = check_minp(min_periods, window)
# GH #12373: rolling functions error on float32 data
return cfunc(com._ensure_float64(arg),
window, minp, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(np.concatenate((x, additional_nans)),
window, min_periods=self.min_periods)
else:
def calc(x):
return func(x, window, min_periods=self.min_periods)
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
class _Rolling_and_Expanding(_Rolling):
_shared_docs['count'] = """%(name)s count of number of non-NaN
observations inside provided window."""
def count(self):
obj = self._convert_freq()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
blocks, obj = self._create_blocks(how=None)
results = []
for b in blocks:
if com.needs_i8_conversion(b.values):
result = b.notnull().astype(int)
else:
try:
result = np.isfinite(b).astype(float)
except TypeError:
result = np.isfinite(b.astype(float)).astype(float)
result[pd.isnull(result)] = 0
result = self._constructor(result, window=window, min_periods=0,
center=self.center).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs['apply'] = dedent("""
%(name)s function apply
Parameters
----------
func : function
Must produce a single value from an ndarray input
\*args and \*\*kwargs are passed to the function""")
def apply(self, func, args=(), kwargs={}):
# TODO: _level is unused?
_level = kwargs.pop('_level', None) # noqa
window = self._get_window()
offset = _offset(window, self.center)
def f(arg, window, min_periods):
minp = _use_window(min_periods, window)
return algos.roll_generic(arg, window, minp, offset, func, args,
kwargs)
return self._apply(f, center=False)
def sum(self, **kwargs):
return self._apply('roll_sum', **kwargs)
_shared_docs['max'] = dedent("""
%(name)s maximum
Parameters
----------
how : string, default 'max' (DEPRECATED)
Method for down- or re-sampling""")
def max(self, how=None, **kwargs):
if self.freq is not None and how is None:
how = 'max'
return self._apply('roll_max', how=how, **kwargs)
_shared_docs['min'] = dedent("""
%(name)s minimum
Parameters
----------
how : string, default 'min' (DEPRECATED)
Method for down- or re-sampling""")
def min(self, how=None, **kwargs):
if self.freq is not None and how is None:
how = 'min'
return self._apply('roll_min', how=how, **kwargs)
def mean(self, **kwargs):
return self._apply('roll_mean', **kwargs)
_shared_docs['median'] = dedent("""
%(name)s median
Parameters
----------
how : string, default 'median' (DEPRECATED)
Method for down- or re-sampling""")
def median(self, how=None, **kwargs):
if self.freq is not None and how is None:
how = 'median'
return self._apply('roll_median_c', how=how, **kwargs)
_shared_docs['std'] = dedent("""
%(name)s standard deviation
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def std(self, ddof=1, **kwargs):
window = self._get_window()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(algos.roll_var(arg, window, minp, ddof))
return self._apply(f, check_minp=_require_min_periods(1), **kwargs)
_shared_docs['var'] = dedent("""
%(name)s variance
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def var(self, ddof=1, **kwargs):
return self._apply('roll_var', check_minp=_require_min_periods(1),
ddof=ddof, **kwargs)
_shared_docs['skew'] = """Unbiased %(name)s skewness"""
def skew(self, **kwargs):
return self._apply('roll_skew', check_minp=_require_min_periods(3),
**kwargs)
_shared_docs['kurt'] = """Unbiased %(name)s kurtosis"""
def kurt(self, **kwargs):
return self._apply('roll_kurt', check_minp=_require_min_periods(4),
**kwargs)
_shared_docs['quantile'] = dedent("""
%(name)s quantile
Parameters
----------
quantile : float
0 <= quantile <= 1""")
def quantile(self, quantile, **kwargs):
window = self._get_window()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
return algos.roll_quantile(arg, window, minp, quantile)
return self._apply(f, **kwargs)
_shared_docs['cov'] = dedent("""
%(name)s sample covariance
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used
and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a Panel in the case of DataFrame inputs. In the case of
missing elements, only complete pairwise observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype('float64')
Y = Y.astype('float64')
mean = lambda x: x.rolling(window, self.min_periods,
center=self.center).mean(**kwargs)
count = (X + Y).rolling(window=window,
center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
_shared_docs['corr'] = dedent("""
%(name)s sample correlation
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used
and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a Panel in the case of DataFrame inputs. In the case of
missing elements, only complete pairwise observations will be used.""")
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(window=window, min_periods=self.min_periods,
freq=self.freq, center=self.center)
b = b.rolling(window=window, min_periods=self.min_periods,
freq=self.freq, center=self.center)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
class Rolling(_Rolling_and_Expanding):
"""
Provides rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None) (DEPRECATED)
Frequency to conform the data to before computing the statistic.
Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
axis : int, default 0
Returns
-------
a Window sub-classed for the particular operation
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
@Substitution(name='rolling')
@Appender(SelectionMixin._see_also_template)
@Appender(SelectionMixin._agg_doc)
def aggregate(self, arg, *args, **kwargs):
return super(Rolling, self).aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['count'])
def count(self):
return super(Rolling, self).count()
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['apply'])
def apply(self, func, args=(), kwargs={}):
return super(Rolling, self).apply(func, args=args, kwargs=kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, **kwargs):
return super(Rolling, self).sum(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['max'])
def max(self, **kwargs):
return super(Rolling, self).max(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['min'])
def min(self, **kwargs):
return super(Rolling, self).min(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, **kwargs):
return super(Rolling, self).mean(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['median'])
def median(self, **kwargs):
return super(Rolling, self).median(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['std'])
def std(self, ddof=1, **kwargs):
return super(Rolling, self).std(ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['var'])
def var(self, ddof=1, **kwargs):
return super(Rolling, self).var(ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['skew'])
def skew(self, **kwargs):
return super(Rolling, self).skew(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['kurt'])
def kurt(self, **kwargs):
return super(Rolling, self).kurt(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, **kwargs):
return super(Rolling, self).quantile(quantile=quantile, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['cov'])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super(Rolling, self).cov(other=other, pairwise=pairwise,
ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['corr'])
def corr(self, other=None, pairwise=None, **kwargs):
return super(Rolling, self).corr(other=other, pairwise=pairwise,
**kwargs)
class Expanding(_Rolling_and_Expanding):
"""
Provides expanding transformations.
.. versionadded:: 0.18.0
Parameters
----------
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None) (DEPRECATED)
Frequency to conform the data to before computing the statistic.
Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
axis : int, default 0
Returns
-------
a Window sub-classed for the particular operation
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
_attributes = ['min_periods', 'freq', 'center', 'axis']
def __init__(self, obj, min_periods=1, freq=None, center=False, axis=0,
**kwargs):
return super(Expanding, self).__init__(obj=obj,
min_periods=min_periods,
freq=freq, center=center,
axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
obj = self._selected_obj
if other is None:
return (max(len(obj), self.min_periods) if self.min_periods
else len(obj))
return (max((len(obj) + len(obj)), self.min_periods)
if self.min_periods else (len(obj) + len(obj)))
@Substitution(name='expanding')
@Appender(SelectionMixin._see_also_template)
@Appender(SelectionMixin._agg_doc)
def aggregate(self, arg, *args, **kwargs):
return super(Expanding, self).aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['count'])
def count(self, **kwargs):
return super(Expanding, self).count(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['apply'])
def apply(self, func, args=(), kwargs={}):
return super(Expanding, self).apply(func, args=args, kwargs=kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, **kwargs):
return super(Expanding, self).sum(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['max'])
def max(self, **kwargs):
return super(Expanding, self).max(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['min'])
def min(self, **kwargs):
return super(Expanding, self).min(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, **kwargs):
return super(Expanding, self).mean(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['median'])
def median(self, **kwargs):
return super(Expanding, self).median(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['std'])
def std(self, ddof=1, **kwargs):
return super(Expanding, self).std(ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['var'])
def var(self, ddof=1, **kwargs):
return super(Expanding, self).var(ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['skew'])
def skew(self, **kwargs):
return super(Expanding, self).skew(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['kurt'])
def kurt(self, **kwargs):
return super(Expanding, self).kurt(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, **kwargs):
return super(Expanding, self).quantile(quantile=quantile, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['cov'])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super(Expanding, self).cov(other=other, pairwise=pairwise,
ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['corr'])
def corr(self, other=None, pairwise=None, **kwargs):
return super(Expanding, self).corr(other=other, pairwise=pairwise,
**kwargs)
_bias_template = """
Parameters
----------
bias : boolean, default False
Use a standard estimation bias correction
"""
_pairwise_template = """
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a Panel in the case of DataFrame inputs. In the case of missing
elements, only complete pairwise observations will be used.
bias : boolean, default False
Use a standard estimation bias correction
"""
class EWM(_Rolling):
r"""
Provides exponential weighted functions.
.. versionadded:: 0.18.0
Parameters
----------
com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`
halflife : float, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`
.. versionadded:: 0.18.0
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : None or string alias / date offset object, default=None (DEPRECATED)
Frequency to conform to before computing statistic
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account
for imbalance in relative weightings (viewing EWMA as a moving average)
ignore_na : boolean, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior
Returns
-------
a Window sub-classed for the particular operation
Notes
-----
Exactly one of center of mass, span, half-life, and alpha must be provided.
Allowed values and relationship between the parameters are specified in the
parameter descriptions above; see the link at the end of this section for
a detailed explanation.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
When adjust is True (default), weighted averages are calculated using
weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based
on relative positions. For example, the weights of x and y used in
calculating the final weighted average of [x, None, y] are 1-alpha and 1
(if adjust is True), and 1-alpha and alpha (if adjust is False).
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
"""
_attributes = ['com', 'min_periods', 'freq', 'adjust', 'ignore_na', 'axis']
def __init__(self, obj, com=None, span=None, halflife=None, alpha=None,
min_periods=0, freq=None, adjust=True, ignore_na=False,
axis=0):
self.obj = obj
self.com = _get_center_of_mass(com, span, halflife, alpha)
self.min_periods = min_periods
self.freq = freq
self.adjust = adjust
self.ignore_na = ignore_na
self.axis = axis
@property
def _constructor(self):
return EWM
@Substitution(name='ewm')
@Appender(SelectionMixin._see_also_template)
@Appender(SelectionMixin._agg_doc)
def aggregate(self, arg, *args, **kwargs):
return super(EWM, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _apply(self, func, how=None, **kwargs):
"""Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
how : string, default to None (DEPRECATED)
how to resample
Returns
-------
y : type of input argument
"""
blocks, obj = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
if not hasattr(algos, func):
raise ValueError("we do not support this function "
"algos.{0}".format(func))
cfunc = getattr(algos, func)
def func(arg):
return cfunc(arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods))
results.append(np.apply_along_axis(func, self.axis, values))
return self._wrap_results(results, blocks, obj)
@Substitution(name='ewm')
@Appender(_doc_template)
def mean(self, **kwargs):
"""exponential weighted moving average"""
return self._apply('ewma', **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def std(self, bias=False, **kwargs):
"""exponential weighted moving stddev"""
return _zsqrt(self.var(bias=bias, **kwargs))
vol = std
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def var(self, bias=False, **kwargs):
"""exponential weighted moving variance"""
def f(arg):
return algos.ewmcov(arg, arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods),
int(bias))
return self._apply(f, **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def cov(self, other=None, pairwise=None, bias=False, **kwargs):
"""exponential weighted sample covariance"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_cov(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
cov = algos.ewmcov(X._prep_values(), Y._prep_values(), self.com,
int(self.adjust), int(self.ignore_na),
int(self.min_periods), int(bias))
return X._wrap_result(cov)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def corr(self, other=None, pairwise=None, **kwargs):
"""exponential weighted sample correlation"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_corr(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
def _cov(x, y):
return algos.ewmcov(x, y, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods),
1)
x_values = X._prep_values()
y_values = Y._prep_values()
cov = _cov(x_values, y_values)
x_var = _cov(x_values, x_values)
y_var = _cov(y_values, y_values)
corr = cov / _zsqrt(x_var * y_var)
return X._wrap_result(corr)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
# Helper Funcs
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
from pandas import Series, DataFrame, Panel
if not (isinstance(arg1, (np.ndarray, Series, DataFrame)) and
isinstance(arg2, (np.ndarray, Series, DataFrame))):
raise TypeError("arguments to moment function must be of type "
"np.ndarray/Series/DataFrame")
if (isinstance(arg1, (np.ndarray, Series)) and
isinstance(arg2, (np.ndarray, Series))):
X, Y = _prep_binary(arg1, arg2)
return f(X, Y)
elif isinstance(arg1, DataFrame):
def dataframe_from_int_dict(data, frame_template):
result = DataFrame(data, index=frame_template.index)
if len(result.columns) > 0:
result.columns = frame_template.columns[result.columns]
return result
results = {}
if isinstance(arg2, DataFrame):
if pairwise is False:
if arg1 is arg2:
# special case in order to handle duplicate column names
for i, col in enumerate(arg1.columns):
results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
return dataframe_from_int_dict(results, arg1)
else:
if not arg1.columns.is_unique:
raise ValueError("'arg1' columns are not unique")
if not arg2.columns.is_unique:
raise ValueError("'arg2' columns are not unique")
X, Y = arg1.align(arg2, join='outer')
X = X + 0 * Y
Y = Y + 0 * X
res_columns = arg1.columns.union(arg2.columns)
for col in res_columns:
if col in X and col in Y:
results[col] = f(X[col], Y[col])
return DataFrame(results, index=X.index,
columns=res_columns)
elif pairwise is True:
results = defaultdict(dict)
for i, k1 in enumerate(arg1.columns):
for j, k2 in enumerate(arg2.columns):
if j < i and arg2 is arg1:
# Symmetric case
results[i][j] = results[j][i]
else:
results[i][j] = f(*_prep_binary(arg1.iloc[:, i],
arg2.iloc[:, j]))
p = Panel.from_dict(results).swapaxes('items', 'major')
if len(p.major_axis) > 0:
p.major_axis = arg1.columns[p.major_axis]
if len(p.minor_axis) > 0:
p.minor_axis = arg2.columns[p.minor_axis]
return p
else:
raise ValueError("'pairwise' is not True/False")
else:
results = {}
for i, col in enumerate(arg1.columns):
results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))
return dataframe_from_int_dict(results, arg1)
else:
return _flex_binary_moment(arg2, arg1, f)
def _get_center_of_mass(com, span, halflife, alpha):
valid_count = len([x for x in [com, span, halflife, alpha]
if x is not None])
if valid_count > 1:
raise ValueError("com, span, halflife, and alpha "
"are mutually exclusive")
# Convert to center of mass; domain checks ensure 0 < alpha <= 1
if com is not None:
if com < 0:
raise ValueError("com must satisfy: com >= 0")
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
com = (span - 1) / 2.
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
decay = 1 - np.exp(np.log(0.5) / halflife)
com = 1 / decay - 1
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
com = (1.0 - alpha) / alpha
else:
raise ValueError("Must pass one of com, span, halflife, or alpha")
return float(com)
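# Worked examples (illustrative): _get_center_of_mass(None, 10, None, None)
# returns (10 - 1) / 2. = 4.5; _get_center_of_mass(None, None, None, 0.5)
# returns (1 - 0.5) / 0.5 = 1.0; passing both com and span raises ValueError.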
def _offset(window, center):
if not com.is_integer(window):
window = len(window)
offset = (window - 1) / 2. if center else 0
try:
return int(offset)
except:
return offset.astype(int)
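# Example (illustrative): _offset(5, True) -> 2 for an integer window, and an
# array window of length 7 with center=True gives 3; center=False always
# yields an offset of 0.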
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
return window
else:
return max(p, minp)
return _check_func
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
def _zsqrt(x):
result = np.sqrt(x)
mask = x < 0
from pandas import DataFrame
if isinstance(x, DataFrame):
if mask.values.any():
result[mask] = 0
else:
if mask.any():
result[mask] = 0
return result
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception('Input arrays must be of the same type!')
# mask out values, this also makes a common index...
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
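# Example (illustrative): adding 0 * the other operand aligns the two inputs on
# a common index and propagates NaN wherever either side is missing, e.g. for
# a = Series([1, 2, np.nan]) and b = Series([np.nan, 5, 6]) the prepared X is
# [NaN, 2, NaN] and Y is [NaN, 5, NaN].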
def _validate_win_type(win_type, kwargs):
# may pop from kwargs
arg_map = {'kaiser': ['beta'],
'gaussian': ['std'],
'general_gaussian': ['power', 'width'],
'slepian': ['width']}
if win_type in arg_map:
return tuple([win_type] + _pop_args(win_type, arg_map[win_type],
kwargs))
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = '%s window requires %%s' % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
# Top-level exports
def rolling(obj, win_type=None, **kwds):
from pandas import Series, DataFrame
if not isinstance(obj, (Series, DataFrame)):
raise TypeError('invalid type: %s' % type(obj))
if win_type is not None:
return Window(obj, win_type=win_type, **kwds)
return Rolling(obj, **kwds)
rolling.__doc__ = Window.__doc__
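# Example (hedged sketch): the module-level helper dispatches on win_type; with
# a window type it returns a weighted Window, otherwise a plain Rolling.
# >>> rolling(pd.Series(range(10), dtype='float64'), window=3).mean()
# >>> rolling(pd.Series(range(10), dtype='float64'), window=3, win_type='triang').mean()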
def expanding(obj, **kwds):
from pandas import Series, DataFrame
if not isinstance(obj, (Series, DataFrame)):
raise TypeError('invalid type: %s' % type(obj))
return Expanding(obj, **kwds)
expanding.__doc__ = Expanding.__doc__
def ewm(obj, **kwds):
from pandas import Series, DataFrame
if not isinstance(obj, (Series, DataFrame)):
raise TypeError('invalid type: %s' % type(obj))
return EWM(obj, **kwds)
ewm.__doc__ = EWM.__doc__
|
pjryan126/solid-start-careers
|
store/api/zillow/venv/lib/python2.7/site-packages/pandas/core/window.py
|
Python
|
gpl-2.0
| 49,027
|
[
"Gaussian"
] |
6037a2956accbb776f6e0a69cdc0ce313eb6a5107ffd4023a57805368f17c30a
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
from pymatgen.analysis.molecule_matcher import MoleculeMatcher
from pymatgen.analysis.molecule_matcher import IsomorphismMolAtomMapper
from pymatgen.analysis.molecule_matcher import InchiMolAtomMapper
from pymatgen.core.operations import SymmOp
from pymatgen.core.structure import Molecule
try:
import openbabel as ob
except ImportError:
ob = None
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files', "molecules", "molecule_matcher")
obalign_missing = ob is None or 'OBAlign' not in dir(ob)
@unittest.skipIf(obalign_missing, "OBAlign is missing, Skipping")
class MoleculeMatcherTest(unittest.TestCase):
def test_fit(self):
self.fit_with_mapper(IsomorphismMolAtomMapper())
self.fit_with_mapper(InchiMolAtomMapper())
def test_get_rmsd(self):
mm = MoleculeMatcher()
mol1 = Molecule.from_file(os.path.join(test_dir, "t3.xyz"))
mol2 = Molecule.from_file(os.path.join(test_dir, "t4.xyz"))
self.assertEqual('{0:7.3}'.format(mm.get_rmsd(mol1, mol2)), "0.00488")
def test_group_molecules(self):
mm = MoleculeMatcher(tolerance=0.001)
with open(os.path.join(test_dir, "mol_list.txt")) as f:
filename_list = [line.strip() for line in f.readlines()]
mol_list = [Molecule.from_file(os.path.join(test_dir, f))
for f in filename_list]
mol_groups = mm.group_molecules(mol_list)
filename_groups = [[filename_list[mol_list.index(m)] for m in g]
for g in mol_groups]
with open(os.path.join(test_dir, "grouped_mol_list.txt")) as f:
grouped_text = f.read().strip()
self.assertEqual(str(filename_groups), grouped_text)
def test_to_and_from_dict(self):
mm = MoleculeMatcher(tolerance=0.5,
mapper=InchiMolAtomMapper(angle_tolerance=50.0))
d = mm.as_dict()
mm2 = MoleculeMatcher.from_dict(d)
self.assertEqual(d, mm2.as_dict())
mm = MoleculeMatcher(tolerance=0.5, mapper=IsomorphismMolAtomMapper())
d = mm.as_dict()
mm2 = MoleculeMatcher.from_dict(d)
self.assertEqual(d, mm2.as_dict())
def fit_with_mapper(self, mapper):
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
mol1 = Molecule(["C", "H", "H", "H", "H"], coords)
op = SymmOp.from_origin_axis_angle([0, 0, 0], [0.1, 0.2, 0.3], 60)
rotcoords = [op.operate(c) for c in coords]
mol2 = Molecule(["C", "H", "H", "H", "H"], rotcoords)
mm = MoleculeMatcher(mapper=mapper)
self.assertTrue(mm.fit(mol1, mol2))
mol1 = Molecule.from_file(os.path.join(test_dir, "benzene1.xyz"))
mol2 = Molecule.from_file(os.path.join(test_dir, "benzene2.xyz"))
self.assertTrue(mm.fit(mol1, mol2))
mol1 = Molecule.from_file(os.path.join(test_dir, "benzene1.xyz"))
mol2 = Molecule.from_file(os.path.join(test_dir, "t2.xyz"))
self.assertFalse(mm.fit(mol1, mol2))
mol1 = Molecule.from_file(os.path.join(test_dir, "c1.xyz"))
mol2 = Molecule.from_file(os.path.join(test_dir, "c2.xyz"))
self.assertTrue(mm.fit(mol1, mol2))
mol1 = Molecule.from_file(os.path.join(test_dir, "t3.xyz"))
mol2 = Molecule.from_file(os.path.join(test_dir, "t4.xyz"))
self.assertTrue(mm.fit(mol1, mol2))
mol1 = Molecule.from_file(os.path.join(test_dir, "j1.xyz"))
mol2 = Molecule.from_file(os.path.join(test_dir, "j2.xyz"))
self.assertTrue(mm.fit(mol1, mol2))
mol1 = Molecule.from_file(os.path.join(test_dir, "ethene1.xyz"))
mol2 = Molecule.from_file(os.path.join(test_dir, "ethene2.xyz"))
self.assertTrue(mm.fit(mol1, mol2))
mol1 = Molecule.from_file(os.path.join(test_dir, "toluene1.xyz"))
mol2 = Molecule.from_file(os.path.join(test_dir, "toluene2.xyz"))
self.assertTrue(mm.fit(mol1, mol2))
mol1 = Molecule.from_file(os.path.join(test_dir, "cyclohexane1.xyz"))
mol2 = Molecule.from_file(os.path.join(test_dir, "cyclohexane2.xyz"))
self.assertTrue(mm.fit(mol1, mol2))
mol1 = Molecule.from_file(os.path.join(test_dir, "oxygen1.xyz"))
mol2 = Molecule.from_file(os.path.join(test_dir, "oxygen2.xyz"))
self.assertTrue(mm.fit(mol1, mol2))
mm = MoleculeMatcher(tolerance=0.001, mapper=mapper)
mol1 = Molecule.from_file(os.path.join(test_dir, "t3.xyz"))
mol2 = Molecule.from_file(os.path.join(test_dir, "t4.xyz"))
self.assertFalse(mm.fit(mol1, mol2))
def test_strange_inchi(self):
mm = MoleculeMatcher(tolerance=0.05, mapper=InchiMolAtomMapper())
mol1 = Molecule.from_file(os.path.join(test_dir, "k1.sdf"))
mol2 = Molecule.from_file(os.path.join(test_dir, "k2.sdf"))
self.assertTrue(mm.fit(mol1, mol2))
def test_thiane(self):
mm = MoleculeMatcher(tolerance=0.05, mapper=InchiMolAtomMapper())
mol1 = Molecule.from_file(os.path.join(test_dir, "thiane1.sdf"))
mol2 = Molecule.from_file(os.path.join(test_dir, "thiane2.sdf"))
self.assertFalse(mm.fit(mol1, mol2))
def test_thiane_ethynyl(self):
mm = MoleculeMatcher(tolerance=0.05, mapper=InchiMolAtomMapper())
mol1 = Molecule.from_file(os.path.join(test_dir, "thiane_ethynyl1.sdf"))
mol2 = Molecule.from_file(os.path.join(test_dir, "thiane_ethynyl2.sdf"))
self.assertFalse(mm.fit(mol1, mol2))
def test_cdi_23(self):
mm = MoleculeMatcher(tolerance=0.05, mapper=InchiMolAtomMapper())
mol1 = Molecule.from_file(os.path.join(test_dir, "cdi_23_1.xyz"))
mol2 = Molecule.from_file(os.path.join(test_dir, "cdi_23_2.xyz"))
self.assertFalse(mm.fit(mol1, mol2))
if __name__ == '__main__':
unittest.main()
|
fraricci/pymatgen
|
pymatgen/analysis/tests/test_molecule_matcher.py
|
Python
|
mit
| 6,171
|
[
"pymatgen"
] |
9f474c95eddbc5ecb917dc894e7fd27f9d9bc2726f51d44fd2cdf576d6943cbc
|
# -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email legal@cyan.com
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: Teledahn.py
Age: Teledahn
Date: October 2002
event manager hooks for the Teledahn
"""
from Plasma import *
from PlasmaTypes import *
class Teledahn(ptResponder):
def __init__(self):
ptResponder.__init__(self)
self.id = 5025
self.version = 1
def OnFirstUpdate(self):
#~ # record our visit in player's chronicle
#~ kModuleName = "Teledahn"
#~ kChronicleVarName = "LinksIntoTeledahn"
#~ kChronicleVarType = 0
#~ vault = ptVault()
#~ if type(vault) != type(None):
#~ entry = vault.findChronicleEntry(kChronicleVarName)
#~ if type(entry) == type(None):
#~ # not found... add current level chronicle
#~ vault.addChronicleEntry(kChronicleVarName,kChronicleVarType,"%d" %(1))
#~ PtDebugPrint("%s:\tentered new chronicle counter %s" % (kModuleName,kChronicleVarName))
#~ else:
#~ import string
#~ count = string.atoi(entry.chronicleGetValue())
#~ count = count + 1
#~ entry.chronicleSetValue("%d" % (count))
#~ entry.save()
#~ PtDebugPrint("%s:\tyour current count for %s is %s" % (kModuleName,kChronicleVarName,entry.chronicleGetValue()))
#~ else:
#~ PtDebugPrint("%s:\tERROR trying to access vault -- can't update %s variable in chronicle." % (kModuleName,kChronicleVarName))
pass
def Load(self):
pass
def OnNotify(self,state,id,events):
pass
|
zrax/moul-scripts
|
Python/Teledahn.py
|
Python
|
gpl-3.0
| 3,389
|
[
"VisIt"
] |
d1b93f20d059380f0972d4ca75963953f2f5b6e70a0ba47a268a9fbb0b945062
|
import numpy
from gauss import *
from matmul import *
# tests of gaussian elimination
A = numpy.array([ [1, 1, 1], [-1, 2, 0], [2, 0, 1] ], dtype=numpy.float64)
b = numpy.array([6, 3, 5], dtype=numpy.float64)
# gaussElim changes A in place -- send a copy
x, d = gaussElim(A.copy(), b.copy(), returnDet=1)
# test it by multiplying A x
bout = mult_Ax(A, x)
print "matrix A:\n", A
print "RHS (b): ", b
print "solved x: ", x
print "det{A}: ", d
print "A.x: ", bout
print " "
A = numpy.array([ [0, 1, 1], [1, 1, 0], [1, 0, 1] ], dtype=numpy.float64)
b = numpy.array([5, 3, 4], dtype=numpy.float64)
x = gaussElim(A.copy(), b.copy())
# test it by multiplying A x
bout = mult_Ax(A, x)
print "matrix A:\n", A
print "RHS (b): ", b
print "solved x: ", x
print "A.x: ", bout
print " "
A = numpy.array([ [0, 0, 0, 4],
[0, 0, 3, 0],
[5, 6, 7, 8],
[0, 4, 3, 2] ], dtype=numpy.float64)
b = numpy.array([5, 4, 9, 1], dtype=numpy.float64)
x = gaussElim(A.copy(), b.copy())
# test it by multiplying A x
bout = mult_Ax(A, x)
print "matrix A:\n", A
print "RHS (b): ", b
print "solved x: ", x
print "A.x: ", bout
print " "
A = numpy.array([ [ 4, 3, 4, 10],
[ 2, -7, 3, 0],
[-2, 11, 1, 3],
[ 3, -4, 0, 2] ], dtype=numpy.float64)
b = numpy.array([2, 6, 3, 1], dtype=numpy.float64)
x = gaussElim(A.copy(), b.copy())
# test it by multiplying A x
bout = mult_Ax(A, x)
print "matrix A:\n", A
print "RHS (b): ", b
print "solved x: ", x
print "A.x: ", bout
print " "
|
bt3gl/Numerical-Methods-for-Physics
|
others/lin_algebra/gauss-test.py
|
Python
|
apache-2.0
| 1,577
|
[
"Gaussian"
] |
b98134a8c0a009cc81b359512d4c3d7bb0a933444724c230b75dc3b13cd9dace
|
#!/usr/bin/env python3
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
import numpy as np
import fileinput
# from run_parameter import *
parser = argparse.ArgumentParser(
description="This is a python3 script to\
do see the difference variable make \
run simulation")
parser.add_argument("template", help="the name of template file")
parser.add_argument("-d", "--debug", action="store_true", default=False)
parser.add_argument("--rerun",
type=int, default=1)
parser.add_argument("-m", "--mode", type=int, default=2)
parser.add_argument("--model", default="single")
args = parser.parse_args()
protein_name = args.template.strip('/')
if(args.debug):
do = print
cd = print
else:
do = os.system
cd = os.chdir
# protein_name = args.template.split('_', 1)[-1].strip('/')
# os.system("cp ~/opt/variable_test_run.py .")
# run_slurm = '''\
# #!/bin/bash
# #SBATCH --job-name=CTBP_WL
# #SBATCH --account=ctbp-common
# #SBATCH --partition=ctbp-common
# #SBATCH --ntasks=1
# #SBATCH --mem-per-cpu=1G
# #SBATCH --time=1-00:00:00
# #SBATCH --mail-user=luwei0917@gmail.com
# #SBATCH --mail-type=FAIL
# echo "My job ran on:"
# echo $SLURM_NODELIST
# srun ~/build/brian/adjustable_z_dependence/lmp_serial -in 2xov_{}.in
# '''
# if args.mode == 1:
# run_slurm = '''\
# #!/bin/bash
# #SBATCH --job-name=CTBP_WL
# #SBATCH --account=ctbp-common
# #SBATCH --partition=ctbp-common
# #SBATCH --ntasks=1
# #SBATCH --threads-per-core=1
# #SBATCH --mem-per-cpu=1G
# #SBATCH --time=1-00:00:00
# #SBATCH --mail-user=luwei0917@gmail.com
# #SBATCH --mail-type=FAIL
# echo "My job ran on:"
# echo $SLURM_NODELIST
# srun ~/build/brian/z_dependence/lmp_serial -in 2xov_{}.in
# '''
# if args.mode == 2:
# run_slurm = '''\
# #!/bin/bash
# #SBATCH --job-name=CTBP_WL
# #SBATCH --account=ctbp-common
# #SBATCH --partition=ctbp-common
# #SBATCH --ntasks=1
# #SBATCH --threads-per-core=1
# #SBATCH --mem-per-cpu=1G
# #SBATCH --time=1-00:00:00
# #SBATCH --mail-user=luwei0917@gmail.com
# #SBATCH --mail-type=FAIL
# echo "My job ran on:"
# echo $SLURM_NODELIST
# srun /home/wl45/build/awsem_new_membrane/src/lmp_serial -in 2xov_{}.in
# '''
fileName = "2xov_multi.in"
# if args.rerun == 0:
# start_from = "read_data data.2xov"
# if args.rerun == 1:
# start_from = "read_restart restart.extended"
# rg_list = [0, 1, 5, 10]
# force_list = [2.0]
# memb_k_list = [0, 1, 5, 10]
# rg_list = [0, 1, 2, 5]
# force_list = [0.0, 1.0, 2.0, 3.0]
# memb_k_list = [0, 1, 2, 5]
# rg_list = [0, 1, 2, 5]
# force_list = [0.0, 3.0]
# memb_k_list = [0, 1, 2, 5]
# rg_list = [0, 0.1, 1, 5, 10]
# force_list = [0.0, 3.0]
# memb_k_list = [0, 0.1, 1, 5, 10]
# rg_list = [0, 0.1, 1]
# force_list = ["ramp"]
# memb_k_list = [0, 0.1, 1]
# rg_list = [0, 0.1, 0.5, 1, 2]
# rg_list = [3, 4]
# force_list = ["ramp"]
# memb_k_list = [0, 0.1, 1, 2, 5, 10]
rg_list = [0, 0.1, 1, 2, 4, 8]
# rg_list = [0, 0.01, 0.04, 0.08, 0.1, 0.2, 0.5, 1]
force_list = ["ramp"]
memb_k_list = [0, 1, 2, 4, 8, 16]
i = args.rerun
simulation_steps = 5e7
# Defaults
# rg_list = [0]
# memb_k_list = [0]
# force_list = ["ramp"]
# force_ramp_rate_list = [1]
# repeat = 80
# rg_list = [0.08]
# force_list = [0.0, 0.02]
# force_list = [0.04, 0.06, 0.08]
# force_list = [0.045, 0.05, 0.055]
# force_list = [0.03, 0.07]
# force_list = [0.1]
# memb_k_list = [1]
# force_ramp_rate_list = [1]
# rg_list = [0, 0.01, 0.02, 0.04, 0.08, 0.16, 0.32, 0.6, 1, 5]
# memb_k_list = [0, 1, 2, 5, 10, 20]
# force_list = ["ramp"]
# force_ramp_rate_list = [1]
# rg_list = [0, 0.02, 0.08, 0.16, 0.6, 1, 2, 5]
# memb_k_list = [0, 1, 2, 5, 10]
# force_list = ["ramp"]
# force_ramp_rate_list = [1]
# repeat = 2
# rg_list = [0, 0.04, 0.08]
# memb_k_list = [0, 1, 2, 5, 10]
# force_list = ["ramp"]
# force_ramp_rate_list = [1]
# repeat = 20
# rg_list = [0.08]
# memb_k_list = [1]
# force_list = [0.02, 0.03, 0.04]
# force_ramp_rate_list = [1]
# repeat = 100
# rg_list = [0.08]
# memb_k_list = [1]
# # force_list = [0.02, 0.03, 0.04, 0.05, 0.06]
# force_list = [0.025, 0.035, 0.045]
# force_list = [0.02, 0.03]
force_ramp_rate_list = [1]
repeat = 2
# repeat = 100
# rg_list = [0, 0.1, 0.5, 1, 1.5, 2]
# memb_k_list = [0, 1, 2, 3, 4]
# force_list = ["force_ramp"]
# force_ramp_rate_list = [1]
# repeat = 2
# force_ramp_rate_list = [1, 5, 10, 20]
# force_ramp_rate_list = [30, 40, 50, 60, 70, 80, 90, 100, 500, 1000]
# force_ramp_rate_list = [1, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 500, 1000]
# force_ramp_rate_list = [1]
for force_ramp_rate in force_ramp_rate_list:
for memb_k in memb_k_list:
for force in force_list:
for rg in rg_list:
folder_name = "memb_{}_rg_{}".format(memb_k, rg)
# folder_name = "memb_{}_force_{}_rg_{}".format(memb_k, force, rg)
# folder_name = "rate_{}".format(force_ramp_rate)
# folder_name = "force_{}".format(force)
# if memb_k == 0 and rg == 0:
# continue
print(folder_name)
do("mkdir "+folder_name)
do("cp -r 2xov " + folder_name + "/")
cd(folder_name + "/2xov")
if args.model == "go":
fixFile = "fix_backbone_coeff_go.data"
if args.model == "single":
fixFile = "fix_backbone_coeff_single.data"
with fileinput.FileInput(fixFile, inplace=True, backup='.bak') as file:
for line in file:
print(line.replace("MY_MEMB_K", str(memb_k)), end='')
with fileinput.FileInput(fileName, inplace=True, backup='.bak') as file:
for line in file:
tmp = line
tmp = tmp.replace("MY_FORCE", str(force))
tmp = tmp.replace("MY_RG", str(rg))
tmp = tmp.replace("RATE", str(force_ramp_rate))
tmp = tmp.replace("SIMULATION_STEPS", str(int(simulation_steps/force_ramp_rate)))
print(tmp, end='')
cd("..")
do("run.py -m 2 2xov -n {}".format(repeat))
# do("run.py -m 2 2xov --start extended -n {}".format(repeat))
cd("..")
# do( # replace SIMULATION_STEPS with specific steps
# "sed -i.bak 's/NUMBER/'" +
# str(int(i)) +
# "'/g' 2xov_{}.in".format(i))
# do("mkdir -p {}".format(i))
# do( # replace RANDOM with a random number
# "sed -i.bak 's/RANDOM/'" +
# str(randint(1, 10**6)) +
# "'/g' *.in")
# with open("run_{}.slurm".format(i), "w") as r:
# r.write(run_slurm.format(i))
# do("sbatch " + "run_{}.slurm".format(i))
# cd("..")
# force_list = ["ramp"]
# memb_k_list = [0, 0.5, 1, 2, 3, 5]
#
# for memb_k in memb_k_list:
# for force in force_list:
# rg = memb_k
# i = 0
# folder_name = "memb_{}_force_{}_rg_{}".format(memb_k, force, rg)
# do("cp -r 2xov " + folder_name)
# cd(folder_name)
# # fixFile = "fix_backbone_coeff_go.data"
# fixFile = "fix_backbone_coeff_single.data"
# with fileinput.FileInput(fixFile, inplace=True, backup='.bak') as file:
# for line in file:
# print(line.replace("MY_MEMB_K", str(memb_k)), end='')
# with fileinput.FileInput(fileName, inplace=True, backup='.bak') as file:
# for line in file:
# print(line.replace("MY_FORCE", str(force)), end='')
# with fileinput.FileInput(fileName, inplace=True, backup='.bak') as file:
# for line in file:
# print(line.replace("MY_RG", str(rg)), end='')
#
# do("cp 2xov_multi.in 2xov_{}.in".format(i))
# with fileinput.FileInput("2xov_{}.in".format(i), inplace=True, backup='.bak') as file:
# for line in file:
# print(line.replace("START_FROM", start_from), end='')
# do( # replace SIMULATION_STEPS with specific steps
# "sed -i.bak 's/NUMBER/'" +
# str(int(i)) +
# "'/g' 2xov_{}.in".format(i))
# do("mkdir -p {}".format(i))
# do( # replace RANDOM with a random number
# "sed -i.bak 's/RANDOM/'" +
# str(randint(1, 10**6)) +
# "'/g' *.in")
# with open("run_{}.slurm".format(i), "w") as r:
# r.write(run_slurm.format(i))
# do("sbatch run_0.slurm")
# cd("..")
# folder_list = open('folder_list', 'w')
# distance_list = np.arange(20, 350, 5)
# # temp_list = np.arange(250, 400, 50)
# temp_list = [300]
# folder_name = ""
# cwd = os.getcwd()
# os.system("mkdir -p simulation")
# for temp in temp_list:
# pre_folder_name = "T_"+str(temp)
# for distance in distance_list:
# folder_name = pre_folder_name + "_D_"+str(distance)
# folder_list.write(folder_name+"\n")
# os.system("mkdir -p simulation/" + folder_name)
# os.system("cp -r "+protein_name+"/* simulation/"+folder_name+"/")
# os.chdir("simulation")
# os.chdir(folder_name)
# os.system(
# "sed -i.bak 's/TEMPERATURE/'" +
# str(temp) +
# "'/g' "+protein_name+".in")
# os.system(
# "sed -i.bak 's/DISTANCE/'" +
# str(distance) +
# "'/g' colvars.x")
# # os.system(
# # "sed -i.bak 's/MGamma/'" +
# # str(MGamma) +
# # "'/g' fix_backbone_coeff.data")
# do("run.py " + protein_name + " -o -s 5 -i")
# # os.system("run.py " + protein_name + "/ -o -n 1 -s 6 -i")
# cd(cwd)
# n = 5
# membrane_k = [1, 2, 3]
# rg_cylindrical_spring_constants = [1, 0.1]
# for i in range(n):
# for MemK in membrane_k:
# add_force_strengths = range(-3-MemK, -1-MemK)
# pre_pre_folder_name = "MemK"+str(MemK)
# for ForceStrength in add_force_strengths:
# pre_folder_name = pre_pre_folder_name+"ForceStrength"+str(ForceStrength)
# for SpringConstant in rg_cylindrical_spring_constants:
# # simulation set up
# folder_name = pre_folder_name+"SpringConstant"+str(SpringConstant)+"_"+str(i)+"/"
# os.system("mkdir -p "+folder_name)
# folder_list.write(folder_name+"\n")
# os.system("cp -r "+args.template+"* "+folder_name)
# os.chdir(folder_name)
# os.system( # replace SIMULATION_STEPS with specific steps
# "sed -i.bak 's/WARM_UP_STEPS/'" +
# str(warm_up_steps) +
# "'/g' "+protein_name+".in")
# os.system( # replace RANDOM with a random number
# "sed -i.bak 's/RANDOM/'" +
# str(randint(1, 10**6)) +
# "'/g' "+protein_name+".in")
# os.system( # replace SIMULATION_STEPS with specific steps
# "sed -i.bak 's/SIMULATION_STEPS/'" +
# str(simulation_steps) +
# "'/g' "+protein_name+".in")
# os.system( # replace SpringConstant with specific steps
# "sed -i.bak 's/SpringConstant/'" +
# str(SpringConstant) +
# "'/g' "+protein_name+".in")
# os.system( # replace ForceStrength with specific steps
# "sed -i.bak 's/ForceStrength/'" +
# str(ForceStrength) +
# "'/g' "+protein_name+".in")
# os.system( # replace ForceStrength with specific steps
# "sed -i.bak 's/MemK/'" +
# str(MemK) +
# "'/g' fix_backbone_coeff.data")
# # if(platform.system() == 'Darwin'):
# # os.system("/Users/weilu/Documents/lammps-9Oct12_modified/src/lmp_serial \
# # < "+protein_name+".in")
# if(platform.system() == 'Darwin'):
# os.system("/Users/weilu/Documents/lammps-9Oct12_modified/src/lmp_serial \
# < "+protein_name+".in")
# elif(platform.system() == 'Linux'):
# os.system("cp ~/opt/run.slurm .")
# os.system( # replace PROTEIN with pdb name
# "sed -i.bak 's/PROTEIN/'" +
# protein_name +
# "'/g' run.slurm")
# os.system("sbatch run.slurm")
# else:
# print("system unkown")
# os.chdir("..")
# folder_list.close()
# print("hello world")
|
luwei0917/awsemmd_script
|
variable_test_run.py
|
Python
|
mit
| 13,027
|
[
"Brian",
"LAMMPS"
] |
a0739a16c9e9a837a1fd865dba4162070e0159d5f426fb4b35a19b2301044e48
|
"""
This pipeline is intended to extract edge information from T2W images.
"""
import os
import numpy as np
from protoclass.data_management import T2WModality
from protoclass.data_management import GTModality
from protoclass.preprocessing import RicianNormalization
from protoclass.preprocessing import GaussianNormalization
from protoclass.extraction import EdgeSignalExtraction
# Define the path where all the patients are
path_patients = '/data/prostate/experiments'
# Define the path of the modality to normalize
path_t2w = 'T2W'
# Define the path of the ground-truth for the prostate
path_gt = 'GT_inv/prostate'
# Define the label of the ground-truth which will be provided
label_gt = ['prostate']
# Define the path where the information for the gaussian normalization are
path_gaussian = '/data/prostate/pre-processing/mp-mri-prostate/gaussian-t2w'
# Define the path where the information for the rician normalization are
path_rician = '/data/prostate/pre-processing/mp-mri-prostate/rician-t2w'
# Define the path to store the extracted edge data
path_store = '/data/prostate/extraction/mp-mri-prostate/edge-t2w'
# ID of the patient for which we need to use the Gaussian Normalization
ID_GAUSSIAN = '387'
# Set the value of the extremum
EXTREM = (-4.48, 22.11)
# Set the value of the edges
TYPE_FILTER = ('sobel', 'prewitt', 'scharr', 'kirsch', 'laplacian')
# Generate the different path to be later treated
path_patients_list_t2w = []
path_patients_list_gt = []
# Create the generator
id_patient_list = [name for name in os.listdir(path_patients)
if os.path.isdir(os.path.join(path_patients, name))]
for id_patient in id_patient_list:
# Append for the T2W data
path_patients_list_t2w.append(os.path.join(path_patients, id_patient,
path_t2w))
# Append for the GT data - Note that we need a list of gt paths
path_patients_list_gt.append([os.path.join(path_patients, id_patient,
path_gt)])
# List where to store the different minimum
for id_p, (p_t2w, p_gt) in enumerate(zip(path_patients_list_t2w,
path_patients_list_gt)):
print 'Processing {}'.format(id_patient_list[id_p])
# Remove a part of the string to have only the id
nb_patient = id_patient_list[id_p].replace('Patient ', '')
# Read the image data
t2w_mod = T2WModality()
t2w_mod.read_data_from_path(p_t2w)
# Read the GT
gt_mod = GTModality()
gt_mod.read_data_from_path(label_gt, p_gt)
if not nb_patient == ID_GAUSSIAN:
# Rician Normalization
# Read the normalization information
pat_chg = id_patient_list[id_p].lower().replace(' ', '_') + '_norm.p'
filename = os.path.join(path_rician, pat_chg)
t2w_norm = RicianNormalization.load_from_pickles(filename)
# Normalize the data
t2w_mod = t2w_norm.normalize(t2w_mod)
else:
# Gaussian Normalization
# Read the normalization information
pat_chg = id_patient_list[id_p].lower().replace(' ', '_') + '_norm.p'
filename = os.path.join(path_gaussian, pat_chg)
t2w_norm = GaussianNormalization.load_from_pickles(filename)
# Normalize the data
t2w_mod = t2w_norm.normalize(t2w_mod)
# Rescale the data on 8 bits
t2w_mod.data_ = ((t2w_mod.data_ - EXTREM[0]) *
(255. / (EXTREM[1] - EXTREM[0])))
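# illustrative check of the rescaling above: with EXTREM = (-4.48, 22.11),
# an intensity of -4.48 maps to 0 and an intensity of 22.11 maps to 255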
# Update the histogram
t2w_mod.update_histogram()
# Extract the edges for each type of filter and order of filter
for type_f in TYPE_FILTER:
print 'The {} will be extracted'.format(type_f)
# Create the extraction method
ext = EdgeSignalExtraction(t2w_mod, edge_detector=type_f)
# Fit the data
print 'Compute the edge map'
ext.fit(t2w_mod, ground_truth=gt_mod, cat=label_gt[0])
# Extract the data
print 'Extract the edge map'
data = ext.transform(t2w_mod, ground_truth=gt_mod, cat=label_gt[0])
# Store the data
print 'Store the data in the right directory'
# Create the path for the current version of the filter
path_filter = os.path.join(path_store, type_f)
# Check that the path is existing
if not os.path.exists(path_filter):
os.makedirs(path_filter)
pat_chg = (id_patient_list[id_p].lower().replace(' ', '_') +
'_edge_t2w.npy')
filename = os.path.join(path_filter, pat_chg)
np.save(filename, data)
|
I2Cvb/mp-mri-prostate
|
pipeline/feature-extraction/t2w/pipeline_extraction_edge_t2w.py
|
Python
|
mit
| 4,554
|
[
"Gaussian"
] |
cf85099ae532451fc288352e6a2bfbe4a968c636d651827e6b7a88b5b9605007
|
from __future__ import absolute_import
from typing import Any, Dict, List, Set, Tuple, TypeVar, Text, \
Union, Optional, Sequence, AbstractSet, Pattern, AnyStr
from typing.re import Match
from zerver.lib.str_utils import NonBinaryStr
from django.db import models
from django.db.models.query import QuerySet
from django.db.models import Manager
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, UserManager, \
PermissionsMixin
import django.contrib.auth
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.dispatch import receiver
from zerver.lib.cache import cache_with_key, flush_user_profile, flush_realm, \
user_profile_by_id_cache_key, user_profile_by_email_cache_key, \
generic_bulk_cached_fetch, cache_set, flush_stream, \
display_recipient_cache_key, cache_delete, \
get_stream_cache_key, active_user_dicts_in_realm_cache_key, \
bot_dicts_in_realm_cache_key, active_user_dict_fields, \
bot_dict_fields, flush_message
from zerver.lib.utils import make_safe_digest, generate_random_token
from zerver.lib.str_utils import ModelReprMixin
from django.db import transaction
from zerver.lib.camo import get_camo_url
from django.utils import timezone
from django.contrib.sessions.models import Session
from zerver.lib.timestamp import datetime_to_timestamp
from django.db.models.signals import pre_save, post_save, post_delete
from django.core.validators import MinLengthValidator, RegexValidator
from django.utils.translation import ugettext_lazy as _
from zerver.lib import cache
from bitfield import BitField
from bitfield.types import BitHandler
from collections import defaultdict
from datetime import timedelta
import pylibmc
import re
import logging
import sre_constants
import time
import datetime
MAX_SUBJECT_LENGTH = 60
MAX_MESSAGE_LENGTH = 10000
MAX_LANGUAGE_ID_LENGTH = 50 # type: int
STREAM_NAMES = TypeVar('STREAM_NAMES', Sequence[Text], AbstractSet[Text])
# Doing 1000 remote cache requests to get_display_recipient is quite slow,
# so add a local cache as well as the remote cache.
per_request_display_recipient_cache = {} # type: Dict[int, List[Dict[str, Any]]]
def get_display_recipient_by_id(recipient_id, recipient_type, recipient_type_id):
# type: (int, int, int) -> Union[Text, List[Dict[str, Any]]]
if recipient_id not in per_request_display_recipient_cache:
result = get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id)
per_request_display_recipient_cache[recipient_id] = result
return per_request_display_recipient_cache[recipient_id]
def get_display_recipient(recipient):
# type: (Recipient) -> Union[Text, List[Dict[str, Any]]]
return get_display_recipient_by_id(
recipient.id,
recipient.type,
recipient.type_id
)
def flush_per_request_caches():
# type: () -> None
global per_request_display_recipient_cache
per_request_display_recipient_cache = {}
global per_request_realm_filters_cache
per_request_realm_filters_cache = {}
@cache_with_key(lambda *args: display_recipient_cache_key(args[0]),
timeout=3600*24*7)
def get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id):
# type: (int, int, int) -> Union[Text, List[Dict[str, Any]]]
"""
returns: an appropriate object describing the recipient. For a
stream this will be the stream name as a string. For a huddle or
personal, it will be an array of dicts about each recipient.
"""
if recipient_type == Recipient.STREAM:
stream = Stream.objects.get(id=recipient_type_id)
return stream.name
# We don't really care what the ordering is, just that it's deterministic.
user_profile_list = (UserProfile.objects.filter(subscription__recipient_id=recipient_id)
.select_related()
.order_by('email'))
return [{'email': user_profile.email,
'domain': user_profile.realm.domain,
'full_name': user_profile.full_name,
'short_name': user_profile.short_name,
'id': user_profile.id,
'is_mirror_dummy': user_profile.is_mirror_dummy} for user_profile in user_profile_list]
def get_realm_emoji_cache_key(realm):
# type: (Realm) -> Text
return u'realm_emoji:%s' % (realm.id,)
class Realm(ModelReprMixin, models.Model):
# domain is a domain in the Internet sense. It must be structured like a
# valid email domain. We use it to restrict access, identify bots, etc.
domain = models.CharField(max_length=40, db_index=True, unique=True) # type: Text
# name is the user-visible identifier for the realm. It has no required
# structure.
AUTHENTICATION_FLAGS = [u'Google', u'Email', u'GitHub', u'LDAP', u'Dev', u'RemoteUser']
name = models.CharField(max_length=40, null=True) # type: Optional[Text]
string_id = models.CharField(max_length=40, unique=True) # type: Text
restricted_to_domain = models.BooleanField(default=False) # type: bool
invite_required = models.BooleanField(default=True) # type: bool
invite_by_admins_only = models.BooleanField(default=False) # type: bool
create_stream_by_admins_only = models.BooleanField(default=False) # type: bool
add_emoji_by_admins_only = models.BooleanField(default=False) # type: bool
mandatory_topics = models.BooleanField(default=False) # type: bool
show_digest_email = models.BooleanField(default=True) # type: bool
name_changes_disabled = models.BooleanField(default=False) # type: bool
email_changes_disabled = models.BooleanField(default=False) # type: bool
allow_message_editing = models.BooleanField(default=True) # type: bool
DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS = 600 # if changed, also change in admin.js
message_content_edit_limit_seconds = models.IntegerField(default=DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS) # type: int
message_retention_days = models.IntegerField(null=True) # type: Optional[int]
# Valid org_types are {CORPORATE, COMMUNITY}
CORPORATE = 1
COMMUNITY = 2
org_type = models.PositiveSmallIntegerField(default=COMMUNITY) # type: int
date_created = models.DateTimeField(default=timezone.now) # type: datetime.datetime
notifications_stream = models.ForeignKey('Stream', related_name='+', null=True, blank=True) # type: Optional[Stream]
deactivated = models.BooleanField(default=False) # type: bool
default_language = models.CharField(default=u'en', max_length=MAX_LANGUAGE_ID_LENGTH) # type: Text
authentication_methods = BitField(flags=AUTHENTICATION_FLAGS,
default=2**31 - 1) # type: BitHandler
waiting_period_threshold = models.PositiveIntegerField(default=0) # type: int
ICON_FROM_GRAVATAR = u'G'
ICON_UPLOADED = u'U'
ICON_SOURCES = (
(ICON_FROM_GRAVATAR, 'Hosted by Gravatar'),
(ICON_UPLOADED, 'Uploaded by administrator'),
)
icon_source = models.CharField(default=ICON_FROM_GRAVATAR, choices=ICON_SOURCES,
max_length=1) # type: Text
icon_version = models.PositiveSmallIntegerField(default=1) # type: int
DEFAULT_NOTIFICATION_STREAM_NAME = u'announce'
def authentication_methods_dict(self):
# type: () -> Dict[Text, bool]
"""Returns the a mapping from authentication flags to their status,
showing only those authentication flags that are supported on
the current server (i.e. if EmailAuthBackend is not configured
on the server, this will not return an entry for "Email")."""
# This mapping needs to be imported from here due to the cyclic
# dependency.
from zproject.backends import AUTH_BACKEND_NAME_MAP
ret = {} # type: Dict[Text, bool]
supported_backends = {backend.__class__ for backend in django.contrib.auth.get_backends()}
for k, v in self.authentication_methods.iteritems():
backend = AUTH_BACKEND_NAME_MAP[k]
if backend in supported_backends:
ret[k] = v
return ret
def __unicode__(self):
# type: () -> Text
return u"<Realm: %s %s>" % (self.string_id, self.id)
@cache_with_key(get_realm_emoji_cache_key, timeout=3600*24*7)
def get_emoji(self):
# type: () -> Dict[Text, Optional[Dict[str, Text]]]
return get_realm_emoji_uncached(self)
def get_admin_users(self):
# type: () -> Sequence[UserProfile]
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_realm_admin=True,
is_active=True).select_related()
def get_active_users(self):
# type: () -> Sequence[UserProfile]
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_active=True).select_related()
def get_bot_domain(self):
# type: () -> str
# Remove the port. Mainly needed for development environment.
external_host = settings.EXTERNAL_HOST.split(':')[0]
if settings.REALMS_HAVE_SUBDOMAINS or \
Realm.objects.filter(deactivated=False) \
.exclude(string_id__in=settings.SYSTEM_ONLY_REALMS).count() > 1:
return "%s.%s" % (self.string_id, external_host)
return external_host
@property
def subdomain(self):
# type: () -> Optional[Text]
if settings.REALMS_HAVE_SUBDOMAINS:
return self.string_id
return None
@property
def uri(self):
# type: () -> str
if settings.REALMS_HAVE_SUBDOMAINS and self.subdomain is not None:
return '%s%s.%s' % (settings.EXTERNAL_URI_SCHEME,
self.subdomain, settings.EXTERNAL_HOST)
return settings.SERVER_URI
@property
def host(self):
# type: () -> str
if settings.REALMS_HAVE_SUBDOMAINS and self.subdomain is not None:
return "%s.%s" % (self.subdomain, settings.EXTERNAL_HOST)
return settings.EXTERNAL_HOST
@property
def is_zephyr_mirror_realm(self):
# type: () -> bool
return self.string_id == "zephyr"
@property
def webathena_enabled(self):
# type: () -> bool
return self.is_zephyr_mirror_realm
@property
def presence_disabled(self):
# type: () -> bool
return self.is_zephyr_mirror_realm
class Meta(object):
permissions = (
('administer', "Administer a realm"),
('api_super_user', "Can send messages as other users for mirroring"),
)
post_save.connect(flush_realm, sender=Realm)
def get_realm(string_id):
# type: (Text) -> Realm
return Realm.objects.filter(string_id=string_id).first()
def completely_open(realm):
# type: (Realm) -> bool
# This realm is completely open to everyone on the internet to
# join. E-mail addresses do not need to match a realmalias and
# an invite from an existing user is not required.
if not realm:
return False
return not realm.invite_required and not realm.restricted_to_domain
def get_unique_open_realm():
# type: () -> Optional[Realm]
"""We only return a realm if there is a unique non-system-only realm,
it is completely open, and there are no subdomains."""
if settings.REALMS_HAVE_SUBDOMAINS:
return None
realms = Realm.objects.filter(deactivated=False)
# On production installations, the (usually "zulip.com") system
# realm is an empty realm just used for system bots, so don't
# include it in this accounting.
realms = realms.exclude(string_id__in=settings.SYSTEM_ONLY_REALMS)
if len(realms) != 1:
return None
realm = realms[0]
if realm.invite_required or realm.restricted_to_domain:
return None
return realm
def name_changes_disabled(realm):
# type: (Optional[Realm]) -> bool
if realm is None:
return settings.NAME_CHANGES_DISABLED
return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
class RealmAlias(models.Model):
realm = models.ForeignKey(Realm) # type: Realm
# should always be stored lowercase
domain = models.CharField(max_length=80, db_index=True) # type: Text
allow_subdomains = models.BooleanField(default=False)
class Meta(object):
unique_together = ("realm", "domain")
def can_add_alias(domain):
# type: (Text) -> bool
if settings.REALMS_HAVE_SUBDOMAINS:
return True
if RealmAlias.objects.filter(domain=domain).exists():
return False
return True
# These functions should only be used on email addresses that have
# been validated via django.core.validators.validate_email
#
# Note that we need to use some care, since you can have multiple @-signs; e.g.
# "tabbott@test"@zulip.com
# is a valid email address
def email_to_username(email):
# type: (Text) -> Text
return "@".join(email.split("@")[:-1]).lower()
# Returns the raw domain portion of the desired email address
def email_to_domain(email):
# type: (Text) -> Text
return email.split("@")[-1].lower()
class GetRealmByDomainException(Exception):
pass
def get_realm_by_email_domain(email):
# type: (Text) -> Optional[Realm]
if settings.REALMS_HAVE_SUBDOMAINS:
raise GetRealmByDomainException(
"Cannot get realm from email domain when settings.REALMS_HAVE_SUBDOMAINS = True")
domain = email_to_domain(email)
query = RealmAlias.objects.select_related('realm')
# Search for the longest match. If found return immediately. Since in case of
# settings.REALMS_HAVE_SUBDOMAINS=True, we have a unique mapping between the
# realm and domain so don't worry about `allow_subdomains` being True or False.
alias = query.filter(domain=domain).first()
if alias is not None:
return alias.realm
else:
# Since we have not found any match. We will now try matching the parent domain.
# Filter out the realm domains with `allow_subdomains=False` so that we don't end
# up matching 'test.zulip.com' wrongly to (realm, 'zulip.com', False).
query = query.filter(allow_subdomains=True)
while len(domain) > 0:
subdomain, sep, domain = domain.partition('.')
alias = query.filter(domain=domain).first()
if alias is not None:
return alias.realm
return None
# Is a user with the given email address allowed to be in the given realm?
# (This function does not check whether the user has been invited to the realm.
# So for invite-only realms, this is the test for whether a user can be invited,
# not whether the user can sign up currently.)
def email_allowed_for_realm(email, realm):
# type: (Text, Realm) -> bool
if not realm.restricted_to_domain:
return True
domain = email_to_domain(email)
query = RealmAlias.objects.filter(realm=realm)
if query.filter(domain=domain).exists():
return True
else:
query = query.filter(allow_subdomains=True)
while len(domain) > 0:
subdomain, sep, domain = domain.partition('.')
if query.filter(domain=domain).exists():
return True
return False
def list_of_domains_for_realm(realm):
# type: (Realm) -> List[Dict[str, Union[str, bool]]]
return list(RealmAlias.objects.filter(realm=realm).values('domain', 'allow_subdomains'))
class RealmEmoji(ModelReprMixin, models.Model):
author = models.ForeignKey('UserProfile', blank=True, null=True)
realm = models.ForeignKey(Realm) # type: Realm
# Second part of the regex (negative lookbehind) disallows names ending with one of the punctuation characters
name = models.TextField(validators=[MinLengthValidator(1),
RegexValidator(regex=r'^[0-9a-zA-Z.\-_]+(?<![.\-_])$',
message=_("Invalid characters in emoji name"))]) # type: Text
# URLs longer than roughly 2000 characters start having browser
# compatibility problems, so 1000 seems like a safe limit.
img_url = models.URLField(max_length=1000) # type: Text
class Meta(object):
unique_together = ("realm", "name")
def __unicode__(self):
# type: () -> Text
return u"<RealmEmoji(%s): %s %s>" % (self.realm.string_id, self.name, self.img_url)
def get_realm_emoji_uncached(realm):
# type: (Realm) -> Dict[Text, Optional[Dict[str, Text]]]
d = {}
for row in RealmEmoji.objects.filter(realm=realm).select_related('author'):
if row.author:
author = {
'id': row.author.id,
'email': row.author.email,
'full_name': row.author.full_name}
else:
author = None
d[row.name] = dict(source_url=row.img_url,
display_url=get_camo_url(row.img_url),
author=author)
return d
def flush_realm_emoji(sender, **kwargs):
# type: (Any, **Any) -> None
realm = kwargs['instance'].realm
cache_set(get_realm_emoji_cache_key(realm),
get_realm_emoji_uncached(realm),
timeout=3600*24*7)
post_save.connect(flush_realm_emoji, sender=RealmEmoji)
post_delete.connect(flush_realm_emoji, sender=RealmEmoji)
def filter_pattern_validator(value):
# type: (Text) -> None
regex = re.compile(r'(?:[\w\-#]*)(\(\?P<\w+>.+\))')
error_msg = 'Invalid filter pattern, you must use the following format OPTIONAL_PREFIX(?P<id>.+)'
if not regex.match(str(value)):
raise ValidationError(error_msg)
try:
re.compile(value)
except sre_constants.error:
# Regex is invalid
raise ValidationError(error_msg)
def filter_format_validator(value):
# type: (str) -> None
regex = re.compile(r'^[\.\/:a-zA-Z0-9_-]+%\(([a-zA-Z0-9_-]+)\)s[a-zA-Z0-9_-]*$')
if not regex.match(value):
raise ValidationError('URL format string must be in the following format: `https://example.com/%(\w+)s`')
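# A hypothetical pattern / URL format pair that would pass both validators above
# (illustrative only, not taken from any real configuration):
#   pattern:           #(?P<id>[0-9]+)
#   url_format_string: https://trac.example.com/ticket/%(id)s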
class RealmFilter(models.Model):
realm = models.ForeignKey(Realm) # type: Realm
pattern = models.TextField(validators=[filter_pattern_validator]) # type: Text
url_format_string = models.TextField(validators=[URLValidator, filter_format_validator]) # type: Text
class Meta(object):
unique_together = ("realm", "pattern")
def __unicode__(self):
# type: () -> Text
return u"<RealmFilter(%s): %s %s>" % (self.realm.string_id, self.pattern, self.url_format_string)
def get_realm_filters_cache_key(realm_id):
# type: (int) -> Text
return u'all_realm_filters:%s' % (realm_id,)
# We have a per-process cache to avoid doing 1000 remote cache queries during page load
per_request_realm_filters_cache = {} # type: Dict[int, List[Tuple[Text, Text, int]]]
def realm_in_local_realm_filters_cache(realm_id):
# type: (int) -> bool
return realm_id in per_request_realm_filters_cache
def realm_filters_for_realm(realm_id):
# type: (int) -> List[Tuple[Text, Text, int]]
if not realm_in_local_realm_filters_cache(realm_id):
per_request_realm_filters_cache[realm_id] = realm_filters_for_realm_remote_cache(realm_id)
return per_request_realm_filters_cache[realm_id]
@cache_with_key(get_realm_filters_cache_key, timeout=3600*24*7)
def realm_filters_for_realm_remote_cache(realm_id):
# type: (int) -> List[Tuple[Text, Text, int]]
filters = []
for realm_filter in RealmFilter.objects.filter(realm_id=realm_id):
filters.append((realm_filter.pattern, realm_filter.url_format_string, realm_filter.id))
return filters
def all_realm_filters():
# type: () -> Dict[int, List[Tuple[Text, Text, int]]]
filters = defaultdict(list) # type: Dict[int, List[Tuple[Text, Text, int]]]
for realm_filter in RealmFilter.objects.all():
filters[realm_filter.realm_id].append((realm_filter.pattern, realm_filter.url_format_string, realm_filter.id))
return filters
def flush_realm_filter(sender, **kwargs):
# type: (Any, **Any) -> None
realm_id = kwargs['instance'].realm_id
cache_delete(get_realm_filters_cache_key(realm_id))
try:
per_request_realm_filters_cache.pop(realm_id)
except KeyError:
pass
post_save.connect(flush_realm_filter, sender=RealmFilter)
post_delete.connect(flush_realm_filter, sender=RealmFilter)
class UserProfile(ModelReprMixin, AbstractBaseUser, PermissionsMixin):
DEFAULT_BOT = 1
"""
Incoming webhook bots are limited to only sending messages via webhooks.
Thus, it is less of a security risk to expose their API keys to third-party services,
since they can't be used to read messages.
"""
INCOMING_WEBHOOK_BOT = 2
# Fields from models.AbstractUser minus last_name and first_name,
# which we don't use; email is modified to make it indexed and unique.
email = models.EmailField(blank=False, db_index=True, unique=True) # type: Text
is_staff = models.BooleanField(default=False) # type: bool
is_active = models.BooleanField(default=True, db_index=True) # type: bool
is_realm_admin = models.BooleanField(default=False, db_index=True) # type: bool
is_bot = models.BooleanField(default=False, db_index=True) # type: bool
bot_type = models.PositiveSmallIntegerField(null=True, db_index=True) # type: Optional[int]
is_api_super_user = models.BooleanField(default=False, db_index=True) # type: bool
date_joined = models.DateTimeField(default=timezone.now) # type: datetime.datetime
is_mirror_dummy = models.BooleanField(default=False) # type: bool
bot_owner = models.ForeignKey('self', null=True, on_delete=models.SET_NULL) # type: Optional[UserProfile]
USERNAME_FIELD = 'email'
MAX_NAME_LENGTH = 100
NAME_INVALID_CHARS = ['*', '`', '>', '"', '@']
# Our custom site-specific fields
full_name = models.CharField(max_length=MAX_NAME_LENGTH) # type: Text
short_name = models.CharField(max_length=MAX_NAME_LENGTH) # type: Text
# pointer points to Message.id, NOT UserMessage.id.
pointer = models.IntegerField() # type: int
last_pointer_updater = models.CharField(max_length=64) # type: Text
realm = models.ForeignKey(Realm) # type: Realm
api_key = models.CharField(max_length=32) # type: Text
tos_version = models.CharField(null=True, max_length=10) # type: Text
### Notifications settings. ###
# Stream notifications.
enable_stream_desktop_notifications = models.BooleanField(default=False) # type: bool
enable_stream_sounds = models.BooleanField(default=False) # type: bool
# PM + @-mention notifications.
enable_desktop_notifications = models.BooleanField(default=True) # type: bool
pm_content_in_desktop_notifications = models.BooleanField(default=True) # type: bool
enable_sounds = models.BooleanField(default=True) # type: bool
enable_offline_email_notifications = models.BooleanField(default=True) # type: bool
enable_offline_push_notifications = models.BooleanField(default=True) # type: bool
enable_online_push_notifications = models.BooleanField(default=False) # type: bool
enable_digest_emails = models.BooleanField(default=True) # type: bool
# Old notification field superseded by existence of stream notification
# settings.
default_desktop_notifications = models.BooleanField(default=True) # type: bool
###
last_reminder = models.DateTimeField(default=timezone.now, null=True) # type: Optional[datetime.datetime]
rate_limits = models.CharField(default=u"", max_length=100) # type: Text # comma-separated list of range:max pairs
# Default streams
default_sending_stream = models.ForeignKey('zerver.Stream', null=True, related_name='+') # type: Optional[Stream]
default_events_register_stream = models.ForeignKey('zerver.Stream', null=True, related_name='+') # type: Optional[Stream]
default_all_public_streams = models.BooleanField(default=False) # type: bool
# UI vars
enter_sends = models.NullBooleanField(default=False) # type: Optional[bool]
autoscroll_forever = models.BooleanField(default=False) # type: bool
left_side_userlist = models.BooleanField(default=False) # type: bool
emoji_alt_code = models.BooleanField(default=False) # type: bool
# display settings
twenty_four_hour_time = models.BooleanField(default=False) # type: bool
default_language = models.CharField(default=u'en', max_length=MAX_LANGUAGE_ID_LENGTH) # type: Text
# Hours to wait before sending another email to a user
EMAIL_REMINDER_WAITPERIOD = 24
# Minutes to wait before warning a bot owner that her bot sent a message
# to a nonexistent stream
BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1
AVATAR_FROM_GRAVATAR = u'G'
AVATAR_FROM_USER = u'U'
AVATAR_SOURCES = (
(AVATAR_FROM_GRAVATAR, 'Hosted by Gravatar'),
(AVATAR_FROM_USER, 'Uploaded by user'),
)
avatar_source = models.CharField(default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1) # type: Text
avatar_version = models.PositiveSmallIntegerField(default=1) # type: int
TUTORIAL_WAITING = u'W'
TUTORIAL_STARTED = u'S'
TUTORIAL_FINISHED = u'F'
TUTORIAL_STATES = ((TUTORIAL_WAITING, "Waiting"),
(TUTORIAL_STARTED, "Started"),
(TUTORIAL_FINISHED, "Finished"))
tutorial_status = models.CharField(default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1) # type: Text
# Contains serialized JSON of the form:
# [("step 1", true), ("step 2", false)]
# where the second element of each tuple is if the step has been
# completed.
onboarding_steps = models.TextField(default=u'[]') # type: Text
invites_granted = models.IntegerField(default=0) # type: int
invites_used = models.IntegerField(default=0) # type: int
alert_words = models.TextField(default=u'[]') # type: Text # json-serialized list of strings
# Contains serialized JSON of the form:
# [["social", "mit"], ["devel", "ios"]]
muted_topics = models.TextField(default=u'[]') # type: Text
objects = UserManager() # type: UserManager
DEFAULT_UPLOADS_QUOTA = 1024*1024*1024
quota = models.IntegerField(default=DEFAULT_UPLOADS_QUOTA) # type: int
# The maximum length of a timezone in pytz.all_timezones is 32.
# Setting max_length=40 is a safe choice.
# In Django, the convention is to use empty string instead of Null
# for text based fields. For more information, see
# https://docs.djangoproject.com/en/1.10/ref/models/fields/#django.db.models.Field.null.
timezone = models.CharField(max_length=40, default=u'') # type: Text
def can_admin_user(self, target_user):
# type: (UserProfile) -> bool
"""Returns whether this user has permission to modify target_user"""
if target_user.bot_owner == self:
return True
elif self.is_realm_admin and self.realm == target_user.realm:
return True
else:
return False
def __unicode__(self):
# type: () -> Text
return u"<UserProfile: %s %s>" % (self.email, self.realm)
@property
def is_incoming_webhook(self):
# type: () -> bool
return self.bot_type == UserProfile.INCOMING_WEBHOOK_BOT
@staticmethod
def emails_from_ids(user_ids):
# type: (Sequence[int]) -> Dict[int, Text]
rows = UserProfile.objects.filter(id__in=user_ids).values('id', 'email')
return {row['id']: row['email'] for row in rows}
def can_create_streams(self):
# type: () -> bool
diff = (timezone.now() - self.date_joined).days
if self.is_realm_admin:
return True
elif self.realm.create_stream_by_admins_only:
return False
if diff >= self.realm.waiting_period_threshold:
return True
return False
def major_tos_version(self):
# type: () -> int
if self.tos_version is not None:
return int(self.tos_version.split('.')[0])
else:
return -1
def receives_offline_notifications(user_profile):
# type: (UserProfile) -> bool
return ((user_profile.enable_offline_email_notifications or
user_profile.enable_offline_push_notifications) and
not user_profile.is_bot)
def receives_online_notifications(user_profile):
# type: (UserProfile) -> bool
return (user_profile.enable_online_push_notifications and
not user_profile.is_bot)
def remote_user_to_email(remote_user):
# type: (Text) -> Text
if settings.SSO_APPEND_DOMAIN is not None:
remote_user += "@" + settings.SSO_APPEND_DOMAIN
return remote_user
# Make sure we flush the UserProfile object from our remote cache
# whenever we save it.
post_save.connect(flush_user_profile, sender=UserProfile)
class PreregistrationUser(models.Model):
email = models.EmailField() # type: Text
referred_by = models.ForeignKey(UserProfile, null=True) # type: Optional[UserProfile]
streams = models.ManyToManyField('Stream') # type: Manager
invited_at = models.DateTimeField(auto_now=True) # type: datetime.datetime
realm_creation = models.BooleanField(default=False)
# status: whether an object has been confirmed.
# if confirmed, set to confirmation.settings.STATUS_ACTIVE
status = models.IntegerField(default=0) # type: int
realm = models.ForeignKey(Realm, null=True) # type: Optional[Realm]
class EmailChangeStatus(models.Model):
new_email = models.EmailField() # type: Text
old_email = models.EmailField() # type: Text
updated_at = models.DateTimeField(auto_now=True) # type: datetime.datetime
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
# status: whether an object has been confirmed.
# if confirmed, set to confirmation.settings.STATUS_ACTIVE
status = models.IntegerField(default=0) # type: int
realm = models.ForeignKey(Realm) # type: Realm
class PushDeviceToken(models.Model):
APNS = 1
GCM = 2
KINDS = (
(APNS, 'apns'),
(GCM, 'gcm'),
)
kind = models.PositiveSmallIntegerField(choices=KINDS) # type: int
# The token is a unique device-specific token that is
# sent to us from each device:
# - APNS token if kind == APNS
# - GCM registration id if kind == GCM
token = models.CharField(max_length=4096, unique=True) # type: bytes
last_updated = models.DateTimeField(auto_now=True) # type: datetime.datetime
# The user whose device this is
user = models.ForeignKey(UserProfile, db_index=True) # type: UserProfile
# [optional] Contains the app id of the device if it is an iOS device
ios_app_id = models.TextField(null=True) # type: Optional[Text]
def generate_email_token_for_stream():
# type: () -> Text
return generate_random_token(32)
class Stream(ModelReprMixin, models.Model):
MAX_NAME_LENGTH = 60
name = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True) # type: Text
realm = models.ForeignKey(Realm, db_index=True) # type: Realm
invite_only = models.NullBooleanField(default=False) # type: Optional[bool]
# Used by the e-mail forwarder. The e-mail RFC specifies a maximum
# e-mail length of 254, and our max stream length is 60, so we
# have plenty of room for the token.
email_token = models.CharField(
max_length=32, default=generate_email_token_for_stream) # type: Text
description = models.CharField(max_length=1024, default=u'') # type: Text
date_created = models.DateTimeField(default=timezone.now) # type: datetime.datetime
deactivated = models.BooleanField(default=False) # type: bool
def __unicode__(self):
# type: () -> Text
return u"<Stream: %s>" % (self.name,)
def is_public(self):
# type: () -> bool
# All streams are private in Zephyr mirroring realms.
return not self.invite_only and not self.realm.is_zephyr_mirror_realm
class Meta(object):
unique_together = ("name", "realm")
def num_subscribers(self):
# type: () -> int
return Subscription.objects.filter(
recipient__type=Recipient.STREAM,
recipient__type_id=self.id,
user_profile__is_active=True,
active=True
).count()
# This is stream information that is sent to clients
def to_dict(self):
# type: () -> Dict[str, Any]
return dict(name=self.name,
stream_id=self.id,
description=self.description,
invite_only=self.invite_only)
post_save.connect(flush_stream, sender=Stream)
post_delete.connect(flush_stream, sender=Stream)
# The Recipient table is used to map Messages to the set of users who
# received the message. It is implemented as a set of triples (id,
# type_id, type). We have 3 types of recipients: Huddles (for group
# private messages), UserProfiles (for 1:1 private messages), and
# Streams. The recipient table maps a globally unique recipient id
# (used by the Message table) to the type-specific unique id (the
# stream id, user_profile id, or huddle id).
class Recipient(ModelReprMixin, models.Model):
type_id = models.IntegerField(db_index=True) # type: int
type = models.PositiveSmallIntegerField(db_index=True) # type: int
# Valid types are {personal, stream, huddle}
PERSONAL = 1
STREAM = 2
HUDDLE = 3
class Meta(object):
unique_together = ("type", "type_id")
# N.B. If we used Django's choices=... we would get this for free (kinda)
_type_names = {
PERSONAL: 'personal',
STREAM: 'stream',
HUDDLE: 'huddle'}
def type_name(self):
# type: () -> str
# Raises KeyError if invalid
return self._type_names[self.type]
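# A minimal sketch of the choices-based alternative mentioned in the N.B. above
# (hypothetical, not the project's actual field definition):
#   type = models.PositiveSmallIntegerField(
#       choices=((PERSONAL, 'personal'), (STREAM, 'stream'), (HUDDLE, 'huddle')),
#       db_index=True)
# after which Django's generated self.get_type_display() would return the name.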
def __unicode__(self):
# type: () -> Text
display_recipient = get_display_recipient(self)
return u"<Recipient: %s (%d, %s)>" % (display_recipient, self.type_id, self.type)
class Client(ModelReprMixin, models.Model):
name = models.CharField(max_length=30, db_index=True, unique=True) # type: Text
def __unicode__(self):
# type: () -> Text
return u"<Client: %s>" % (self.name,)
get_client_cache = {} # type: Dict[Text, Client]
def get_client(name):
# type: (Text) -> Client
# Accessing KEY_PREFIX through the module is necessary
# because we need the updated value of the variable.
cache_name = cache.KEY_PREFIX + name
if cache_name not in get_client_cache:
result = get_client_remote_cache(name)
get_client_cache[cache_name] = result
return get_client_cache[cache_name]
def get_client_cache_key(name):
# type: (Text) -> Text
return u'get_client:%s' % (make_safe_digest(name),)
@cache_with_key(get_client_cache_key, timeout=3600*24*7)
def get_client_remote_cache(name):
# type: (Text) -> Client
(client, _) = Client.objects.get_or_create(name=name)
return client
# get_stream_backend takes either a realm id or a realm
@cache_with_key(get_stream_cache_key, timeout=3600*24*7)
def get_stream_backend(stream_name, realm):
# type: (Text, Realm) -> Stream
return Stream.objects.select_related("realm").get(
name__iexact=stream_name.strip(), realm_id=realm.id)
def get_active_streams(realm):
# type: (Realm) -> QuerySet
"""
Return all streams (including invite-only streams) that have not been deactivated.
"""
return Stream.objects.filter(realm=realm, deactivated=False)
def get_stream(stream_name, realm):
# type: (Text, Realm) -> Optional[Stream]
try:
return get_stream_backend(stream_name, realm)
except Stream.DoesNotExist:
return None
def bulk_get_streams(realm, stream_names):
# type: (Realm, STREAM_NAMES) -> Dict[Text, Any]
def fetch_streams_by_name(stream_names):
# type: (List[Text]) -> Sequence[Stream]
#
# This should be just
#
# Stream.objects.select_related("realm").filter(name__iexact__in=stream_names,
# realm_id=realm_id)
#
# But chaining __in and __iexact doesn't work with Django's
# ORM, so we have the following hack to construct the relevant where clause
if len(stream_names) == 0:
return []
upper_list = ", ".join(["UPPER(%s)"] * len(stream_names))
where_clause = "UPPER(zerver_stream.name::text) IN (%s)" % (upper_list,)
return get_active_streams(realm.id).select_related("realm").extra(
where=[where_clause],
params=stream_names)
return generic_bulk_cached_fetch(lambda stream_name: get_stream_cache_key(stream_name, realm),
fetch_streams_by_name,
[stream_name.lower() for stream_name in stream_names],
id_fetcher=lambda stream: stream.name.lower())
def get_recipient_cache_key(type, type_id):
# type: (int, int) -> Text
return u"get_recipient:%s:%s" % (type, type_id,)
@cache_with_key(get_recipient_cache_key, timeout=3600*24*7)
def get_recipient(type, type_id):
# type: (int, int) -> Recipient
return Recipient.objects.get(type_id=type_id, type=type)
def bulk_get_recipients(type, type_ids):
# type: (int, List[int]) -> Dict[int, Any]
def cache_key_function(type_id):
# type: (int) -> Text
return get_recipient_cache_key(type, type_id)
def query_function(type_ids):
# type: (List[int]) -> Sequence[Recipient]
# TODO: Change return type to QuerySet[Recipient]
return Recipient.objects.filter(type=type, type_id__in=type_ids)
return generic_bulk_cached_fetch(cache_key_function, query_function, type_ids,
id_fetcher=lambda recipient: recipient.type_id)
def sew_messages_and_reactions(messages, reactions):
# type: (List[Dict[str, Any]], List[Dict[str, Any]]) -> List[Dict[str, Any]]
"""Given a iterable of messages and reactions stitch reactions
into messages.
"""
# Add all messages with empty reaction item
for message in messages:
message['reactions'] = []
# Convert list of messages into dictionary to make reaction stitching easy
converted_messages = {message['id']: message for message in messages}
for reaction in reactions:
converted_messages[reaction['message_id']]['reactions'].append(
reaction)
return list(converted_messages.values())
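# A small usage sketch with hypothetical dictionaries (illustrative only):
#   messages = [{'id': 1}, {'id': 2}]
#   reactions = [{'message_id': 2, 'emoji_name': 'smile'}]
#   sew_messages_and_reactions(messages, reactions)
# Each message gains a 'reactions' list; message 2 ends up with the reaction
# dict appended, message 1 with an empty list.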
class Message(ModelReprMixin, models.Model):
sender = models.ForeignKey(UserProfile) # type: UserProfile
recipient = models.ForeignKey(Recipient) # type: Recipient
subject = models.CharField(max_length=MAX_SUBJECT_LENGTH, db_index=True) # type: Text
content = models.TextField() # type: Text
rendered_content = models.TextField(null=True) # type: Optional[Text]
rendered_content_version = models.IntegerField(null=True) # type: Optional[int]
pub_date = models.DateTimeField('date published', db_index=True) # type: datetime.datetime
sending_client = models.ForeignKey(Client) # type: Client
last_edit_time = models.DateTimeField(null=True) # type: Optional[datetime.datetime]
edit_history = models.TextField(null=True) # type: Optional[Text]
has_attachment = models.BooleanField(default=False, db_index=True) # type: bool
has_image = models.BooleanField(default=False, db_index=True) # type: bool
has_link = models.BooleanField(default=False, db_index=True) # type: bool
def topic_name(self):
# type: () -> Text
"""
Please start using this helper to facilitate an
eventual switch over to a separate topic table.
"""
return self.subject
def __unicode__(self):
# type: () -> Text
display_recipient = get_display_recipient(self.recipient)
return u"<Message: %s / %s / %r>" % (display_recipient, self.subject, self.sender)
def get_realm(self):
# type: () -> Realm
return self.sender.realm
def save_rendered_content(self):
# type: () -> None
self.save(update_fields=["rendered_content", "rendered_content_version"])
@staticmethod
def need_to_render_content(rendered_content, rendered_content_version, bugdown_version):
# type: (Optional[Text], Optional[int], int) -> bool
return (rendered_content is None or
rendered_content_version is None or
rendered_content_version < bugdown_version)
def to_log_dict(self):
# type: () -> Dict[str, Any]
return dict(
id = self.id,
sender_id = self.sender.id,
sender_email = self.sender.email,
sender_domain = self.sender.realm.domain,
sender_full_name = self.sender.full_name,
sender_short_name = self.sender.short_name,
sending_client = self.sending_client.name,
type = self.recipient.type_name(),
recipient = get_display_recipient(self.recipient),
subject = self.topic_name(),
content = self.content,
timestamp = datetime_to_timestamp(self.pub_date))
@staticmethod
def get_raw_db_rows(needed_ids):
# type: (List[int]) -> List[Dict[str, Any]]
# This is a special purpose function optimized for
# callers like get_old_messages_backend().
fields = [
'id',
'subject',
'pub_date',
'last_edit_time',
'edit_history',
'content',
'rendered_content',
'rendered_content_version',
'recipient_id',
'recipient__type',
'recipient__type_id',
'sender_id',
'sending_client__name',
'sender__email',
'sender__full_name',
'sender__short_name',
'sender__realm__id',
'sender__realm__domain',
'sender__avatar_source',
'sender__avatar_version',
'sender__is_mirror_dummy',
]
messages = Message.objects.filter(id__in=needed_ids).values(*fields)
"""Adding one-many or Many-Many relationship in values results in N X
results.
Link: https://docs.djangoproject.com/en/1.8/ref/models/querysets/#values
"""
reactions = Reaction.get_raw_db_rows(needed_ids)
return sew_messages_and_reactions(messages, reactions)
def sent_by_human(self):
# type: () -> bool
sending_client = self.sending_client.name.lower()
return (sending_client in ('zulipandroid', 'zulipios', 'zulipdesktop',
'zulipmobile', 'zulipelectron', 'snipe',
'website', 'ios', 'android')) or (
'desktop app' in sending_client)
@staticmethod
def content_has_attachment(content):
# type: (Text) -> Match
return re.search(r'[/\-]user[\-_]uploads[/\.-]', content)
@staticmethod
def content_has_image(content):
# type: (Text) -> bool
return bool(re.search(r'[/\-]user[\-_]uploads[/\.-]\S+\.(bmp|gif|jpg|jpeg|png|webp)', content, re.IGNORECASE))
@staticmethod
def content_has_link(content):
# type: (Text) -> bool
return ('http://' in content or
'https://' in content or
'/user_uploads' in content or
(settings.ENABLE_FILE_LINKS and 'file:///' in content))
@staticmethod
def is_status_message(content, rendered_content):
# type: (Text, Text) -> bool
"""
Returns True if content and rendered_content are from 'me_message'
"""
if content.startswith('/me ') and '\n' not in content:
if rendered_content.startswith('<p>') and rendered_content.endswith('</p>'):
return True
return False
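# e.g. (hypothetical inputs) is_status_message(u'/me waves', u'<p>/me waves</p>')
# returns True, while any content containing a newline returns False.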
def update_calculated_fields(self):
# type: () -> None
# TODO: rendered_content could also be considered a calculated field
content = self.content
self.has_attachment = bool(Message.content_has_attachment(content))
self.has_image = bool(Message.content_has_image(content))
self.has_link = bool(Message.content_has_link(content))
@receiver(pre_save, sender=Message)
def pre_save_message(sender, **kwargs):
# type: (Any, **Any) -> None
if kwargs['update_fields'] is None or "content" in kwargs['update_fields']:
message = kwargs['instance']
message.update_calculated_fields()
def get_context_for_message(message):
# type: (Message) -> QuerySet[Message]
# TODO: Change return type to QuerySet[Message]
return Message.objects.filter(
recipient_id=message.recipient_id,
subject=message.subject,
id__lt=message.id,
pub_date__gt=message.pub_date - timedelta(minutes=15),
).order_by('-id')[:10]
post_save.connect(flush_message, sender=Message)
class Reaction(ModelReprMixin, models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
message = models.ForeignKey(Message) # type: Message
emoji_name = models.TextField() # type: Text
class Meta(object):
unique_together = ("user_profile", "message", "emoji_name")
@staticmethod
def get_raw_db_rows(needed_ids):
# type: (List[int]) -> List[Dict[str, Any]]
fields = ['message_id', 'emoji_name', 'user_profile__email',
'user_profile__id', 'user_profile__full_name']
return Reaction.objects.filter(message_id__in=needed_ids).values(*fields)
# Whenever a message is sent, for each user currently subscribed to the
# corresponding Recipient object, we add a row to the UserMessage
# table, which has columns (id, user profile id, message id,
# flags) indicating which messages each user has received. This table
# allows us to quickly query any user's last 1000 messages to generate
# the home view.
#
# Additionally, the flags field stores metadata like whether the user
# has read the message, starred it, collapsed it, or was
# mentioned in it, etc.
#
# UserMessage is the largest table in a Zulip installation, even
# though each row is only 4 integers.
class UserMessage(ModelReprMixin, models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
message = models.ForeignKey(Message) # type: Message
# We're not using the archived field for now, but create it anyway
# since this table will be an unpleasant one to do schema changes
# on later
ALL_FLAGS = ['read', 'starred', 'collapsed', 'mentioned', 'wildcard_mentioned',
'summarize_in_home', 'summarize_in_stream', 'force_expand', 'force_collapse',
'has_alert_word', "historical", 'is_me_message']
flags = BitField(flags=ALL_FLAGS, default=0) # type: BitHandler
class Meta(object):
unique_together = ("user_profile", "message")
def __unicode__(self):
# type: () -> Text
display_recipient = get_display_recipient(self.message.recipient)
return u"<UserMessage: %s / %s (%s)>" % (display_recipient, self.user_profile.email, self.flags_list())
def flags_list(self):
# type: () -> List[str]
return [flag for flag in self.flags.keys() if getattr(self.flags, flag).is_set]
def parse_usermessage_flags(val):
# type: (int) -> List[str]
flags = []
mask = 1
for flag in UserMessage.ALL_FLAGS:
if val & mask:
flags.append(flag)
mask <<= 1
return flags
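# e.g. parse_usermessage_flags(3) == ['read', 'starred'], since bits 0 and 1
# correspond to the first two entries of UserMessage.ALL_FLAGS above.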
class Attachment(ModelReprMixin, models.Model):
file_name = models.TextField(db_index=True) # type: Text
# path_id is a storage location agnostic representation of the path of the file.
# If the path of a file is http://localhost:9991/user_uploads/a/b/abc/temp_file.py
# then its path_id will be a/b/abc/temp_file.py.
path_id = models.TextField(db_index=True) # type: Text
owner = models.ForeignKey(UserProfile) # type: UserProfile
realm = models.ForeignKey(Realm, blank=True, null=True) # type: Realm
is_realm_public = models.BooleanField(default=False) # type: bool
messages = models.ManyToManyField(Message) # type: Manager
create_time = models.DateTimeField(default=timezone.now, db_index=True) # type: datetime.datetime
size = models.IntegerField(null=True) # type: int
def __unicode__(self):
# type: () -> Text
return u"<Attachment: %s>" % (self.file_name,)
def is_claimed(self):
# type: () -> bool
return self.messages.count() > 0
def to_dict(self):
# type: () -> Dict[str, Any]
return {
'id': self.id,
'name': self.file_name,
'path_id': self.path_id,
'messages': [{
'id': m.id,
# convert to JavaScript-style UNIX timestamp so we can take
# advantage of client timezones.
'name': time.mktime(m.pub_date.timetuple()) * 1000
} for m in self.messages.all()]
}
def get_old_unclaimed_attachments(weeks_ago):
# type: (int) -> Sequence[Attachment]
# TODO: Change return type to QuerySet[Attachment]
delta_weeks_ago = timezone.now() - datetime.timedelta(weeks=weeks_ago)
old_attachments = Attachment.objects.filter(messages=None, create_time__lt=delta_weeks_ago)
return old_attachments
class Subscription(ModelReprMixin, models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
recipient = models.ForeignKey(Recipient) # type: Recipient
active = models.BooleanField(default=True) # type: bool
in_home_view = models.NullBooleanField(default=True) # type: Optional[bool]
DEFAULT_STREAM_COLOR = u"#c2c2c2"
color = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR) # type: Text
pin_to_top = models.BooleanField(default=False) # type: bool
desktop_notifications = models.BooleanField(default=True) # type: bool
audible_notifications = models.BooleanField(default=True) # type: bool
# Combination desktop + audible notifications superseded by the
# above.
notifications = models.BooleanField(default=False) # type: bool
class Meta(object):
unique_together = ("user_profile", "recipient")
def __unicode__(self):
# type: () -> Text
return u"<Subscription: %r -> %s>" % (self.user_profile, self.recipient)
@cache_with_key(user_profile_by_id_cache_key, timeout=3600*24*7)
def get_user_profile_by_id(uid):
# type: (int) -> UserProfile
return UserProfile.objects.select_related().get(id=uid)
@cache_with_key(user_profile_by_email_cache_key, timeout=3600*24*7)
def get_user_profile_by_email(email):
# type: (Text) -> UserProfile
return UserProfile.objects.select_related().get(email__iexact=email.strip())
@cache_with_key(active_user_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_active_user_dicts_in_realm(realm):
# type: (Realm) -> List[Dict[str, Any]]
return UserProfile.objects.filter(realm=realm, is_active=True) \
.values(*active_user_dict_fields)
@cache_with_key(bot_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_bot_dicts_in_realm(realm):
# type: (Realm) -> List[Dict[str, Any]]
return UserProfile.objects.filter(realm=realm, is_bot=True).values(*bot_dict_fields)
def get_owned_bot_dicts(user_profile, include_all_realm_bots_if_admin=True):
# type: (UserProfile, bool) -> List[Dict[str, Any]]
if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
result = get_bot_dicts_in_realm(user_profile.realm)
else:
result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True,
bot_owner=user_profile).values(*bot_dict_fields)
# TODO: Remove this import cycle
from zerver.lib.avatar import get_avatar_url
return [{'email': botdict['email'],
'user_id': botdict['id'],
'full_name': botdict['full_name'],
'is_active': botdict['is_active'],
'api_key': botdict['api_key'],
'default_sending_stream': botdict['default_sending_stream__name'],
'default_events_register_stream': botdict['default_events_register_stream__name'],
'default_all_public_streams': botdict['default_all_public_streams'],
'owner': botdict['bot_owner__email'],
'avatar_url': get_avatar_url(botdict['avatar_source'], botdict['email'],
botdict['avatar_version']),
}
for botdict in result]
def get_prereg_user_by_email(email):
# type: (Text) -> PreregistrationUser
# A user can be invited many times, so only return the result of the latest
# invite.
return PreregistrationUser.objects.filter(email__iexact=email.strip()).latest("invited_at")
def get_cross_realm_emails():
# type: () -> Set[Text]
return set(settings.CROSS_REALM_BOT_EMAILS)
# The Huddle class represents a group of individuals who have had a
# Group Private Message conversation together. The actual membership
# of the Huddle is stored in the Subscription table just like with
# Streams, and a hash of that list is stored in the huddle_hash field
# below, to support efficiently mapping from a set of users to the
# corresponding Huddle object.
class Huddle(models.Model):
# TODO: We should consider whether using
# CommaSeparatedIntegerField would be better.
huddle_hash = models.CharField(max_length=40, db_index=True, unique=True) # type: Text
def get_huddle_hash(id_list):
# type: (List[int]) -> Text
id_list = sorted(set(id_list))
hash_key = ",".join(str(x) for x in id_list)
return make_safe_digest(hash_key)
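# Illustrative sketch (not part of the original module): because the user ids are
# deduplicated and sorted before hashing, get_huddle_hash is insensitive to the
# order (and repetition) of the ids passed in.
def _example_get_huddle_hash():
    # type: () -> None
    assert get_huddle_hash([3, 1, 2]) == get_huddle_hash([1, 2, 2, 3])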
def huddle_hash_cache_key(huddle_hash):
# type: (Text) -> Text
return u"huddle_by_hash:%s" % (huddle_hash,)
def get_huddle(id_list):
# type: (List[int]) -> Huddle
huddle_hash = get_huddle_hash(id_list)
return get_huddle_backend(huddle_hash, id_list)
@cache_with_key(lambda huddle_hash, id_list: huddle_hash_cache_key(huddle_hash), timeout=3600*24*7)
def get_huddle_backend(huddle_hash, id_list):
# type: (Text, List[int]) -> Huddle
with transaction.atomic():
(huddle, created) = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
if created:
recipient = Recipient.objects.create(type_id=huddle.id,
type=Recipient.HUDDLE)
subs_to_create = [Subscription(recipient=recipient,
user_profile=get_user_profile_by_id(user_profile_id))
for user_profile_id in id_list]
Subscription.objects.bulk_create(subs_to_create)
return huddle
def clear_database():
# type: () -> None
pylibmc.Client(['127.0.0.1']).flush_all()
model = None # type: Any
for model in [Message, Stream, UserProfile, Recipient,
Realm, Subscription, Huddle, UserMessage, Client,
DefaultStream]:
model.objects.all().delete()
Session.objects.all().delete()
class UserActivity(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
client = models.ForeignKey(Client) # type: Client
query = models.CharField(max_length=50, db_index=True) # type: Text
count = models.IntegerField() # type: int
last_visit = models.DateTimeField('last visit') # type: datetime.datetime
class Meta(object):
unique_together = ("user_profile", "client", "query")
class UserActivityInterval(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
start = models.DateTimeField('start time', db_index=True) # type: datetime.datetime
end = models.DateTimeField('end time', db_index=True) # type: datetime.datetime
class UserPresence(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
client = models.ForeignKey(Client) # type: Client
# Valid statuses
ACTIVE = 1
IDLE = 2
timestamp = models.DateTimeField('presence changed') # type: datetime.datetime
status = models.PositiveSmallIntegerField(default=ACTIVE) # type: int
@staticmethod
def status_to_string(status):
# type: (int) -> str
if status == UserPresence.ACTIVE:
return 'active'
elif status == UserPresence.IDLE:
return 'idle'
else:
raise ValueError('Unknown status: %s' % (status,))
@staticmethod
def get_status_dict_by_user(user_profile):
# type: (UserProfile) -> defaultdict[Any, Dict[Any, Any]]
query = UserPresence.objects.filter(user_profile=user_profile).values(
'client__name',
'status',
'timestamp',
'user_profile__email',
'user_profile__id',
'user_profile__enable_offline_push_notifications',
'user_profile__is_mirror_dummy',
)
if PushDeviceToken.objects.filter(user=user_profile).exists():
mobile_user_ids = [user_profile.id] # type: List[int]
else:
mobile_user_ids = []
return UserPresence.get_status_dicts_for_query(query, mobile_user_ids)
@staticmethod
def get_status_dict_by_realm(realm_id):
# type: (int) -> defaultdict[Any, Dict[Any, Any]]
query = UserPresence.objects.filter(
user_profile__realm_id=realm_id,
user_profile__is_active=True,
user_profile__is_bot=False
).values(
'client__name',
'status',
'timestamp',
'user_profile__email',
'user_profile__id',
'user_profile__enable_offline_push_notifications',
'user_profile__is_mirror_dummy',
)
mobile_user_ids = [row['user'] for row in PushDeviceToken.objects.filter(
            user__realm_id=realm_id,
user__is_active=True,
user__is_bot=False,
).distinct("user").values("user")]
return UserPresence.get_status_dicts_for_query(query, mobile_user_ids)
@staticmethod
def get_status_dicts_for_query(query, mobile_user_ids):
# type: (QuerySet, List[int]) -> defaultdict[Any, Dict[Any, Any]]
user_statuses = defaultdict(dict) # type: defaultdict[Any, Dict[Any, Any]]
        # The query is ordered so that, for each user, the most recent status comes first and is used as the aggregated status.
for row in query.order_by("user_profile__id", "-timestamp"):
info = UserPresence.to_presence_dict(
row['client__name'],
row['status'],
row['timestamp'],
push_enabled=row['user_profile__enable_offline_push_notifications'],
has_push_devices=row['user_profile__id'] in mobile_user_ids,
is_mirror_dummy=row['user_profile__is_mirror_dummy'],
)
if not user_statuses.get(row['user_profile__email']):
                # Use the most recent status as the aggregated status for this user.
user_statuses[row['user_profile__email']]['aggregated'] = {
'status': info['status'],
'timestamp': info['timestamp'],
'client': info['client']
}
user_statuses[row['user_profile__email']][row['client__name']] = info
return user_statuses
@staticmethod
def to_presence_dict(client_name, status, dt, push_enabled=None,
has_push_devices=None, is_mirror_dummy=None):
# type: (Text, int, datetime.datetime, Optional[bool], Optional[bool], Optional[bool]) -> Dict[str, Any]
presence_val = UserPresence.status_to_string(status)
timestamp = datetime_to_timestamp(dt)
return dict(
client=client_name,
status=presence_val,
timestamp=timestamp,
pushable=(push_enabled and has_push_devices),
)
def to_dict(self):
# type: () -> Dict[str, Any]
return UserPresence.to_presence_dict(
self.client.name,
self.status,
self.timestamp
)
@staticmethod
def status_from_string(status):
# type: (NonBinaryStr) -> Optional[int]
if status == 'active':
status_val = UserPresence.ACTIVE
elif status == 'idle':
status_val = UserPresence.IDLE
else:
status_val = None
return status_val
class Meta(object):
unique_together = ("user_profile", "client")
class DefaultStream(models.Model):
realm = models.ForeignKey(Realm) # type: Realm
stream = models.ForeignKey(Stream) # type: Stream
class Meta(object):
unique_together = ("realm", "stream")
class Referral(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
email = models.EmailField(blank=False, null=False) # type: Text
timestamp = models.DateTimeField(auto_now_add=True, null=False) # type: datetime.datetime
# This table only gets used on Zulip Voyager instances.
# For reasons of deliverability (and sending from multiple email addresses),
# we will still send from Mandrill when we send things from the (staging.)zulip.com install.
class ScheduledJob(models.Model):
scheduled_timestamp = models.DateTimeField(auto_now_add=False, null=False) # type: datetime.datetime
type = models.PositiveSmallIntegerField() # type: int
# Valid types are {email}
# for EMAIL, filter_string is recipient_email
EMAIL = 1
# JSON representation of the job's data. Be careful, as we are not relying on Django to do validation
data = models.TextField() # type: Text
    # Kind of like a ForeignKey, but the table is determined by type.
filter_id = models.IntegerField(null=True) # type: Optional[int]
filter_string = models.CharField(max_length=100) # type: Text
class RealmAuditLog(models.Model):
realm = models.ForeignKey(Realm) # type: Realm
acting_user = models.ForeignKey(UserProfile, null=True, related_name='+') # type: Optional[UserProfile]
modified_user = models.ForeignKey(UserProfile, null=True, related_name='+') # type: Optional[UserProfile]
modified_stream = models.ForeignKey(Stream, null=True) # type: Optional[Stream]
event_type = models.CharField(max_length=40) # type: Text
event_time = models.DateTimeField() # type: datetime.datetime
backfilled = models.BooleanField(default=False) # type: bool
|
dawran6/zulip
|
zerver/models.py
|
Python
|
apache-2.0
| 62,996
|
[
"VisIt"
] |
30c9657ab075a5214f0a72a6baaa21136222de21635e3c0e4462cdf2d27dd3ab
|
import numpy as np
import math
import pdb
import matplotlib.pyplot as plt
###########################################################################
#
# This module holds the definitions for various basis functions that
# are routinely used in variational Bayesian regression applications.
# The functions must be defined according to the following format:
#
# 1. The first argument must receive **all** non-parametric inputs.
# Hence, if the function accepts multivariate input, then it must
# be passed in as the first argument in the form of an NxD array,
# where N is the number of data points and D is the number of
# different input variables.
#
# 2. All remaining arguments must be keyword arguments (**kwargs).
#
###########################################################################
def poly1d( x, order=None, func=None ):
"""
1D nth-order polynomial.
It is possible to pass a function in as a keyword argument to operate
on the inputs before returning the polynomial. To be explicit, suppose
func=np.log and order=2, then the output would be:
      output = [ np.log( x ) , np.log( x )**2. ]
The default behaviour is to not apply any such transformation to the
input, so func=None and order=2 returns:
output = [ x , x**2 ]
Note that a constant offset is not added, as this is done separately
via the model_add_offset attribute and we don't want duplicate columns
of 1s in the basis matrix.
"""
outarray = np.zeros([len(x),order])
    if func is not None:
x = func(x)
for i in range(1,order+1):
outarray[:,i-1] = x**i
return outarray
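# Illustrative sketch (not part of the original module): for a 1D input vector,
# poly1d() stacks the powers x**1 ... x**order as the columns of the basis matrix.
def _example_poly1d():
    x = np.array([1.0, 2.0, 3.0])
    phi = poly1d(x, order=2)              # shape (3, 2)
    assert np.allclose(phi[:, 0], x)      # linear column
    assert np.allclose(phi[:, 1], x**2)   # quadratic column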
def local_poly1d( x, order=None, ixs=None, func=None ):
"""
    1D polynomial evaluated locally: the same as poly1d(), except that only the
    data points selected by the ixs indices receive the polynomial term (all other
    rows are left at zero). The output has two columns: a constant column of ones
    and the first polynomial column restricted to ixs.
"""
n = len(x)
outarray = np.zeros( [n, 2] )
outarray[:,0] = outarray[:,0]+1.0
    outarray_ixs = poly1d(x[ixs], order=order, func=func)
outarray[ixs,1] = outarray_ixs[:,0]
return outarray
def poly2d_crossterms( xy, order=None, func=None ):
"""
Returns the polynomial crossterms between two variables, going from
(x**0)*(y**order) through to (x**order)*(y**0).
For the remaining terms that are not crossterms, poly1d should be used.
    i.e. poly1d( x, order=order, func=func ) and poly1d( y, order=order, func=func )
"""
outarray = np.empty([np.shape(xy)[0], order+1])
x = xy[:,0]
y = xy[:,1]
    if func is not None:
x = func(x)
y = func(y)
for i in range(order+1):
outarray[:,i] = (x**i)*(y**(order-i))
return outarray
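# Illustrative sketch (not part of the original module): for order=2 the cross-term
# columns returned above are (x**i)*(y**(order-i)) for i = 0 .. order, i.e.
# [ y**2 , x*y , x**2 ].
def _example_poly2d_crossterms():
    xy = np.array([[1.0, 2.0], [3.0, 4.0]])
    x, y = xy[:, 0], xy[:, 1]
    phi = poly2d_crossterms(xy, order=2)   # shape (2, 3)
    assert np.allclose(phi[:, 0], y**2)
    assert np.allclose(phi[:, 1], x*y)
    assert np.allclose(phi[:, 2], x**2)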
def gaussian1d( x, means=None, widths=None ):
"""
    Returns an Nxn array, where N is the number of scalar data points and n is the
    number of 1D Gaussians (with individually specified means and widths) evaluated at those points.
Note that points beyond 5 sigma of the mean are assumed to make zero
contribution to the model and so are not evaluated to save time, which
can be significant for large datasets.
"""
ndata = len( x )
nfuncs = len( means )
print 'Evaluating %i 1D Gaussian functions (only at locations within 5 sigma of mean)' % ( nfuncs )
outarray = np.zeros( [ ndata, nfuncs ] )
for i in range( len( means ) ):
ixs = ( abs( x-means[i] ) < 5*widths[i] ) # only bother evaluating within 5-sig of mean
outarray[ixs,i] = np.exp( -( ( x[ixs]-means[i] )**2. ) / 2. / ( widths[i]**2. ) )
return outarray
def gaussian2d( xy, means=None, widths=None ):
"""
Returns an Nxn array where N is the number of 2D data points and where n
is the number of 2D Gaussians with individually specified means and scalar
widths.
Note that the widths are the same along both axes (i.e. the same for both
input variables). For this reason, the input variables should probably be
standardised before being passed to this function, especially if their
units are significantly different.
"""
ndata = np.shape(xy)[0]
nfuncs = np.shape(means)[0]
outarray = np.zeros([ndata, nfuncs])
x = xy[:,0]
y = xy[:,1]
print 'Evaluating %i 2D Gaussian functions (only at locations within 5 sigma of mean)' % (nfuncs)
xmeans = means[:,0]
ymeans = means[:,1]
for i in range(len(xmeans)):
ixs = ((abs(x-xmeans[i])<5*widths[i])*\
(abs(y-ymeans[i])<5*widths[i])) # only bother evaluating within 5-sig of mean
outarray[ixs,i] = np.exp( -((x[ixs]-xmeans[i])**2.+(y[ixs]-ymeans[i])**2.)/2./(widths[i]**2.) )
return outarray
def harmonics( x, n=None, period0=None ):
"""
    Returns an N x 2n array where N is the number of data points and n is the number
    of harmonics. The first n columns contain the sine terms and the last n columns
    contain the cosine terms, i.e.:
      output = [ sin( 1*(2*np.pi/period0)*x ) , ... , sin( n*(2*np.pi/period0)*x ),
                 cos( 1*(2*np.pi/period0)*x ) , ... , cos( n*(2*np.pi/period0)*x ) ]
    where each entry in the above is a column vector.
"""
outarray = np.empty([len(x),2*n], dtype=float)
w0 = 2*np.pi/period0
for i in range(n):
w = float((i+1))*w0
outarray[:,i] = np.sin(w*x)
outarray[:,i+n] = np.cos(w*x)
return outarray
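# Illustrative sketch (not part of the original module): the column layout of
# harmonics() places all sine harmonics first, then all cosine harmonics.
def _example_harmonics():
    x = np.linspace(0.0, 10.0, 50)
    phi = harmonics(x, n=3, period0=10.0)           # shape (50, 6)
    w0 = 2*np.pi/10.0
    assert np.allclose(phi[:, 1], np.sin(2*w0*x))   # second sine harmonic
    assert np.allclose(phi[:, 4], np.cos(2*w0*x))   # second cosine harmonic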
def sinusoids(x, pmin=None, pmax=None, n=None):
"""
    Returns an N x 2n array containing sinusoids (sin and cos) with periods evenly
    spaced between pmin and pmax, where N is the number of data points and n is the
    number of periods. The first n columns contain the sine terms and the last n
    columns contain the cosine terms, i.e.:
      output = [ sin( (2*np.pi/period1)*x ) , ... , sin( (2*np.pi/periodn)*x ),
                 cos( (2*np.pi/period1)*x ) , ... , cos( (2*np.pi/periodn)*x ) ]
    where each entry in the above is a column vector, and period1=pmin and periodn=pmax.
Note that pmin and pmax are assumed to have the same units as the input x.
"""
outarray = np.empty([len(x),2*n], dtype=float)
ps = np.r_[pmin:pmax:1j*n]
for i in range(n):
outarray[:,i] = np.sin(2*np.pi*x/ps[i])
outarray[:,i+n] = np.cos(2*np.pi*x/ps[i])
return outarray
def boxcar( x, a=None, b=None, d=None ):
"""
Simple boxcar function, set to zero everywhere except a<x<b where it is set
to d. Returns an Nx1 array.
"""
outarray = np.zeros(len(x))
ixs = ((x>a)*(x<b))
outarray[ixs] = d
outarray = np.reshape(outarray, [len(outarray),1])
return outarray
|
tomevans/linvb
|
linvb/vbr_basis_functions.py
|
Python
|
gpl-2.0
| 6,559
|
[
"Gaussian"
] |
211c43293eda53066ab593bd7d4082817e5b1c7d39c388a06ed4576dac869473
|
# POVME Pocket ID 1.0 is released under the GNU General Public License
# (see http://www.gnu.org/licenses/gpl.html).
# If you have any questions, comments, or suggestions, please don't hesitate to contact me,
# Jacob Durrant, at jdurrant [at] ucsd [dot] edu.
import sys
import numpy
from scipy import spatial
from scipy.cluster.vq import kmeans2
from scipy.spatial.distance import cdist
import textwrap
import getopt
from numpy.lib.recfunctions import append_fields
import multiprocessing
import warnings
# POVME Pocket ID 1.0 is a program for identifying protein pockets and generating
# appropriate pocket-encompassing inclusion spheres. These spheres, modified as required,
# can then be used as POVME input.
# Suppress errors
numpy.seterr(all='ignore')
warnings.simplefilter("ignore") # no "One of the clusters is empty." warnings
# Some classes are required to support the loading and manipulation of 3D molecular information
class Information():
"""A class for storing and accessing information about the elements of a Molecule object"""
def __init__(self, parent_molecule_object):
"""Initializes the Information class.
Arguments:
parent_molecule_object -- The Molecule object associated with this class.
"""
self.__parent_molecule = parent_molecule_object
self.__constants = {}
self.__constants['i8_fields'] = ['serial','resseq']
self.__constants['f8_fields']= ['x','y','z','occupancy','tempfactor']
self.__atom_information = None
self.__coordinates = None
def get_atom_information(self): return self.__atom_information
def get_coordinates(self): return self.__coordinates
def get_constants(self): return self.__constants
def set_atom_information(self,atom_information): self.__atom_information = atom_information
def set_coordinates(self,coordinates): self.__coordinates = coordinates
def get_bounding_box(self, selection = None, padding=0.0):
"""Calculates a box that bounds (encompasses) a set of atoms.
Arguments:
        selection -- An optional numpy.array containing the indices of the atoms to consider. If omitted, all atoms of the Molecule object will be considered.
padding -- An optional float. The bounding box will extend this many angstroms beyond the atoms being considered.
Returns:
A numpy array representing two 3D points, (min_x, min_y, min_z) and (max_x, max_y, max_z), that bound the molecule.
"""
if selection is None: selection = self.__parent_molecule.select_all()
        return numpy.vstack((numpy.min(self.__coordinates[selection],0) - padding, numpy.max(self.__coordinates[selection],0) + padding))
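# Illustrative sketch (not part of the original program): get_bounding_box returns
# the two opposite corners of an axis-aligned box around the selected atoms, so the
# edge lengths of that box are simply the difference between its two rows.
def _example_bounding_box_dimensions(molecule):
    box = molecule.get_bounding_box()   # 2x3 array: (min_x, min_y, min_z), (max_x, max_y, max_z)
    return box[1] - box[0]              # edge lengths along x, y and z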
class FileIO():
"""A class for saving and loading molecular data into a Molecule object"""
def __init__(self, parent_molecule_object):
"""Initializes the FileIO class.
Arguments:
parent_molecule_object -- The Molecule object associated with this class.
"""
self.__parent_molecule = parent_molecule_object
def load_pdb_into(self, filename):
"""Loads the molecular data contained in a pdb file into the current Molecule object.
Arguments:
filename -- A string, the filename of the pdb file.
"""
# open/read the file
afile = open(filename,"r")
self.load_pdb_into_using_file_object(afile)
afile.close()
def load_pdb_into_using_file_object(self, file_obj):
"""Loads molecular data from a python file object (pdb formatted) into the current Molecule object. Note that most users will want to use the load_pdb_into() function instead, which is identical except that it accepts a filename string instead of a python file object.
Arguments:
file_obj -- A python file object, containing pdb-formatted data.
"""
#source_data = numpy.genfromtxt(file_obj, dtype="S6,S5,S5,S4,S2,S4,S4,S8,S8,S8,S6,S6,S10,S2,S2", names=['record_name', 'serial', 'name', 'resname', 'chainid', 'resseq', 'empty', 'x', 'y', 'z', 'occupancy', 'tempfactor', 'empty2', 'element', 'charge'], delimiter=[6, 5, 5, 4, 2, 4, 4, 8, 8, 8, 6, 6, 10, 2, 2])
source_data = numpy.genfromtxt(file_obj, dtype="S6,S5,S5,S5,S1,S4,S4,S8,S8,S8,S6,S6,S10,S2,S3", names=['record_name', 'serial', 'name', 'resname', 'chainid', 'resseq', 'empty', 'x', 'y', 'z', 'occupancy', 'tempfactor', 'empty2', 'element', 'charge'], delimiter=[6, 5, 5, 5, 1, 4, 4, 8, 8, 8, 6, 6, 10, 2, 3])
if source_data.ndim == 0: source_data = source_data.reshape(1, -1) # in case the pdb file has only one line
# get the ones that are ATOM or HETATOM in the record_name
or_matrix = numpy.logical_or((source_data['record_name'] == "ATOM "), (source_data['record_name'] == "HETATM"))
indices_of_atom_or_hetatom = numpy.nonzero(or_matrix)[0]
self.__parent_molecule.set_atom_information(source_data[indices_of_atom_or_hetatom])
# now, some of the data needs to change types
# first, fields that should be numbers cannot be empty strings
for field in self.__parent_molecule.get_constants()['i8_fields'] + self.__parent_molecule.get_constants()['f8_fields']:
check_fields = self.__parent_molecule.get_atom_information()[field]
check_fields = numpy.core.defchararray.strip(check_fields)
indices_of_empty = numpy.nonzero(check_fields == '')[0]
self.__parent_molecule.get_atom_information()[field][indices_of_empty] = '0'
# now actually change the type
old_types = self.__parent_molecule.get_atom_information().dtype
descr = old_types.descr
for field in self.__parent_molecule.get_constants()['i8_fields']:
index = self.__parent_molecule.get_atom_information().dtype.names.index(field)
descr[index] = (descr[index][0], 'i8')
for field in self.__parent_molecule.get_constants()['f8_fields']:
index = self.__parent_molecule.get_atom_information().dtype.names.index(field)
descr[index] = (descr[index][0], 'f8')
new_types = numpy.dtype(descr)
self.__parent_molecule.set_atom_information(self.__parent_molecule.get_atom_information().astype(new_types))
# remove some of the fields that just contain empty data
self.__parent_molecule.set_atom_information(self.__parent_molecule.numpy_structured_array_remove_field(self.__parent_molecule.get_atom_information(), ['empty', 'empty2']))
# the coordinates need to be placed in their own special numpy array to facilitate later manipulation
self.__parent_molecule.set_coordinates(numpy.vstack([self.__parent_molecule.get_atom_information()['x'], self.__parent_molecule.get_atom_information()['y'], self.__parent_molecule.get_atom_information()['z']]).T)
self.__parent_molecule.set_atom_information(self.__parent_molecule.numpy_structured_array_remove_field(self.__parent_molecule.get_atom_information(), ['x', 'y', 'z'])) # now remove the coordinates from the atom_information object to save memory
# string values in self.__parent_molecule.information.get_atom_information() should also be provided in stripped format for easier comparison
fields_to_strip = ['name', 'resname', 'chainid', 'element']
for f in fields_to_strip: self.__parent_molecule.set_atom_information(append_fields(self.__parent_molecule.get_atom_information(), f + '_stripped', data=numpy.core.defchararray.strip(self.__parent_molecule.get_atom_information()[f])))
class Selections():
"""A class for selecting atoms"""
######## selections ########
def __init__(self, parent_molecule_object):
"""Initializes the Selections class.
Arguments:
parent_molecule_object -- The Molecule object associated with this class.
"""
self.__parent_molecule = parent_molecule_object
def select_atoms(self, selection_criteria):
"""Select a set of atoms based on user-specified criteria.
Arguments:
selection_criteria -- An dictionary, where the keys correspond to keys in the self.__parent_molecule.information.get_atom_information() structured numpy array, and the values are lists of acceptable matches.
The selection is a logical "AND" between dictionary entries, but "OR" within the value lists themselves.
For example: {'atom':['CA','O'], 'chain':'A', 'resname':'PRO'} would select all atoms with the names CA or O that are located in the PRO residues of chain A.
Returns:
A numpy.array containing the indices of the atoms of the selection.
"""
try:
selection = numpy.ones(len(self.__parent_molecule.get_atom_information()), dtype=bool) # start assuming everything is selected
for key in selection_criteria.keys():
vals = selection_criteria[key]
# make sure the vals are in a list
if not type(vals) is list and not type(vals) is tuple: vals = [vals] # if it's a single value, put it in a list
# make sure the vals are in the right format
if key in self.__parent_molecule.get_constants()['f8_fields']: vals = [float(v) for v in vals]
elif key in self.__parent_molecule.get_constants()['i8_fields']: vals = [int(v) for v in vals]
else: vals = [v.strip() for v in vals]
# "or" all the vals together
subselection = numpy.zeros(len(self.__parent_molecule.get_atom_information()), dtype=bool) # start assuming nothing is selected
for val in vals: subselection = numpy.logical_or(subselection, (self.__parent_molecule.get_atom_information()[key] == val))
# now "and" that with everything else
selection = numpy.logical_and(selection, subselection)
# now get the indices of the selection
return numpy.nonzero(selection)[0]
except:
print "ERROR: Could not make the selection. Existing fields:"
print "\t" + ", ".join(self.__parent_molecule.get_atom_information().dtype.names)
sys.exit(0)
def invert_selection(self, selection):
"""Inverts a user-defined selection (i.e., identifies all atoms that are not in the seleciton).
Arguments:
selection -- A numpy.array containing the indices of the user-defined selection.
Returns:
        A numpy.array containing the indices of all atoms that are not in the user-defined selection.
"""
# selection is a list of atom indices
all_atoms = numpy.arange(0,len(self.__parent_molecule.get_atom_information()), 1, dtype=int)
remaining_indicies = numpy.delete(all_atoms, selection)
return remaining_indicies
def select_all(self):
"""Selects all the atoms in a Molecule object.
Returns:
A numpy.array containing the indices of all atoms in the Molecule object.
"""
return self.select_atoms({})
def get_molecule_from_selection(self, selection):
"""Creates a Molecule from a user-defined atom selection.
Arguments
selection -- A numpy.array containing the indices of the atoms in the user-defined selection.
Returns:
A Molecule object containing the atoms of the user-defined selection.
"""
new_mol = Molecule()
new_mol.set_coordinates(self.__parent_molecule.get_coordinates()[selection])
new_mol.set_atom_information(self.__parent_molecule.get_atom_information()[selection])
# note that hierarchy will have to be recalculated
return new_mol
# here's the actual Molecule class
class Molecule:
"""Loads, saves, and manupulates molecuar models. The main pymolecule class."""
def __init__ (self):
"""Initializes the variables of the Molecule class."""
self.fileio = FileIO(self)
self.selections = Selections(self)
self.information = Information(self)
# Information methods
def get_coordinates(self): return self.information.get_coordinates()
def get_atom_information(self): return self.information.get_atom_information()
def get_constants(self): return self.information.get_constants()
def get_bounding_box(self, selection=None, padding=0.0): return self.information.get_bounding_box(selection, padding)
def set_atom_information(self,atom_information): self.information.set_atom_information(atom_information)
def set_coordinates(self,coordinates): self.information.set_coordinates(coordinates)
# File I/O class methods
def load_pdb_into(self, filename): self.fileio.load_pdb_into(filename)
def load_pdb_into_using_file_object(self, file_obj): self.fileio.load_pdb_into_using_file_object(file_obj)
# Selections class
def get_molecule_from_selection(self, selection): return self.selections.get_molecule_from_selection(selection)
def select_atoms(self, selection_criteria): return self.selections.select_atoms(selection_criteria)
def invert_selection(self, selection): return self.selections.invert_selection(selection)
def select_all(self): return self.selections.select_all()
######## supporting functions ########
def numpy_structured_array_remove_field(self, narray, field_names): # surprised this doesn't come with numpy
"""Removes a specific field name from a structured numpy array.
Arguments:
narray -- A structured numpy array.
field_names -- A list of strings, where each string is one of the field names of narray.
Returns:
A structured numpy array identical to narray, but with the field names in field_names removed.
"""
names = list(narray.dtype.names) # now remove the coordinates from the atom_information object to save memory
for f in field_names: names.remove(f)
return narray[names]
# Some classes are required for calculating convex hulls
class ConvexHull():
"""A class to handle convex-hull calculations"""
def __init__(self, pts):
"""Initializes the ConvexHull class."""
akl_toussaint_pts = self.akl_toussaint(pts) # quickly reduces input size
self.hull = self.gift_wrapping_3d(akl_toussaint_pts) # calculate convex hull using gift wrapping algorithm
def inside_hull(self, our_point):
"""Determines if a point is inside the hull
Arguments:
our_point -- An x,y,z array
Returns:
A boolean, True if the point is inside the hull, False otherwise
"""
return not self.outside_hull(our_point, self.hull)
def outside_hull(self, our_point, triangles, epsilon=1.0e-5): # this one used internally
"""Given the hull as defined by a list of triangles, this definition will return whether a point is within these or not.
Arguments:
our_point -- an x,y,z array
epsilon -- needed for imprecisions in the floating-point operations.
Returns:
True if our_point exists outside of the hull, False otherwise
"""
our_point = numpy.array(our_point) # convert it to an numpy.array
for triangle in triangles:
rel_point = our_point - triangle[0] # vector from triangle corner 0 to point
vec1 = triangle[1] - triangle[0] # vector from triangle corner 0 to corner 1
vec2 = triangle[2] - triangle[1] # vector from triangle corner 1 to corner 2
our_cross = numpy.cross(vec1, vec2) # cross product between vec1 and vec2
            our_dot = numpy.dot(rel_point, our_cross) # dot product tells whether the point lies on the outward side of this face
            if our_dot > epsilon: # if the dot product exceeds epsilon, the point is outside this face, and hence outside the hull
return True
return False
def get_seg_dict_num(self, seg_dict, seg_index):
"""seg_dict is a dictionary object that contains information about segments within the convex hull. The keys are 2x3 tuples, which represent two ends of a segment in space. The values of seg_dict are the number of times a segment has been part of a triangle, either 1 or 2. (Zero times would mean that the segment doesn't exist in the dictionary yet). This function looks up and returns the value of a seg_index from seg_dict
Arguments:
seg_dict -- the dictionary of segment 2x3 tuples as keys, integers as values
seg_index -- the key of the dictionary member we are going to retrieve
Returns:
if seg_index exists in the keys of seg_dict, return the value. Otherwise, return 0
"""
if seg_index[0][0] > seg_index[1][0]: # we want the index with the greater x-value, so we don't get identical segments in the dictionary more than once
index = seg_index
else:
index = seg_index[::-1]
if index in seg_dict:
return seg_dict[index]
else:
return 0
def increment_seg_dict(self, seg_dict, seg_index):
"""seg_dict is a dictionary object that contains information about segments within the convex hull. The keys are 2x3 tuples, which represent two ends of a segment in space. The values of seg_dict are the number of times a segment has been part of a triangle, either 1 or 2. (Zero times would mean that the segment doesn't exist in the dictionary yet). This function increments the values within seg_dict, or initiates them if they dont exist yet.
Arguments:
seg_dict -- the dictionary of segment 2x3 tuples as keys, integers as values
seg_index -- the key of the dictionary member we are going to increment
"""
if seg_index[0][0] > seg_index[1][0]: # we want the index with the greater x-value, so we don't get identical segments in the dictionary more than once
index = seg_index
else:
index = seg_index[::-1]
#"putting index:", index, "into seg_dict because", index[0][0], ">", index[1][0]
if index in seg_dict: # if the entry already exists in seg_dict
seg_dict[index] += 1 # increment
else:
seg_dict[index] = 1 # initiate with a value of 1 because it now exists on a triangle
return
def gift_wrapping_3d(self, raw_points):
"""Gift wrapping for 3d convex hull
Arguments:
raw_points -- A nx3 array of points, where each row corresponds to an x,y,z point coordinate
Returns:
        A convex hull represented by a list of triangles. Each triangle is a 3x3 array, where each row is an x,y,z coordinate in space. The 3 rows describe the location of the 3 corners of the triangle. The 3 points are ordered so that their cross product points outward from the hull.
"""
n = numpy.shape(raw_points)[0] # number of points
point1 = raw_points[0] # take the first point
xaxis = numpy.array([1,0,0]) # create a ref vector pointing along x axis
maxx = raw_points[0][0] # initiate highest x value
points = [] # a list of tuples for easy dictionary lookup
seg_dict = {} # a dictionary that contains the number of triangles a seg is in
        for i in range(n): # find the point with the largest x value
point = tuple(raw_points[i])
points.append(point)
if point[0] > maxx:
maxx = point[0]
point1 = raw_points[i]
best_dot = -1.0 # initiate dot relative to x-axis
point2 = numpy.array(raw_points[1]) # initiate best segment
# find first/best segment
for i in range(n):
pointi = raw_points[i]
if numpy.array_equal(pointi, point1): continue
diff_vec = pointi - point1
diff_len = numpy.linalg.norm(diff_vec)
test_dot = numpy.dot(diff_vec/diff_len,xaxis)
if test_dot > best_dot:
best_dot = test_dot
point2 = pointi
point1 = tuple(point1)
point2 = tuple(point2)
ref_vec = xaxis
# now find the best triangle
triangles = []
seg_list = set([(point1, point2),])
norm_dict = {(point1,point2):xaxis}
self.increment_seg_dict( seg_dict, (point1,point2) )
counter = 0
first_time = True
section1 = 0.0
section2 = 0.0
section3 = 0.0
while seg_list: # as long as there are unexplored edges of triangles in the hull...
counter += 1
seg = seg_list.pop() # take a segment out of the seg_list
tuple1 = seg[0] # the two ends of the segment
tuple2 = seg[1]
point1 = numpy.array(seg[0])
point2 = numpy.array(seg[1])
result = self.get_seg_dict_num( seg_dict, (seg[0],seg[1]) )
if result >= 2: # then we already have 2 triangles on this segment
continue # forget about drawing a triangle for this seg
ref_vec = norm_dict[(seg[0],seg[1])] # get the norm for a triangle that the segment is part of
best_dot_cross = -1.0
best_point = None
for i in range(n): # look at each point
pointi = raw_points[i]
diff_vec1 = point2 - point1
diff_vec2 = pointi - point2
test_cross = numpy.array([diff_vec1[1]*diff_vec2[2]-diff_vec1[2]*diff_vec2[1], diff_vec1[2]*diff_vec2[0]-diff_vec1[0]*diff_vec2[2], diff_vec1[0]*diff_vec2[1]-diff_vec1[1]*diff_vec2[0]]) # cross product
test_cross_len = numpy.sqrt(test_cross[0]*test_cross[0] + test_cross[1]*test_cross[1] + test_cross[2]*test_cross[2]) #numpy.linalg.norm(test_cross) # get the norm of the cross product
if test_cross_len <= 0.0: continue
test_cross = test_cross / test_cross_len
dot_cross = numpy.dot(test_cross, ref_vec)
if dot_cross > best_dot_cross:
best_cross = test_cross
best_dot_cross = dot_cross
best_point = pointi
tuple3 = points[i]
point3 = best_point
if self.get_seg_dict_num( seg_dict, (tuple2,tuple1) ) > 2: continue
if self.get_seg_dict_num( seg_dict, (tuple3,tuple2) ) > 2: continue
if self.get_seg_dict_num( seg_dict, (tuple1,tuple3) ) > 2: continue
# now we have a triangle from point1 -> point2 -> point3
# must test each edge
if first_time:
self.increment_seg_dict( seg_dict, (tuple2,tuple1) )
seg_list.add((tuple2, tuple1))
norm_dict[(tuple2,tuple1)] = best_cross
self.increment_seg_dict( seg_dict, (tuple3,tuple2) )
seg_list.add((tuple3, tuple2))
norm_dict[(tuple3,tuple2)] = best_cross
self.increment_seg_dict( seg_dict, (tuple1,tuple3) )
seg_list.add((tuple1, tuple3))
norm_dict[(tuple1,tuple3)] = best_cross
triangles.append((numpy.array(tuple1),numpy.array(tuple2),numpy.array(tuple3)))
first_time = False
return triangles
def akl_toussaint(self, points):
"""The Akl-Toussaint Heuristic. Given a set of points, this definition will create an octahedron whose corners are the extremes in x, y, and z directions. Every point within this octahedron will be removed because they are not part of the convex hull. This causes any expected running time for a convex hull algorithm to be reduced to linear time.
Arguments:
points -- An nx3 array of x,y,z coordinates
Returns:
All members of original set of points that fall outside the Akl-Toussaint octahedron
"""
x_high = (-1e99,0,0); x_low = (1e99,0,0); y_high = (0,-1e99,0); y_low = (0,1e99,0); z_high = (0,0,-1e99); z_low = (0,0,1e99)
for point in points: # find the corners of the octahedron
if point[0] > x_high[0]: x_high = point
if point[0] < x_low[0]: x_low = point
if point[1] > y_high[1]: y_high = point
if point[1] < y_low[1]: y_low = point
if point[2] > z_high[2]: z_high = point
if point[2] < z_low[2]: z_low = point
octahedron = [ # define the triangles of the surfaces of the octahedron
numpy.array((x_high,y_high,z_high)),
numpy.array((x_high,z_low,y_high)),
numpy.array((x_high,y_low,z_low)),
numpy.array((x_high,z_high,y_low)),
numpy.array((x_low,y_low,z_high)),
numpy.array((x_low,z_low,y_low)),
numpy.array((x_low,y_high,z_low)),
numpy.array((x_low,z_high,y_high)),
]
new_points = [] # everything outside of the octahedron
for point in points: # now check to see if a point is inside or outside the octahedron
outside = self.outside_hull(point, octahedron, epsilon=-1.0e-5)
if outside:
new_points.append(point)
return numpy.array(new_points) # convert back to an array
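# Illustrative sketch (not part of the original program): this is how the script
# below uses the ConvexHull class -- build the hull once from the alpha-carbon
# coordinates, then test candidate grid points one at a time. The argument names
# here are hypothetical.
def _example_hull_usage(alpha_carbon_coordinates, candidate_points):
    hull = ConvexHull(alpha_carbon_coordinates)
    return [pt for pt in candidate_points if hull.inside_hull(pt)]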
# Some classes are required for multiprocessing
class MultiThreading():
"""A class for multi-processor support."""
results = []
def __init__(self, inputs, num_processors, task_class_name):
"""Initializes the MultiThreading class."""
self.results = []
        # first, if num_processors <= 0, determine the number of processors to use programmatically
if num_processors <= 0: num_processors = multiprocessing.cpu_count()
# reduce the number of processors if too many have been specified
if len(inputs) < num_processors: num_processors = len(inputs)
# now, divide the inputs into the appropriate number of processors
inputs_divided = {}
for t in range(num_processors): inputs_divided[t] = []
for t in range(0, len(inputs), num_processors):
for t2 in range(num_processors):
index = t + t2
if index < len(inputs): inputs_divided[t2].append(inputs[index])
# now, run each division on its own processor
running = multiprocessing.Value('i', num_processors)
mutex = multiprocessing.Lock()
arrays = []
threads = []
for i in range(num_processors):
threads.append(task_class_name())
arrays.append(multiprocessing.Array('i',[0, 1]))
results_queue = multiprocessing.Queue() # to keep track of the results
processes = []
for i in range(num_processors):
p = multiprocessing.Process(target=threads[i].runit, args=(running, mutex, results_queue, inputs_divided[i]))
p.start()
#p.join()
processes.append(p)
while running.value > 0: is_running = 0 # wait for everything to finish
# compile all results
for thread in threads:
chunk = results_queue.get()
self.results.extend(chunk)
class GeneralTask:
"""A class that determines the specific calculations that will be performed when multi-processor support is used. Other, more specific classes will inherit this one."""
results = []
def runit(self, running, mutex, results_queue, items):
for item in items: self.value_func(item, results_queue)
mutex.acquire()
running.value -= 1
mutex.release()
results_queue.put(self.results)
def value_func(self, item, results_queue): # this is the function that changes through inheritance
print item # here's where you do something
self.results.append(item) # here save the results for later compilation
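# Illustrative sketch (not part of the original program): subclasses of GeneralTask
# only need to override value_func(); runit() handles the bookkeeping. This mirrors
# the __MultiIdHullPts and __MultiGetClosePoints classes defined further below.
# For example, MultiThreading(some_numbers, 2, _ExampleSquaringTask).results would
# collect the squares of some_numbers (in no particular order).
class _ExampleSquaringTask(GeneralTask):
    """Squares each input item; the results are gathered by MultiThreading."""
    def value_func(self, item, results_queue):
        self.results.append(item * item)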
# You'll also need a class representing a box of points, with associated definitions
class BoxOfPoints():
"""A class representing a box of equidistant points"""
def __init__(self, box, reso):
"""Initialize the class.
Arguments:
box -- A numpy array representing two 3D points, (min_x, min_y, min_z) and (max_x, max_y, max_z), that define a box.
reso -- The space between the points of the box, in the X, Y, and Z direction.
"""
self.write_pdbs = write_pdbs()
min_x = self.__snap_float(box[0][0], reso)
min_y = self.__snap_float(box[0][1], reso)
min_z = self.__snap_float(box[0][2], reso)
max_x = self.__snap_float(box[1][0], reso) + 1.1 * reso
max_y = self.__snap_float(box[1][1], reso) + 1.1 * reso
max_z = self.__snap_float(box[1][2], reso) + 1.1 * reso
x, y, z = numpy.mgrid[min_x:max_x:reso, min_y:max_y:reso, min_z:max_z:reso]
self.points = numpy.array(zip(x.ravel(), y.ravel(), z.ravel()))
def __snap_float(self, val, reso):
"""Snaps an arbitrary point to the nearest grid point.
Arguments:
val -- A numpy array corresponding to a 3D point.
reso -- The resolution (distance in the X, Y, and Z directions between adjacent points) of the grid.
Returns:
A numpy array corresponding to a 3D point near val that is on a nearby grid point.
"""
return numpy.floor(val / reso) * reso
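    # For example (illustrative, not part of the original program): with reso=1.0,
    # __snap_float(-3.2, 1.0) returns -4.0 and __snap_float(5.7, 1.0) returns 5.0;
    # values are always snapped downward to the nearest multiple of reso.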
def remove_points_outside_convex_hull(self, hull):
"""Removes box points that are outside a convex hull.
Arguments:
hull -- The convex hull.
"""
chunks = [(hull, t) for t in numpy.array_split(self.points, params['processors'])]
tmp = MultiThreading(chunks, params['processors'], self.__MultiIdHullPts)
self.points = numpy.vstack(tmp.results)
class __MultiIdHullPts(GeneralTask):
"""A class to remove points outside a convex hull using multiple processors."""
def value_func(self, items, results_queue): # so overwriting this function
"""The calculations that will run on a single processor to remove points outside a convex hull."""
hull = items[0]
some_points = items[1]
            # Note this would be much faster if it were matrix-based instead of point-by-point based.
            new_pts = [] # Can't preallocate the numpy array because we don't know beforehand how many points will fall inside the hull
for pt in some_points:
if hull.inside_hull(pt) == True: new_pts.append(pt)
            if len(new_pts) == 0: pass # nothing from this chunk fell inside the hull, so there is nothing to save
else: self.results.append(numpy.array(new_pts))
def remove_all_points_close_to_other_points(self, other_points, dist_cutoff):
"""Removes all points in this box that come within the points specified in a numpy array
Arguments:
other_points -- A numpy array containing the other points.
dist_cutoff -- A float, the cutoff distance to use in determining whether or not box points will be removed.
"""
box_of_pts_distance_tree = spatial.KDTree(self.points) # note, in newer versions of scipy use cKDTree
chunks = [(box_of_pts_distance_tree, dist_cutoff, t) for t in numpy.array_split(other_points, params['processors'])]
tmp = MultiThreading(chunks, params['processors'], self.__MultiGetClosePoints)
indicies_of_box_pts_close_to_molecule_points = numpy.unique(numpy.hstack(tmp.results))
self.points = numpy.delete(self.points, indicies_of_box_pts_close_to_molecule_points, axis=0) # remove the ones that are too close to molecule atoms
class __MultiGetClosePoints(GeneralTask):
"""A class to remove box points that are near other, user-specified points, using multiple processors."""
def value_func(self, items, results_queue): # so overwriting this function
"""The calculations that will run on a single processor."""
box_of_pts_distance_tree = items[0]
dist_cutoff = items[1]
other_points = items[2]
other_points_distance_tree = spatial.KDTree(other_points) # note, in newer versions of scipy use cKDTree
sparce_distance_matrix = other_points_distance_tree.sparse_distance_matrix(box_of_pts_distance_tree, dist_cutoff)
indicies_of_box_pts_close_to_molecule_points = numpy.unique(sparce_distance_matrix.tocsr().indices) #tocsr()
self.results.append(indicies_of_box_pts_close_to_molecule_points)
def to_pdb(self, let="X"):
"""Converts the points in this box into a PDB representation.
Arguments:
let -- An optional string, the chain ID to use. "X" by default.
Returns:
A PDB-formatted string.
"""
return self.write_pdbs.numpy_to_pdb(self.points, let)
def expand_around_existing_points(self, num_pts, reso):
"""Add points to the current box that surround existing points, essentially increasing the resolution of the box.
Arguments:
num_pts -- An int, the number of points to place on each side of the existing points, in the X, Y, and Z directions.
reso -- The distance between adjacent added points.
"""
new_pts = []
i = numpy.arange(-num_pts * reso, num_pts * reso + reso*0.01, reso)
for xi in i:
for yi in i:
for zi in i:
vec = numpy.array([xi, yi, zi])
new_pts.append(self.points + vec)
self.points = numpy.vstack(new_pts)
self.__unique_points()
def __unique_points(self):
"""Identifies unique points (rows) in an array of points.
Arguments:
a -- A nx3 numpy.array representing 3D points.
Returns:
A nx2 numpy.array containing the 3D points that are unique.
"""
b = numpy.ascontiguousarray(self.points).view(numpy.dtype((numpy.void, self.points.dtype.itemsize * self.points.shape[1])))
unique_points = numpy.unique(b).view(self.points.dtype).reshape(-1, self.points.shape[1])
self.points = unique_points
def filter_isolated_points_until_no_change(self, reso, number_of_neighbors):
"""Keep removing points that don't have enough neighbors, until no such points exist.
Arguments:
reso -- The distance between adjacent points.
number_of_neighbors -- The minimum number of permissible neighbors.
"""
# calculate the pairwise distances between all box points
box_of_pts_distance_tree = spatial.KDTree(self.points) # note, in newer versions of scipy use cKDTree
        self.dist_matrix = box_of_pts_distance_tree.sparse_distance_matrix(box_of_pts_distance_tree, reso * numpy.sqrt(3.0) * 1.1).todense() # so kitty-corner points count as neighbors
        # note that the diagonal of self.dist_matrix is zero, as expected, but entries with dist > reso * numpy.sqrt(3.0) * 1.1 are also 0. Pretty convenient.
num_pts = 0
while num_pts != len(self.points): # keep running the pass until there are no changes (points are stable)
num_pts = len(self.points)
# identify the points that have enough neighbors
columns_nonzero_count = numpy.array((self.dist_matrix != 0).sum(0))[0]
columns_nonzero_count_match_criteria = (columns_nonzero_count >= number_of_neighbors)
columns_nonzero_count_match_criteria_index = numpy.nonzero(columns_nonzero_count_match_criteria)
self.__keep_limited_points(columns_nonzero_count_match_criteria_index)
def __keep_limited_points(self, pt_indices):
"""A support function"""
# keep only those points
self.points = self.points[pt_indices]
# update the distance matrix so it doesn't need to be recalculated
self.dist_matrix = self.dist_matrix[pt_indices,:][0]
self.dist_matrix = self.dist_matrix.T
self.dist_matrix = self.dist_matrix[pt_indices,:][0]
        #self.dist_matrix = self.dist_matrix.T # not necessary because it's a symmetrical matrix
def separate_out_pockets(self):
"""Separate the points according to the pocket they belong to. Determined by looking at patches of contiguous points.
Returns:
A list of point arrays, each array corresponding to the points of a separate pocket.
"""
all_pockets = []
while len(self.points) != 0:
pocket_indexes = numpy.array([0])
num_pts_in_pocket = 0
while num_pts_in_pocket != len(pocket_indexes):
num_pts_in_pocket = len(pocket_indexes)
# get all the adjacent points
pocket_indexes = numpy.hstack((pocket_indexes,numpy.array(numpy.nonzero(self.dist_matrix[pocket_indexes, :])[1])[0]))
pocket_indexes = numpy.unique(pocket_indexes)
pocket = self.points[pocket_indexes,:]
all_pockets.append(pocket)
self.__delete_limited_points(pocket_indexes)
# sort the pockets by size
all_pockets = sorted(all_pockets, key=lambda pts: -len(pts))
return all_pockets
def __delete_limited_points(self, pt_indices):
"""A support function"""
# keep only those points
self.points = numpy.delete(self.points, pt_indices, axis=0)
# update the distance matrix so it doesn't need to be recalculated
self.dist_matrix = numpy.delete(self.dist_matrix,pt_indices, axis=0)
self.dist_matrix = self.dist_matrix.T
self.dist_matrix = numpy.delete(self.dist_matrix,pt_indices, axis=0)
# Also, you need a class to save numpy arrays as PDB files
class write_pdbs():
"""A class for converting numpy arrays into PDB-formatted strings"""
def __create_pdb_line(self, numpy_array, index, resname, letter):
"""Create a string formatted according to the PDB standard.
Arguments:
numpy_array -- A 1x3 numpy.array representing a 3D point.
index -- An integer, the atom index to use in the string.
resname -- A string, the RESNAME to use.
letter -- A string, the atom name/chain/etc to use for the output.
Returns:
A string, formatted according to the PDB standard.
"""
if len(numpy_array) == 2: numpy_array = numpy.array([numpy_array[0], numpy_array[1], 0.0])
if numpy_array.shape == (1, 3): numpy_array = numpy_array[0]
output = "ATOM "
output = output + str(index % 999999).rjust(6) + letter.rjust(5) + resname.rjust(4) + letter.rjust(2) + str(index % 9999).rjust(4)
output = output + ("%.3f" % numpy_array[0]).rjust(12)
output = output + ("%.3f" % numpy_array[1]).rjust(8)
output = output + ("%.3f" % numpy_array[2]).rjust(8)
output = output + letter.rjust(24)
return output
def numpy_to_pdb(self, narray, letter, resname=""):
"""Create a string formatted according to the PDB standard.
Arguments:
narray -- A nx3 numpy.array representing a 3D point.
letter -- A string, the atom name/chain/etc to use for the output.
resname -- An optional string, the RESNAME to use for the output.
Returns:
A string, formatted according to the PDB standard.
"""
if len(narray.flatten()) == 3:
return self.__create_pdb_line(narray, 1, "AAA", letter) + "\n"
else:
if resname == "":
letters = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
resnames = []
for l1 in letters:
for l2 in letters:
for l3 in letters:
resnames.append(l1+l2+l3)
resnames.remove("XXX") # because this is reserved for empty atoms
else:
resnames = [resname]
t = ""
for i, item in enumerate(narray): t = t + self.__create_pdb_line(item, i+1, resnames[i % len(resnames)], letter) + "\n"
return t
####### Now the meat of the program ########
# First, show a brief help file describing the command-line arguments.
help_lines = []
help_lines.append('')
help_lines.append('POVME Pocket ID 1.0')
help_lines.append('===================')
help_lines.append('')
help_lines.append('Required command-line parameters:')
help_lines.append('')
help_lines.append('--filename: The PDB filename to be analyzed.')
help_lines.append('')
help_lines.append('Optional command-line parameters:')
help_lines.append('')
help_lines.append('--pocket_detection_resolution: The distance between probe points used to initially find the pockets (4.0 by default).')
help_lines.append('--pocket_measuring_resolution: The distance between probe points used to measure identified pockets in greater detail. Should divide --pocket_detection_resolution evenly. (1.0 by default).')
help_lines.append('--clashing_cutoff: In measuring the pockets, any points closer than this cutoff to receptor atoms will be removed. (3.0 by default).')
help_lines.append('--number_of_neighbors: In measuring the pockets, any points with fewer than this number of neighbors will be deleted. These are usually just stray points that don\'t belong to any real pocket. (4 by default).')
help_lines.append('--processors: The number of processors to use. (1 by default).')
help_lines.append('--number_of_spheres: The number of inclusion spheres to generate for each pocket. (5 by default).')
help_lines.append('--sphere_padding: How much larger the radius of the inclusion spheres should be, beyond what is required to encompass the identified pockets. (5.0 by default).')
help_lines.append('')
help_lines.append('Example:')
help_lines.append('')
help_lines.append('python pocket_id.py --filename rel1_example.pdb --pocket_detection_resolution 4.0 --pocket_measuring_resolution 1.0 --clashing_cutoff 3.0 --number_of_neighbors 4 --processors 1 --number_of_spheres 5 --sphere_padding 5.0 ')
help_lines.append('')
def printit(text): print textwrap.fill(text, initial_indent='', subsequent_indent=' ')
for line in help_lines: printit(line)
if len(sys.argv[1:]) == 0: sys.exit(0)
# Now, parse the command-line arguments
params = {
'filename': '',
'pocket_detection_resolution': 4.0,
'pocket_measuring_resolution': 1.0,
'clashing_cutoff': 3.0,
'number_of_neighbors': 4,
'processors': 1,
'number_of_spheres': 5,
'sphere_padding': 5.0
}
for item in getopt.getopt(sys.argv[1:], '', [ 'filename=', 'pocket_detection_resolution=', 'pocket_measuring_resolution=', 'clashing_cutoff=', 'number_of_neighbors=', 'processors=', 'number_of_spheres=', 'sphere_padding=' ])[0]: params[item[0].replace('--','')] = item[1]
if params['filename'] == '':
print "ERROR: Must specify the --filename parameter!"
print
sys.exit(0)
for key in ['number_of_neighbors', 'processors', 'number_of_spheres']: params[key] = int(params[key])
for key in ['pocket_detection_resolution', 'pocket_measuring_resolution', 'clashing_cutoff', 'sphere_padding']: params[key] = float(params[key])
print 'Specified command-line arguments:'
print
for key in params: print " --" + key + ': ' + str(params[key])
print
# Step 1: Load in the protein
printit("Step 1. Loading the PDB file " + params['filename'] + "...")
molecule = Molecule()
molecule.load_pdb_into(params['filename'])
# Step 2: Get rid of hydrogen atoms. They just slow stuff down.
print "Step 2. Removing hydrogen atoms..."
sel = molecule.selections.select_atoms({'element_stripped':'H'})
sel = molecule.selections.invert_selection(sel)
molecule = molecule.selections.get_molecule_from_selection(sel)
# Step 3: Calculate the convex hull of the protein alpha carbons.
print "Step 3. Calculating the convex hull of the PDB file..."
molecule_alpha_carbons = molecule.selections.get_molecule_from_selection(molecule.selections.select_atoms({'name_stripped':'CA'})) # Get a version of the protein with just the alpha carbons. In my experience, that's better for convex hull identification. Otherwise the program identifies shallow contours in the protein surface as pockets.
convex_hull_3d = ConvexHull(molecule_alpha_carbons.get_coordinates())
# Step 4. Get a box of equispaced points that surround the protein, snapped to reso. I'm putting a whole bunch of other functions in this class as well to manipulate the points of this box.
printit("Step 4. Making a box of points spaced " + str(params['pocket_detection_resolution']) + " A apart that entirely encompasses the protein...")
box_pts = BoxOfPoints(molecule.get_bounding_box(), params['pocket_detection_resolution'] * 4) # note that the initial box is low resolution (* 4) so convex hull will be very fast
# Step 5. Remove points outside the convex hull. Gradually fill in protein-occupying region with denser point fields. Faster this way, I think.
printit("Step 5. Removing points that fall outside the protein's convex hull...")
box_pts.remove_points_outside_convex_hull(convex_hull_3d)
box_pts.expand_around_existing_points(2, params['pocket_detection_resolution'] * 2)
box_pts.remove_points_outside_convex_hull(convex_hull_3d)
box_pts.expand_around_existing_points(2, params['pocket_detection_resolution'])
box_pts.remove_points_outside_convex_hull(convex_hull_3d)
# Step 6. Remove the points in this box that are too close to protein atoms.
# For simplicity's sake, don't worry about atomic radii. Just a simple cutoff.
printit("Step 6. Removing points that come within " + str(params['clashing_cutoff']) + " A of any protein atom...")
box_pts.remove_all_points_close_to_other_points(molecule.get_coordinates(), params['clashing_cutoff'])
# Step 7. Now surround each of these points with higher-density points in the same regions. This gives a more detailed view of the identified pockets.
if params['pocket_measuring_resolution'] != params['pocket_detection_resolution']:
printit("Step 7. Flooding the identified pockets with points spaced " + str(params['pocket_measuring_resolution']) + " A apart for a more detailed measurement of the pocket volume...")
print "\tAdding points..."
box_pts.expand_around_existing_points(params['pocket_detection_resolution']/params['pocket_measuring_resolution'], params['pocket_measuring_resolution'])
printit("\tRemoving points that fall outside the convex hull...")
box_pts.remove_points_outside_convex_hull(convex_hull_3d)
printit("\tRemoving points within " + str(params['clashing_cutoff']) + " A of any protein atom...")
box_pts.remove_all_points_close_to_other_points(molecule.get_coordinates(), params['clashing_cutoff'])
# Step 8. Now repeatedly filter out under-connected points (keep repeating until no change). This is an iterative neighbor-count filter rather than a true high- or low-pass filter.
printit("Step 8. Removing points until all points have at least " + str(params['number_of_neighbors']) + " neighbors...")
box_pts.filter_isolated_points_until_no_change(params['pocket_measuring_resolution'], params['number_of_neighbors'])
# Step 9. Separate out the pockets so they can be considered in isolation.
printit("Step 9. Partitioning the remaining points by pocket...")
all_pockets = box_pts.separate_out_pockets()
# Step 10. Get POVME spheres that encompass each pocket, write pockets to separate pdb files
printit("Step 10. Saving the points of each pocket...")
let_ids = ['A','B','C','D','E','F','G','H','I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
write_some_pdbs = write_pdbs()
for i,pts in enumerate(all_pockets):
filename = 'pocket' + str(i+1) + '.pdb'
printit("\tSaving " + filename + "...")
f = open(filename,'w')
f.write("REMARK Pocket #" + str(i+1) + "\n")
# Note: scipy's clustering docs generally recommend whitening (rescaling each dimension to unit variance) before k-means, but these are 3D spatial coordinates already on a common scale, so it is skipped here.
centroids, idx = kmeans2(pts, params['number_of_spheres'])
pts_string = ""
for cluster_num in range(params['number_of_spheres']):
indexes_for_this_cluster = numpy.nonzero(idx == cluster_num)[0]
cluster_pts = pts[indexes_for_this_cluster]
cluster_center = numpy.mean(cluster_pts, axis=0)
try:
cluster_radius = numpy.max(cdist(numpy.array([cluster_center]), cluster_pts))
f.write("REMARK CHAIN " + let_ids[cluster_num] + ": PointsInclusionSphere " + str(numpy.round(cluster_center[0],2)) + ' ' + str(numpy.round(cluster_center[1],2)) + ' ' + str(numpy.round(cluster_center[2],2)) + ' ' + str(numpy.round(cluster_radius + params['sphere_padding'],2)) + "\n")
pts_string = pts_string + write_some_pdbs.numpy_to_pdb(cluster_pts, let_ids[cluster_num])
except:
print
printit("There was an error, but I don't think it was catastrophic. Could be that one of the pocket clusters was empty.")
print
f.write(pts_string)
f.close()
print
printit("Done. See the pocket{n}.pdb files. Using a visualization program like VMD, identify which of these files includes the pocket you wish to measure. POVME Pocket ID has divided each pocket volume into " + str(params['number_of_spheres']) + " sections (i.e., PDB chains). In some cases, the pocket you're interested in might be included in a larger identified pocket, so feel free to use only certain sections of a given pocket as well.")
printit("The POVME PointsInclusionSphere commands are located in the header of each pocket{n}.pdb file. A text editor can be used to copy and paste these commands into a POVME input file.")
print
|
j-wags/POVME
|
POVME_version_for_paper_do_not_modify/POVME_pocket_id.py
|
Python
|
gpl-3.0
| 51,500
|
[
"VMD"
] |
070e34a7451b4b5addee1228a5e6ea50a516ed2b5acf05add1ad7800f72197a7
|
# -*- coding: utf-8 -*-
"""INI simulator with temporal pattern code.
@author: rbodo
"""
import tensorflow as tf
import numpy as np
from snntoolbox.simulation.target_simulators.\
INI_temporal_mean_rate_target_sim import SNN as SNN_
from snntoolbox.simulation.utils import get_layer_synaptic_operations, \
remove_name_counter
class SNN(SNN_):
"""
The compiled spiking neural network, using layers derived from
Keras base classes (see
`snntoolbox.simulation.backends.inisim.temporal_pattern`).
Aims at simulating the network on a self-implemented Integrate-and-Fire
simulator using a timestepped approach.
Attributes
----------
snn: keras.models.Model
Keras model. This is the output format of the compiled spiking model
because INI simulator runs networks of layers that are derived from
Keras layer base classes.
"""
def __init__(self, config, queue=None):
SNN_.__init__(self, config, queue)
self.num_bits = self.config.getint('conversion', 'num_bits')
def compile(self):
self.snn = tf.keras.models.Model(
self._input_images,
self._spiking_layers[self.parsed_model.layers[-1].name])
self.snn.compile('sgd', 'categorical_crossentropy', ['accuracy'])
# Tensorflow 2 lists all variables as weights, including our state
# variables (membrane potential etc). So a simple
# snn.set_weights(parsed_model.get_weights()) does not work any more.
# Need to extract the actual weights here:
parameter_map = {remove_name_counter(p.name): p for p in
self.parsed_model.weights}
count = 0
for p in self.snn.weights:
name = remove_name_counter(p.name)
if name in parameter_map:
p.assign(parameter_map[name])
count += 1
assert count == len(parameter_map), "Not all weights have been " \
"transferred from ANN to SNN."
for layer in self.snn.layers:
if hasattr(layer, 'bias'):
# Adjust biases to time resolution of simulator.
layer.bias.assign(layer.bias / self._num_timesteps)
# @tf.function
def simulate(self, **kwargs):
from snntoolbox.utils.utils import echo
input_b_l = kwargs[str('x_b_l')] * self._dt
output_b_l_t = np.zeros((self.batch_size, self.num_classes,
self._num_timesteps))
self._input_spikecount = 0
self.set_time(self._dt)
# Main step: Propagate input through network and record output spikes.
out_spikes = self.snn.predict_on_batch(input_b_l)
# Broadcast the raw output (softmax) across time axis.
output_b_l_t[:, :, :] = np.expand_dims(out_spikes, -1)
# Record neuron variables.
i = 0
for layer in self.snn.layers:
# Excludes Input, Flatten, Concatenate, etc:
if hasattr(layer, 'spikerates') and layer.spikerates is not None:
spikerates_b_l = layer.spikerates.numpy()
spiketrains_b_l_t = to_binary_numpy(spikerates_b_l,
self.num_bits)
self.set_spikerates(spikerates_b_l, i)
self.set_spiketrains(spiketrains_b_l_t, i)
if self.synaptic_operations_b_t is not None:
self.set_synaptic_operations(spiketrains_b_l_t, i)
if self.neuron_operations_b_t is not None:
self.set_neuron_operations(i)
i += 1
if 'input_b_l_t' in self._log_keys:
self.input_b_l_t[Ellipsis, 0] = input_b_l
if self.neuron_operations_b_t is not None:
self.neuron_operations_b_t[:, 0] += self.fanin[1] * \
self.num_neurons[1] * np.ones(self.batch_size) * 2
print("Current accuracy of batch:")
if self.config.getint('output', 'verbose') > 0:
guesses_b = np.argmax(np.sum(output_b_l_t, 2), 1)
echo('{:.2%}_'.format(np.mean(kwargs[str('truth_b')] ==
guesses_b)))
return np.cumsum(output_b_l_t, 2)
def load(self, path, filename):
SNN_.load(self, path, filename)
def set_spiketrains(self, spiketrains_b_l_t, i):
if self.spiketrains_n_b_l_t is not None:
self.spiketrains_n_b_l_t[i][0][:] = spiketrains_b_l_t
def set_spikerates(self, spikerates_b_l, i):
if self.spikerates_n_b_l is not None:
self.spikerates_n_b_l[i][0][:] = spikerates_b_l
def set_neuron_operations(self, i):
self.neuron_operations_b_t += self.num_neurons_with_bias[i + 1]
def set_synaptic_operations(self, spiketrains_b_l_t, i):
for t in range(self.synaptic_operations_b_t.shape[-1]):
ops = get_layer_synaptic_operations(spiketrains_b_l_t[Ellipsis, t],
self.fanout[i + 1])
self.synaptic_operations_b_t[:, t] += 2 * ops
def to_binary_numpy(x, num_bits):
"""Transform an array of floats into binary representation.
Parameters
----------
x: ndarray
Input array containing float values. The first dimension has to be of
length 1.
num_bits: int
The fixed point precision to be used when converting to binary.
Returns
-------
y: ndarray
Output array with same shape as ``x`` except that an axis is added to
the last dimension with size ``num_bits``. The binary representation of
each value in ``x`` is distributed across the last dimension of ``y``.
"""
n = 2 ** num_bits - 1
a = np.round(x * n) / n
y = np.zeros(list(x.shape) + [num_bits])
for i in range(num_bits):
p = 2 ** -(i + 1)
b = np.greater(a, p) * p
y[Ellipsis, i] = b
a -= b
return y
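# A minimal usage sketch (single-element input; values traced by hand through
# the loop above):
#   to_binary_numpy(np.array([[0.75]]), 3)   # -> [[[0.5, 0.0, 0.125]]]
# 0.75 is first quantized to 5/7 (the nearest multiple of 1/(2**3 - 1)) and
# then decomposed into powers of two: 1/2 + 1/8, with the 1/4 bit left at zero.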
|
NeuromorphicProcessorProject/snn_toolbox
|
snntoolbox/simulation/target_simulators/INI_temporal_pattern_target_sim.py
|
Python
|
mit
| 5,979
|
[
"NEURON"
] |
6594c8fca7ab539f8a9b7c0e367b9013895fb0c6923e4be09c7fd8aa91c78f0f
|
# Copyright (C) 2016, Davit Samvelyan
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from ycm import vimsupport
from ycm.client.event_notification import EventNotification
from ycm.diagnostic_interface import DiagnosticInterface
# Emulates Vim buffer
# Used to store buffer related information like diagnostics, latest parse
# request. Stores the buffer change tick at the parse request moment, allowing
# us to efficiently determine whether a reparse is needed for the buffer.
class Buffer:
def __init__( self, bufnr, user_options, filetypes ):
self._number = bufnr
self._parse_tick = 0
self._handled_tick = 0
self._parse_request = None
self._should_resend = False
self._diag_interface = DiagnosticInterface( bufnr, user_options )
self._open_loclist_on_ycm_diags = user_options[
'open_loclist_on_ycm_diags' ]
self.UpdateFromFileTypes( filetypes )
def FileParseRequestReady( self, block = False ):
return bool( self._parse_request and
( block or self._parse_request.Done() ) )
def SendParseRequest( self, extra_data ):
# Don't send a parse request if one is in progress
if self._parse_request is not None and not self._parse_request.Done():
self._should_resend = True
return
self._should_resend = False
self._parse_request = EventNotification( 'FileReadyToParse',
extra_data = extra_data )
self._parse_request.Start()
# Decrement handled tick to ensure correct handling when we are forcing
# reparse on buffer visit and changed tick remains the same.
self._handled_tick -= 1
self._parse_tick = self._ChangedTick()
def NeedsReparse( self ):
return self._parse_tick != self._ChangedTick()
def ShouldResendParseRequest( self ):
return ( self._should_resend
or ( bool( self._parse_request )
and self._parse_request.ShouldResend() ) )
def UpdateDiagnostics( self, force = False ):
if force or not self._async_diags:
self.UpdateWithNewDiagnostics( self._parse_request.Response(), False )
else:
# We need to call the response method, because it might throw an exception
# or require extra config confirmation, even if we don't actually use the
# diagnostics.
self._parse_request.Response()
def UpdateWithNewDiagnostics( self, diagnostics, async_message ):
self._async_diags = async_message
self._diag_interface.UpdateWithNewDiagnostics(
diagnostics,
not self._async_diags and self._open_loclist_on_ycm_diags )
def UpdateMatches( self ):
self._diag_interface.UpdateMatches()
def PopulateLocationList( self, open_on_edit = False ):
return self._diag_interface.PopulateLocationList( open_on_edit )
def GetResponse( self ):
return self._parse_request.Response()
def IsResponseHandled( self ):
return self._handled_tick == self._parse_tick
def MarkResponseHandled( self ):
self._handled_tick = self._parse_tick
def OnCursorMoved( self ):
self._diag_interface.OnCursorMoved()
def GetErrorCount( self ):
return self._diag_interface.GetErrorCount()
def GetWarningCount( self ):
return self._diag_interface.GetWarningCount()
def RefreshDiagnosticsUI( self ):
return self._diag_interface.RefreshDiagnosticsUI()
def UpdateFromFileTypes( self, filetypes ):
self._filetypes = filetypes
# We will set this to true if we ever receive any diagnostics asynchronously.
self._async_diags = False
def _ChangedTick( self ):
return vimsupport.GetBufferChangedTick( self._number )
class BufferDict( dict ):
def __init__( self, user_options ):
self._user_options = user_options
def __missing__( self, key ):
# Python does not allow returning the result of an assignment directly
new_value = self[ key ] = Buffer(
key,
self._user_options,
vimsupport.GetBufferFiletypes( key ) )
return new_value
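# A minimal sketch of the __missing__ idiom used above, with a toy value type
# (the Registry name is illustrative, not part of YCM):
#   class Registry( dict ):
#     def __missing__( self, key ):
#       new_value = self[ key ] = []
#       return new_value
#   r = Registry()
#   r[ 3 ].append( 'x' )  # First access creates and stores the list.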
|
snakeleon/YouCompleteMe-x64
|
python/ycm/buffer.py
|
Python
|
gpl-3.0
| 4,644
|
[
"VisIt"
] |
ff40f76c4ae7c3f9ecf6fd5a0dbf648fbf2defc4cc38547676b117ab0eeb1ec9
|
"""
Useful support routines (for internal use).
These functions aren't really Zero Install specific; they're things we might
wish were in the standard library.
@since: 0.27
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
import os, logging
def find_in_path(prog):
"""Search $PATH for prog.
If prog is an absolute path, return it unmodified.
@param prog: name of executable to find
@return: the full path of prog, or None if not found
@since: 0.27
"""
if os.path.isabs(prog): return prog
if os.name == "nt":
prog += '.exe'
for d in os.environ.get('PATH', '/bin:/usr/bin').split(os.pathsep):
path = os.path.join(d, prog)
if os.path.isfile(path):
return path
return None
def read_bytes(fd, nbytes, null_ok = False):
"""Read exactly nbytes from fd.
@param fd: file descriptor to read from
@param nbytes: number of bytes to read
@param null_ok: if True, it's OK to receive EOF immediately (we then return None)
@return: the bytes read
@raise Exception: if we received less than nbytes of data
"""
data = ''
while nbytes:
got = os.read(fd, nbytes)
if not got:
if null_ok and not data:
return None
raise Exception(_("Unexpected end-of-stream. Data so far %(data)s; expecting %(bytes)d bytes more.")
% {'data': repr(data), 'bytes': nbytes})
data += got
nbytes -= len(got)
logging.debug(_("Message received: %s") % repr(data))
return data
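# A minimal usage sketch (using a throw-away pipe; names below are only for
# illustration):
#   r, w = os.pipe()
#   os.write(w, 'hello')
#   os.close(w)
#   read_bytes(r, 5)                  # -> 'hello'
#   read_bytes(r, 1, null_ok = True)  # -> None (EOF with no data read)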
def pretty_size(size):
"""Format a size for printing.
@param size: the size in bytes
@type size: int (or None)
@return: the formatted size
@rtype: str
@since: 0.27"""
if size is None:
return '?'
if size < 2048:
return _('%d bytes') % size
size = float(size)
for unit in (_('KB'), _('MB'), _('GB'), _('TB')):
size /= 1024
if size < 2048:
break
return _('%(size).1f %(unit)s') % {'size': size, 'unit': unit}
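# A minimal usage sketch (values follow from the thresholds above):
#   pretty_size(None)     # -> '?'
#   pretty_size(1500)     # -> '1500 bytes'
#   pretty_size(3000000)  # -> '2.9 MB'  (3000000 / 1024 / 1024 ~= 2.86)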
def ro_rmtree(root):
"""Like shutil.rmtree, except that we also delete read-only items.
@param root: the root of the subtree to remove
@type root: str
@since: 0.28"""
import shutil
import platform
if platform.system() == 'Windows':
for main, dirs, files in os.walk(root):
for i in files + dirs:
os.chmod(os.path.join(main, i), 0700)
os.chmod(root, 0700)
else:
for main, dirs, files in os.walk(root):
os.chmod(main, 0700)
shutil.rmtree(root)
|
pombredanne/zero-install
|
zeroinstall/support/__init__.py
|
Python
|
lgpl-2.1
| 2,369
|
[
"VisIt"
] |
e9f5d805d24eb6ce5ae548c66f299fb4f1f843993126f9d34183f3cd175921bb
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
"""
The :mod:`sklearn.gaussian_process` module implements scalar Gaussian Process
based predictions.
"""
from .gaussian_process import GaussianProcess
from . import correlation_models
from . import regression_models
__all__ = ['GaussianProcess', 'correlation_models', 'regression_models']
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/sklearn/gaussian_process/__init__.py
|
Python
|
gpl-2.0
| 454
|
[
"Gaussian"
] |
4a18d4b4a34b02d1e2c36b8c99f6866f21010ebfe275d3e2c8df120aaacf33b5
|
# remotefilelogserver.py - server logic for a remotefilelog server
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from mercurial import wireproto, changegroup, match, util, changelog, context, store
from mercurial.extensions import wrapfunction
from mercurial.node import bin, hex, nullid, nullrev
from mercurial.i18n import _
import shallowrepo
import stat, os, lz4, time
def setupserver(ui, repo):
"""Sets up a normal Mercurial repo so it can serve files to shallow repos.
"""
onetimesetup(ui)
# don't send files to shallow clients during pulls
def generatefiles(orig, self, changedfiles, linknodes, commonrevs, source):
caps = self._bundlecaps or []
if shallowrepo.requirement in caps:
# only send files that don't match the specified patterns
includepattern = None
excludepattern = None
for cap in (self._bundlecaps or []):
if cap.startswith("includepattern="):
includepattern = cap[len("includepattern="):].split('\0')
elif cap.startswith("excludepattern="):
excludepattern = cap[len("excludepattern="):].split('\0')
m = match.always(repo.root, '')
if includepattern or excludepattern:
m = match.match(repo.root, '', None,
includepattern, excludepattern)
changedfiles = list([f for f in changedfiles if not m(f)])
return orig(self, changedfiles, linknodes, commonrevs, source)
wrapfunction(changegroup.cg1packer, 'generatefiles', generatefiles)
# add incoming hook to continuously generate file blobs
ui.setconfig("hooks", "changegroup.remotefilelog", incominghook)
onetime = False
def onetimesetup(ui):
"""Configures the wireprotocol for both clients and servers.
"""
global onetime
if onetime:
return
onetime = True
# support file content requests
wireproto.commands['getfiles'] = (getfiles, '')
class streamstate(object):
match = None
shallowremote = False
state = streamstate()
def stream_out_shallow(repo, proto, other):
includepattern = None
excludepattern = None
raw = other.get('includepattern')
if raw:
includepattern = raw.split('\0')
raw = other.get('excludepattern')
if raw:
excludepattern = raw.split('\0')
oldshallow = state.shallowremote
oldmatch = state.match
try:
state.shallowremote = True
state.match = match.always(repo.root, '')
if includepattern or excludepattern:
state.match = match.match(repo.root, '', None,
includepattern, excludepattern)
return wireproto.stream(repo, proto)
finally:
state.shallowremote = oldshallow
state.match = oldmatch
wireproto.commands['stream_out_shallow'] = (stream_out_shallow, '*')
# don't clone filelogs to shallow clients
def _walkstreamfiles(orig, repo):
if state.shallowremote:
# if we are shallow ourselves, stream our local commits
if shallowrepo.requirement in repo.requirements:
striplen = len(repo.store.path) + 1
readdir = repo.store.rawvfs.readdir
visit = [os.path.join(repo.store.path, 'data')]
while visit:
p = visit.pop()
for f, kind, st in readdir(p, stat=True):
fp = p + '/' + f
if kind == stat.S_IFREG:
if not fp.endswith('.i') and not fp.endswith('.d'):
n = util.pconvert(fp[striplen:])
yield (store.decodedir(n), n, st.st_size)
if kind == stat.S_IFDIR:
visit.append(fp)
# Return .d and .i files that do not match the shallow pattern
shallowmatch = state.match or match.always(repo.root, '')
for (u, e, s) in repo.store.datafiles():
f = u[5:-2] # trim data/... and .i/.d
if not shallowmatch(f):
yield (u, e, s)
for x in repo.store.topfiles():
yield x
elif shallowrepo.requirement in repo.requirements:
# don't allow cloning from a shallow repo to a full repo
# since it would require fetching every version of every
# file in order to create the revlogs.
raise util.Abort(_("Cannot clone from a shallow repo "
+ "to a full repo."))
else:
for x in orig(repo):
yield x
wrapfunction(wireproto, '_walkstreamfiles', _walkstreamfiles)
# We no longer use getbundle_shallow commands, but we must still
# support it for migration purposes
def getbundleshallow(repo, proto, others):
bundlecaps = others.get('bundlecaps', '')
bundlecaps = set(bundlecaps.split(','))
bundlecaps.add('remotefilelog')
others['bundlecaps'] = ','.join(bundlecaps)
return wireproto.commands["getbundle"][0](repo, proto, others)
wireproto.commands["getbundle_shallow"] = (getbundleshallow, '*')
# expose remotefilelog capabilities
def capabilities(orig, repo, proto):
caps = orig(repo, proto)
if (shallowrepo.requirement in repo.requirements or
ui.configbool('remotefilelog', 'server')):
caps += " " + shallowrepo.requirement
return caps
wrapfunction(wireproto, 'capabilities', capabilities)
def getfiles(repo, proto):
"""A server api for requesting particular versions of particular files.
"""
if shallowrepo.requirement in repo.requirements:
raise util.Abort(_('cannot fetch remote files from shallow repo'))
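# The wire format implemented by the streamer below is line oriented: each
# request line is a 40-character hex node immediately followed by the file
# path; each response is the blob length, a newline, then the (lz4-compressed)
# file blob. A null node is answered with '0\n'.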
def streamer():
fin = proto.fin
opener = repo.sopener
cachepath = repo.ui.config("remotefilelog", "servercachepath")
if not cachepath:
cachepath = os.path.join(repo.path, "remotefilelogcache")
# everything should be user & group read/writable
oldumask = os.umask(0o002)
try:
while True:
request = fin.readline()[:-1]
if not request:
break
node = bin(request[:40])
if node == nullid:
yield '0\n'
continue
path = request[40:]
filecachepath = os.path.join(cachepath, path, hex(node))
if not os.path.exists(filecachepath):
filectx = repo.filectx(path, fileid=node)
if filectx.node() == nullid:
repo.changelog = changelog.changelog(repo.sopener)
filectx = repo.filectx(path, fileid=node)
text = createfileblob(filectx)
text = lz4.compressHC(text)
dirname = os.path.dirname(filecachepath)
if not os.path.exists(dirname):
os.makedirs(dirname)
try:
with open(filecachepath, "w") as f:
f.write(text)
except IOError:
# Don't abort if the user only has permission to read,
# and not write.
pass
else:
with open(filecachepath, "r") as f:
text = f.read()
yield '%d\n%s' % (len(text), text)
# it would be better to only flush after processing a whole batch
# but currently we don't know if there are more requests coming
proto.fout.flush()
finally:
os.umask(oldumask)
return wireproto.streamres(streamer())
def incominghook(ui, repo, node, source, url, **kwargs):
"""Server hook that produces the shallow file blobs immediately after
a commit, in anticipation of them being requested soon.
"""
cachepath = repo.ui.config("remotefilelog", "servercachepath")
if not cachepath:
cachepath = os.path.join(repo.path, "remotefilelogcache")
heads = repo.revs("heads(%s::)" % node)
# everything should be user & group read/writable
oldumask = os.umask(0o002)
try:
count = 0
for head in heads:
mf = repo[head].manifest()
for filename, filenode in mf.iteritems():
filecachepath = os.path.join(cachepath, filename, hex(filenode))
if os.path.exists(filecachepath):
continue
# This can be a bit slow. Don't block the commit returning
# for large commits.
if count > 500:
break
count += 1
filectx = repo.filectx(filename, fileid=filenode)
text = createfileblob(filectx)
text = lz4.compressHC(text)
dirname = os.path.dirname(filecachepath)
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(filecachepath, "w")
try:
f.write(text)
finally:
f.close()
finally:
os.umask(oldumask)
def createfileblob(filectx):
text = filectx.data()
repo = filectx._repo
ancestors = [filectx]
try:
repo.forcelinkrev = True
ancestors.extend([f for f in filectx.ancestors()])
ancestortext = ""
for ancestorctx in ancestors:
parents = ancestorctx.parents()
p1 = nullid
p2 = nullid
if len(parents) > 0:
p1 = parents[0].filenode()
if len(parents) > 1:
p2 = parents[1].filenode()
copyname = ""
rename = ancestorctx.renamed()
if rename:
copyname = rename[0]
linknode = ancestorctx.node()
ancestortext += "%s%s%s%s%s\0" % (
ancestorctx.filenode(), p1, p2, linknode,
copyname)
finally:
repo.forcelinkrev = False
return "%d\0%s%s" % (len(text), text, ancestortext)
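# For reference, the blob produced above is laid out as
# "<text length>\0<raw file text>" followed by one record per ancestor:
# "<filenode><p1><p2><linknode><copyname>\0".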
def gcserver(ui, repo):
if not repo.ui.configbool("remotefilelog", "server"):
return
neededfiles = set()
heads = repo.revs("heads(all())")
cachepath = repo.join("remotefilelogcache")
for head in heads:
mf = repo[head].manifest()
for filename, filenode in mf.iteritems():
filecachepath = os.path.join(cachepath, filename, hex(filenode))
neededfiles.add(filecachepath)
# delete unneeded older files
days = repo.ui.configint("remotefilelog", "serverexpiration", 30)
expiration = time.time() - (days * 24 * 60 * 60)
_removing = _("removing old server cache")
count = 0
ui.progress(_removing, count, unit="files")
for root, dirs, files in os.walk(cachepath):
for file in files:
filepath = os.path.join(root, file)
count += 1
ui.progress(_removing, count, unit="files")
if filepath in neededfiles:
continue
stat = os.stat(filepath)
if stat.st_mtime < expiration:
os.remove(filepath)
ui.progress(_removing, None)
|
pycontribs/remotefilelog
|
remotefilelog/remotefilelogserver.py
|
Python
|
gpl-2.0
| 11,639
|
[
"VisIt"
] |
8c91f645f3c530154bf3c7503e5f200a6596779734085cea8b5e0640c3b0bc16
|
'''
Created on 2012-09-29
A simple script that reads a WRF netCDF-4 file and displays a 2D field in a proper geographic projection;
the application here is plotting precipitation in the inner WRF domain.
@author: Andre R. Erler
'''
## includes
# matplotlib config: size etc.
import numpy as np
import matplotlib.pylab as pyl
import matplotlib as mpl
mpl.rc('lines', linewidth=1.)
mpl.rc('font', size=10)
# pygeode stuff
from myDatasets.loadWRF import openWRF
from myPlots.plots import surfacePlot
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import cm, maskoceans
#from pygeode.plot import plot_v1 as pl
#from pygeode.plot import basemap as bm
## settings
nax = 2 # number of panels
ndom = 2
sf = dict(dpi=150) # print properties
folder = '/home/me/Research/Dynamical Downscaling/figures/' # figure directory
if __name__ == '__main__':
## read data
data = openWRF('ctrl-1',[1982],list(range(11,12)))
print(data[ndom-1])
## compute data
precip = []; ndays = []
for n in range(ndom):
nrec = data[n].time.values[-1]+1
ndays = data[n].xtime(time=nrec-1).get() /24/60 # xtime is in minutes, need days
dailyrain = data[n].rain(time=nrec-1).get() / ndays
# ndays = ( data[n].xtime(time=nrec-1).get() - data[n].xtime(time=0).get() )/24/60 # xtime is in minutes, need days
# dailyrain = ( data[n].rain(time=nrec-1).get() - data[n].rain(time=0).get() ) / ndays
precip.append(dailyrain.squeeze())
## setup projection
f = pyl.figure(facecolor='white', figsize = (6.25,4.25))
ax = []
for n in range(nax):
ax.append(f.add_subplot(1,2,n+1))
f.subplots_adjust(bottom=0.12, left=0.06, right=.97, top=.94, hspace=0.05, wspace=0.05) # hspace, wspace
# setup lambert conformal basemap.
# lat_1 is first standard parallel.
# lat_2 is second standard parallel (defaults to lat_1).
# lon_0,lat_0 is central point.
# rsphere=(6378137.00,6356752.3142) specifies WGS84 ellipsoid
# area_thresh=1000 means don't plot coastline features less
# than 1000 km^2 in area.
lcc = dict(projection='lcc', lat_0=59, lon_0=-123, lat_1=53, rsphere=(6378137.00,6356752.3142),#
width=310*10e3, height=315*10e3, area_thresh = 1000., resolution='l')
# map projection boundaries for inner WRF domain
map = []
for n in range(nax):
map.append(Basemap(ax=ax[n],**lcc)) # one map for each panel!!
## Plot data
grid = 10; res = 'l'
clevs = np.linspace(0,25,51)
norm = mpl.colors.Normalize(vmin=min(clevs),vmax=max(clevs),clip=True)
cmap = mpl.cm.gist_ncar #s3pcpn
cmap.set_over('purple'); cmap.set_under('blue')
# coordinates
lat = []; lon = []; x = []; y = []
for n in range(ndom):
lat.append(data[n].lat.get())
lon.append(data[n].lon.get())
xx, yy = map[0](lon[n],lat[n]) # convert to map-native coordinates
x.append(xx); y.append(yy)
# draw boundaries of inner and outer domains
bdy2 = np.ones_like(lat[1]); bdy2[0,:]=0; bdy2[-1,:]=0; bdy2[:,0]=0; bdy2[:,-1]=0
for n in range(nax):
# N.B.: bdy2 depends on inner domain coordinates x[1],y[1]
map[n].contour(x[1],y[1],bdy2,[0],ax=ax[n], colors='k') # draw boundary of inner domain
# # terrain data: mask out ocean
# zs = []
# for n in xrange(ndom):
# zs.append(maskoceans(lon[n],lat[n],data[n].zs.get(),resolution=res,grid=grid))
# draw data
cd = []
for n in range(nax): # only plot first domain in first panel
for m in range(n+1): # but also plot first domain in second panel (as background)
print('panel %i / domain %i'%(n,m))
print('precip: min %f / max %f / mean %f'%(precip[m].min(),precip[m].max(),precip[m].mean()))
cd.append(map[n].contourf(x[m],y[m],precip[m],clevs,ax=ax[n],cmap=cmap, norm=norm,extend='both'))
# add colorbar
cax = f.add_axes([0.1, 0.06, 0.8, 0.03])
for cn in cd: # [c1d1, c1d2, c2d2]:
cn.set_clim(vmin=min(clevs),vmax=max(clevs))
cbar = f.colorbar(cax=cax,mappable=cd[0],orientation='h',extend='both') # ,size='3%',pad='2%'
cbl = np.linspace(min(clevs),max(clevs),6)
cbar.set_ticks(cbl); cbar.set_ticklabels(['%02.1f mm'%(lev) for lev in cbl])
## Annotation
# add labels
f.suptitle('Average Daily Precipitation',fontsize=12)
ax[0].set_title('Outer Domain (30 km)',fontsize=11)
ax[1].set_title('Inner Domain (10 km)',fontsize=11)
# ax.set_xlabel('Longitude'); ax.set_ylabel('Latitude')
map[0].drawmapscale(-135, 49, -137, 57, 800, barstyle='fancy', yoffset=0.01*(map[n].ymax-map[n].ymin))
for n in range(nax):
if n == 0 or n == 1: Bottom = True
else: Bottom = False
if n == 0: Left = True
else: Left = False
# land/sea mask
map[n].drawlsmask(ocean_color='blue', land_color='green',resolution=res,grid=grid)
# add map stuff
map[n].drawcoastlines(linewidth=0.5)
map[n].drawcountries(linewidth=0.5)
# map[n].drawrivers(linewidth=0.5)
# map[n].fillcontinents(color = 'coral')
map[n].drawmapboundary(fill_color='k',linewidth=2)
# labels = [left,right,top,bottom]
map[n].drawparallels([45,65],linewidth=1, labels=[Left,False,False,False])
map[n].drawparallels([55,75],linewidth=0.5, labels=[Left,False,False,False])
map[n].drawmeridians([-140,-120,-100],linewidth=1, labels=[False,False,False,Bottom])
map[n].drawmeridians([-150,-130,-110],linewidth=0.5, labels=[False,False,False,Bottom])
# save figure to disk
f.savefig(folder+'AnnualPrecip.pdf', **sf) # save figure to pdf
print(('\nSaved figure in '+folder+'AnnualPrecip.pdf'))
# show plots
pyl.show()
## more projections
# setup lambert azimuthal equal area basemap.
# lat_ts is latitude of true scale.
# lon_0,lat_0 is central point.
# laea = dict(projection='laea', lat_0=57, lon_0=-137, lat_ts=53, resolution='l', #
# width=259*30e3, height=179*30e3, rsphere=(6378137.00,6356752.3142), area_thresh = 1000.)
# lon_0, lat_0 are the center point of the projection.
# resolution = 'l' means use low resolution coastlines.
# ortho = dict(projection='ortho', lat_0 = 57, lon_0 = -137, resolution = 'l', area_thresh = 1000.)
# 'parallels':[30,50,70], 'meridians':[-180,-150,-120,-90], 'labels':[1,0,0,1]}
|
aerler/WRF-Projects
|
src/archive/plotPrecip.py
|
Python
|
gpl-3.0
| 6,161
|
[
"NetCDF"
] |
96865cea41c55c55d2e8bc6e380f73ddbbbbac2eea5b1b4952f5f7fa50e4281b
|
#!/usr/bin/python
from __future__ import print_function
import py2neo, sys, json, anydbm, os, optparse, base64
from timeit import default_timer as timer
import snomed_g_lib_neo4j
# -----------------------------------------------------------------------------
# Module: snomed_g_TC_tools.py
# Purpose: Compute Transitive-Closure (currently only from Graph using py2neo).
# Given a NEO4J database with SNOMED_G schema,
# generate a transitive closure file.
# Uses REST API to NEO4J, via py2neo library.
# Syntax: python <pgm> <outputfile> <date>
# Date: syntax YYYYMMDD
# Eg: python snomed_g_TC_tools.py TC.txt 20140131
# Output: Specified transitive closure file, one parent-child pair per line:
# <parent>,<child>
# Algorithm:
# DAG_DFTC algorithm for generation of Transitive Closure.
# ACM Transactions on Database Systems, Vol. 18, No. 3, Sep. 1993,
# Pages: 512 - 576.
# Author: Jay Pedersen, University of Nebraska, Aug 25, 2015
# -----------------------------------------------------------------------------
#---------------------------------------------------------------------------------|
# TC_from_RF2 |
#---------------------------------------------------------------------------------|
def TC_from_RF2(arglist):
#-------------------------------------------------------------------------------
# build_ISA_graph(children,filename)
# Concept: Reads ISA edges from relationships file, stores in the children hash
#-------------------------------------------------------------------------------
def build_ISA_graph(children,relationships_filename):
global ISA_edges
for idx,line in enumerate(x.rstrip('\n').rstrip('\r') for x in open(relationships_filename)):
# line -- [0]id,[1]effectiveTime,[2]active,[3]moduleId,[4]sourceId,[5]destinationId,
# [6]relationshipGroup,[7]typeId,[8]characteristicTypeId,[9]modifierId
if idx==0: continue # ignore column names
values = line.split('\t')
active, sourceId, destinationId, typeId = \
(values[2], values[4], values[5], values[7])
if typeId=="116680003" and active=="1": # active ISA relationship
if destinationId not in children: # parent discovered
children[destinationId] = set([sourceId]) # 1st child, create list
else:
children[destinationId].add(sourceId) # nth child, add to set
return # done
#-------------------------------------------------------------------------------
# compute_TC_table(startnode,children,descendants,visited)
#-------------------------------------------------------------------------------
# Based on a method described in "Transitive Closure Algorithms
# Based on Graph Traversal" by Yannis Ioannidis, Raghu Ramakrishnan, and Linda Winger,
# ACM Transactions on Database Systems, Vol. 18, No. 3, September 1993,
# Pages: 512 - 576.
# Simplified version of their "DAG_DFTC" algorithm.
#-------------------------------------------------------------------------------
#
def compute_TC_table(startnode,children,descendants,visited): # recursively depth-first traverse the graph.
visited.add(startnode)
descendants[startnode] = set([]) # no descendants yet
if startnode not in children: return # no children case, leaf nodes
for childnode in children[startnode]: # for all the children of the startnode
if childnode not in visited: # if not yet visited (Note: DFS traversal)
compute_TC_table(childnode,children,descendants,visited) # recursively visit the childnode, set descendants
for descendant in list(descendants[childnode]): # each descendant of childnode
descendants[startnode].add(descendant) # mark descendants of startnode
descendants[startnode].add(childnode) # mark immediate child of startnode
return
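# A small worked example of the traversal above (toy graph, not SNOMED data):
#   children = {'A': set(['B', 'C']), 'B': set(['D'])}
#   descendants, visited = {}, set()
#   compute_TC_table('A', children, descendants, visited)
#   # descendants['A'] == set(['B', 'C', 'D'])
#   # descendants['B'] == set(['D']); descendants['C'] == descendants['D'] == set()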
def print_TC_table(descendants, outfile_name):
fout = open(outfile_name, 'w')
for startnode in descendants.keys():
for endnode in list(descendants[startnode]):
print('%s,%s' % (startnode,endnode), file = fout)
fout.close()
return
# TC_from_RF2:
# command line parsing
if len(arglist)!=2:
print('Syntax: cmd TC_from_RF2 <relationshipsfilename-in> <TCfilename-out>')
sys.exit(1)
relationships_filename, output_TC_filename = arglist[0], arglist[1]
# Compute TC table from ISA relationships, output to specified file.
children, visited, descendants, concept_node = ({}, set(), {}, "138875005") # init
build_ISA_graph(children, relationships_filename) # build 'children' hash
compute_TC_table(concept_node, children, descendants, visited)
print_TC_table(descendants, output_TC_filename)
# All done
return
#---------------------------------------------------------------------------------|
# TC_from_graph |
#---------------------------------------------------------------------------------|
def TC_from_graph(arglist):
#-------------------------------------------------------------------------------
# build_ISA_graph(children,filename)
# Concept: Reads ISA edges from relationships file, stores in the children hash
#-------------------------------------------------------------------------------
def build_ISA_graph(children,isa_rels):
for idvalue in isa_rels.keys():
isa_map = isa_rels[idvalue]
active, sourceId, destinationId = isa_map['active'], isa_map['sourceId'], isa_map['destinationId']
if active=='1': # active ISA relationship
if destinationId not in children: # parent discovered
children[destinationId] = set([sourceId]) # 1st child, create list
else:
children[destinationId].add(sourceId) # nth child, add to set
return # done
#-------------------------------------------------------------------------------
# compute_TC_table(startnode,children,descendants,visited)
#-------------------------------------------------------------------------------
# Based on a method described in "Transitive Closure Algorithms
# Based on Graph Traversal" by Yannis Ioannidis, Raghu Ramakrishnan, and Linda Winger,
# ACM Transactions on Database Systems, Vol. 18, No. 3, September 1993,
# Pages: 512 - 576.
# Simplified version of their "DAG_DFTC" algorithm.
#-------------------------------------------------------------------------------
#
def compute_TC_table(startnode,children,descendants,visited): # recursively depth-first traverse the graph.
visited.add(startnode)
descendants[startnode] = set([]) # no descendants yet
if startnode not in children: return # no children case, leaf nodes
for childnode in children[startnode]: # for all the children of the startnode
if childnode not in visited: # if not yet visited (Note: DFS traversal)
compute_TC_table(childnode,children,descendants,visited) # recursively visit the childnode, set descendants
for descendant in list(descendants[childnode]): # each descendant of childnode
descendants[startnode].add(descendant) # mark descendants of startnode
descendants[startnode].add(childnode) # mark immediate child of startnode
return
def print_TC_table(descendants, outfile_name):
fout = open(outfile_name, 'w')
for startnode in descendants.keys():
for endnode in list(descendants[startnode]):
print('%s,%s' % (startnode,endnode), file = fout)
fout.close()
return
def show_timings(t):
print('NEO4J Graph DB open: %g' % (t['graph_open_end']-t['graph_open_start']))
print('ISA extraction from NEO4J: %g' % (t['isa_get_end']-t['isa_get_start']))
print('TC computation: %g' % (t['TC_end']-t['TC_start']))
print('Output (csv): %g' % (t['output_write_end']-t['output_write_start']))
print('Total time: %g' % (t['end']-t['start']))
# TC_from_graph:
# command line parsing
opt = optparse.OptionParser()
opt.add_option('--neopw64', action='store')
opt.add_option('--neopw', action='store')
opts, args = opt.parse_args(arglist)
if not (len(args)==1 and (opts.neopw or opts.neopw64)):
print('Usage: cmd TC_from_graph <TCfile-out> --neopw <pw>'); sys.exit(1)
if opts.neopw and opts.neopw64:
print('Usage: only one of --neopw and --neopw64 may be specified')
sys.exit(1)
if opts.neopw64: # snomed_g v1.2, convert neopw64 to neopw
opts.neopw = str(base64.b64decode(opts.neopw64),'utf-8') if sys.version_info[0]==3 else base64.decodestring(opts.neopw64) # py2
output_TC_filename = args[0]
# Extract ISA relationships from graph (active and inactive)
timings = {}
timings['start'] = timer()
timings['graph_open_start'] = timer()
neo4j = snomed_g_lib_neo4j.Neo4j_Access(opts.neopw)
timings['graph_open_end'] = timer()
timings['isa_get_start'] = timer()
isa_rels = neo4j.lookup_all_isa_rels()
timings['isa_get_end'] = timer()
print('Result class: %s' % str(type(isa_rels)))
print('Returned %d objects' % len(isa_rels))
# Compute TC table from ISA relationships, output to specified file.
timings['TC_start'] = timer()
children, visited, descendants, concept_node = ({}, set(), {}, "138875005") # init
build_ISA_graph(children, isa_rels) # build 'children' hash
compute_TC_table(concept_node, children, descendants, visited)
timings['TC_end'] = timer()
timings['output_write_start'] = timer()
print_TC_table(descendants, output_TC_filename)
timings['output_write_end'] = timer()
timings['end'] = timer()
show_timings(timings)
# All done
return
# END TC_from_graph
#---------------------------------------------------------------------------------|
# TC_fordate_from_graph |
#---------------------------------------------------------------------------------|
def TC_fordate_from_graph(arglist):
def active_at_date(datestring, isa_edge):
active = '0' # if no information applies (possible), default to inactive
# check the current definition, may be in effect at given date
if isa_edge['effectiveTime'] <= datestring: # the current def in effect
active = isa_edge['active']
elif len(isa_edge['history']) > 2: # check history, current definition doesn't apply
# eg: datestring = 20050101 and current effectiveTime is 20160101 ==> not in effect
# hist items 20030101 and 20040101 exist ==> 20040101 is in effect on 20050101.
# note: no need to check current element again, already determined not in effect
# JSON example [{"typeId": "116680003", "sourceId": "900000000000441003", ...},{...}]
ordered_history_list = json.loads(isa_edge['history'])
for hist_elem in ordered_history_list: # list of maps
if hist_elem['effectiveTime'] > datestring: break # in future vs given date
if 'active' in hist_elem: active = hist_elem['active']
return active=='1'
#-------------------------------------------------------------------------------
# build_ISA_graph(children,filename)
# Concept: Reads ISA edges from relationships file, stores in the children hash
#-------------------------------------------------------------------------------
def build_ISA_graph(children,isa_rels,yyyymmdd):
for idvalue in isa_rels.keys():
isa_map = isa_rels[idvalue]
sourceId, destinationId = isa_map['sourceId'], isa_map['destinationId']
if active_at_date(yyyymmdd, isa_map):
if destinationId not in children: # parent discovered
children[destinationId] = set([sourceId]) # 1st child, create list
else:
children[destinationId].add(sourceId) # nth child, add to set
return # done
#-------------------------------------------------------------------------------
# compute_TC_table(startnode,children,descendants,visited)
#-------------------------------------------------------------------------------
# Based on a method described in "Transitive Closure Algorithms
# Based on Graph Traversal" by Yannis Ioannidis, Raghu Ramakrishnan, and Linda Winger,
# ACM Transactions on Database Systems, Vol. 18, No. 3, September 1993,
# Pages: 512 - 576.
# Simplified version of their "DAG_DFTC" algorithm.
#-------------------------------------------------------------------------------
#
def compute_TC_table(startnode,children,descendants,visited): # recursively depth-first traverse the graph.
visited.add(startnode)
descendants[startnode] = set([]) # no descendants yet
if startnode not in children: return # no children case, leaf nodes
for childnode in children[startnode]: # for all the children of the startnode
if childnode not in visited: # if not yet visited (Note: DFS traversal)
compute_TC_table(childnode,children,descendants,visited) # recursively visit the childnode, set descendants
for descendant in list(descendants[childnode]): # each descendant of childnode
descendants[startnode].add(descendant) # mark descendants of startnode
descendants[startnode].add(childnode) # mark immediate child of startnode
return
def print_TC_table(descendants, outfile_name):
fout = open(outfile_name, 'w')
for startnode in descendants.keys():
for endnode in list(descendants[startnode]):
print('%s,%s' % (startnode,endnode), file = fout)
fout.close()
return
def show_timings(t):
print('NEO4J Graph DB open: %g' % (t['graph_open_end']-t['graph_open_start']))
print('ISA extraction from NEO4J: %g' % (t['isa_get_end']-t['isa_get_start']))
print('TC computation: %g' % (t['TC_end']-t['TC_start']))
print('Output (csv): %g' % (t['output_write_end']-t['output_write_start']))
print('Total time: %g' % (t['end']-t['start']))
# TC_fordate_from_graph:
# command line parsing
opt = optparse.OptionParser()
opt.add_option('--neopw64', action='store')
opt.add_option('--neopw', action='store')
opts, args = opt.parse_args(arglist)
if not (len(args)==2 and (opts.neopw or opts.neopw64)):
print('Usage: cmd TC_fordate_from_graph YYYYMMDD <TCfile-out> --neopw <pw>'); sys.exit(1)
if opts.neopw and opts.neopw64:
print('Usage: only one of --neopw and --neopw64 may be specified')
sys.exit(1)
if opts.neopw64: # snomed_g v1.2, convert neopw64 to neopw
opts.neopw = str(base64.b64decode(opts.neopw64),'utf-8') if sys.version_info[0]==3 else base64.decodestring(opts.neopw64) # py2
yyyymmdd, output_TC_filename = args[0], args[1]
# Extract ISA relationships from graph (active and inactive)
timings = {}
timings['start'] = timer()
timings['graph_open_start'] = timer()
neo4j = snomed_g_lib_neo4j.Neo4j_Access(opts.neopw)
timings['graph_open_end'] = timer()
timings['isa_get_start'] = timer()
isa_rels = neo4j.lookup_all_isa_rels()
timings['isa_get_end'] = timer()
print('Result class: %s' % str(type(isa_rels)))
print('Returned %d objects' % len(isa_rels))
# Compute TC table from ISA relationships, output to specified file.
timings['TC_start'] = timer()
children, visited, descendants, concept_node = ({}, set(), {}, "138875005") # init
build_ISA_graph(children, isa_rels, yyyymmdd) # build 'children' hash
compute_TC_table(concept_node, children, descendants, visited)
timings['TC_end'] = timer()
timings['output_write_start'] = timer()
print_TC_table(descendants, output_TC_filename)
timings['output_write_end'] = timer()
timings['end'] = timer()
show_timings(timings)
return
# END TC_fordate_from_graph
#---------------------------------------------------------------------------------|
# compare_TC_files |
#---------------------------------------------------------------------------------|
def compare_TC_files(arglist):
# Syntax: cmd <file1> <file2>
# Concept: see if the files have the same content, ignoring line order
def build_hash(f):
h = {}
for line in [x.rstrip('\n').rstrip('\r') for x in f]:
p,c = line.split(',') # parent, child
if p not in h: h[p] = [c]
else: h[p].append(c)
return h
def compare_hashes(h1,h2):
# same number of keys?
k1, k2 = h1.keys(), h2.keys()
if len(k1) != len(k2):
print('Key counts differ. %d vs %d' % (len(k1),len(k2))); return
# same keys?
if sorted(k1)!=sorted(k2):
print('Set of keys do not match'); return
# same hash values for each key?
relcount = 0
for key in k1:
if sorted(h1[key]) != sorted(h2[key]):
print('Key hash values do not match')
print('Case: key=[%s]' % key)
print('file1: %s' % str(sorted(h1[key])))
print('file2: %s' % str(sorted(h2[key])))
return
else:
relcount += len(h1[key])
# hashes match
print('Contents match (%d parents, %d relationships).' % (len(k1),relcount))
return
def verbose_compare_hashes(h1,h2):
def show_diffs_in_keys(diffs,display_message):
if len(diffs)>0:
print((display_message+': %d') % len(diffs))
for idx,x in enumerate(diffs): print('%4d. [%s]' % (idx+1,x))
print()
def show_diffs_for_single_key(key,set1,set2):
def show_diffs_for_key(diffs,display_message):
print((display_message+': %d') % len(diffs))
for idx,x in enumerate(diffs): print(' %4d. [%s]' % (idx+1,x))
print()
print('TC sets differ for %s (NOTE: codes in common: %d)' % (key, len(set1 & set2)))
show_diffs_for_key(list(set1-set2),' Codes in TC in file1 but not in file2')
show_diffs_for_key(list(set2-set1),' Codes in TC in file2 but not in file1')
# Show difference in key sets
k1, k2 = set(h1.keys()), set(h2.keys())
differences_in_keysets = False
if len(k1) != len(k2): print('Key counts differ. %d vs %d' % (len(k1),len(k2)))
if k1!=k2: # if key sets differ
differences_in_keysets = True
print('Set of keys do not match')
show_diffs_in_keys(list(k1-k2),'Keys in file1 that are not in file2')
show_diffs_in_keys(list(k2-k1),'Keys in file2 that are not in file1')
# Show differences where keys exist in both
shared_keys = k1 & k2
relcount = 0
diffcount = 0
for key in list(shared_keys):
s1 = set(h1[key])
s2 = set(h2[key]) if key in h2 else set()
if s1 != s2:
diffcount += 1
show_diffs_for_single_key(key, s1, s2)
# hashes match?
if not differences_in_keysets and diffcount==0:
print('Contents match (%d parents, %d relationships).' % (len(k1),relcount))
else:
print('Transitive closures do NOT match in file1 and file2')
if differences_in_keysets: print('Key sets do not match, as reported above.')
if diffcount>0: print('%d codes with differences' % diffcount)
return
#compare_TC_files:
opt = optparse.OptionParser()
opt.add_option('--verbose',action='store_true',dest='verbose')
opts, args = opt.parse_args(arglist)
if not (len(args)==2):
print('Usage: cmd compare_TC_files <TCfile1> <TCfile2>'); sys.exit(1)
h = []
h1 = build_hash(open(args[0]))
h2 = build_hash(open(args[1]))
if opts.verbose:
verbose_compare_hashes(h1,h2)
else:
compare_hashes(h1,h2) #
return
# END compare_TC_files
#----------------------------------------------------------------------------|
# MAIN |
#----------------------------------------------------------------------------|
def parse_and_interpret(arglist):
command_interpreters = [('TC_from_RF2',TC_from_RF2),
('TC_from_graph',TC_from_graph),
('TC_fordate_from_graph',TC_fordate_from_graph),
('compare_TC_files',compare_TC_files)]
command_names = [x[0] for x in command_interpreters]
if len(arglist) < 1: print('Usage: python <cmd> %s ...' % '[one of %s]' % ','.join(command_names)); sys.exit(1)
# DEMAND that arglist[0] be one of the sub-commands
command_name = arglist[0]
try: command_index = command_names.index(command_name)
except: print('Usage : python <cmd> %s ...' % '[one of %s]' % ','.join(command_names)); sys.exit(1)
command_interpreters[command_index][1](arglist[1:]) # call appropriate interpreter
# MAIN
parse_and_interpret(sys.argv[1:]) # causes sub-command processing to occur as well
sys.exit(0)
|
rorydavidson/SNOMED-CT-Database
|
NEO4J/snomed_g_TC_tools.py
|
Python
|
mit
| 20,561
|
[
"VisIt"
] |
377bf1aa803f777d5cd91b0530f1a86a03dff6dd311eb092f81d7ef6495b0e6d
|
# author: brian dillmann
# for rscs
# this might be a useless class - dont give af
class Transition:
def __init__(self, destinationStateName, condition, evaluator):
self.destination = destinationStateName
self.condition = condition
self.evaluator = evaluator
def evaluate(self):
return self.evaluator.evaluate(self.condition)
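# A minimal usage sketch (the evaluator and read_temperature below are
# hypothetical, not part of rscs):
#   class CallableEvaluator:
#       def evaluate(self, condition):
#           return condition()
#   t = Transition('cooldown', lambda: read_temperature() > 90, CallableEvaluator())
#   t.evaluate()  # True whenever read_temperature() exceeds 90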
|
dillmann/rscs
|
lib/Graph/Transition.py
|
Python
|
mit
| 338
|
[
"Brian"
] |
cc9c0c3c67a0fd5947e52120b0ba624f2beeaebbd6c1b8d1b0df27a3415c11f3
|
from ..instrumenters import InstrumentPlugin
from ...metrics import formatting
import time
import logging
log = logging.getLogger( __name__ )
GALAXY_SLOTS_KEY = "galaxy_slots"
START_EPOCH_KEY = "start_epoch"
END_EPOCH_KEY = "end_epoch"
RUNTIME_SECONDS_KEY = "runtime_seconds"
class CorePluginFormatter( formatting.JobMetricFormatter ):
def format( self, key, value ):
value = int( value )
if key == GALAXY_SLOTS_KEY:
return ( "Cores Allocated", "%d" % value )
elif key == RUNTIME_SECONDS_KEY:
return ( "Job Runtime (Wall Clock)", formatting.seconds_to_str( value ) )
else:
# TODO: Use localized version of this from galaxy.ini
title = "Job Start Time" if key == START_EPOCH_KEY else "Job End Time"
return (title, time.strftime( '%Y-%m-%d %H:%M:%S', time.localtime( value ) ) )
class CorePlugin( InstrumentPlugin ):
""" Simple plugin that collects data without external dependencies. In
particular it currently collects value set for Galaxy slots.
"""
plugin_type = "core"
formatter = CorePluginFormatter()
def __init__( self, **kwargs ):
pass
def pre_execute_instrument( self, job_directory ):
commands = []
commands.append( self.__record_galaxy_slots_command( job_directory ) )
commands.append( self.__record_seconds_since_epoch_to_file( job_directory, "start" ) )
return commands
def post_execute_instrument( self, job_directory ):
commands = []
commands.append( self.__record_seconds_since_epoch_to_file( job_directory, "end" ) )
return commands
def job_properties( self, job_id, job_directory ):
galaxy_slots_file = self.__galaxy_slots_file( job_directory )
properties = {}
properties[ GALAXY_SLOTS_KEY ] = self.__read_integer( galaxy_slots_file )
start = self.__read_seconds_since_epoch( job_directory, "start" )
end = self.__read_seconds_since_epoch( job_directory, "end" )
if start is not None and end is not None:
properties[ START_EPOCH_KEY ] = start
properties[ END_EPOCH_KEY ] = end
properties[ RUNTIME_SECONDS_KEY ] = end - start
return properties
def __record_galaxy_slots_command( self, job_directory ):
galaxy_slots_file = self.__galaxy_slots_file( job_directory )
return '''echo "$GALAXY_SLOTS" > '%s' ''' % galaxy_slots_file
def __record_seconds_since_epoch_to_file( self, job_directory, name ):
path = self._instrument_file_path( job_directory, "epoch_%s" % name )
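# Note: the '%s' below is the strftime format code for seconds since the epoch
# understood by the shell's 'date' command; string concatenation is used so
# that Python's own %-formatting does not try to interpolate it.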
return 'date +"%s" > ' + path
def __read_seconds_since_epoch( self, job_directory, name ):
path = self._instrument_file_path( job_directory, "epoch_%s" % name )
return self.__read_integer( path )
def __galaxy_slots_file( self, job_directory ):
return self._instrument_file_path( job_directory, "galaxy_slots" )
def __read_integer( self, path ):
value = None
try:
value = int( open( path, "r" ).read() )
except Exception:
pass
return value
__all__ = [ CorePlugin ]
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/jobs/metrics/instrumenters/core.py
|
Python
|
gpl-3.0
| 3,185
|
[
"Galaxy"
] |
39040963f42ed682c0561fa826658e8a5b5a889317fa9cd3f4e21f3ee3294807
|
#!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
"""
xnat_scans_filter.py
======================
This script filters the csv file generated using xnat_extractor.py. The filter
is based on records from XNAT where there is one row per scan.
xnat_scans_filter.py -i path/to/xnat.csv
"""
import os
import sys
import redcap
import pandas as pd
# Fields to retrieve from REDCap
fields = ['study_id', 'redcap_event_name', 'exclude', 'visit_ignore',
'visit_date', 'mri_missing', 'mri_xnat_sid', 'mri_xnat_eids',
'visit_notes']
# Forms that the fields come from in REDCap.
forms = ['mr_session_report', 'visit_date', 'demographics']
def get_project_entry(args=None):
"""
Pulls the data from REDCap
"""
# Get API key.
summary_key_file = open(os.path.join(os.path.expanduser("~"),
'.server_config',
'redcap-dataentry-token'), 'r')
summary_api_key = summary_key_file.read().strip()
# Connect to API.
project_entry = redcap.Project('https://ncanda.sri.com/redcap/api/',
summary_api_key,
verify_ssl=False)
return project_entry
def data_entry_fields(fields, project, arm):
"""
Gets the dataframe containing a specific arm from REDCap
"""
# Get a dataframe of fields
data_entry_raw = project.export_records(fields=fields,
forms=forms,
format='df',
events=[arm])
return data_entry_raw
def append_site_id_row(xnat_df, scans_df):
scans_df['site_id'] = ''
ids = xnat_df[['site_id', 'subject_id']]
map = {r.subject_id: r.site_id for idx, r in ids.iterrows()}
for idx, row in scans_df.iterrows():
scans_df.site_id.loc[idx] = map.get(row.case)
return scans_df
def is_in_redcap(rc_df, scans_df):
"""
Checks if the scans missing in the pipeline are listed in REDCap and adds
a column indicating as such.
"""
scans_df['in_redcap'] = False
scans_df['visit_ignore___yes'] = ''
scans_df['visit_ignore_why'] = ''
scans_df['visit_ignore_why_other'] = ''
scans_df['visit_notes'] = ''
scans_df['mri_missing'] = ''
scans_df['mri_missing_why'] = ''
scans_df['mri_missing_why_other'] = ''
scans_df['mri_notes'] = ''
rc_cases = rc_df[rc_df.mri_xnat_sid.isin(scans_df.case)]
for idx, row in rc_cases.iterrows():
scan_cases = scans_df[scans_df.case == row.mri_xnat_sid]
scans_df.in_redcap.loc[scan_cases.index] = True
# Visit info
scans_df.visit_ignore___yes.loc[scan_cases.index] = row.visit_ignore___yes
scans_df.visit_ignore_why.loc[scan_cases.index] = row.visit_ignore_why
scans_df.visit_ignore_why_other.loc[scan_cases.index] = row.visit_ignore_why_other
scans_df.visit_notes.loc[scan_cases.index] = row.visit_notes
# Scan info
scans_df.mri_missing.loc[scan_cases.index] = row.mri_missing
scans_df.mri_missing_why.loc[scan_cases.index] = row.mri_missing_why
scans_df.mri_missing_why_other.loc[scan_cases.index] = row.mri_missing_why_other
scans_df.mri_notes.loc[scan_cases.index] = row.mri_notes
return scans_df
def is_in_xnat(xnat_df, scans_df):
"""
Checks XNAT for scans near the visit date recorded in REDCap
"""
def main(args=None):
# Connect to REDCap
project_entry = get_project_entry()
# Get the visit dataframe
project_df = data_entry_fields(fields, project_entry, args.event)
# Get a list of all EIDs for the given visit
xnat_eids = project_df['mri_xnat_eids'].tolist()
# Read the csv file from xnat_extractor
xnat_csv = pd.read_csv(args.infile)
# Filter the XNAT records by the EIDs in REDCap
# This provides a list of all the scans in XNAT that are also in REDCap
filter_csv = xnat_csv[xnat_csv['experiment_id'].isin(xnat_eids)]
# Iterate through scans missing in the pipeline and check whether they
# are present in the filtered REDCap list.
if args.missing_scans:
list_missing_scans = pd.read_csv(args.missing_scans)
missing_scans_df = append_site_id_row(xnat_csv, list_missing_scans)
# Add columns indicating if there is data for this visit in redcap
in_redcap = is_in_redcap(project_df, missing_scans_df)
filter_csv = in_redcap
# Write the results to disk
filter_csv.to_csv(args.outfile)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--infile',
required=True,
help="Input csv file from xnat_extractor.py")
parser.add_argument('-e', '--event',
choices=['baseline_visit_arm_1', '1y_visit_arm_1'],
default='1y_visit_arm_1')
parser.add_argument('-m', '--missing-scans',
help="Output of list_missing_scans script.")
parser.add_argument('-o', '--outfile',
default=os.path.join('/tmp', 'xnat_scans_filter.csv'))
parser.add_argument('-v', '--verbose',
action='store_true',
help='Enable verbose reporting.')
argv = parser.parse_args()
sys.exit(main(args=argv))
|
nicholsn/ncanda-data-integration
|
scripts/reporting/xnat_scans_filter.py
|
Python
|
bsd-3-clause
| 5,552
|
[
"VisIt"
] |
314ef162b5f6d2432aac2c19d168b632f55943d5da9bcd18b7a131db1fa1f7fd
|
#!/usr/bin/env python
import argparse
import copy
import logging
import re
import sys
from cpt_gffParser import gffParse, gffWrite, gffSeqFeature
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(name="blastxml2gff3")
__doc__ = """
BlastXML files, when transformed to GFF3, do not normally show gaps in the
blast hits. This tool aims to fill that "gap".
"""
def blastxml2gff3(blastxml, min_gap=3, trim=False, trim_end=False, include_seq=False):
from Bio.Blast import NCBIXML
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation
blast_records = NCBIXML.parse(blastxml)
for idx_record, record in enumerate(blast_records):
# http://www.sequenceontology.org/browser/release_2.4/term/SO:0000343
match_type = { # Currently we can only handle BLASTN, BLASTP
"BLASTN": "nucleotide_match",
"BLASTP": "protein_match",
}.get(record.application, "match")
recid = record.query
if " " in recid:
recid = recid[0 : recid.index(" ")]
rec = SeqRecord(Seq("ACTG"), id=recid)
for idx_hit, hit in enumerate(record.alignments):
for idx_hsp, hsp in enumerate(hit.hsps):
qualifiers = {
"ID": "b2g.%s.%s.%s" % (idx_record, idx_hit, idx_hsp),
"source": "blast",
"score": hsp.expect,
"accession": hit.accession,
"hit_id": hit.hit_id,
"length": hit.length,
"hit_titles": hit.title.split(" >"),
}
if include_seq:
qualifiers.update(
{
"blast_qseq": hsp.query,
"blast_sseq": hsp.sbjct,
"blast_mseq": hsp.match,
}
)
for prop in (
"score",
"bits",
"identities",
"positives",
"gaps",
"align_length",
"strand",
"frame",
"query_start",
"query_end",
"sbjct_start",
"sbjct_end",
):
qualifiers["blast_" + prop] = getattr(hsp, prop, None)
desc = hit.title.split(" >")[0]
qualifiers["description"] = desc[desc.index(" ") :]
                    # This required a fair bit of sketching out/math to figure
                    # out the first time.
                    #
                    # The match_start location must account for queries and
                    # subjects that start at locations other than 1.
parent_match_start = hsp.query_start - hsp.sbjct_start
# The end is the start + hit.length because the match itself
# may be longer than the parent feature, so we use the supplied
# subject/hit length to calculate the real ending of the target
# protein.
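                    #
                    # Worked example (hypothetical numbers): with
                    # hsp.query_start = 10 and hsp.sbjct_start = 3, the match
                    # is drawn as though the subject began at query
                    # coordinate 7 (10 - 3); with hit.length = 100 and no
                    # query gaps the parent match then ends at 110 (10 + 100).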
parent_match_end = hsp.query_start + hit.length + hsp.query.count("-")
# If we trim the left end, we need to trim without losing information.
used_parent_match_start = parent_match_start
if trim:
if parent_match_start < 1:
used_parent_match_start = 0
if trim or trim_end:
if parent_match_end > hsp.query_end:
parent_match_end = hsp.query_end + 1
# The ``match`` feature will hold one or more ``match_part``s
top_feature = gffSeqFeature(
FeatureLocation(used_parent_match_start, parent_match_end),
type=match_type,
strand=0,
qualifiers=qualifiers,
)
# Unlike the parent feature, ``match_part``s have sources.
part_qualifiers = {"source": "blast"}
top_feature.sub_features = []
for idx_part, (start, end, cigar) in enumerate(
generate_parts(
hsp.query, hsp.match, hsp.sbjct, ignore_under=min_gap
)
):
part_qualifiers["Gap"] = cigar
part_qualifiers["ID"] = qualifiers["ID"] + (".%s" % idx_part)
                        # We also have to account for the subject start's location.
match_part_start = parent_match_start + hsp.sbjct_start + start - 1
# We used to use hsp.align_length here, but that includes
# gaps in the parent sequence
#
# Furthermore align_length will give calculation errors in weird places
# So we just use (end-start) for simplicity
match_part_end = match_part_start + (end - start)
top_feature.sub_features.append(
gffSeqFeature(
FeatureLocation(match_part_start, match_part_end),
type="match_part",
strand=0,
qualifiers=copy.deepcopy(part_qualifiers),
)
)
rec.features.append(top_feature)
rec.annotations = {}
yield rec
def __remove_query_gaps(query, match, subject):
"""remove positions in all three based on gaps in query
In order to simplify math and calculations...we remove all of the gaps
based on gap locations in the query sequence::
Q:ACTG-ACTGACTG
S:ACTGAAC---CTG
will become::
Q:ACTGACTGACTG
S:ACTGAC---CTG
which greatly simplifies the process of identifying the correct location
for a match_part
"""
prev = 0
fq = ""
fm = ""
fs = ""
for position in re.finditer("-", query):
fq += query[prev : position.start()]
fm += match[prev : position.start()]
fs += subject[prev : position.start()]
prev = position.start() + 1
fq += query[prev:]
fm += match[prev:]
fs += subject[prev:]
return (fq, fm, fs)
def generate_parts(query, match, subject, ignore_under=3):
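    """Split an HSP alignment into match_part regions.

    A region is terminated once at least ``ignore_under`` consecutive
    non-matching columns are seen; each yielded tuple is a half-open
    ``(start, end)`` pair of offsets into the gap-stripped query plus a
    CIGAR-like string built by :func:`cigar_from_string`.
    """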
region_q = []
region_m = []
region_s = []
(query, match, subject) = __remove_query_gaps(query, match, subject)
region_start = -1
region_end = -1
mismatch_count = 0
for i, (q, m, s) in enumerate(zip(query, match, subject)):
# If we have a match
if m != " " or m == "+":
if region_start == -1:
region_start = i
# It's a new region, we need to reset or it's pre-seeded with
# spaces
region_q = []
region_m = []
region_s = []
region_end = i
mismatch_count = 0
else:
mismatch_count += 1
region_q.append(q)
region_m.append(m)
region_s.append(s)
if mismatch_count >= ignore_under and region_start != -1 and region_end != -1:
region_q = region_q[0:-ignore_under]
region_m = region_m[0:-ignore_under]
region_s = region_s[0:-ignore_under]
yield region_start, region_end + 1, cigar_from_string(
region_q, region_m, region_s, strict_m=True
)
region_q = []
region_m = []
region_s = []
region_start = -1
region_end = -1
mismatch_count = 0
yield region_start, region_end + 1, cigar_from_string(
region_q, region_m, region_s, strict_m=True
)
def _qms_to_matches(query, match, subject, strict_m=True):
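    """Translate aligned query/match/subject columns into CIGAR-style ops.

    Any non-space match character (an identity letter or "+") becomes "=",
    a gap in the query becomes "D", a gap in the subject becomes "I", and
    anything else becomes "X"; with ``strict_m`` both "=" and "X" are
    reported as "M".
    """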
matchline = []
for (q, m, s) in zip(query, match, subject):
ret = ""
if m != " " or m == "+":
ret = "="
elif m == " ":
if q == "-":
ret = "D"
elif s == "-":
ret = "I"
else:
ret = "X"
else:
log.warn("Bad data: \n\t%s\n\t%s\n\t%s\n" % (query, match, subject))
if strict_m:
if ret == "=" or ret == "X":
ret = "M"
matchline.append(ret)
return matchline
def _matchline_to_cigar(matchline):
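    """Run-length encode a list of CIGAR ops into a space-separated string.

    For example (hypothetical input), ['M', 'M', 'I', 'M'] yields 'M2 I1 M1'.
    """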
cigar_line = []
last_char = matchline[0]
count = 0
for char in matchline:
if char == last_char:
count += 1
else:
cigar_line.append("%s%s" % (last_char, count))
count = 1
last_char = char
cigar_line.append("%s%s" % (last_char, count))
return " ".join(cigar_line)
def cigar_from_string(query, match, subject, strict_m=True):
matchline = _qms_to_matches(query, match, subject, strict_m=strict_m)
if len(matchline) > 0:
return _matchline_to_cigar(matchline)
else:
return ""
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert Blast XML to gapped GFF3", epilog=""
)
parser.add_argument(
"blastxml", type=argparse.FileType("r"), help="Blast XML Output"
)
parser.add_argument(
"--min_gap",
type=int,
help="Maximum gap size before generating a new match_part",
default=3,
)
parser.add_argument(
"--trim",
action="store_true",
help="Trim blast hits to be only as long as the parent feature",
)
parser.add_argument(
"--trim_end", action="store_true", help="Cut blast results off at end of gene"
)
parser.add_argument("--include_seq", action="store_true", help="Include sequence")
args = parser.parse_args()
for rec in blastxml2gff3(**vars(args)):
if len(rec.features):
gffWrite([rec], sys.stdout)
|
TAMU-CPT/galaxy-tools
|
tools/jbrowse/blastxml_to_gapped_gff3.py
|
Python
|
gpl-3.0
| 10,031
|
[
"BLAST"
] |
2f9ca991786f67c97c957efafee5cc410ec0e1d91420836031099080dd6ffd55
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# StereoscoPy for GIMP
#
# Copyright (C) 2018 Seán Hewitt <contact@2sh.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from gimpfu import *
from PIL import Image
import stereoscopy
_anaglyph_methods = [
("Wimmer", "wimmer"),
("Gray", "gray"),
("Color", "color"),
("Half Color", "half-color"),
("Dubois", "dubois")
]
_anaglyph_color_schemes = [
("Red/Cyan", "red-cyan"),
("Red/Green", "red-green"),
("Red/Blue", "red-blue"),
("Green/Magenta", "green-magenta"),
("Amber/Blue", "amber-blue"),
("Magenta/Cyan", "magenta-cyan")
]
_anaglyph_luma_coding = [
("Rec. 709", stereoscopy.ANAGLYPH_LUMA_REC709),
("Rec. 601", stereoscopy.ANAGLYPH_LUMA_REC601),
("RGB", stereoscopy.ANAGLYPH_LUMA_RGB)
]
def _create_stereoscopic_image(func, name, layers):
pdb.gimp_progress_set_text("Preparing...")
width = layers[0].image.width
height = layers[0].image.height
p_images = []
for i, layer in enumerate(layers):
if i > 1:
break
pdb.gimp_image_undo_freeze(layer.image)
temp = layer.copy()
layer.image.add_layer(temp, 0)
temp.resize(width, height, *layer.offsets)
rgn = temp.get_pixel_rgn(0, 0, temp.width, temp.height, False)
if temp.has_alpha:
mode = "RGBA"
else:
mode = "RGB"
p_image = Image.frombytes(mode, (temp.width, temp.height),
rgn[0:temp.width, 0:temp.height])
p_images.append(p_image)
layer.image.remove_layer(temp)
pdb.gimp_image_undo_thaw(layer.image)
pdb.gimp_progress_set_text("Creating...")
p_image = func(p_images)
width, height = p_image.size
pdb.gimp_progress_set_text("Displaying...")
image = gimp.Image(width, height, RGB)
image.disable_undo()
layer = gimp.Layer(image, name,
width, height, RGB_IMAGE, 100, NORMAL_MODE)
image.add_layer(layer, 0)
if p_image.mode == "RGBA":
layer.add_alpha()
rgn = layer.get_pixel_rgn(0, 0, width, height)
rgn[0:width, 0:height] = p_image.tobytes()
image.enable_undo()
gimp.Display(image)
gimp.displays_flush()
def create_anaglyph(image, drawable, left, right,
method, color_scheme, luma_coding):
method = _anaglyph_methods[method][1]
color_scheme = _anaglyph_color_schemes[color_scheme][1]
luma_coding = _anaglyph_luma_coding[luma_coding][1]
def func(images):
return stereoscopy.create_anaglyph(images,
method, color_scheme, luma_coding)
_create_stereoscopic_image(func, "Anaglyph", (left, right))
def create_side_by_side_image(image, drawable, left, right,
method, squash, divider):
def func(images):
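        # Method indices follow the registered option order: 0/1 are
        # horizontal layouts (cross-eye, parallel) and 2/3 are vertical;
        # methods 0 and 3 need the (left, right) pair swapped before joining.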
is_horizontal = method == 0 or method == 1
if squash:
for i in range(len(images)):
images[i] = stereoscopy.squash(images[i], is_horizontal)
if method == 0 or method == 3:
images.reverse()
return stereoscopy.create_side_by_side_image(images,
is_horizontal, int(divider))
_create_stereoscopic_image(func, "Side-by-side", (left, right))
def create_patterned_image(image, drawable, left, right,
method, odd, width):
method = [stereoscopy.PATTERN_INTERLACED_H,
stereoscopy.PATTERN_INTERLACED_V,
stereoscopy.PATTERN_CHECKERBOARD][method]
def func(images):
return stereoscopy.create_patterned_image(images,
method, width, not odd)
_create_stereoscopic_image(func, "Patterned", (left, right))
def create_wiggle_animation(image, drawable,
duration):
image = pdb.gimp_image_duplicate(image)
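    # Re-add copies of the intermediate layers so that playback runs forwards
    # and then back again (the "wiggle").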
for layer in image.layers[1:-1]:
image.add_layer(layer.copy(), 0)
image_duration = int(round(duration/len(image.layers)))
for layer in image.layers:
layer.name += " ({}ms)".format(image_duration)
gimp.Display(image)
gimp.displays_flush()
register(
"Anaglyph",
"Create an anaglypth",
"Create an anaglypth",
"Seán Hewitt",
"Seán Hewitt",
"2018",
"<Image>/Filters/StereoscoPy/Anaglyph...",
"*",
[
(PF_LAYER, "left", "Left image", None),
(PF_LAYER, "right", "Right image", None),
(PF_OPTION, "method", "Method", 0,
[t for t,_ in _anaglyph_methods]),
(PF_OPTION, "color_scheme", "Color Scheme", 0,
[t for t,_ in _anaglyph_color_schemes]),
(PF_OPTION, "luma_coding", "Luma Coding", 0,
[t for t,_ in _anaglyph_luma_coding])
],
[],
create_anaglyph)
register(
"Side-by-side",
"Create a side-by-side image",
"Create a side-by-side image",
"Seán Hewitt",
"Seán Hewitt",
"2018",
"<Image>/Filters/StereoscoPy/Side-by-side...",
"*",
[
(PF_LAYER, "left", "Left image", None),
(PF_LAYER, "right", "Right image", None),
(PF_OPTION, "method", "Method", 0, [
"Right/Left (Cross-eye)",
"Left/Right (Parallel, VR)",
"Over/Under",
"Under/Over"]),
(PF_TOGGLE, "squash", "Squash", False),
(PF_SPINNER, "divider", "Divider", 0, (0, 9999999, 1))
],
[],
create_side_by_side_image)
register(
"Patterned",
"Create a patterned image",
"Create a patterned image",
"Seán Hewitt",
"Seán Hewitt",
"2018",
"<Image>/Filters/StereoscoPy/Patterned...",
"*",
[
(PF_LAYER, "left", "Left image", None),
(PF_LAYER, "right", "Right image", None),
(PF_OPTION, "method", "Method", 0, [
"Interlaced horizontally",
"Interlaced vertically",
"Checkerboard"]),
(PF_OPTION, "odd", "Order", 0, [
"Even",
"Odd"]),
(PF_SPINNER, "width", "Width", 1, (1, 9999999, 1))
],
[],
create_patterned_image)
register(
"Wiggle",
"Create a wiggle animation",
"Create a wiggle animation",
"Seán Hewitt",
"Seán Hewitt",
"2018",
"<Image>/Filters/StereoscoPy/Wiggle...",
"*",
[
(PF_SPINNER, "duration", "Duration (ms)", 300, (1, 9999999, 1))
],
[],
create_wiggle_animation)
main()
|
SeanHewitt/stereosco.py
|
gimp_plugin/stereoscopy-gimp.py
|
Python
|
gpl-3.0
| 6,114
|
[
"Amber"
] |
a027e111dfd6febbd7f26be3cc46f420977d8d79f790ffc4a145fa0db36d74fb
|
###
### This script can be run with pvpython rather than pvbatch, as it does not
### need mpi.
###
### Purpose:
###
### See the purpose description in the script "hacc_magnitude_halos.py", as
### this script does the exact same thing, but for the points density image data
### files.
###
### Input Files:
###
### 1) DataExploration/Data/Cosmology/data/run-1/halos-%d.vtu
### 2) DataExploration/Data/Cosmology/data/run-1/points-%d.vti
###
### Output Files:
###
### A cinema dataset into: DataExploration/Output/Cosmology/points_halos_linear
###
import sys, os
from paraview.simple import *
from cinema_utilities import *
from paraview import data_exploration as wx
# Need this import in order to directly rescale transfer functions to data range
from vtkPVServerManagerRenderingPython import *
# -----------------------------------------------------------------------------
# Path to input/output data/directories
# -----------------------------------------------------------------------------
#path_root = '/Volumes/OLeary'
path_root = '/media/scott/CINEMA FAT'
data_base_path = os.path.join(path_root, 'DataExploration/Data/Cosmology/data/run-1')
halos_pattern = os.path.join(data_base_path, 'halos-%d.vtu')
points_pattern = os.path.join(data_base_path, 'points-%d.vti')
file_times = range(0, 451, 50)
halos_filenames = [ (halos_pattern % time) for time in file_times]
points_filenames = [ (points_pattern % time) for time in file_times]
resolution = 500
output_working_dir = os.path.join(path_root, 'DataExploration/Output/Cosmology/points_halos_linear')
# -----------------------------------------------------------------------------
# Helper methods
# -----------------------------------------------------------------------------
def buildLookupTables(luts):
for key in luts:
dataRange = luts[key]["range"]
if key == 'SplatterValues':
luts[key]["lut"] = GetLookupTableForArray( key,
1,
RGBPoints = [0.0, 0.368627, 0.309804, 0.635294, 0.09999996033819177, 0.196078, 0.533333, 0.741176, 0.19999992067638353, 0.4, 0.760784, 0.647059, 0.29999988101457525, 0.670588, 0.866667, 0.643137, 0.39999984135276706, 0.901961, 0.960784, 0.596078, 0.4999998016909588, 1.0, 1.0, 0.74902, 0.5999997620291505, 0.996078, 0.878431, 0.545098, 0.6999997223673423, 0.992157, 0.682353, 0.380392, 0.7999996827055341, 0.956863, 0.427451, 0.262745, 0.8999996430437259, 0.835294, 0.243137, 0.309804, 0.9999916033850905, 0.619608, 0.00392157, 0.258824],
NanColor = [0.500008, 0.0, 0.0],
ColorSpace='RGB',
ScalarRangeInitialized=1.0)
#luts[key]["lut"] = GetLookupTableForArray( key, 1, RGBPoints=[0.0, 0.0, 0.0, 1.0, 500.0, 1.0, 0.0, 0.0], VectorMode='Magnitude', NanColor=[0.0, 0.0, 0.0], ColorSpace='HSV', ScalarRangeInitialized=1.0, LockScalarRange=1)
else:
luts[key]["lut"] = GetLookupTableForArray( key, 1, RGBPoints=[dataRange[0], 0.231373, 0.298039, 0.752941, (dataRange[0]+dataRange[1])/2, 0.865003, 0.865003, 0.865003, dataRange[1], 0.705882, 0.0156863, 0.14902], VectorMode='Magnitude', NanColor=[0.0, 0.0, 0.0], ColorSpace='Diverging', ScalarRangeInitialized=1.0, LockScalarRange=1)
# -----------------------------------------------------------------------------
# Rendering configuration
# -----------------------------------------------------------------------------
view_size = [resolution, resolution]
#angle_steps = [15, 15]
angle_steps = [90, 90]
distance = 470
rotation_axis = [0.0, 1.0, 0.0]
center_of_rotation = [63.999996185675, 63.99996185355, 64.000034331975]
view = GetRenderView()
view.ViewSize = view_size
view.Background = [1.0, 1.0, 1.0]
view.OrientationAxesVisibility = 0
view.CenterAxesVisibility = 0
halos_array = ['on', 'off']
volume_array = ['on', 'off']
# -----------------------------------------------------------------------------
# Output configuration
# -----------------------------------------------------------------------------
title = "499-1 - Probe the Cosmic Structure of the Dark Universe"
description = """
In the standard model of cosmology, dark energy and dark matter
together account for 95 percent of the mass energy of the universe;
however, their ultimate origin remains a mystery. The Argonne
Leadership Computing Facility (ALCF) will allocate significant
supercomputing resources towards unraveling one of the key
puzzles-the nature of the dark energy causing the universe to
accelerate its current expansion rate.
"""
analysis = wx.AnalysisManager(output_working_dir, title, description)
id = 'magnitude-volume-halos-time'
title = 'Magnitude Volume + Halos/Time'
description = '''
Show the magnitude density using volume visualization with glyphed halos.
'''
analysis.register_analysis(id, title, description, '{time}/{halos}/{volume}/{volumeIdx}/{theta}_{phi}.jpg', "parametric-image-stack")
fng = analysis.get_file_name_generator(id)
exporter = wx.ThreeSixtyImageStackExporter(fng, view, center_of_rotation, distance, rotation_axis, angle_steps)
exporter.set_analysis(analysis)
# -----------------------------------------------------------------------------
# Pipeline configuration
# -----------------------------------------------------------------------------
# Volume
volume = XMLImageDataReader( PointArrayStatus=['SplatterValues'], FileName=points_filenames )
volume_rep = Show(volume)
volume_rep.Representation = 'Volume'
outline = Outline(Input=volume)
outlineRep = Show(outline)
outlineRep.ColorArrayName = [None, '']
outlineRep.DiffuseColor = [0.0, 0.0, 0.0]
outlineRep.LineWidth = 1.0
halos_reader = XMLUnstructuredGridReader( FileName=halos_filenames )
glyph = Glyph(Input = halos_reader, GlyphType="Sphere", GlyphTransform="Transform2" )
glyph.Scalars = ['POINTS', 'magnitude']
glyph.ScaleFactor = 0.004
glyph.ScaleMode = 'scalar'
glyph.GlyphMode = 'All Points'
glyph.GlyphType.ThetaResolution = 16
glyph.GlyphType.PhiResolution = 16
glyph_rep = Show(glyph)
glyph_rep.Representation = 'Surface'
luts = {
"SplatterValues" : {
"range": [0.0, 500.0],
"colorBy": ('POINT_DATA', 'SplatterValues'),
"pwfunc": []
},
"magnitude" : {
"range": [25.0, 913.0],
"colorBy": ('POINT_DATA', 'magnitude'),
"pwfunc": []
},
}
buildLookupTables(luts)
volume_rep.LookupTable = luts['SplatterValues']["lut"]
volume_rep.ColorArrayName = luts['SplatterValues']["colorBy"]
glyph_rep.LookupTable = luts['magnitude']["lut"]
glyph_rep.ColorArrayName = luts['magnitude']["colorBy"]
# -----------------------------------------------------------------------------
# Batch processing
# -----------------------------------------------------------------------------
analysis.begin()
Render()
for time in range(0, len(file_times), 1):
GetAnimationScene().TimeKeeper.Time = float(time)
UpdatePipeline(time)
#vtkSMPVRepresentationProxy.RescaleTransferFunctionToDataRange(volume_rep.SMProxy)
#vtkSMPVRepresentationProxy.RescaleTransferFunctionToDataRange(glyph_rep.SMProxy)
#dataRange = getTotalPointDataRange(volume, "SplatterValues")
dataRange = [0.0, 1.0]
print "Moving to timestep ",time,", new data range: ",dataRange
for halos in halos_array:
if halos == 'on':
glyph_rep.Visibility = 1
else:
glyph_rep.Visibility = 0
fng.update_active_arguments(halos=halos)
fng.update_label_arguments(halos="Halos")
        for volume_state in volume_array:
            fng.update_active_arguments(volume=volume_state)
            fng.update_label_arguments(volume="Volume")
            if volume_state == 'on':
volume_rep.Visibility = 1
for volumeIdx in range(10):
curRange = dataRange[1] - dataRange[0]
curStep = dataRange[0] + (float(volumeIdx) * (curRange / 10.0))
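                    # Example: with dataRange = [0.0, 1.0] and volumeIdx = 3,
                    # curStep = 0.3, so the opacity transfer function below
                    # stays at 0 up to 0.3 and then ramps linearly to 1.0 at
                    # the top of the range.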
newPwf = CreatePiecewiseFunction( Points=[dataRange[0], 0.0, 0.5, 0.0,
curStep, 0.0, 0.5, 0.0,
dataRange[1], 1.0, 0.5, 0.0] )
volume_rep.ScalarOpacityFunction = newPwf
fng.update_active_arguments(volumeIdx=volumeIdx)
fng.update_label_arguments(volumeIdx="Idx")
exporter.UpdatePipeline(time)
else:
volume_rep.Visibility = 0
for volumeIdx in range(10):
fng.update_active_arguments(volumeIdx=volumeIdx)
fng.update_label_arguments(volumeIdx="Idx")
exporter.UpdatePipeline(time)
analysis.end()
|
Kitware/cinema
|
scripts/data_generation/cosmology/hacc_volume_halos.py
|
Python
|
bsd-3-clause
| 9,226
|
[
"ParaView"
] |
9d0e6432010af448d6619840ea4189409f241a183caf657408a053c873aa4236
|
# Copyright (c) 2017-2019 The University of Manchester
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Algorithm/data structure for allocating boards in SpiNNaker machines at
the granularity of individual SpiNNaker boards and with awareness of the
functionality of a machine.
"""
from enum import Enum
from collections import deque
from math import ceil
from datetime import datetime
from .links import Links
from .pack_tree import PackTree
from .area_to_rect import area_to_rect
from .coordinates import board_down_link, WrapAround
from threading import RLock
class Allocator(object):
""" Performs high-level allocation of SpiNNaker boards from a larger,
possibly faulty, toroidal machine.
Internally this object uses a
:py:class:`spalloc_server.pack_tree.PackTree` to allocate
    rectangular blocks of triads in a machine. A :py:class:`._CandidateFilter`
    is used to restrict the allocations made by
:py:class:`~spalloc_server.pack_tree.PackTree` to those which match
the needs of the user (e.g. specific connectivity requirements).
The allocator can allocate either rectangular blocks of triads or
individual boards. When allocating individual boards, the allocator
allocates a 1x1 triad block from the
:py:class:`~spalloc_server.pack_tree.PackTree` and returns one of
the boards from that block. Subsequent single-board allocations will use up
spare boards left in triads allocated for single-board allocations before
allocating new 1x1 triads.
"""
# pylint: disable=too-many-arguments, unused-argument
def __init__(self, width, height, dead_boards=None, dead_links=None,
next_id=1, seconds_before_free=30):
"""
Parameters
----------
width, height : int
Dimensions of the machine in triads.
dead_boards : set([(x, y, z), ...])
The set of boards which are dead and which must not be allocated.
dead_links :set([(x, y, z,\
:py:class:`spalloc_server.links.Links`), ...])
The set of links leaving boards which are known not to be working.
Connections to boards in the set dead_boards are assumed to be
dead and need not be included in this list. Note that both link
directions must be flagged as dead (if the link is bidirectionally
down).
next_id : int
The ID of the next allocation to be made.
seconds_before_free : int
The number of seconds between a board being freed and it becoming
available again
"""
self.width = width
self.height = height
self.dead_boards = dead_boards if dead_boards is not None else set()
self.dead_links = dead_links if dead_links is not None else set()
self.seconds_before_free = seconds_before_free
# Unique IDs are assigned to every new allocation. The next ID to be
# allocated.
self.next_id = next_id
# A 2D tree at the granularity of triads used for board allocation.
self.pack_tree = PackTree(0, 0, width, height)
# Provides a lookup from (live) allocation IDs to the type of
# allocation.
self.allocation_types = {}
# Lookup from allocation IDs to the bottom-left board in the allocation
self.allocation_board = {}
# Storage for delayed freeing of boards
self.to_free = deque()
self.to_free_lock = RLock()
# Since we cannot allocate individual boards in the pack_tree, whenever
# an individual board is requested a whole triad may be allocated and
# one of the boards from the triad returned. This dictionary records
# what triads have been allocated like this and which boards are
# unused. These may then be used for future single-board allocations
# rather than allocating another whole triad.
# A dictionary containing any triads used for allocating individual
# boards which still have some free and working boards.
# {(x, y): [z, ...], ...}
self.single_board_triads = {}
# When all the boards in a triad in single_board_triads are used up the
# triad is removed from that dictionary and placed into the set below.
self.full_single_board_triads = set()
def __getstate__(self):
""" Called when pickling this object.
This object may only be pickled once
:py:meth:`~spalloc_server.controller.Controller.stop` and
:py:meth:`~spalloc_server.controller.Controller.join` have returned.
"""
state = self.__dict__.copy()
# Do not keep references to unpickleable dynamic state
state["to_free_lock"] = None
return state
def __setstate__(self, state):
""" Called when unpickling this object.
Note that though the object must be pickled when stopped, the unpickled
object will start running immediately.
"""
self.__dict__.update(state)
# Restore lock
self.to_free_lock = RLock()
def _alloc_triads_possible(self, width, height, max_dead_boards=None,
max_dead_links=None, require_torus=False,
min_ratio=0.0): # @UnusedVariable
""" Is it guaranteed that the specified allocation *could* succeed if
enough of the machine is free?
This function may be conservative. If the specified request would fail
when no resources have been allocated, we return False, even if some
circumstances the allocation may succeed. For example, if one board in
each of the four corners of the machine is dead, no allocation with
max_dead_boards==0 can succeed when the machine is empty but may
succeed if some other allocation has taken place.
Parameters
----------
width, height : int
The size of the block to allocate, in triads.
max_dead_boards : int or None
The maximum number of broken or unreachable boards to allow in the
allocated region. If None, any number of dead boards is permitted,
as long as the board on the bottom-left corner is alive (Default:
None).
max_dead_links : int or None
            The maximum number of broken links allowed in the allocated region.
When require_torus is True this includes wrap-around links,
otherwise peripheral links are not counted. If None, any number of
broken links is allowed. (Default: None).
require_torus : bool
If True, only allocate blocks with torus connectivity. In general
this will only succeed for requests to allocate an entire machine
(when the machine is otherwise not in use!). (Default: False)
min_ratio : float
Ignored.
Returns
-------
bool
See Also
--------
alloc_possible : The (public) wrapper which also supports checking
triad allocations.
"""
# If too big, we can't fit
if width > self.width or height > self.height:
return False
# Can't be a non-machine
if width <= 0 or height <= 0:
return False
# If torus connectivity is required, we must be *exactly* the right
# size otherwise we can't help...
if require_torus and (width != self.width or height != self.height):
return False
# Test to see whether the allocation could succeed in the idle machine
cf = _CandidateFilter(self.width, self.height,
self.dead_boards, self.dead_links,
max_dead_boards, max_dead_links, require_torus)
for x, y in set([(0, 0),
(self.width - width, 0),
(0, self.height - height),
(self.width - width, self.height - height)]):
if cf(x, y, width, height):
return True
# No possible allocation could be made...
return False
def _alloc_triads(self, width, height, max_dead_boards=None,
max_dead_links=None, require_torus=False,
min_ratio=0.0): # @UnusedVariable
""" Allocate a rectangular block of triads of interconnected boards.
Parameters
----------
width, height : int
The size of the block to allocate, in triads.
max_dead_boards : int or None
The maximum number of broken or unreachable boards to allow in the
allocated region. If None, any number of dead boards is permitted,
as long as the board on the bottom-left corner is alive (Default:
None).
max_dead_links : int or None
            The maximum number of broken links allowed in the allocated region.
When require_torus is True this includes wrap-around links,
otherwise peripheral links are not counted. If None, any number of
broken links is allowed. (Default: None).
require_torus : bool
If True, only allocate blocks with torus connectivity. In general
this will only succeed for requests to allocate an entire machine
(when the machine is otherwise not in use!). (Default: False)
min_ratio : float
Ignored.
Returns
-------
(allocation_id, boards, periphery, torus) or None
If the allocation was successful a four-tuple is returned. If the
allocation was not successful None is returned.
The ``allocation_id`` is an integer which should be used to free
the allocation with the :py:meth:`.free` method. ``boards`` is a
set of (x, y, z) tuples giving the locations of the (working)
boards in the allocation. ``periphery`` is a set of (x, y, z, link)
            tuples giving the links which leave the allocated region. ``torus``
            is a :py:class:`.WrapAround` value indicating torus connectivity
            when at least one torus may exist.
See Also
--------
alloc : The (public) wrapper which also supports allocating individual
boards.
"""
# Special case: If a torus is required this is only deliverable when
# the requirements match the size of the machine exactly.
if require_torus and (width != self.width or height != self.height):
return None
# Sanity check: can't be a non-machine
if width <= 0 or height <= 0:
return None
cf = _CandidateFilter(self.width, self.height,
self.dead_boards, self.dead_links,
max_dead_boards, max_dead_links, require_torus)
xy = self.pack_tree.alloc(width, height,
candidate_filter=cf)
# If a block could not be allocated, fail
if xy is None:
return None
# If a block was allocated, store the allocation
allocation_id = self.next_id
self.next_id += 1
self.allocation_types[allocation_id] = _AllocationType.triads
x, y = xy
self.allocation_board[allocation_id] = (x, y, 0)
return (allocation_id, cf.boards, cf.periphery, cf.torus)
def _alloc_boards_possible(self, boards, min_ratio=0.0,
max_dead_boards=None, max_dead_links=None,
require_torus=False):
""" Is it guaranteed that the specified allocation *could* succeed if
enough of the machine is free?
This function may be conservative. If the specified request would fail
when no resources have been allocated, we return False, even if some
circumstances the allocation may succeed. For example, if one board in
each of the four corners of the machine is dead, no allocation with
max_dead_boards==0 can succeed when the machine is empty but may
succeed if some other allocation has taken place.
Parameters
----------
boards : int
The *minimum* number of boards, must be at least 1. Note that if
only 1 board is required, :py:class:`._alloc_board` would be a more
appropriate function since this function may return as many as
three boards when only a single one is requested.
min_ratio : float
The aspect ratio which the allocated region must be 'at least as
square as'. Set to 0.0 for any allowable shape.
max_dead_boards : int or None
The maximum number of broken or unreachable boards to allow in the
allocated region. If None, any number of dead boards is permitted,
as long as the board on the bottom-left corner is alive (Default:
None).
max_dead_links : int or None
            The maximum number of broken links allowed in the allocated region.
When require_torus is True this includes wrap-around links,
otherwise peripheral links are not counted. If None, any number of
broken links is allowed. (Default: None).
require_torus : bool
If True, only allocate blocks with torus connectivity. In general
this will only succeed for requests to allocate an entire machine
(when the machine is otherwise not in use!). (Default: False)
Returns
-------
bool
See Also
--------
alloc_possible : The (public) wrapper which also supports checking
triad allocations.
"""
# Convert number of boards to number of triads (rounding up...)
triads = int(ceil(boards / 3.0))
# Sanity check: can't be a non-machine
if triads <= 0:
return False
# Special case: If a torus is required this is only deliverable when
# the requirements match the size of the machine exactly.
if require_torus and (triads != self.width * self.height):
return False
# If no region of the right shape can be made, just fail
wh = area_to_rect(triads, self.width, self.height, min_ratio)
if wh is None:
return False
width, height = wh
# Test to see whether the allocation could succeed in the idle machine
cf = _CandidateFilter(self.width, self.height,
self.dead_boards, self.dead_links,
max_dead_boards, max_dead_links, require_torus,
boards)
for x, y in set([(0, 0),
(self.width - width, 0),
(0, self.height - height),
(self.width - width, self.height - height)]):
if cf(x, y, width, height):
return True
# No possible allocation could be made...
return False
def _alloc_boards(self, boards, min_ratio=0.0, max_dead_boards=None,
max_dead_links=None, require_torus=False):
""" Allocate a rectangular block of triads with at least the specified
number of boards which is 'at least as square' as the specified aspect
ratio.
Parameters
----------
boards : int
The *minimum* number of boards, must be at least 1. Note that if
only 1 board is required, :py:class:`._alloc_board` would be a more
appropriate function since this function may return as many as
three boards when only a single one is requested.
min_ratio : float
The aspect ratio which the allocated region must be 'at least as
square as'. Set to 0.0 for any allowable shape.
max_dead_boards : int or None
The maximum number of broken or unreachable boards to allow in the
allocated region. If None, any number of dead boards is permitted,
as long as the board on the bottom-left corner is alive (Default:
None).
max_dead_links : int or None
            The maximum number of broken links allowed in the allocated region.
When require_torus is True this includes wrap-around links,
otherwise peripheral links are not counted. If None, any number of
broken links is allowed. (Default: None).
require_torus : bool
If True, only allocate blocks with torus connectivity. In general
this will only succeed for requests to allocate an entire machine
(when the machine is otherwise not in use!). (Default: False)
Returns
-------
(allocation_id, boards, periphery, torus) or None
If the allocation was successful a four-tuple is returned. If the
allocation was not successful None is returned.
The ``allocation_id`` is an integer which should be used to free
the allocation with the :py:meth:`.free` method. ``boards`` is a
set of (x, y, z) tuples giving the locations of the (working)
boards in the allocation. ``periphery`` is a set of (x, y, z, link)
tuples giving the links which leave the allocated region. ``torus``
is a :py:class:`.WrapAround` value indicating torus connectivity
when at least one torus may exist.
See Also
--------
alloc : The (public) wrapper which also supports allocating individual
boards.
"""
# Convert number of boards to number of triads (rounding up...)
triads = int(ceil(boards / 3.0))
# Sanity check: can't be a non-machine
if triads <= 0:
return None
# Special case: If a torus is required this is only deliverable when
# the requirements match the size of the machine exactly.
if require_torus and (triads != self.width * self.height):
return None
cf = _CandidateFilter(self.width, self.height,
self.dead_boards, self.dead_links,
max_dead_boards, max_dead_links, require_torus,
boards)
xywh = self.pack_tree.alloc_area(triads, min_ratio,
candidate_filter=cf)
# If a block could not be allocated, fail
if xywh is None:
return None
# If a block was allocated, store the allocation
allocation_id = self.next_id
self.next_id += 1
self.allocation_types[allocation_id] = _AllocationType.triads
x, y, _, _ = xywh
self.allocation_board[allocation_id] = (x, y, 0)
return (allocation_id, cf.boards, cf.periphery, cf.torus)
def _alloc_board_possible(
self, x=None, y=None, z=None,
max_dead_boards=None, max_dead_links=None, # @UnusedVariable
require_torus=False, min_ratio=0.0): # @UnusedVariable
""" Is it guaranteed that the specified allocation *could* succeed if
enough of the machine is free?
Parameters
----------
x, y, z : ints or None
If specified, requests a specific board.
max_dead_boards : int or None
Ignored.
max_dead_links : int or None
Ignored.
require_torus : bool
Must be False.
min_ratio : float
Ignored.
Returns
-------
bool
See Also
--------
alloc_possible : The (public) wrapper which also supports checking
board allocations.
"""
assert require_torus is False
assert (((x is None) == (y is None) == (z is None)) or
((x == 1) == (y is None) == (z is None)))
board_requested = y is not None
# If the requested board is outside the dimensions of the machine, the
# request definitely can't be met.
if board_requested and not(0 <= x < self.width and
0 <= y < self.height and
0 <= z < 3):
return False
# If the requested board is dead, this should fail
if board_requested and (x, y, z) in self.dead_boards:
return False
# If there are no working boards, we must also fail
if len(self.dead_boards) >= (self.width * self.height * 3):
return False
# Should be possible!
return True
def _alloc_board(
self, x=None, y=None, z=None,
max_dead_boards=None, max_dead_links=None, # @UnusedVariable
require_torus=False, min_ratio=0.0): # @UnusedVariable
""" Allocate a single board, optionally specifying a specific board to
allocate.
Parameters
----------
x, y, z : ints or None
If None, an arbitrary free board will be returned if possible. If
all are defined, attempts to allocate the specific board requested
if available and working.
max_dead_boards : int or None
Ignored.
max_dead_links : int or None
Ignored.
require_torus : bool
Must be False.
min_ratio : float
Ignored.
Returns
-------
(allocation_id, boards, periphery, torus) or None
If the allocation was successful a four-tuple is returned. If the
allocation was not successful None is returned.
The ``allocation_id`` is an integer which should be used to free
the allocation with the :py:meth:`.free` method. ``boards`` is a
            set of (x, y, z) tuples giving the location of the allocated board.
``periphery`` is a set of (x, y, z, link) tuples giving the links
which leave the board. ``torus`` is always
:py:attr:`.WrapAround.none` for single boards.
See Also
--------
alloc : The (public) wrapper which also supports allocating triads.
"""
assert require_torus is False
assert (((x is None) == (y is None) == (z is None)) or
((x == 1) == (y is None) == (z is None)))
board_requested = y is not None
# Special case: the desired board is dead: just give up
if board_requested and (x, y, z) in self.dead_boards:
return None
# Try and return a board from an already allocated set of single-board
# triads if possible
if (self.single_board_triads and
(not board_requested or
z in self.single_board_triads.get((x, y), set()))):
if not board_requested:
# No specific board requested, return any available
x, y = next(iter(self.single_board_triads))
available = self.single_board_triads[(x, y)]
z = available.pop()
else:
# A specific board was requested (and is available), get that
# one
available = self.single_board_triads[(x, y)]
available.remove(z)
# If we used up the last board, move the triad to the full list
if not available:
del self.single_board_triads[(x, y)]
self.full_single_board_triads.add((x, y))
# Allocate the board
allocation_id = self.next_id
self.next_id += 1
self.allocation_types[allocation_id] = _AllocationType.board
self.allocation_board[allocation_id] = (x, y, z)
# pylint: disable=not-an-iterable
return (allocation_id,
set([(x, y, z)]),
set((x, y, z, link) for link in Links),
WrapAround.none)
# The desired board was not available in an already-allocated triad.
# Attempt to request that triad.
def has_at_least_one_working_board(
x, y, width, height): # @UnusedVariable
num_dead = 0
for z in range(3):
if (x, y, z) in self.dead_boards:
num_dead += 1
return num_dead < 3
if board_requested:
xy = self.pack_tree.request(x, y)
else:
xy = self.pack_tree.alloc(
1, 1, candidate_filter=has_at_least_one_working_board)
# If a triad could not be allocated, fail
if xy is None:
return None
# If a triad was allocated, add it to the set of allocated triads for
# single-boards
self.single_board_triads[xy] = \
set(z for z in range(3)
if (xy[0], xy[1], z) not in self.dead_boards)
# Recursing will return a board from the triad
return self._alloc_board(x, y, z)
def _alloc_type(
self, x_or_num_or_width=None, y_or_height=None, z=None,
max_dead_boards=None, max_dead_links=None, # @UnusedVariable
require_torus=False, min_ratio=0.0): # @UnusedVariable
""" Returns the type of allocation the user is attempting to make (and
        fails if it is invalid).
Usage::
a.alloc() # Allocate any single board
a.alloc(1) # Allocate any single board
a.alloc(3, 2, 1) # Allocate the specific board (3, 2, 1)
a.alloc(4) # Allocate at least 4 boards
a.alloc(2, 3, **kwargs) # Allocate a 2x3 triad machine
Parameters
----------
<nothing> OR num OR x, y, z OR width, height
If nothing, allocate a single board.
If num, allocate at least that number of boards. Special case: if
1, allocate exactly 1 board.
If x, y and z, allocate a specific board.
If width and height, allocate a block of this size, in triads.
max_dead_boards : int or None
The maximum number of broken or unreachable boards to allow in the
allocated region. If None, any number of dead boards is permitted,
as long as the board on the bottom-left corner is alive (Default:
None).
max_dead_links : int or None
            The maximum number of broken links allowed in the allocated region.
When require_torus is True this includes wrap-around links,
otherwise peripheral links are not counted. If None, any number of
broken links is allowed. (Default: None).
require_torus : bool
If True, only allocate blocks with torus connectivity. In general
this will only succeed for requests to allocate an entire machine
(when the machine is otherwise not in use!). Must be False when
allocating boards. (Default: False)
Returns
-------
:py:class:`._AllocationType`
"""
# Work-around for Python 2's non-support for keyword-only arguments...
args = []
if x_or_num_or_width is not None:
args.append(x_or_num_or_width)
if y_or_height is not None:
args.append(y_or_height)
if z is not None:
args.append(z)
# Select allocation type
if not args:
alloc_type = _AllocationType.board
elif len(args) == 1:
if args[0] == 1:
alloc_type = _AllocationType.board
else:
alloc_type = _AllocationType.boards
elif len(args) == 2:
alloc_type = _AllocationType.triads
elif len(args) == 3: # pragma: no branch
alloc_type = _AllocationType.board
# Validate arguments
if alloc_type == _AllocationType.board:
if require_torus:
raise ValueError(
"require_torus must be False when allocating boards.")
return alloc_type
def alloc_possible(self, *args, **kwargs):
""" Is the specified allocation actually possible on this machine?
Usage::
a.alloc_possible() # Can allocate a single board?
a.alloc_possible(1) # Can allocate a single board?
a.alloc_possible(4) # Can allocate at least 4 boards?
a.alloc_possible(3, 2, 1) # Can allocate a board (3, 2, 1)?
a.alloc_possible(2, 3, **kwargs) # Can allocate 2x3 triads?
Parameters
----------
<nothing> OR num OR x, y, z OR width, height
If nothing, allocate a single board.
If num, allocate at least that number of boards. Special case: if
1, allocate exactly 1 board.
If x, y and z, allocate a specific board.
If width and height, allocate a block of this size, in triads.
min_ratio : float
The aspect ratio which the allocated region must be 'at least as
square as'. Set to 0.0 for any allowable shape. Ignored when
allocating single boards or specific rectangles of triads.
max_dead_boards : int or None
The maximum number of broken or unreachable boards to allow in the
allocated region. If None, any number of dead boards is permitted,
as long as the board on the bottom-left corner is alive (Default:
None).
max_dead_links : int or None
            The maximum number of broken links allowed in the allocated region.
When require_torus is True this includes wrap-around links,
otherwise peripheral links are not counted. If None, any number of
broken links is allowed. (Default: None).
require_torus : bool
If True, only allocate blocks with torus connectivity. In general
this will only succeed for requests to allocate an entire machine
(when the machine is otherwise not in use!). Must be False when
allocating boards. (Default: False)
Returns
-------
bool
"""
alloc_type = self._alloc_type(*args, **kwargs)
if alloc_type is _AllocationType.board:
return self._alloc_board_possible(*args, **kwargs)
elif alloc_type is _AllocationType.boards:
return self._alloc_boards_possible(*args, **kwargs)
return self._alloc_triads_possible(*args, **kwargs)
def alloc(self, *args, **kwargs):
""" Attempt to allocate a board or rectangular region of triads of
boards.
Usage::
a.alloc() # Allocate a single board
a.alloc(1) # Allocate a single board
a.alloc(4) # Allocate at least 4 boards
a.alloc(3, 2, 1) # Allocate a specific board (3, 2, 1)
a.alloc(2, 3, **kwargs) # Allocate a 2x3 triad machine
Parameters
----------
<nothing> OR num OR x, y, z OR width, height
If all None, allocate a single board.
If num, allocate at least that number of boards. Special case: if
1, allocate exactly 1 board.
If x, y and z, allocate a specific board.
If width and height, allocate a block of this size, in triads.
min_ratio : float
The aspect ratio which the allocated region must be 'at least as
square as'. Set to 0.0 for any allowable shape. Ignored when
allocating single boards or specific rectangles of triads.
max_dead_boards : int or None
The maximum number of broken or unreachable boards to allow in the
allocated region. If None, any number of dead boards is permitted,
as long as the board on the bottom-left corner is alive (Default:
None).
max_dead_links : int or None
            The maximum number of broken links allowed in the allocated region.
When require_torus is True this includes wrap-around links,
otherwise peripheral links are not counted. If None, any number of
broken links is allowed. (Default: None).
require_torus : bool
If True, only allocate blocks with torus connectivity. In general
this will only succeed for requests to allocate an entire machine
(when the machine is otherwise not in use!). Must be False when
allocating boards. (Default: False)
Returns
-------
(allocation_id, boards, periphery, torus) or None
If the allocation was successful a four-tuple is returned. If the
allocation was not successful None is returned.
The ``allocation_id`` is an integer which should be used to free
the allocation with the :py:meth:`.free` method. ``boards`` is a
            set of (x, y, z) tuples giving the locations of the allocated
boards. ``periphery`` is a set of (x, y, z, link) tuples giving
the links which leave the allocated set of boards. ``torus`` is a
:py:class:`.WrapAround` value indicating torus connectivity when at
least one torus may exist.
"""
# Free things that can now be freed
self.check_free()
# Do the allocation
alloc_type = self._alloc_type(*args, **kwargs)
if alloc_type is _AllocationType.board:
return self._alloc_board(*args, **kwargs)
elif alloc_type is _AllocationType.boards:
return self._alloc_boards(*args, **kwargs)
return self._alloc_triads(*args, **kwargs)
def free(self, allocation_id):
""" Free the resources consumed by the specified allocation.
Parameters
----------
allocation_id : int
The ID of the allocation to free.
"""
_type = self.allocation_types.pop(allocation_id)
x, y, z = self.allocation_board.pop(allocation_id)
with self.to_free_lock:
self.to_free.append((datetime.now(), _type, x, y, z))
def check_free(self):
""" Free any of the items on the "to free" list that have expired
"""
changed = False
with self.to_free_lock:
while self.to_free:
free_time, _, _, _, _ = self.to_free[0]
time_diff = (datetime.now() - free_time).total_seconds()
if time_diff < self.seconds_before_free:
break
self._free_next()
changed = True
return changed
def _free_next(self):
""" Free the next item on the "to_free" list
"""
_, _type, x, y, z = self.to_free.popleft()
if _type is _AllocationType.triads:
# Simply free the allocation
self.pack_tree.free(x, y)
elif _type is _AllocationType.board:
# If the triad the board came from was full, it now isn't...
if (x, y) in self.full_single_board_triads:
self.full_single_board_triads.remove((x, y))
self.single_board_triads[(x, y)] = set()
# Return the board to the set available in that triad
self.single_board_triads[(x, y)].add(z)
# If all working boards have been freed in the triad, we must free
# the triad.
working = set(z for z in range(3)
if (x, y, z) not in self.dead_boards)
if self.single_board_triads[(x, y)] == working:
del self.single_board_triads[(x, y)]
self.pack_tree.free(x, y)
else: # pragma: no cover
assert False, "Unknown allocation type!"
class _AllocationType(Enum):
""" Type identifiers for allocations.
"""
triads = 0
""" A rectangular block of triads.
"""
board = 1
""" A single board.
"""
boards = 2
""" Two or more boards, to be allocated as triads.
This type only returned by :py:meth:`.Allocator._alloc_type` and is never
used as an allocation type.
"""
class _CandidateFilter(object):
""" A filter which, given a rectangular region of triads, will check
to see if it meets some set of criteria.
If any candidate is accepted the following attributes are set according to
the last accepted candidate.
Attributes
----------
boards : set([(x, y, z), ...])
The working boards present in the accepted candidate. None if no
candidate has been accepted.
periphery : set([(x, y, z, :py:class:`spalloc_server.links.Links`), ...])
The links around the periphery of the selection of boards which should
be disabled to isolate the system. None if no candidate has been
accepted.
torus : :py:class:`.WrapAround`
Describes the types of wrap-around links the candidate has.
"""
def __init__(self, width, height, dead_boards, dead_links,
max_dead_boards, max_dead_links, require_torus,
expected_boards=None):
""" Create a new candidate filter.
Parameters
----------
width, height : int
Dimensions (in triads) of the system within which candidates are
being chosen.
dead_boards : set([(x, y, z), ...])
The set of boards which are dead and which must not be allocated.
dead_links : set([(x, y, z,\
:py:class:`spalloc_server.links.Links`), ...])
The set of links leaving boards which are known not to be working.
Connections to boards in the set dead_boards are assumed to be
dead and need not be included in this list. Note that both link
directions must be flagged as dead (if the link is bidirectionally
down).
max_dead_boards : int or None
The maximum number of broken or unreachable boards to allow in the
allocated region. If None, any number of dead boards is permitted,
as long as the board on the bottom-left corner is alive (Default:
None).
max_dead_links : int or None
            The maximum number of broken links allowed in the allocated region.
When require_torus is True this includes wrap-around links,
otherwise peripheral links are not counted. If None, any number of
broken links is allowed. (Default: None).
require_torus : bool
If True, only allocate blocks with torus connectivity. In general
this will only succeed for requests to allocate an entire machine
(when the machine is otherwise not in use!). (Default: False)
expected_boards : int or None
If given, specifies the number of boards which are expected to be
in a candidate. This ensures that when an over-allocation is made,
the max_dead_boards figure is offset by any over-allocation.
If None, assumes the candidate width * candidate height * 3.
"""
# pylint: disable=too-many-arguments
self.width = width
self.height = height
self.dead_boards = dead_boards
self.dead_links = dead_links
self.max_dead_boards = max_dead_boards
self.max_dead_links = max_dead_links
self.require_torus = require_torus
self.expected_boards = expected_boards
self.boards = None
self.periphery = None
self.torus = None
def _enumerate_boards(self, x, y, width, height):
""" Starting from board (x, y, 0), enumerate as many reachable and
working boards as possible within the rectangle width x height triads.
Returns
-------
set([(x, y, z), ...])
"""
# The set of visited (and working) boards
boards = set()
to_visit = deque([(x, y, 0)])
while to_visit:
x1, y1, z1 = to_visit.popleft()
# Skip dead boards and boards we've seen before
if ((x1, y1, z1) in self.dead_boards or
(x1, y1, z1) in boards):
continue
boards.add((x1, y1, z1))
# Visit neighbours which are within the range
for link in Links: # pylint: disable=not-an-iterable
# Skip dead links
if (x1, y1, z1, link) in self.dead_links:
continue
x2, y2, z2, _ = board_down_link(
x1, y1, z1, link, self.width, self.height)
# Only process links to boards in the specified range
if (x <= x2 < x + width and
y <= y2 < y + height):
to_visit.append((x2, y2, z2))
# Return the set of boards we could reach
return boards
def _classify_links(self, boards):
""" Get a list of links of various classes connected to the supplied
set of boards.
Parameters
----------
boards : set([(x, y, z), ...])
A set of fully-connected, alive boards.
Returns
-------
alive : set([(x, y, z, :py:class:`.links.Links`), ...])
Links which are working and connect one board
in the set to another.
wrap : set([(x, y, z, :py:class:`.links.Links`), ...])
Working links between working boards in the set which wrap-around
the toroid.
dead : set([(x, y, z, :py:class:`.links.Links`), ...])
Links which are not working and connect one board in the set to
another.
dead_wrap : set([(x, y, z, :py:class:`.links.Links`), ...])
Dead links between working boards in the set which wrap-around the
toroid.
periphery : set([(x, y, z, :py:class:`.links.Links`), ...])
            Links which connect from one board in the set to a board
outside the set. These links may be dead or alive.
wrap_around_type : :py:class:`~spalloc_server.coordinates.WrapAround`
What types of wrap-around links are present (making no distinction
between dead and alive links)?
"""
# pylint: disable=too-many-locals
alive = set()
wrap = set()
dead = set()
dead_wrap = set()
periphery = set()
wrap_around_type = WrapAround.none
for x1, y1, z1 in boards:
for link in Links: # pylint: disable=not-an-iterable
x2, y2, z2, wrapped = board_down_link(
x1, y1, z1, link, self.width, self.height)
if (x2, y2, z2) in boards:
wrap_around_type |= wrapped
if wrapped:
if (x1, y1, z1, link) in self.dead_links:
dead_wrap.add((x1, y1, z1, link))
else:
wrap.add((x1, y1, z1, link))
else:
if (x1, y1, z1, link) in self.dead_links:
dead.add((x1, y1, z1, link))
else:
alive.add((x1, y1, z1, link))
else:
periphery.add((x1, y1, z1, link))
return alive, wrap, dead, dead_wrap, periphery, \
WrapAround(wrap_around_type)
def __call__(self, x, y, width, height):
""" Test whether the region specified meets the stated requirements.
        If True is returned, the set of boards in the region is stored in
        self.boards and the set of links on the periphery is stored in
        self.periphery.
"""
boards = self._enumerate_boards(x, y, width, height)
# Make sure the maximum dead boards limit isn't exceeded
if self.max_dead_boards is not None:
if self.expected_boards is not None:
expected_boards = self.expected_boards
else:
expected_boards = width * height * 3
alive = len(boards)
dead = expected_boards - alive
if alive == 0 or dead > self.max_dead_boards:
return False
elif not boards:
return False
# Make sure the maximum dead links limit isn't exceeded (and that torus
# links exist if requested)
alive, _, dead, dead_wrap, periphery, wrap_around_type = \
self._classify_links(boards)
if self.require_torus and wrap_around_type == WrapAround.none:
return False
if self.max_dead_links is not None:
dead_links = len(dead)
if self.require_torus:
dead_links += len(dead_wrap)
if dead_links > self.max_dead_links:
return False
# All looks good, accept this region and keep the enumerated boards and
# peripheral links
self.boards = boards
self.periphery = periphery
self.torus = wrap_around_type
return True
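    # --- Illustrative usage sketch (editor's addition, not part of spalloc_server) ---
    # The enclosing class name lies outside this excerpt, so ``candidate_filter``
    # below is a hypothetical instance built with the constructor shown above:
    #
    #   if candidate_filter(x, y, width, height):
    #       boards = candidate_filter.boards        # reachable (x, y, z) triads
    #       periphery = candidate_filter.periphery  # links leaving the region
    #       torus = candidate_filter.torus          # WrapAround links present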
|
project-rig/spalloc_server
|
spalloc_server/allocator.py
|
Python
|
gpl-2.0
| 45,698
|
[
"VisIt"
] |
04bc8f20d320a2dcd9f2434088b517d2176264a105a31882bb2748d601736108
|
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# Revisions copyright 2008-2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# L.Pritchard@scri.ac.uk
################################################################################
"""AbstractDrawer module (considered to be a private module, the API may change!)
Provides:
- AbstractDrawer - Superclass for methods common to the Drawer objects
- page_sizes - Method that returns a ReportLab pagesize when passed
a valid ISO size
- draw_box - Method that returns a closed path object when passed
the proper co-ordinates. For HORIZONTAL boxes only.
- angle2trig - Method that returns a tuple of values that are the
vector for rotating a point through a passed angle,
about an origin
- intermediate_points - Method that returns a list of values intermediate
between the points in a passed dataset
For drawing capabilities, this module uses reportlab to draw and write
the diagram: http://www.reportlab.com
For dealing with biological information, the package expects Biopython objects
like SeqFeatures.
"""
# ReportLab imports
from __future__ import print_function
from Bio._py3k import range
from reportlab.lib import pagesizes
from reportlab.lib import colors
from reportlab.graphics.shapes import Polygon
from math import pi, sin, cos
################################################################################
# METHODS
################################################################################
# Utility method to translate strings to ISO page sizes
def page_sizes(size):
"""Convert size string into a Reportlab pagesize.
Arguments:
- size - A string representing a standard page size, eg 'A4' or 'LETTER'
"""
sizes = {'A0': pagesizes.A0, # ReportLab pagesizes, keyed by ISO string
'A1': pagesizes.A1,
'A2': pagesizes.A2,
'A3': pagesizes.A3,
'A4': pagesizes.A4,
'A5': pagesizes.A5,
'A6': pagesizes.A6,
'B0': pagesizes.B0,
'B1': pagesizes.B1,
'B2': pagesizes.B2,
'B3': pagesizes.B3,
'B4': pagesizes.B4,
'B5': pagesizes.B5,
'B6': pagesizes.B6,
'ELEVENSEVENTEEN': pagesizes.ELEVENSEVENTEEN,
'LEGAL': pagesizes.LEGAL,
'LETTER': pagesizes.LETTER
}
try:
return sizes[size]
    except KeyError:
        raise ValueError("%s not in list of page sizes" % size)
def _stroke_and_fill_colors(color, border):
"""Helper function handle border and fill colors (PRIVATE)."""
if not isinstance(color, colors.Color):
raise ValueError("Invalid color %r" % color)
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill color
elif border:
if not isinstance(border, colors.Color):
raise ValueError("Invalid border color %r" % border)
strokecolor = border
else:
# e.g. False
strokecolor = None
return strokecolor, color
def draw_box(point1, point2,
color=colors.lightgreen, border=None, colour=None,
**kwargs):
"""Draw a box.
Arguments:
- point1, point2 - coordinates for opposite corners of the box
(x,y tuples)
- color /colour - The color for the box
(colour takes priority over color)
- border - Border color for the box
Returns a closed path object, beginning at (x1,y1) going round
the four points in order, and filling with the passed color.
"""
x1, y1 = point1
x2, y2 = point2
# Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
del colour
strokecolor, color = _stroke_and_fill_colors(color, border)
x1, y1, x2, y2 = min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2)
return Polygon([x1, y1, x2, y1, x2, y2, x1, y2],
strokeColor=strokecolor,
fillColor=color,
                   strokeWidth=0,
**kwargs)
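# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Minimal example of combining draw_box with a ReportLab Drawing; the Drawing
# size, coordinates and colours below are made-up values for illustration only.
def _example_draw_box():
    from reportlab.graphics.shapes import Drawing
    drawing = Drawing(200, 100)
    drawing.add(draw_box((10, 10), (110, 60),
                         color=colors.lightblue, border=colors.black))
    return drawing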
def draw_cut_corner_box(point1, point2, corner=0.5,
color=colors.lightgreen, border=None, **kwargs):
"""Draw a box with the corners cut off."""
x1, y1 = point1
x2, y2 = point2
if not corner:
return draw_box(point1, point2, color, border)
elif corner < 0:
raise ValueError("Arrow head length ratio should be positive")
strokecolor, color = _stroke_and_fill_colors(color, border)
boxheight = y2 - y1
boxwidth = x2 - x1
corner = min(boxheight * 0.5, boxheight * 0.5 * corner)
return Polygon([x1, y1 + corner,
x1, y2 - corner,
x1 + corner, y2,
x2 - corner, y2,
x2, y2 - corner,
x2, y1 + corner,
x2 - corner, y1,
x1 + corner, y1],
strokeColor=strokecolor,
strokeWidth=1,
strokeLineJoin=1, # 1=round
fillColor=color,
**kwargs)
def draw_polygon(list_of_points,
color=colors.lightgreen, border=None, colour=None,
**kwargs):
"""Draw polygon.
Arguments:
     - list_of_points - list of (x,y) tuples for the corner coordinates
     - color / colour - The color for the box
    Returns a closed path object, beginning at the first point and going round
    the remaining points in order, filling with the passed colour.
"""
# Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
del colour
strokecolor, color = _stroke_and_fill_colors(color, border)
xy_list = []
for (x, y) in list_of_points:
xy_list.append(x)
xy_list.append(y)
return Polygon(xy_list,
strokeColor=strokecolor,
fillColor=color,
                   strokeWidth=0,
**kwargs)
def draw_arrow(point1, point2, color=colors.lightgreen, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
"""Draw an arrow.
Returns a closed path object representing an arrow enclosed by the
box with corners at {point1=(x1,y1), point2=(x2,y2)}, a shaft height
given by shaft_height_ratio (relative to box height), a head length
given by head_length_ratio (also relative to box height), and
an orientation that may be 'left' or 'right'.
"""
x1, y1 = point1
x2, y2 = point2
if shaft_height_ratio < 0 or 1 < shaft_height_ratio:
raise ValueError("Arrow shaft height ratio should be in range 0 to 1")
if head_length_ratio < 0:
raise ValueError("Arrow head length ratio should be positive")
# Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
del colour
strokecolor, color = _stroke_and_fill_colors(color, border)
# Depending on the orientation, we define the bottom left (x1, y1) and
# top right (x2, y2) coordinates differently, but still draw the box
# using the same relative co-ordinates:
xmin, ymin = min(x1, x2), min(y1, y2)
xmax, ymax = max(x1, x2), max(y1, y2)
if orientation == 'right':
x1, x2, y1, y2 = xmin, xmax, ymin, ymax
elif orientation == 'left':
x1, x2, y1, y2 = xmax, xmin, ymin, ymax
else:
raise ValueError("Invalid orientation %s, should be 'left' or 'right'"
% repr(orientation))
# We define boxheight and boxwidth accordingly, and calculate the shaft
# height from these. We also ensure that the maximum head length is
# the width of the box enclosure
boxheight = y2 - y1
boxwidth = x2 - x1
shaftheight = boxheight * shaft_height_ratio
headlength = min(abs(boxheight) * head_length_ratio, abs(boxwidth))
if boxwidth < 0:
headlength *= -1 # reverse it
shafttop = 0.5 * (boxheight + shaftheight)
shaftbase = boxheight - shafttop
headbase = boxwidth - headlength
midheight = 0.5 * boxheight
return Polygon([x1, y1 + shafttop,
x1 + headbase, y1 + shafttop,
x1 + headbase, y2,
x2, y1 + midheight,
x1 + headbase, y1,
x1 + headbase, y1 + shaftbase,
x1, y1 + shaftbase],
strokeColor=strokecolor,
# strokeWidth=max(1, int(boxheight/40.)),
strokeWidth=1,
# default is mitre/miter which can stick out too much:
strokeLineJoin=1, # 1=round
fillColor=color,
**kwargs)
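# --- Illustrative sketch (editor's addition) ---
# Two arrows sharing similar bounding boxes, one pointing right and one
# pointing left; the coordinates are arbitrary and only demonstrate the
# orientation argument.
def _example_draw_arrows():
    right = draw_arrow((10, 10), (110, 40), orientation='right')
    left = draw_arrow((10, 50), (110, 80), orientation='left')
    return right, left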
def angle2trig(theta):
"""Convert angle to a reportlab ready tuple.
Arguments:
- theta - Angle in degrees, counter clockwise from horizontal
Returns a representation of the passed angle in a format suitable
for ReportLab rotations (i.e. cos(theta), sin(theta), -sin(theta),
cos(theta) tuple)
"""
c = cos(theta * pi / 180)
s = sin(theta * pi / 180)
return(c, s, -s, c) # Vector for rotating point around an origin
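# Illustrative check (editor's addition): a 90 degree rotation should give
# approximately (cos 90, sin 90, -sin 90, cos 90) == (0, 1, -1, 0).
def _example_angle2trig():
    c, s, neg_s, c2 = angle2trig(90)
    assert abs(c) < 1e-9 and abs(s - 1.0) < 1e-9
    return c, s, neg_s, c2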
def intermediate_points(start, end, graph_data):
"""Generate intermediate points describing provided graph data..
Returns a list of (start, end, value) tuples describing the passed
graph data as 'bins' between position midpoints.
"""
# print start, end, len(graph_data)
newdata = [] # data in form (X0, X1, val)
# add first block
newdata.append((start,
graph_data[0][0] + (graph_data[1][0] - graph_data[0][0]) / 2.,
graph_data[0][1]))
# add middle set
for index in range(1, len(graph_data) - 1):
lastxval, lastyval = graph_data[index - 1]
xval, yval = graph_data[index]
nextxval, nextyval = graph_data[index + 1]
newdata.append((lastxval + (xval - lastxval) / 2.,
xval + (nextxval - xval) / 2., yval))
# add last block
newdata.append((xval + (nextxval - xval) / 2.,
end, graph_data[-1][1]))
# print newdata[-1]
# print newdata
return newdata
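# --- Illustrative sketch (editor's addition) ---
# Three evenly spaced (position, value) points between 0 and 30 give three
# bins split at the midpoints between neighbouring positions.
def _example_intermediate_points():
    bins = intermediate_points(0, 30, [(5, 1.0), (15, 2.0), (25, 3.0)])
    # bins == [(0, 10.0, 1.0), (10.0, 20.0, 2.0), (20.0, 30, 3.0)]
    return bins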
################################################################################
# CLASSES
################################################################################
class AbstractDrawer(object):
"""AbstractDrawer
Provides:
Methods:
- __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0) Called on instantiation
- set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
- set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
- set_bounds(self, start, end) Set the bounds for the elements to be
drawn
- is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
- __len__(self) Returns the length of sequence that will be drawn
Attributes:
- tracklines Boolean for whether to draw lines delineating tracks
- pagesize Tuple describing the size of the page in pixels
- x0 Float X co-ord for leftmost point of drawable area
- xlim Float X co-ord for rightmost point of drawable area
- y0 Float Y co-ord for lowest point of drawable area
- ylim Float Y co-ord for topmost point of drawable area
- pagewidth Float pixel width of drawable area
- pageheight Float pixel height of drawable area
- xcenter Float X co-ord of center of drawable area
- ycenter Float Y co-ord of center of drawable area
- start Int, base to start drawing from
- end Int, base to stop drawing at
- length Size of sequence to be drawn
- cross_track_links List of tuples each with four entries (track A,
feature A, track B, feature B) to be linked.
"""
def __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, cross_track_links=None):
"""Create the object.
Arguments:
- parent Diagram object containing the data that the drawer
draws
- pagesize String describing the ISO size of the image, or a tuple
of pixels
- orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
- x Float (0->1) describing the relative size of the X
margins to the page
- y Float (0->1) describing the relative size of the Y
margins to the page
- xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
- xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
- yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
- yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
- start Int, the position to begin drawing the diagram at
- end Int, the position to stop drawing the diagram at
- tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
- cross_track_links List of tuples each with four entries (track A,
feature A, track B, feature B) to be linked.
"""
self._parent = parent # The calling Diagram object
# Perform 'administrative' tasks of setting up the page
self.set_page_size(pagesize, orientation) # Set drawing size
self.set_margins(x, y, xl, xr, yt, yb) # Set page margins
self.set_bounds(start, end) # Set limits on what will be drawn
self.tracklines = tracklines # Set flags
        if cross_track_links is None:
            cross_track_links = []
        self.cross_track_links = cross_track_links
def set_page_size(self, pagesize, orientation):
"""Set page size of the drawing..
Arguments:
- pagesize Size of the output image, a tuple of pixels (width,
           height), or a string in the reportlab.lib.pagesizes
set of ISO sizes.
- orientation String: 'landscape' or 'portrait'
"""
if isinstance(pagesize, str): # A string, so translate
pagesize = page_sizes(pagesize)
        elif isinstance(pagesize, tuple):  # A tuple of pixels, use as-is
            pass
else:
raise ValueError("Page size %s not recognised" % pagesize)
shortside, longside = min(pagesize), max(pagesize)
orientation = orientation.lower()
if orientation not in ('landscape', 'portrait'):
raise ValueError("Orientation %s not recognised" % orientation)
if orientation == 'landscape':
self.pagesize = (longside, shortside)
else:
self.pagesize = (shortside, longside)
def set_margins(self, x, y, xl, xr, yt, yb):
"""Set page margins.
Arguments:
- x Float(0->1), Absolute X margin as % of page
- y Float(0->1), Absolute Y margin as % of page
- xl Float(0->1), Left X margin as % of page
- xr Float(0->1), Right X margin as % of page
- yt Float(0->1), Top Y margin as % of page
- yb Float(0->1), Bottom Y margin as % of page
Set the page margins as proportions of the page 0->1, and also
set the page limits x0, y0 and xlim, ylim, and page center
xorigin, yorigin, as well as overall page width and height
"""
# Set left, right, top and bottom margins
xmargin_l = xl or x
xmargin_r = xr or x
ymargin_top = yt or y
ymargin_btm = yb or y
# Set page limits, center and height/width
self.x0, self.y0 = self.pagesize[0] * xmargin_l, self.pagesize[1] * ymargin_btm
self.xlim, self.ylim = self.pagesize[0] * (1 - xmargin_r), self.pagesize[1] * (1 - ymargin_top)
self.pagewidth = self.xlim - self.x0
self.pageheight = self.ylim - self.y0
self.xcenter, self.ycenter = self.x0 + self.pagewidth / 2., self.y0 + self.pageheight / 2.
def set_bounds(self, start, end):
"""Set start and end points for the drawing as a whole.
Arguments:
- start - The first base (or feature mark) to draw from
- end - The last base (or feature mark) to draw to
"""
low, high = self._parent.range() # Extent of tracks
if start is not None and end is not None and start > end:
start, end = end, start
if start is None or start < 0: # Check validity of passed args and
start = 0 # default to 0
if end is None or end < 0:
end = high + 1 # default to track range top limit
self.start, self.end = int(start), int(end)
self.length = self.end - self.start + 1
def is_in_bounds(self, value):
"""Check if given value is within the region selected for drawing,
Arguments:
- value - A base position
"""
if value >= self.start and value <= self.end:
return 1
return 0
def __len__(self):
"""Returns the length of the region to be drawn."""
return self.length
def _current_track_start_end(self):
track = self._parent[self.current_track_level]
if track.start is None:
start = self.start
else:
start = max(self.start, track.start)
if track.end is None:
end = self.end
else:
end = min(self.end, track.end)
return start, end
|
zjuchenyuan/BioWeb
|
Lib/Bio/Graphics/GenomeDiagram/_AbstractDrawer.py
|
Python
|
mit
| 19,362
|
[
"Biopython"
] |
2306a92a6055011b38848ce621b5a5f8f1b66807261670f5950b09eb92b33436
|
import os
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
with open("README.rst") as f:
long_desc = f.read()
ind = long_desc.find("\n")
long_desc = long_desc[ind + 1:]
setup(
name="custodian",
packages=find_packages(),
version="0.8.1",
install_requires=["monty>=0.5.9", "six"],
extras_require={"vasp, nwchem, qchem": ["pymatgen>=3.0.2"]},
package_data={},
author="Shyue Ping Ong, William Davidson Richards, Stephen Dacek, "
"Xiaohui Qu",
author_email="ongsp@ucsd.edu",
maintainer="Shyue Ping Ong",
url="https://github.com/materialsproject/custodian",
license="MIT",
description="A simple JIT job management framework in Python.",
long_description=long_desc,
keywords=["jit", "just-in-time", "job", "management", "vasp"],
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Software Development :: Libraries :: Python Modules"
],
scripts=[os.path.join("scripts", f) for f in os.listdir("scripts")]
)
|
alberthxf/custodian
|
setup.py
|
Python
|
mit
| 1,560
|
[
"NWChem",
"VASP",
"pymatgen"
] |
67769fd8decb4185f9cc8e137d1965cfa8413d5c744dc49f1a178f45332c1245
|
"""
This module contains updated code for dealing with query objects in Goat. The idea
is to provide a common set of attributes that all queries possess, and then to add
further attributes and methods in subclasses so that the different query subtypes
can be coded in a representative manner.
"""
from persistent import Persistent
from bin.initialize_goat import configs
class Query(Persistent):
"""Generic Query class"""
def __init__(self, identity, name=None, description=None, location=None,
alphabet=None, sequence=None, target_db=None, original_query=None):
# standard attributes for all queries
self.identity = identity
self.name = name
self.description = description
self.location = location
self.alphabet = alphabet
self.sequence = sequence
# attributes for reverse searches
self.target_db = target_db # if present, name of record
self.original_query = original_query # if present = qid
class SeqQuery(Query):
"""
Query object for a single sequence-based query, e.g. for blast or phmmer. Also
important for providing reverse search information for HMM- and MSA-based
queries when considering hits and redundant accessions.
"""
def __init__(self, record=None, racc_mode=None, *args, **kwargs):
Query.__init__(self, *args, **kwargs)
self.search_type = 'seq'
self.record = record # record for running self-search
self.racc_mode = racc_mode
self.search_ran = False # to begin
self.all_accs = []
self.raccs = [] # redundant_accessions; description/evalue tuple
def add_all_accs(self, hit_list):
"""Adds results after self-BLAST"""
self.all_accs = hit_list
self._p_changed = 1
def add_raccs(self, racc_list):
"""Adds redundant accessions"""
self.raccs = racc_list
self._p_changed = 1
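# --- Illustrative sketch (editor's addition, not part of Goat) ---
# Minimal construction of a sequence query; every value below is a made-up
# placeholder used only to show the attributes set by Query/SeqQuery.
def _example_seq_query():
    query = SeqQuery(identity='q1', name='example query',
                     alphabet='protein', sequence='MKTAYIAKQR',
                     racc_mode='manual')
    query.add_raccs([('ACC00001', 1e-30)])
    return query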
class HMMQuery(Query):
"""
HMM object for a single hmm-based query. At minimum, need to provide the file/
sequence for the HMM (as in base class), but can also choose to add one or more
query objects (mainly for the raccs), and additionally either sequences or an
MSA for use in running iterative searches. Need to have a way of making an
HMM from the input MSA, including when initial query only includes sequences.
"""
def __init__(self, *args, **kwargs):
Query.__init__(self, *args, **kwargs)
self.search_type = 'hmm'
self.associated_queries = [] # associated qids only!
self.records = [] # record options for rBLAST
# Specified query/record, if applicable
self.spec_qid = None
self.spec_record = None
# Associated files, if applicable
self.seq_file = None
if self.seq_file:
self.add_seqs()
self.msa_file = None
if self.msa_file:
self.add_msa()
self.num_seqs = 0
self.num_determined = False
def add_query(self, qid, qobj=None):
"""Convenience function"""
self.associated_queries.append(qid)
try:
mqdb = configs['misc_queries']
if qobj:
mqdb[qid] = qobj # not present already, add to db
else:
qobj = mqdb[qid] # present already, fetch to get record
self.records.append(qobj.record)
        except (KeyError, AttributeError):
            # database, query object or its record is unavailable
            pass
def add_seqs(self):
"""Adds sequences from supplied file"""
from Bio import SeqIO
self.seqs = []
try:
self.seqs = list(SeqIO.parse(self.seq_file, "fasta")) # otherwise generator!
self.determine_num_seqs(self.seqs)
        except Exception:  # not FASTA (or file unreadable)
            pass
def add_msa(self):
"""Adds an MSA object from supplied file"""
from Bio import AlignIO
self.msa = None
        for align_format in ['fasta', 'clustal', 'phylip', 'nexus']:  # add more later
            try:
                self.msa = AlignIO.read(self.msa_file, align_format)
                self.determine_num_seqs(self.msa)
                break  # stop at the first format that parses successfully
            except Exception:
                continue
def determine_num_seqs(self, iterable):
"""Called from either add_seqs or add_msa"""
temp_num = 0
for seq in iterable:
temp_num += 1
if not self.num_determined:
self.num_seqs = temp_num
else:
if temp_num != self.num_seqs:
pass # warn user
class MSAQuery(Query):
"""
MSA object for a single msa-based query. Like HMM-based queries, should provide
an option for including queries for raccs (for reverse searches).
"""
pass
|
chris-klinger/Goat
|
queries/query_objects.py
|
Python
|
gpl-3.0
| 4,653
|
[
"BLAST"
] |
7ee8de1ae5ad4cd3a596f6855cb349b109f05654a6b961aaa67f65663752f636
|
from __future__ import print_function, division, unicode_literals
import os
import operator
from pymatgen.phasediagram.analyzer import PDAnalyzer
from pymatgen.phasediagram.maker import PhaseDiagram
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.matproj.rest import MPRester
from monty.serialization import loadfn
from twod_materials.utils import is_converged
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import twod_materials
PACKAGE_PATH = twod_materials.__file__.replace('__init__.pyc', '')
PACKAGE_PATH = PACKAGE_PATH.replace('__init__.py', '')
PACKAGE_PATH = '/'.join(PACKAGE_PATH.split('/')[:-2])
try:
config_vars = loadfn(os.path.join(os.path.expanduser('~'), 'config.yaml'))
except Exception:
    print('WARNING: No config.yaml file was found. Please configure '
          'config.yaml and put it in your home directory.')
# Still set them for testing purposes.
config_vars = loadfn(os.path.join(PACKAGE_PATH, 'config.yaml'))
if 'MP_API' in os.environ:
MPR = MPRester(os.environ['MP_API'])
else:
MPR = MPRester(config_vars['mp_api'])
def get_competing_phases():
"""
    Collect the species into which the material might decompose.
Returns:
A list of phases as tuples formatted as
[(formula_1, Materials_Project_ID_1),
(formula_2, Materials_Project_ID_2), ...]
"""
total_competing_phases = []
composition = Structure.from_file('POSCAR').composition
try:
energy = Vasprun('vasprun.xml').final_energy
except:
energy = 100 # The function can work without a vasprun.xml
entries = MPR.get_entries_in_chemsys(
[elt.symbol for elt in composition]
)
my_entry = ComputedEntry(composition, energy)
entries.append(my_entry)
pda = PDAnalyzer(PhaseDiagram(entries))
decomp = pda.get_decomp_and_e_above_hull(my_entry, allow_negative=True)
competing_phases = [
(entry.composition.reduced_formula,
entry.entry_id) for entry in decomp[0]
]
return competing_phases
def get_hull_distance(competing_phase_directory='../competing_phases'):
"""
Calculate the material's distance to the thermodynamic hull,
based on species in the Materials Project database.
Args:
competing_phase_directory (str): absolute or relative path
to the location where your competing phases have been
relaxed. The default expectation is that they are stored
in a directory named 'competing_phases' at the same level
as your material's relaxation directory.
Returns:
float. distance (eV/atom) between the material and the
hull.
"""
finished_competitors = {}
original_directory = os.getcwd()
# Determine which competing phases have been relaxed in the current
# framework and store them in a dictionary ({formula: entry}).
if os.path.isdir(competing_phase_directory):
os.chdir(competing_phase_directory)
for comp_dir in [
dir for dir in os.listdir(os.getcwd()) if os.path.isdir(dir) and
is_converged(dir)
]:
vasprun = Vasprun('{}/vasprun.xml'.format(comp_dir))
composition = vasprun.final_structure.composition
energy = vasprun.final_energy
finished_competitors[comp_dir] = ComputedEntry(composition, energy)
os.chdir(original_directory)
else:
raise ValueError('Competing phase directory does not exist.')
composition = Structure.from_file('POSCAR').composition
try:
energy = Vasprun('vasprun.xml').final_energy
except:
raise ValueError('This directory does not have a converged vasprun.xml')
my_entry = ComputedEntry(composition, energy) # 2D material
entries = MPR.get_entries_in_chemsys(
[elt.symbol for elt in composition]
)
# If the energies of competing phases have been calculated in
# the current framework, put them in the phase diagram instead
# of the MP energies.
for i in range(len(entries)):
formula = entries[i].composition.reduced_formula
if formula in finished_competitors:
entries[i] = finished_competitors[formula]
else:
entries[i] = ComputedEntry(entries[i].composition, 100)
entries.append(my_entry) # 2D material
pda = PDAnalyzer(PhaseDiagram(entries))
decomp = pda.get_decomp_and_e_above_hull(my_entry, allow_negative=True)
return decomp[1]
def plot_hull_distances(hull_distances, fmt='pdf'):
"""
Create a bar graph of the formation energies of several 2D materials.
Args:
hull_distances (dict): follow the format:
{reduced_formula: hull_distance (in eV/atom)}
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
hsize = 12 + (len(hull_distances) - 4) / 3
ax = plt.figure(figsize=(hsize, 10)).gca()
ax.set_ylim(0, 700)
ax.set_xlim(0, len(hull_distances))
x_ticklabels = []
i = 0
for compound in sorted(
hull_distances.items(), key=operator.itemgetter(1)):
proper_formula = ''
for char in compound[0]:
try:
int(char)
proper_formula += '_{}'.format(char)
except ValueError:
proper_formula += char
x_ticklabels.append(r'$\mathrm{%s}$' % proper_formula)
hull_distance = hull_distances[compound[0]] * 1000
# Good chance of stability
if hull_distance < 100:
color_code = 0.5
# Decent chance of stability
elif hull_distance < 200:
color_code = 0.71
# Poor chance of stability
else:
color_code = 0.92
ax.add_patch(plt.Rectangle((i + 0.1, 0), height=hull_distance,
width=0.8, linewidth=0,
facecolor=plt.cm.jet(color_code)))
i += 1
ax.set_xticks([x + 0.5 for x in range(len(hull_distances))])
ax.set_xticklabels(x_ticklabels, family='serif', size=20, rotation=60)
ax.set_yticklabels(ax.get_yticks(), family='serif', size=20)
ax.set_ylabel(r'$\mathrm{E_F\/(meV/atom)}$', size=40)
plt.savefig('stability_plot.{}'.format(fmt), transparent=True)
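# --- Illustrative sketch (editor's addition) ---
# Example call with made-up hull distances in eV/atom; this writes
# 'stability_plot.pdf' to the current directory:
#
#   plot_hull_distances({'MoS2': 0.05, 'WSe2': 0.12, 'SnO': 0.31})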
|
ashtonmv/twod_materials
|
twod_materials/stability/analysis.py
|
Python
|
gpl-3.0
| 6,441
|
[
"VASP",
"pymatgen"
] |
0a36cf52460f80529a2bf2b885ff46e00d8809d77bc91ac023625013ed55c8b3
|
#-*- coding:utf-8 -*-
from __future__ import division
from keras.layers import Convolution2D ,MaxPooling2D,Flatten
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Sequential
from keras.optimizers import SGD
from keras.utils import np_utils
from sklearn.cross_validation import train_test_split, StratifiedKFold ,cross_val_score
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
import matplotlib.pylab as plt
import numpy as np
import pickle
#
# Categorical to Label for further use in classification_report
def cat2lab (cat):
'''only for binary category
#Args:
cat : binary categorical variable
'''
return np.array([0 if s[0] else 1 for s in cat])
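# Illustrative check (editor's addition): one-hot rows map back to integer
# labels, e.g. cat2lab(np.array([[1, 0], [0, 1]])) gives array([0, 1]).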
def plot_wegh (model):
'''
Plot weights of convolution layer
#Args
model : fitted model
'''
wegh_arr = model.get_weights()
num = len(wegh_arr[0])
    root = int(np.sqrt(num))
    if root * root == num:  # perfect square: use a square grid
        col = root
        row = root
    else:
        col = int(num / 2)
        row = int(num / col)
fig ,axes = plt.subplots(row,col, subplot_kw={'xticks': [], 'yticks': []})
plt.subplots_adjust(hspace=0.02,wspace = 0.05)
for i, ax in zip(xrange(num),axes.flat):
ax.imshow(wegh_arr[0][i][0])
ax.grid('off')
plt.show()
# Load files and regularize
lab_pickle = open('d://labels_new.p')
img_pickle = open('d://images_new.p')
labels = np.array(pickle.load(lab_pickle))
imgs = np.array(pickle.load(img_pickle))
reg_imgs = imgs /255
cat_labels = np_utils.to_categorical(labels,nb_classes=2)
#reshape to shape (50,50)
reg_imgs_2d =[]
for img in reg_imgs:
reg_imgs_2d.append(np.reshape(img,(50,50)))
reg_imgs_2d = np.array(reg_imgs_2d)
#reshape to shape (1,50,50) for CNN
reg_imgs_3d =[]
for img in reg_imgs:
reg_imgs_3d.append(np.reshape(img,(1,50,50)))
reg_imgs_3d = np.array(reg_imgs_3d)
x_tr1,x_te1,y_tr1,y_te1 = train_test_split(reg_imgs,cat_labels,test_size= 0.2,random_state= 123)
x_tr2,x_te2,y_tr2,y_te2 = train_test_split(reg_imgs_2d,cat_labels,test_size= 0.2,random_state= 123)
x_tr3,x_te3,y_tr3,y_te3 = train_test_split(reg_imgs_3d,cat_labels,test_size= 0.2,random_state= 123)
x_trn1,x_ten1,y_trn1,y_ten1 = train_test_split(reg_imgs,labels,test_size= 0.2,random_state= 123)
#Simple neuron
model1 = Sequential()
model1.add(Dense(2500, input_dim=2500,init='uniform',activation='relu'))
model1.add(Dropout(0.25))
# ,activity_regularizer=activity_l2(0.01)))
model1.add(Dense(2, activation="softmax"))
model1.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.005,decay= 1e-6,momentum=0.7,nesterov=True))
hist1 = model1.fit(x_tr1, y_tr1, nb_epoch=500,validation_split=0.2 ,batch_size=50,show_accuracy=True,verbose=1)
print(model1.summary())
print(model1.evaluate(x_te1,y_te1,batch_size=50,show_accuracy=True))
y_pred1 = model1.predict_classes(x_te1,20)
y_ten1 = cat2lab(y_te1)
print(classification_report(y_ten1,y_pred1))
print(model1.summary())
plt.plot(hist1.history['acc'],label='acc')
plt.plot(hist1.history['loss'],label='loss')
plt.plot(hist1.history['val_loss'],label='val_loss')
plt.plot(hist1.history['val_acc'],label='val_acc')
plt.legend()
plt.grid('off')
plt.show()
# CNN modelling; some layers are commented out to keep the model compact
model2 = Sequential()
model2.add(Convolution2D(10,10, 10, border_mode='same', input_shape=(1, 50, 50)))
model2.add(Activation('relu'))
# model2.add(Convolution2D(50, 5, 5,init='uniform'))
# model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.5))
model2.add(Convolution2D(10, 10, 10,init='uniform' ,border_mode='same'))
model2.add(Activation('relu'))
# model2.add(Convolution2D(100, 5, 5,init='uniform'))
# model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.3))
model2.add(Flatten())
model2.add(Dense(1250,init='uniform'))
model2.add(Activation('relu'))
model2.add(Dense(2,activation='softmax'))
model2.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.01,decay=1e-6,
momentum=0.5,
nesterov=True))
hist2 = model2.fit(x_tr3, y_tr3, nb_epoch=300 , batch_size=50 ,validation_split=0.2, show_accuracy=True ,shuffle=True,verbose =1)
print(model2.summary())
print(model2.evaluate(x_te3,y_te3,batch_size=50,show_accuracy=True,verbose=1))
y_pred2 = model2.predict_classes(x_te3)
y_ten2 = cat2lab(y_te3)
print(classification_report(y_ten2,y_pred2))
plt.figure(figsize=(25,15))
plt.plot(hist2.history['acc'],label='acc')
plt.plot(hist2.history['loss'],label='loss')
plt.plot(hist2.history['val_acc'],'--',label='val_acc')
plt.plot(hist2.history['val_loss'],'--',label='val_loss')
plt.legend()
plt.ylim(0,max(hist2.history['acc'])+0.05)
plt.grid('off')
plt.show()
plot_wegh(model2)
y_pred2 = model2.predict_classes(x_te3)
y_ten2 = cat2lab(y_te3)
print('score for CNN is :')
print(model2.evaluate(x_te3,y_te3, batch_size=50, show_accuracy=True, verbose=1))
print(classification_report(y_ten2,y_pred2))
cv = StratifiedKFold(labels,n_folds=10,shuffle=True)
params = {'C' : [1e1, 1e2, 1e3,1e4,1e5],
'gamma' : [0.0001,0.0005,0.001,0.005,0.01]}
clf_grid = GridSearchCV(SVC(kernel='rbf'),params,cv=cv)
model3 = clf_grid.fit(reg_imgs,labels)
print('score for grid-searched SVM is :')
print(model3.best_score_, model3.best_params_)
#demonstration of upper GridSearchCV method
svc_rslt = []
for x,y in cv:
clf = SVC(kernel='rbf',C=10.0,gamma = 0.005,)
clf.fit(reg_imgs[x],labels[x])
svc_rslt.append(clf.score(reg_imgs[y], labels[y]))
svc_rslt = np.array(svc_rslt)
print('cross validated SVC score is ' , svc_rslt.mean())
# tried ensenble with various algorithm
ens1 = RandomForestClassifier(n_estimators = 250 , max_depth= None,verbose=1)
ens2 = AdaBoostClassifier(SVC(kernel='rbf',gamma=0.005,C = 10.0),
algorithm="SAMME",
n_estimators=100,
learning_rate=0.01)
ens3 = AdaBoostClassifier(DecisionTreeClassifier(max_depth=None),
algorithm="SAMME",
n_estimators=100,
learning_rate=0.01)
ens1.fit(x_trn1, y_trn1)
ens2.fit(x_trn1, y_trn1)
ens3.fit(x_trn1, y_trn1)
print('RandomForest score : ',ens1.score(x_ten1,y_ten1))
print('Adaboost-SVM score : ',ens2.score(x_ten1,y_ten1))
print('Adaboost-Decision score : ',ens3.score(x_ten1,y_ten1))
|
Jesse-Back/mach_image_proc
|
modeling.py
|
Python
|
gpl-3.0
| 6,724
|
[
"NEURON"
] |
f47159ca8f0c0beb2f5dbacf410fabcbbbeeefc649ef8c568deabaa9317ab129
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
************************************
espressopp.integrator.VelocityVerlet
************************************
.. function:: espressopp.integrator.VelocityVerlet(system)
:param system:
:type system:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.MDIntegrator import *
from _espressopp import integrator_VelocityVerlet
class VelocityVerletLocal(MDIntegratorLocal, integrator_VelocityVerlet):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, integrator_VelocityVerlet, system)
if pmi.isController :
class VelocityVerlet(MDIntegrator):
pmiproxydefs = dict(
cls = 'espressopp.integrator.VelocityVerletLocal',
pmicall = ['resetTimers','getNumResorts'],
pmiinvoke = ['getTimers']
)
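# --- Illustrative usage sketch (editor's addition, hedged) ---
# On the controller, the proxy class above is typically built from a configured
# espressopp System; the ``run`` call below is assumed to come from the
# MDIntegrator interface imported above, which is not shown in this file:
#
#   integrator = VelocityVerlet(system)
#   integrator.run(1000)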
|
espressopp/espressopp
|
src/integrator/VelocityVerlet.py
|
Python
|
gpl-3.0
| 1,804
|
[
"ESPResSo"
] |
ad86a626a5d6ef923637d38d2c9b821178b1d07c0dd38a9531e09ac63a292df1
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function used to test handling CloudEvent functions."""
import flask
def function(cloud_event):
"""Test Event function that checks to see if a valid CloudEvent was sent.
The function returns 200 if it received the expected event, otherwise 500.
Args:
cloud_event: A CloudEvent as defined by https://github.com/cloudevents/sdk-python.
Returns:
HTTP status code indicating whether valid event was sent or not.
"""
valid_event = (
cloud_event["id"] == "my-id"
and cloud_event.data == {"name": "john"}
and cloud_event["source"] == "from-galaxy-far-far-away"
and cloud_event["type"] == "cloud_event.greet.you"
and cloud_event["time"] == "2020-08-16T13:58:54.471765"
)
if not valid_event:
flask.abort(500)
|
GoogleCloudPlatform/functions-framework-python
|
tests/test_functions/cloud_events/main.py
|
Python
|
apache-2.0
| 1,382
|
[
"Galaxy"
] |
171057fbdef766ad0c8ca4c6ba54bcb3baeb5c49f2438bbbdd05267f12292e95
|
from handlers import handlers, handler
from handlers.base import BaseHandler
import dns.resolver
from conf import config, debug_logger
import logging
import hashlib
import nss.nss as nss
from tls import nssconfig
from nss.error import NSPRError
intended_usage = nss.certificateUsageSSLServer
logger = logging.getLogger(__name__)
# Introducing DANE is somewhat complicated because we have no direct way of
# knowing which host the user is trying to connect to, yet we need that domain
# name to make a DNS query. To address this problem we extract the
# subject_common_name from the certificate and try different schemes, but that
# is not enough. For example, https://good.dane.verisignlabs.com/ has a valid
# TLSA record, but the cert's subjectCommonName is *.dane.verisign.lab.
# In that case we should use good.dane.verisignlabs.com to retrieve the TLSA
# record, but we only have dane.verisign.lab, so we are not able to check this
# TLSA.
# Another problem arises if we do not have the whole chain. If the TLSA record
# received is something like 0 0 1 (associated data), the associated data may
# refer to the root CA, so it is important that the whole chain is presented.
# Also remember that we are not using DNSSEC along with DANE, so we could be
# under attack if someone were to impersonate the DNS server.
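# --- Illustrative note (editor's addition) ---
# A TLSA record for an HTTPS service has the general form
#   _443._tcp.example.com. IN TLSA <usage> <selector> <matching-type> <data>
# e.g. "3 1 1 <hex SHA-256 of the SubjectPublicKeyInfo>", which corresponds to
# the (cert_usage, selector, match_type, associated_data) fields parsed in
# Dane.on_certificate below.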
@handler(handlers, isHandler=config.V_DANE)
class Dane(BaseHandler):
name = "dane"
def __init__(self, cert, ocsp):
super(Dane, self).__init__(cert, ocsp)
self.on_certificate(cert)
def verify_chain(self, cert):
approved_usage = not intended_usage
try:
length = cert.length_chain()
if length > 4:
return False
cert_nss = cert.get_cert_nss()
certdb = nssconfig.certdb
approved_usage = cert_nss.verify_now(
certdb, True, intended_usage, None)
except NSPRError:
cert.add_to_nssdb(cert_nss.issuer.common_name, deep=1)
if length == 4:
inter = cert.get_cert_nss(deep=1)
cert.add_to_nssdb(inter.issuer.common_name, deep=2)
try:
approved_usage = cert_nss.verify_now(
certdb, True, intended_usage, None)
except NSPRError:
pass
if approved_usage & intended_usage:
return True
else:
return False
def on_certificate(self, cert):
def verify(url):
try:
answer = dns.resolver.query('_443._tcp.' + url, 'TLSA')
            except Exception:
# print "Unexpected error:", sys.exc_info()[0]
return -1
(
cert_usage,
selector,
match_type,
associated_data
) = [str(ans) for ans in answer][0].split(' ')
funct = [cert.der_data, cert.subject_public_key_info]
hash_funct = [None, hashlib.sha256, hashlib.sha512]
temp = hash_funct[int(match_type)]
            # Depending on the match_type we need to use different hash algorithms
if cert_usage == '3' or cert_usage == '1':
                # Depending on the selector field we use the whole certificate or
                # only the subjectPublicKeyInfo
data = funct[int(selector)]()
if temp is not None:
m = temp(data)
data = m.hexdigest()
if data == associated_data:
return True
else:
return False
if cert_usage == '0' or cert_usage == '2':
# We must check for each certificate in the chain that the
# associated data is presented
for cer in xrange(0, cert.length_chain()):
data = funct[int(selector)](deep=cer)
if temp is not None:
m = temp(data)
data = m.hexdigest()
if data == associated_data:
if cert_usage == '0':
return True
else:
cert.add_to_nssdb(
cert.subject_common_name(deep=cer),
deep=cer)
value = self.verify_chain(cert)
cert.remove_from_nssdb(
cert.subject_common_name(deep=cer)
)
return value
return False
try:
url = cert.subject_common_name()
except IndexError:
debug_logger.debug("\t[-] ERROR extracting subject_common_name")
return
result = False
result = verify(url)
if result is True:
debug_logger.debug(
"\t[+] Certificate %s has a valid TLSA record" %
cert.ca_name()
)
return
if url[0:3] == "www":
url = url.replace("www.", '')
result = verify(url)
if url[0] == '*':
url = url.replace('*', 'www')
result = verify(url)
if result is True:
debug_logger.debug(
"\t[+] Certificate %s has a valid TLSA record" %
cert.ca_name()
)
return
url = url.replace('www', '*')
        if url.startswith('*.'):
url = url.replace('*.', '')
result = verify(url)
if result is True:
debug_logger.debug(
"\t[+] Certificate %s has a valid TLSA record" %
cert.ca_name()
)
return
if result == -1:
debug_logger.debug(
"\t[-] Certificate {0} does not implement DANE".format(
cert.ca_name()))
return
debug_logger.debug(
"\t[-] Certificate {0} has not a valid TLSA".format(
cert.ca_name()))
|
alvarofe/cassle
|
handlers/dane.py
|
Python
|
gpl-3.0
| 6,178
|
[
"VisIt"
] |
c40e975efe6ea4d10ff6edea8deaf4eb8c372e6ce0e3c1f5ccf4d4e2d0e0b18d
|
from edc_appointment.models import Appointment
from edc_constants.constants import YES, NOT_REQUIRED, UNKEYED
from edc_registration.models import RegisteredSubject
from edc_rule_groups.classes import RuleGroup, site_rule_groups, Logic, CrfRule, RequisitionRule
from tshilo_dikotla.constants import NO_MODIFICATIONS, START, MODIFIED
from td_maternal.rule_groups import func_mother_pos
from td_maternal.models import MaternalVisit, MaternalRando
from .models import InfantArvProph, InfantArvProphMod
from .models import InfantVisit, InfantFu, InfantBirthData, InfantFeeding, InfantNvpDispensing
def get_previous_visit(visit_instance, timepoints, visit_model):
registered_subject = visit_instance.appointment.registered_subject
position = timepoints.index(
visit_instance.appointment.visit_definition.code)
timepoints_slice = timepoints[:position]
if len(timepoints_slice) > 1:
timepoints_slice.reverse()
for point in timepoints_slice:
try:
previous_appointment = Appointment.objects.filter(
registered_subject=registered_subject, visit_definition__code=point).order_by('-created').first()
return visit_model.objects.filter(appointment=previous_appointment).order_by('-created').first()
except Appointment.DoesNotExist:
pass
except visit_model.DoesNotExist:
pass
except AttributeError:
pass
return None
def get_subject_identifier(infant_subject_identifier):
try:
return RegisteredSubject.objects.get(subject_identifier=infant_subject_identifier)
except RegisteredSubject.DoesNotExist:
pass
def maternal_hiv_status_visit(visit_instance):
maternal_registered_subject = get_subject_identifier(
visit_instance.appointment.registered_subject.relative_identifier)
try:
maternal_visit_2000 = MaternalVisit.objects.get(
subject_identifier=maternal_registered_subject.subject_identifier,
appointment__visit_definition__code='2000M')
return func_mother_pos(maternal_visit_2000)
except Exception as e:
pass
def func_show_infant_arv_proph(visit_instance):
previous_visit = get_previous_visit(visit_instance,
['2000', '2010', '2020', '2060', '2090',
'2120', '2180', '2240', '2300', '2360'],
InfantVisit)
if not previous_visit:
return False
try:
infant_arv_proph = InfantArvProph.objects.get(
infant_visit=previous_visit)
except InfantArvProph.DoesNotExist:
if visit_instance.appointment.visit_definition.code == '2010':
return maternal_hiv_status_visit(visit_instance)
return False
else:
return not InfantArvProphMod.objects.filter(
infant_arv_proph=infant_arv_proph,
dose_status='Permanently discontinued').order_by('-created').first()
def func_infant_heu(visit_instance):
"""Returns true if mother of the infant is hiv positive."""
appointment = visit_instance.appointment
latest_maternal_visit = MaternalVisit.objects.filter(
appointment__registered_subject__subject_identifier=appointment.registered_subject.relative_identifier,
).order_by('-created').first()
if func_mother_pos(latest_maternal_visit):
return True
return False
def func_infant_heu_and_require_pcr(visit_instance):
appointment = visit_instance.appointment
latest_maternal_visit = MaternalVisit.objects.filter(
appointment__registered_subject__subject_identifier=appointment.registered_subject.relative_identifier,
).order_by('-created').first()
if (func_mother_pos(latest_maternal_visit)
and visit_instance.appointment.visit_instance == '0'
and visit_instance.appointment.visit_definition.code in ['2010', '2020', '2060']):
return True
return False
def func_require_infant_elisa(visit_instance):
""" Returns true if the infant is HEU and at visit 2180
otherwise returns false if HEU and not 2180 for PRN."""
return (visit_instance.appointment.visit_definition.code == '2180'
and visit_instance.appointment.visit_instance == '0'
and func_infant_heu(visit_instance))
def func_require_infant_dbs(visit_instance):
""""""
return (visit_instance.appointment.visit_definition.code == '2010'
and visit_instance.appointment.visit_instance == '0'
and func_infant_heu(visit_instance))
def func_show_infant_nvp_dispensing(visit_instance):
show_infant_nvp_dispensing = False
maternal_registered_subject = get_subject_identifier(
visit_instance.appointment.registered_subject.relative_identifier)
try:
maternal_rando = MaternalRando.objects.get(
subject_identifier=maternal_registered_subject.subject_identifier)
show_infant_nvp_dispensing = func_infant_heu(visit_instance) and maternal_rando.rx.strip(
'\n') == 'NVP'
except MaternalRando.DoesNotExist:
pass
return show_infant_nvp_dispensing
def func_show_nvp_adjustment_2010(visit_instance):
nvp_adjustment = False
subject_identifier = visit_instance.appointment.registered_subject.subject_identifier
try:
if visit_instance.appointment.visit_definition.code == '2010':
visit_2000 = InfantVisit.objects.filter(
appointment__visit_definition__code='2000',
appointment__registered_subject__subject_identifier=subject_identifier).order_by('created').first()
nvp_dispensing = InfantNvpDispensing.objects.get(
infant_visit=visit_2000)
nvp_adjustment = func_infant_heu(
visit_instance) and nvp_dispensing.nvp_prophylaxis == YES
except InfantVisit.DoesNotExist:
pass
except InfantNvpDispensing.DoesNotExist:
pass
return nvp_adjustment
class InfantRegisteredSubjectRuleGroup(RuleGroup):
arv_proph = CrfRule(
logic=Logic(
predicate=func_show_infant_arv_proph,
consequence=UNKEYED,
alternative=NOT_REQUIRED),
target_model=[('td_infant', 'infantarvproph'), ])
birth_arv = CrfRule(
logic=Logic(
predicate=func_infant_heu,
consequence=UNKEYED,
alternative=NOT_REQUIRED),
target_model=[('td_infant', 'infantbirtharv'), ])
infant_nvp_dispensing = CrfRule(
logic=Logic(
predicate=func_show_infant_nvp_dispensing,
consequence=UNKEYED,
alternative=NOT_REQUIRED),
target_model=[('td_infant', 'infantnvpdispensing'), ])
nvp_adjustment = CrfRule(
logic=Logic(
predicate=func_show_nvp_adjustment_2010,
consequence=UNKEYED,
alternative=NOT_REQUIRED),
target_model=[('td_infant', 'infantnvpadjustment'), ])
class Meta:
app_label = 'td_infant'
source_fk = None
source_model = RegisteredSubject
site_rule_groups.register(InfantRegisteredSubjectRuleGroup)
class InfantFuRuleGroup(RuleGroup):
physical_assessment_yes = CrfRule(
logic=Logic(
predicate=('physical_assessment', 'equals', YES),
consequence=UNKEYED,
alternative=NOT_REQUIRED),
target_model=[('td_infant', 'infantfuphysical'), ])
has_dx_yes = CrfRule(
logic=Logic(
predicate=('has_dx', 'equals', YES),
consequence=UNKEYED,
alternative=NOT_REQUIRED),
target_model=[('td_infant', 'infantfudx'), ])
class Meta:
app_label = 'td_infant'
source_fk = (InfantVisit, 'infant_visit')
source_model = InfantFu
site_rule_groups.register(InfantFuRuleGroup)
class InfantFeedingRuleGroup(RuleGroup):
solid_foods = CrfRule(
logic=Logic(
predicate=('formula_intro_occur', 'equals', YES),
consequence=UNKEYED,
alternative=NOT_REQUIRED),
target_model=[('td_infant', 'solidfoodassessment'), ])
class Meta:
app_label = 'td_infant'
source_fk = (InfantVisit, 'infant_visit')
source_model = InfantFeeding
site_rule_groups.register(InfantFeedingRuleGroup)
class InfantBirthDataRuleGroup(RuleGroup):
congenital_anomalities_yes = CrfRule(
logic=Logic(
predicate=('congenital_anomalities', 'equals', YES),
consequence=UNKEYED,
alternative=NOT_REQUIRED),
target_model=[('td_infant', 'infantcongenitalanomalies'), ])
class Meta:
app_label = 'td_infant'
source_fk = (InfantVisit, 'infant_visit')
source_model = InfantBirthData
site_rule_groups.register(InfantBirthDataRuleGroup)
class InfantRequisitionRuleGroup(RuleGroup):
require_dna_pcr = RequisitionRule(
logic=Logic(
predicate=func_infant_heu_and_require_pcr,
consequence=UNKEYED,
alternative=NOT_REQUIRED),
target_model=[('td_lab', 'infantrequisition')],
target_requisition_panels=['DNA PCR'])
require_dbs = RequisitionRule(
logic=Logic(
predicate=func_require_infant_dbs,
consequence=NOT_REQUIRED,
alternative=UNKEYED),
target_model=[('td_lab', 'infantrequisition')],
target_requisition_panels=['DBS (Store Only)'])
require_elisa = RequisitionRule(
logic=Logic(
predicate=func_require_infant_elisa,
consequence=UNKEYED,
alternative=NOT_REQUIRED),
target_model=[('td_lab', 'infantrequisition')],
target_requisition_panels=['ELISA'])
# require_plasma_pbmc = RequisitionRule(
# logic=Logic(
# predicate=func_plasma_pbmc,
# consequence=UNKEYED,
# alternative=NOT_REQUIRED),
# target_model=[('td_lab', 'maternalrequisition')],
# target_requisition_panels=['PBMC Plasma (STORE ONLY)'])
#
# require_insulin_glucose = RequisitionRule(
# logic=Logic(
# predicate=func_insulin_glucose,
# consequence=UNKEYED,
# alternative=NOT_REQUIRED),
# target_model=[('td_lab', 'maternalrequisition')],
# target_requisition_panels=['Infant Insulin', 'Infant Glucose'])
class Meta:
app_label = 'td_infant'
source_fk = None
source_model = RegisteredSubject
site_rule_groups.register(InfantRequisitionRuleGroup)
|
botswana-harvard/tshilo-dikotla
|
td_infant/rule_groups.py
|
Python
|
gpl-2.0
| 10,566
|
[
"VisIt"
] |
420a27f192a6775b446e0bcda0a41668a1e4d28dd32984cbb010c41c362fc01b
|
# This file is part of the Fluggo Media Library for high-quality
# video and audio processing.
#
# Copyright 2010 Brian J. Crowell <brian@fluggo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections, weakref, functools
class EZList(collections.MutableSequence):
def __init__(self):
self._marks = []
@functools.total_ordering
class Mark(object):
def __init__(self, index, left_gravity):
self.index = index
self.left_gravity = left_gravity
def __index__(self):
return self.index
def __int__(self):
return self.index
def __add__(self, other):
return self.index + other.__index__()
def __radd__(self, other):
return other.__index__() + self.index
def __sub__(self, other):
return self.index - other.__index__()
def __rsub__(self, other):
return other.__index__() - self.index
def __lt__(self, other):
return self.index < other.__index__()
def __eq__(self, other):
return self.index == other.__index__()
def __repr__(self):
return 'Mark({0.index}, left_gravity={0.left_gravity})'.format(self)
'''
Allows implementing __setitem__ and __delitem__ on a list as
replace operations. Implement _replace_range in your code.
'''
def _replace_range(self, start, stop, items):
'''
Replace indexes start through stop with items.
my_list._replace_range(start, stop, items)
start - Index in the current list to start replacement.
stop - Index to stop replacement at; replace range(start, stop).
items - List of items to put in place. This list is not at
all guaranteed to match the number of items being replaced.
Call _update_marks immediately after updating your internal list.
A minimal implementation might be:
def _replace_range(self, start, stop, items):
self._internal_list[start:stop] = items
self._update_marks(start, stop, len(items))
'''
raise NotImplementedError
def _iter_marks(self):
i = 0
while i < len(self._marks):
strong_ref = self._marks[i]()
if not strong_ref or not strong_ref.index:
del self._marks[i]
else:
i += 1
yield strong_ref
def create_mark(self, index, left_gravity):
if index < 0 or index > len(self):
raise IndexError
for mark in self._iter_marks():
if mark.index == index:
return mark
mark = self.Mark(index, left_gravity)
self._marks.append(weakref.ref(mark))
return mark
def _update_marks(self, start, stop, new_length):
for mark in self._iter_marks():
if mark.index < start:
continue
elif mark.index <= stop:
# It's in the middle; where it ends up depends on its gravity
if mark.left_gravity:
mark.index = start
else:
mark.index = start + new_length
else:
mark.index += new_length - (stop - start)
def insert(self, index, value):
self[index:index] = [value]
def __setitem__(self, key, value):
start, stop, step = None, None, None
is_slice = isinstance(key, slice)
items = None
if is_slice:
start, stop, step = key.indices(len(self))
items = value
else:
start, stop, step = key, key + 1, 1
items = [value]
if step == 1:
self._replace_range(start, stop, items)
else:
# Reduce it to solid ranges
i = 0
for j in range(start, stop, step):
if i < len(items):
self._replace_range(j, j + 1, [items[i]])
else:
self._replace_range(j, j + 1, [])
def __delitem__(self, key):
# Just a special case of __setitem__ above
if isinstance(key, slice):
self[key] = []
else:
self[key:key + 1] = []
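# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A minimal concrete subclass following the _replace_range contract described
# in the docstring above; the class and attribute names are made up.
class _ExampleEZList(EZList):
    def __init__(self):
        EZList.__init__(self)
        self._items = []
    def __len__(self):
        return len(self._items)
    def __getitem__(self, key):
        return self._items[key]
    def _replace_range(self, start, stop, items):
        self._items[start:stop] = items
        self._update_marks(start, stop, len(items))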
|
fluggo/Canvas
|
fluggo/ezlist.py
|
Python
|
gpl-3.0
| 4,860
|
[
"Brian"
] |
e709e4025ee82fb2ef83c00420f0678ecf6405f7820c797ea9ae3e363ac2a504
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-18 14:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Galaxy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now_add=True)),
('galaxy_name', models.CharField(max_length=50)),
('galaxy_type', models.CharField(blank=True, max_length=50, null=True)),
('notes', models.TextField(default=None)),
('distance', models.DecimalField(decimal_places=3, max_digits=6)),
('diameter', models.PositiveIntegerField(blank=True, default=0, null=True)),
('number_of_stars', models.PositiveIntegerField(blank=True, default=0, null=True)),
('galaxy_mass', models.CharField(blank=True, max_length=50, null=True)),
],
options={
'ordering': ['id'],
'verbose_name': 'Galaxy',
'verbose_name_plural': 'Galaxies',
},
),
]
|
craigderington/SagittariusA
|
milkyway/galaxies/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 1,376
|
[
"Galaxy"
] |
6fa6196dcc13bc885f7b870a3f94f00aa1348bc8d5cc3cdad56daa4c821aa47d
|
# Copyright 2013-2017, Brian May
#
# This file is part of python-alogger.
#
# python-alogger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-alogger is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-alogger If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, unicode_literals
"""
Declare log parsing methods in here.
Methods take a line of a log and return a Python dict containing:
Key | type | Description
----------------------------------------------
user | string | username
project | string | pid
est_wall_time | int | estimated wall time
act_wall_time | int | actual wall time
cpu_usage | int | CPU usage in seconds
queue | datetime |
ctime | datetime | the time in seconds when the job
| | was Created (first submitted)
qtime | datetime | the time in seconds when the job
| | was Queued into the current queue
etime | datetime | time in seconds when the job became Eligible to run
start | datetime | the time in seconds when job execution Started
jobid | string | Expected to also have host name
cores | int | number of cores
jobname | string | Job name
exit_status | int | Exit status - or a string from Slurm !
Optional
mem | int | memory used
vmem | int | virtual memory used
list_mem | int | memory requested
list_vmem | int | virtual memory requested
list_pmem | int | memory requested (per processor)
list_pvmem | int | virtual memory requested (per processor)
Raises ValueError if the wall time is malformed.
So a user submits a job; that's "qtime" (or, probably, 'ctime').
Eventually it starts to run; that's "start".
Finally it finishes, one way or another; that's "start" + "act_wall_time".
If queue is light, etime can equal qtime, but not if the job is blocked.
"""
|
Karaage-Cluster/python-alogger
|
alogger/parsers/__init__.py
|
Python
|
gpl-3.0
| 2,411
|
[
"Brian"
] |
07e39f675820be3ba2074eae6bc1a062ab3bd82503d3b2be19f8a2ad3f1dd7ae
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
import os
import shutil
import tempfile
class Ncl(Package):
"""NCL is an interpreted language designed specifically for
scientific data analysis and visualization. Supports NetCDF 3/4,
GRIB 1/2, HDF 4/5, HDF-EOS 2/5, shapefile, ASCII, binary.
Numerous analysis functions are built-in."""
homepage = "https://www.ncl.ucar.edu"
version('6.4.0', 'a981848ddcaf1c263279648265f24766',
url='https://www.earthsystemgrid.org/download/fileDownload.html?logicalFileId=86b9bec2-fa01-11e6-a976-00c0f03d5b7c',
extension='tar.gz')
patch('spack_ncl.patch')
# Make ncl compile with hdf5 1.10
patch('hdf5.patch')
# ymake-filter's buffer may overflow
patch('ymake-filter.patch')
# This installation script is implemented according to this manual:
# http://www.ncl.ucar.edu/Download/build_from_src.shtml
variant('hdf4', default=False, description='Enable HDF4 support.')
variant('gdal', default=False, description='Enable GDAL support.')
variant('triangle', default=True, description='Enable Triangle support.')
variant('udunits2', default=True, description='Enable UDUNITS-2 support.')
variant('openmp', default=True, description='Enable OpenMP support.')
# Non-optional dependencies according to the manual:
depends_on('jpeg')
depends_on('netcdf')
depends_on('cairo+X')
# Extra dependencies that may be missing from build system:
depends_on('bison', type='build')
depends_on('flex+lex')
depends_on('libiconv')
depends_on('tcsh')
# Also, the manual says that ncl requires zlib, but that comes as a
# mandatory dependency of libpng, which is a mandatory dependency of cairo.
# The following dependencies are required, otherwise several components
# fail to compile:
depends_on('curl')
depends_on('libiconv')
depends_on('libx11')
depends_on('libxaw')
depends_on('libxmu')
# In Spack, we do not have an option to compile netcdf without netcdf-4
# support, so we will tell the ncl configuration script that we want
# support for netcdf-4, but the script assumes that hdf5 is compiled with
# szip support. We introduce this restriction with the following dependency
# statement.
depends_on('hdf5+szip')
depends_on('szip')
# ESMF is only required at runtime (for ESMF_regridding.ncl)
depends_on('esmf', type='run')
# In Spack, we also do not have an option to compile netcdf without DAP
# support, so we will tell the ncl configuration script that we have it.
# Some of the optional dependencies according to the manual:
depends_on('hdf', when='+hdf4')
depends_on('gdal', when='+gdal')
depends_on('udunits2', when='+udunits2')
# We need src files of triangle to appear in ncl's src tree if we want
# triangle's features.
resource(
name='triangle',
url='http://www.netlib.org/voronoi/triangle.zip',
md5='10aff8d7950f5e0e2fb6dd2e340be2c9',
placement='triangle_src',
when='+triangle')
sanity_check_is_file = ['bin/ncl']
def patch(self):
# Make configure scripts use Spack's tcsh
files = ['Configure'] + glob.glob('config/*')
filter_file('^#!/bin/csh -f', '#!/usr/bin/env csh', *files)
@run_before('install')
def filter_sbang(self):
# Filter sbang before install so Spack's sbang hook can fix it up
files = glob.glob('ncarg2d/src/bin/scripts/*')
files += glob.glob('ncarview/src/bin/scripts/*')
files += glob.glob('ni/src/scripts/*')
csh = join_path(self.spec['tcsh'].prefix.bin, 'csh')
filter_file('^#!/bin/csh', '#!{0}'.format(csh), *files)
def install(self, spec, prefix):
if (self.compiler.fc is None) or (self.compiler.cc is None):
raise InstallError('NCL package requires both '
'C and Fortran compilers.')
self.prepare_site_config()
self.prepare_install_config()
self.prepare_src_tree()
make('Everything', parallel=False)
def setup_environment(self, spack_env, run_env):
run_env.set('NCARG_ROOT', self.spec.prefix)
def prepare_site_config(self):
fc_flags = []
cc_flags = []
c2f_flags = []
if '+openmp' in self.spec:
fc_flags.append(self.compiler.openmp_flag)
cc_flags.append(self.compiler.openmp_flag)
if self.compiler.name == 'gcc':
fc_flags.append('-fno-range-check')
c2f_flags.extend(['-lgfortran', '-lm'])
elif self.compiler.name == 'intel':
fc_flags.append('-fp-model precise')
cc_flags.append('-fp-model precise')
c2f_flags.extend(['-lifcore', '-lifport'])
with open('./config/Spack', 'w') as f:
f.writelines([
'#define HdfDefines\n',
'#define CppCommand \'/usr/bin/env cpp -traditional\'\n',
'#define CCompiler cc\n',
'#define FCompiler fc\n',
('#define CtoFLibraries ' + ' '.join(c2f_flags) + '\n'
if len(c2f_flags) > 0
else ''),
('#define CtoFLibrariesUser ' + ' '.join(c2f_flags) + '\n'
if len(c2f_flags) > 0
else ''),
('#define CcOptions ' + ' '.join(cc_flags) + '\n'
if len(cc_flags) > 0
else ''),
('#define FcOptions ' + ' '.join(fc_flags) + '\n'
if len(fc_flags) > 0
else ''),
'#define BuildShared NO'
])
def prepare_install_config(self):
# Remove the results of the previous configuration attempts.
self.delete_files('./Makefile', './config/Site.local')
# Generate an array of answers that will be passed to the interactive
# configuration script.
config_answers = [
# Enter Return to continue
'\n',
# Build NCL?
'y\n',
# Parent installation directory :
'\'' + self.spec.prefix + '\'\n',
# System temp space directory :
'\'' + tempfile.gettempdir() + '\'\n',
# Build NetCDF4 feature support (optional)?
'y\n'
]
if '+hdf4' in self.spec:
config_answers.extend([
# Build HDF4 support (optional) into NCL?
'y\n',
# Also build HDF4 support (optional) into raster library?
'y\n',
# Did you build HDF4 with szip support?
'y\n' if self.spec.satisfies('^hdf+szip') else 'n\n'
])
else:
config_answers.extend([
# Build HDF4 support (optional) into NCL?
'n\n',
# Also build HDF4 support (optional) into raster library?
'n\n'
])
config_answers.extend([
# Build Triangle support (optional) into NCL
'y\n' if '+triangle' in self.spec else 'n\n',
# If you are using NetCDF V4.x, did you enable NetCDF-4 support?
'y\n',
# Did you build NetCDF with OPeNDAP support?
'y\n',
# Build GDAL support (optional) into NCL?
'y\n' if '+gdal' in self.spec else 'n\n',
# Build EEMD support (optional) into NCL?
'n\n',
# Build Udunits-2 support (optional) into NCL?
'y\n' if '+udunits2' in self.spec else 'n\n',
# Build Vis5d+ support (optional) into NCL?
'n\n',
# Build HDF-EOS2 support (optional) into NCL?
'n\n',
# Build HDF5 support (optional) into NCL?
'y\n',
# Build HDF-EOS5 support (optional) into NCL?
'n\n',
# Build GRIB2 support (optional) into NCL?
'n\n',
# Enter local library search path(s) :
# The paths will be passed by the Spack wrapper.
' \n',
# Enter local include search path(s) :
# All other paths will be passed by the Spack wrapper.
'\'' + join_path(self.spec['freetype'].prefix.include,
'freetype2') + '\'\n',
# Go back and make more changes or review?
'n\n',
# Save current configuration?
'y\n'
])
config_answers_filename = 'spack-config.in'
config_script = Executable('./Configure')
with open(config_answers_filename, 'w') as f:
f.writelines(config_answers)
with open(config_answers_filename, 'r') as f:
config_script(input=f)
def prepare_src_tree(self):
if '+triangle' in self.spec:
triangle_src = join_path(self.stage.source_path, 'triangle_src')
triangle_dst = join_path(self.stage.source_path, 'ni', 'src',
'lib', 'hlu')
shutil.copy(join_path(triangle_src, 'triangle.h'), triangle_dst)
shutil.copy(join_path(triangle_src, 'triangle.c'), triangle_dst)
@staticmethod
def delete_files(*filenames):
for filename in filenames:
if os.path.exists(filename):
try:
os.remove(filename)
except OSError as e:
raise InstallError('Failed to delete file %s: %s' % (
e.filename, e.strerror))
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/ncl/package.py
|
Python
|
lgpl-2.1
| 10,803
|
[
"NetCDF"
] |
8f2e50d69b31a67f7a29cc9299ce2b9f1773e6fd68597e9f88734bd63cec8e56
|
"""
Current source classes for the brian module.
Classes:
DCSource -- a single pulse of current of constant amplitude.
StepCurrentSource -- a step-wise time-varying current.
ACSource -- a sine modulated current.
NoisyCurrentSource -- a Gaussian whitish noise current.
:copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS.
:license: CeCILL, see LICENSE for details.
"""
import logging
import numpy
import brian
from brian import ms, nA, Hz, network_operation, amp as ampere
from pyNN.standardmodels import electrodes, build_translations, StandardCurrentSource
from pyNN.parameters import ParameterSpace, Sequence
from .. import simulator
logger = logging.getLogger("PyNN")
current_sources = []
@network_operation(when='start')
def update_currents():
global current_sources
for current_source in current_sources:
current_source._update_current()
class BrianCurrentSource(StandardCurrentSource):
"""Base class for a source of current to be injected into a neuron."""
def __init__(self, **parameters):
super(StandardCurrentSource, self).__init__(**parameters)
global current_sources
self.cell_list = []
self.indices = []
current_sources.append(self)
parameter_space = ParameterSpace(self.default_parameters,
self.get_schema(),
shape=(1,))
parameter_space.update(**parameters)
parameter_space = self.translate(parameter_space)
self.set_native_parameters(parameter_space)
def set_native_parameters(self, parameters):
parameters.evaluate(simplify=True)
for name, value in parameters.items():
if isinstance(value, Sequence):
value = value.value
object.__setattr__(self, name, value)
self._reset()
def _reset(self):
# self.i reset to 0 only at the start of a new run; not for continuation of existing runs
if not hasattr(self, 'running') or self.running == False:
self.i = 0
self.running = True
if self._is_computed:
self._generate()
def inject_into(self, cell_list):
__doc__ = StandardCurrentSource.inject_into.__doc__
for cell in cell_list:
if not cell.celltype.injectable:
raise TypeError("Can't inject current into a spike source.")
self.cell_list.extend(cell_list)
for cell in cell_list:
self.indices.extend([cell.parent.id_to_index(cell)])
def _update_current(self):
# check if current timestamp is within dt/2 of target time; Brian uses seconds as unit of time
if self.running and abs(simulator.state.t - self.times[self.i] * 1e3) < (simulator.state.dt/2.0):
for cell, idx in zip(self.cell_list, self.indices):
if not self._is_playable:
cell.parent.brian_group.i_inj[idx] = self.amplitudes[self.i] * ampere
else:
cell.parent.brian_group.i_inj[idx] = self._compute(self.times[self.i]) * ampere
self.i += 1
if self.i >= len(self.times):
self.running = False
if self._is_playable:
# ensure that currents are set to 0 after t_stop
for cell, idx in zip(self.cell_list, self.indices):
cell.parent.brian_group.i_inj[idx] = 0
def _record(self):
self.i_state_monitor = brian.StateMonitor(self.cell_list[0].parent.brian_group[self.indices[0]], 'i_inj', record=0)
simulator.state.network.add(self.i_state_monitor)
def _get_data(self):
return numpy.array((self.i_state_monitor.times / ms, self.i_state_monitor[0] / nA))
class StepCurrentSource(BrianCurrentSource, electrodes.StepCurrentSource):
__doc__ = electrodes.StepCurrentSource.__doc__
translations = build_translations(
('amplitudes', 'amplitudes', nA),
('times', 'times', ms)
)
_is_computed = False
_is_playable = False
class ACSource(BrianCurrentSource, electrodes.ACSource):
__doc__ = electrodes.ACSource.__doc__
translations = build_translations(
('amplitude', 'amplitude', nA),
('start', 'start', ms),
('stop', 'stop', ms),
('frequency', 'frequency', Hz),
('offset', 'offset', nA),
('phase', 'phase', 1)
)
_is_computed = True
_is_playable = True
def __init__(self, **parameters):
BrianCurrentSource.__init__(self, **parameters)
self._generate()
def _generate(self):
# Note: Brian uses seconds as unit of time
temp_num_t = len(numpy.arange(self.start, self.stop + simulator.state.dt * 1e-3, simulator.state.dt * 1e-3))
self.times = numpy.array([self.start+(i*simulator.state.dt*1e-3) for i in range(temp_num_t)])
def _compute(self, time):
# Note: Brian uses seconds as unit of time; frequency is specified in Hz; thus no conversion required
return self.offset + self.amplitude * numpy.sin((time-self.start) * 2 * numpy.pi * self.frequency + 2 * numpy.pi * self.phase / 360)
class DCSource(BrianCurrentSource, electrodes.DCSource):
__doc__ = electrodes.DCSource.__doc__
translations = build_translations(
('amplitude', 'amplitude', nA),
('start', 'start', ms),
('stop', 'stop', ms)
)
_is_computed = True
_is_playable = False
def __init__(self, **parameters):
BrianCurrentSource.__init__(self, **parameters)
self._generate()
def _generate(self):
if self.start == 0:
self.times = [self.start, self.stop]
self.amplitudes = [self.amplitude, 0.0]
else:
self.times = [0.0, self.start, self.stop]
self.amplitudes = [0.0, self.amplitude, 0.0]
# ensures proper handling of changes in parameters on the fly
if self.start < simulator.state.t*1e-3 < self.stop:
self.times.insert(-1, simulator.state.t*1e-3)
self.amplitudes.insert(-1, self.amplitude)
if (self.start==0 and self.i==2) or (self.start!=0 and self.i==3):
self.i -= 1
class NoisyCurrentSource(BrianCurrentSource, electrodes.NoisyCurrentSource):
__doc__ = electrodes.NoisyCurrentSource.__doc__
translations = build_translations(
('mean', 'mean', nA),
('start', 'start', ms),
('stop', 'stop', ms),
('stdev', 'stdev', nA),
('dt', 'dt', ms)
)
_is_computed = True
_is_playable = True
def __init__(self, **parameters):
BrianCurrentSource.__init__(self, **parameters)
self._generate()
def _generate(self):
temp_num_t = len(numpy.arange(self.start, self.stop, max(self.dt, simulator.state.dt * 1e-3)))
self.times = numpy.array([self.start+(i*max(self.dt, simulator.state.dt * 1e-3)) for i in range(temp_num_t)])
self.times = numpy.append(self.times, self.stop)
def _compute(self, time):
return self.mean + self.stdev * numpy.random.randn()
|
anupkdas-nus/global_synapses
|
pyNN-dispackgaes/brian/standardmodels/electrodes.py
|
Python
|
gpl-3.0
| 7,143
|
[
"Brian",
"Gaussian",
"NEURON"
] |
45c9e07b194857600c686a72fa85b5b19f1089ed8a14a84ace14e286a27cbd3b
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
******************************
**espresso.interaction.LJcos**
******************************
"""
from espresso import pmi, infinity
from espresso.esutil import *
from espresso.interaction.Potential import *
from espresso.interaction.Interaction import *
from _espresso import interaction_LJcos, \
interaction_VerletListLJcos, \
interaction_VerletListAdressLJcos, \
interaction_VerletListHadressLJcos, \
interaction_CellListLJcos, \
interaction_FixedPairListLJcos
class LJcosLocal(PotentialLocal, interaction_LJcos):
'The (local) Lennard-Jones potential.'
def __init__(self, phi=1.0):
"""Initialize the local Lennard Jones object."""
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_LJcos, phi)
class VerletListLJcosLocal(InteractionLocal, interaction_VerletListLJcos):
'The (local) Lennard Jones interaction using Verlet lists.'
def __init__(self, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListLJcos, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
def getVerletListLocal(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getVerletList(self)
class VerletListAdressLJcosLocal(InteractionLocal, interaction_VerletListAdressLJcos):
'The (local) Lennard Jones interaction using Verlet lists.'
def __init__(self, vl, fixedtupleList):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListAdressLJcos, vl, fixedtupleList)
def setPotentialAT(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setPotentialCG(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListHadressLJcosLocal(InteractionLocal, interaction_VerletListHadressLJcos):
'The (local) Lennard Jones interaction using Verlet lists.'
def __init__(self, vl, fixedtupleList):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListHadressLJcos, vl, fixedtupleList)
def setPotentialAT(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setPotentialCG(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialCG(self, type1, type2, potential)
class CellListLJcosLocal(InteractionLocal, interaction_CellListLJcos):
'The (local) Lennard Jones interaction using cell lists.'
def __init__(self, stor):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_CellListLJcos, stor)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
class FixedPairListLJcosLocal(InteractionLocal, interaction_FixedPairListLJcos):
'The (local) Lennard-Jones interaction using FixedPair lists.'
def __init__(self, system, vl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedPairListLJcos, system, vl, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
def setFixedPairList(self, fixedpairlist):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setFixedPairList(self, fixedpairlist)
def getFixedPairList(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getFixedPairList(self)
if pmi.isController:
class LJcos(Potential):
'The Lennard-Jones potential.'
pmiproxydefs = dict(
cls = 'espresso.interaction.LJcosLocal',
pmiproperty = ['phi']
)
class VerletListLJcos(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.VerletListLJcosLocal',
pmicall = ['setPotential', 'getPotential', 'getVerletList']
)
class VerletListAdressLJcos(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.VerletListAdressLJcosLocal',
pmicall = ['setPotentialAT', 'setPotentialCG']
)
class VerletListHadressLJcos(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.VerletListHadressLJcosLocal',
pmicall = ['setPotentialAT', 'setPotentialCG']
)
class CellListLJcos(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.CellListLJcosLocal',
pmicall = ['setPotential']
)
class FixedPairListLJcos(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.FixedPairListLJcosLocal',
pmicall = ['setPotential', 'setFixedPairList','getFixedPairList' ]
)
|
BackupTheBerlios/espressopp
|
src/interaction/LJcos.py
|
Python
|
gpl-3.0
| 7,660
|
[
"ESPResSo"
] |
c3f41f9f0532a6399c4a758810f34496298584db9aac44d9551fffc3e4521181
|