| text (string, 12-1.05M chars) | repo_name (string, 5-86) | path (string, 4-191) | language (1 class) | license (15 values) | size (int32, 12-1.05M) | keyword (list, 1-23) | text_hash (string, 64) |
|---|---|---|---|---|---|---|---|
import os
import warnings
import numpy as np
from astropy.convolution import Gaussian1DKernel, Gaussian2DKernel
from astropy.io import fits
from astropy import wcs
import astropy
import multiprocessing
from .. import cubes, Cube, CubeStack
from ...spectrum.models import n2hp, ammonia_constants
from ...spectrum.models.ammonia import cold_ammonia_model
def make_test_cube(shape=(30,9,9), outfile='test.fits', snr=30,
sigma=None, seed=0):
"""
Generates a simple gaussian cube with noise of given shape and writes
it out as a FITS file.
Parameters
----------
shape : a tuple of three ints, optional
Sets the size of the resulting spectral cube.
snr : float, optional
The signal-to-noise ratio of the brightest channel in the central pixel.
outfile : string or file object, optional
Output file.
sigma : a tuple of two floats, optional
Standard deviations of the Gaussian kernels used to generate the
signal component. The two components of the tuple govern the spectral
and spatial kernel sizes, respectively.
seed : int or array_like, optional
Passed to np.random.seed to set the random generator.
"""
if sigma is None:
sigma1d, sigma2d = shape[0] / 10., np.mean(shape[1:]) / 5.
else:
sigma1d, sigma2d = sigma
# generate a 3d ellipsoid with a maximum of one
gauss1d = Gaussian1DKernel(stddev = sigma1d, x_size = shape[0])
gauss2d = Gaussian2DKernel(stddev = sigma2d,
x_size = shape[1],
y_size = shape[2])
signal_cube = gauss1d.array[:, None, None] * gauss2d.array
signal_cube = signal_cube / signal_cube.max()
# adding Gaussian noise
np.random.seed(seed)
noise_std = signal_cube.max() / snr
noise_cube = np.random.normal(loc = 0, scale = noise_std,
size = signal_cube.shape)
test_cube = signal_cube + noise_cube
# making a simple header for the test cube:
test_hdu = fits.PrimaryHDU(test_cube)
# the strange cdelt values are a workaround
# for what seems to be a bug in wcslib:
# https://github.com/astropy/astropy/issues/4555
cdelt1, cdelt2, cdelt3 = -(4e-3 + 1e-8), 4e-3 + 1e-8, -0.1
keylist = {'CTYPE1': 'RA---GLS', 'CTYPE2': 'DEC--GLS', 'CTYPE3': 'VRAD',
'CDELT1': cdelt1, 'CDELT2': cdelt2, 'CDELT3': cdelt3,
'CRVAL1': 0, 'CRVAL2': 0, 'CRVAL3': 5,
'CRPIX1': 9, 'CRPIX2': 0, 'CRPIX3': 5,
'CUNIT1': 'deg', 'CUNIT2': 'deg', 'CUNIT3': 'km s-1',
'BMAJ': cdelt2 * 3, 'BMIN': cdelt2 * 3, 'BPA': 0.0,
'BUNIT' : 'K', 'EQUINOX': 2000.0, 'RESTFREQ': 300e9}
# write out some values used to generate the cube:
keylist['SIGMA' ] = abs(sigma1d*cdelt3), 'in units of CUNIT3'
keylist['RMSLVL'] = noise_std
keylist['SEED' ] = seed
test_header = fits.Header()
test_header.update(keylist)
test_hdu = fits.PrimaryHDU(data=test_cube, header=test_header)
if astropy.version.major >= 2 or (astropy.version.major==1 and astropy.version.minor>=3):
test_hdu.writeto(outfile, overwrite=True, checksum=True)
else:
test_hdu.writeto(outfile, clobber=True, checksum=True)
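# Illustrative usage sketch (not part of the original test module): generate
# a small cube and read back the keywords that make_test_cube() stores in the
# header. The file name 'example.fits' is a placeholder.
def _example_make_test_cube():
    make_test_cube(shape=(30, 9, 9), outfile='example.fits', snr=30)
    header = fits.getheader('example.fits')
    # SIGMA, RMSLVL and SEED record the true line width (in CUNIT3 units),
    # the noise level, and the random seed used to build the cube.
    return header['SIGMA'], header['RMSLVL'], header['SEED']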
def download_test_cube(outfile='test.fits'):
"""
Downloads a sample fits file from Dropbox (325kB).
"""
from astropy.utils.data import download_file
test_cube_url = 'https://db.tt/i0jWA7DU'
tmp_path = download_file(test_cube_url)
try:
os.rename(tmp_path, outfile)
except OSError:
# os.rename doesn't like cross-device links
import shutil
shutil.move(tmp_path, outfile)
def test_subimage_integ_header(cubefile='test.fits'):
"""
Checks if the coordinates of the spectral
cube are drifting away after cropping it.
"""
# getting a dummy .fits file
if not os.path.exists(cubefile):
#download_test_cube(cubefile)
make_test_cube((100,9,9),cubefile)
cube = fits.getdata(cubefile)
header = fits.getheader(cubefile)
xcen, ycen = 4.5, 4.5
xwidth, ywidth = 2.5, 2.5
# saving results from subimage_integ:
cutData, cutHead = cubes.subimage_integ(cube, xcen, xwidth, ycen, ywidth,
vrange=(0,header['NAXIS3']-1),
zunits='pixels', units='pixels',
header=header)
assert cutHead['CRPIX1'] == 7.0
assert cutHead['CRPIX2'] == -2.0
w1 = wcs.WCS(header)
w2 = wcs.WCS(cutHead)
# pixel 2,2 in the original image should be pixel 0,0 in the new one
x1,y1,z1 = w1.wcs_pix2world(2,2,0,0)
x2,y2 = w2.wcs_pix2world(0,0,0)
np.testing.assert_almost_equal(x1,x2)
np.testing.assert_almost_equal(y1,y2)
def do_fiteach(save_cube=None, save_pars=None, show_plot=False):
"""Fits a cube with a gaussian for later use"""
if save_cube is None:
save_cube = 'test.fits'
test_sigma = 10 # in pixel values, each pixel is CDELT3 thick
make_test_cube((100,10,10), save_cube,
sigma=(test_sigma, 5) )
spc = Cube(save_cube)
guesses = [0.5,0.2,0.8]
map_rms = np.zeros_like(spc.cube[0])+spc.header['RMSLVL']
spc.fiteach(fittype = 'gaussian',
guesses = guesses,
start_from_point = (5,5),
multicore = multiprocessing.cpu_count(),
blank_value = np.nan,
verbose_level = 3,
errmap = map_rms,
signal_cut = 5)
if show_plot:
spc.mapplot()
if save_pars:
spc.write_fit(save_pars, overwrite=True)
return spc
def test_fiteach(save_cube=None, save_pars=None, show_plot=False):
"""
A simple test of Cube.fiteach() checking that,
for noise with a fixed seed, the fraction of
line width values within the error bars
remains constant.
"""
spc = do_fiteach(save_cube, save_pars, show_plot)
# checking the fit
map_seed = spc.header['SEED']
map_sigma_post = spc.parcube[2]
map_sigma_true = np.zeros_like(map_sigma_post) + spc.header['SIGMA']
map_in_bounds = np.abs(map_sigma_true-map_sigma_post) < spc.errcube[2]
err_frac = map_in_bounds[~map_in_bounds].size / float(map_sigma_post.size)
assert map_seed == 0
assert err_frac == 0.34
def test_get_modelcube(cubefile=None, parfile=None, multicore=1):
"""
Tests get_modelcube() method for Cube and CubeStack classes.
If either cubefile or parfile isn't set, will generate and
fit a sample cube through do_fiteach().
Computes the residual cube and collapses it into a map of
the per-pixel residual standard deviation. Checks that the number
of residual pixels above the noise level doesn't change for a fixed noise seed.
"""
if cubefile is None or parfile is None:
cubefile = 'test.fits'
parfile = 'test_pars.fits'
sp_cube = do_fiteach(save_cube=cubefile, save_pars=parfile)
else:
sp_cube = Cube(cubefile)
map_rms = sp_cube.header['RMSLVL']
map_seed = sp_cube.header['SEED']
assert map_seed == 0
sp_cube.xarr.velocity_convention = 'radio'
sp_stack = CubeStack([sp_cube])
sp_stack._modelcube = None
# assuming one gaussian component
for spc in [sp_cube, sp_stack]:
spc.load_model_fit(parfile, npars=3)
# calling CubeStack converted xarr units to GHz
spc.xarr.convert_to_unit('km/s')
spc.get_modelcube(multicore=multicore)
resid_cube = spc.cube - spc._modelcube
above1sig = (resid_cube.std(axis=0) > map_rms).flatten()
assert above1sig[above1sig].size == 31
def test_get_modelcube_badpar(cubefile=None, parfile=None, sigma_threshold=5,
multicore=1):
"""
Test loading a model cube that has at least one invalid parameter.
Regression test for #163
This is essentially only testing that get_modelcube works in the presence
of invalid fit parameters
"""
if cubefile is None or parfile is None:
cubefile = 'test.fits'
fh = fits.open('test_pars.fits')
fh[0].data[1,0,0] *= -1 # set the width to be negative
if astropy.version.major >= 2 or (astropy.version.major==1 and astropy.version.minor>=3):
fh.writeto('test_pars_bad.fits', overwrite=True)
else:
fh.writeto('test_pars_bad.fits', clobber=True)
fh.close()
parfile = 'test_pars_bad.fits'
sp_cube = do_fiteach(save_cube=cubefile, save_pars=parfile)
else:
sp_cube = Cube(cubefile)
map_seed = sp_cube.header['SEED']
map_rms = sp_cube.header['RMSLVL']
sp_cube.xarr.velocity_convention = 'radio'
sp_stack = CubeStack([sp_cube])
sp_stack._modelcube = None
# assuming one gaussian component
for spc in [sp_cube, sp_stack]:
spc.load_model_fit(parfile, npars=3, _temp_fit_loc=(0,0))
spc.get_modelcube(multicore=multicore)
resid_cube = spc.cube - spc._modelcube
def test_registry_inheritance(cubefile='test.fits'):
"""
Regression test for #166
"""
# getting a dummy .fits file
if not os.path.exists(cubefile):
#download_test_cube(cubefile)
make_test_cube((100,9,9),cubefile)
spc = Cube(cubefile)
spc.xarr.velocity_convention = 'radio'
# spc.Registry.add_fitter('n2hp_vtau', n2hp.n2hp_vtau_fitter, 4)
sp = spc.get_spectrum(3,3)
sp.Registry.add_fitter('n2hp_vtau', n2hp.n2hp_vtau_fitter, 4)
assert 'n2hp_vtau' in sp.Registry.multifitters
assert 'n2hp_vtau' in sp.Registry.npars
sp.specfit(fittype='n2hp_vtau', guesses=[1,2,3,4])
def test_noerror_cube(cubefile='test.fits'):
"""
Regression test for #159
"""
if not os.path.exists(cubefile):
make_test_cube((100,9,9),cubefile)
spc = Cube(cubefile)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('default')
spc.fiteach(fittype='gaussian', guesses=[0.7,0.5,0.8],
start_from_point=(4,4),
)
assert "If signal_cut is set" in str(w[-1].message)
assert not np.all(spc.has_fit)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('default')
spc.fiteach(fittype='gaussian', guesses=[0.7,0.5,0.8], signal_cut=0)
assert np.all(spc.has_fit)
def test_slice_header(cubefile='test.fits'):
"""
Regression test for #184
"""
if not os.path.exists(cubefile):
make_test_cube((100,9,9),cubefile)
spc = Cube(cubefile)
spc_cut = spc.slice(-1, 1, 'km/s', update_header = True)
naxis3 = spc_cut.header['NAXIS3']
crval3 = spc_cut.header['CRVAL3']
crpix3 = spc_cut.header['CRPIX3']
cunit3 = spc_cut.header['CUNIT3']
assert naxis3 == spc_cut.xarr.size
assert spc_cut.xarr.x_to_pix(crval3, cunit3) + 1 == crpix3
def test_stuck_cubestack(timeout = 5):
"""
Regression test for #194
"""
make_test_cube(outfile = 'cube1.fits')
make_test_cube(outfile = 'cube2.fits')
spc1 = Cube('cube1.fits')
spc2 = Cube('cube2.fits')
spc1.header['HISTORY'] = "history and comment keywords"
spc2.header['COMMENT'] = "should not cause any trouble"
spc1.xarr.velocity_convention = 'radio'
spc2.xarr.velocity_convention = 'radio'
def timecap():
CubeStack([spc1, spc2])
p = multiprocessing.Process(target = timecap)
p.start()
p.join(timeout = timeout)
frozen = p.is_alive()
if frozen:
p.terminate()
assert not frozen
def test_copy_ids(cubefile='test.fits'):
"""
Regression test for #182
"""
if not os.path.exists(cubefile):
make_test_cube((100,9,9), cubefile)
spc1 = Cube(cubefile)
spc2 = spc1.copy()
deep_attr_lst = ['xarr', 'data', 'cube', 'maskmap',
'error', 'errorcube']
for attr in deep_attr_lst:
attr1, attr2 = getattr(spc1, attr), getattr(spc2, attr)
# None always points to the same id
if attr1 is not None:
assert id(attr1) != id(attr2)
naxis_old = spc1.header['NAXIS1']
spc2.header['NAXIS1'] += 1
assert spc1.header['NAXIS1'] == naxis_old
def make_nh3_cube(shape, pars, errs11, errs22, seed=42):
"""
Tinkers with two test gaussian cubes, overwriting their spectra with NH3
(1,1) and (2,2) lines.
"""
xsize = shape[0]
np.random.seed(seed)
make_test_cube(shape=shape, outfile='foo11.fits')
make_test_cube(shape=shape, outfile='foo22.fits')
spc11 = Cube('foo11.fits')
spc22 = Cube('foo22.fits')
spc11.xarr.velocity_convention = 'radio'
spc22.xarr.velocity_convention = 'radio'
spc11.xarr.refX = ammonia_constants.freq_dict['oneone']
spc22.xarr.refX = ammonia_constants.freq_dict['twotwo']
spc = CubeStack([spc11, spc22])
spc.specfit.Registry.add_fitter('cold_ammonia', npars=6,
function=cold_ammonia_model(
line_names=['oneone', 'twotwo']))
spc.specfit.fitter = spc.specfit.Registry.multifitters['cold_ammonia']
for y, x in np.ndindex(spc.cube.shape[1:]):
spc.cube[:, y, x] = spc.specfit.get_full_model(pars=pars)
spc.cube[:xsize, y, x] += np.random.normal(scale=errs11, size=xsize)
spc.cube[xsize:, y, x] += np.random.normal(scale=errs22, size=xsize)
return spc
def test_nonuniform_chan_weights(shape=(1000, 1, 2), err11=0.01, err22=0.25,
pars=[15, 15, 14, 0.2, -45, 0.5],
guesses=[12, 12, 14, 0.1, -45, 0.5]):
""" Regression test for #224 """
# Line setup - a high S/N (1,1) NH3 line fit together with a noisy (2,2) line.
spc = make_nh3_cube(shape, pars, err11, err22)
errorcube = np.zeros_like(spc.cube)
xsize = shape[0]
errorcube[:xsize] = err11
errorcube[xsize:] = err22
# case #1:
# the errors are calculated for both lines separately, and the (1,1)
# and (2,2) channels are weighted equally with their respective errors
spc.fiteach(fittype='cold_ammonia', errmap=errorcube, guesses=guesses,
fixed=[False] * 5 + [True])
pinfo = spc.get_spectrum(0, 0).specfit.parinfo
err_Tkin = pinfo.errors[0]
err_sigma = pinfo.errors[3]
# NOTE: if the (1,1) and (2,2) channels are being weighed equally, the
# uncertainties would be err_Tkin ~ 0.8924 and err_sigma ~ 0.12545
assert np.allclose(err_sigma, 9.696e-4, 1e-4)
assert np.allclose(err_Tkin, 1.5147, 1e-4)
# case #2, expecting the same outcome as case 1:
# it's also OK to let errmap=None if the Cube.errorcube has been predefined
spc.errorcube = errorcube
spc.fiteach(fittype='cold_ammonia', errmap=None, guesses=guesses,
fixed=[False] * 5 + [True])
pinfo = spc.get_spectrum(0, 0).specfit.parinfo
err_Tkin = pinfo.errors[0]
err_sigma = pinfo.errors[3]
assert np.allclose(err_sigma, 9.696e-4, 1e-4)
assert np.allclose(err_Tkin, 1.5147, 1e-4)
| allisony/pyspeckit | pyspeckit/cubes/tests/test_cubetools.py | Python | mit | 15,106 | ["Gaussian"] | cd82b6faff07b07ad02b32320d23777230d9946c0799122c37bf3afdfb477b99 |
###############################################################################
# Copyright 2015-2019 University of Florida. All rights reserved.
# This file is part of UF CTS-IT's NACCulator project.
# Use of this source code is governed by the license found in the LICENSE file.
###############################################################################
from nacc.lbd.ivp import forms as lbd_ivp_forms
from nacc.uds3 import packet as lbd_ivp_packet
def build_lbd_ivp_form(record):
''' Converts REDCap CSV data into a packet (list of IVP Form objects) '''
packet = lbd_ivp_packet.Packet()
# Set up the forms..........
# This form cannot precede June 1, 2017.
if not (int(record['visityr']) > 2017 or
(int(record['visityr']) == 2017 and int(record['visitmo']) > 6) or
(int(record['visityr']) == 2017 and int(record['visitmo']) == 6
and int(record['visitday']) >= 1)):
raise ValueError('Visit date cannot precede June 1, 2017.')
B1L = lbd_ivp_forms.FormB1L()
B1L.LBSSALIV = record['LBSSALIV'.lower()]
B1L.LBSSWALL = record['LBSSWALL'.lower()]
B1L.LBSINSeX = record['LBSINSeX'.lower()]
B1L.LBSPrSeX = record['LBSPrSeX'.lower()]
B1L.LBSWeIGH = record['LBSWeIGH'.lower()]
B1L.LBSSMeLL = record['LBSSMeLL'.lower()]
B1L.LBSSWeAt = record['LBSSWeAT'.lower()]
B1L.LBStoLCD = record['LBStoLCD'.lower()]
B1L.LBStoLHt = record['LBStoLHt'.lower()]
B1L.LBSDBVIS = record['LBSDBVIS'.lower()]
B1L.LBSCoNSt = record['LBSCoNSt'.lower()]
B1L.LBSHDStL = record['LBSHDStL'.lower()]
B1L.LBSLSStL = record['LBSLSStL'.lower()]
B1L.LBSUBLAD = record['LBSUBLAD'.lower()]
B1L.LBSUStrM = record['LBSUStrM'.lower()]
B1L.LBSUPASS = record['LBSUPASS'.lower()]
B1L.LBSDZStU = record['LBSDZStU'.lower()]
B1L.LBSDZStN = record['LBSDZStN'.lower()]
B1L.LBSFAINt = record['LBSFAINt'.lower()]
B1L.LBSPSyM = record['LBSPSyM'.lower()]
B1L.LBPSyAGe = record['LBPSyAGe'.lower()]
B1L.LBSSUPSy = record['LBSSUPSy'.lower()]
B1L.LBSSUPDI = record['LBSSUPDI'.lower()]
B1L.LBSSUPHt = record['LBSSUPHt'.lower()]
B1L.LBSStNSy = record['LBSStNSy'.lower()]
B1L.LBSStNDI = record['LBSStNDI'.lower()]
B1L.LBSStNHt = record['LBSStNHt'.lower()]
B1L.LBSAGerM = record['LBSAGerM'.lower()]
B1L.LBSAGeSM = record['LBSAGeSM'.lower()]
B1L.LBSAGeGt = record['LBSAGeGt'.lower()]
B1L.LBSAGeFL = record['LBSAGeFL'.lower()]
B1L.LBSAGetr = record['LBSAGetr'.lower()]
B1L.LBSAGeBr = record['LBSAGeBr'.lower()]
B1L.LBSSCLAU = record['LBSSCLAU'.lower()]
B1L.LBSSCLVr = record['LBSSCLVr'.lower()]
B1L.LBSSCLot = record['LBSSCLot'.lower()]
B1L.LBSSCor = record['LBSSCor'.lower()]
packet.append(B1L)
B2L = lbd_ivp_forms.FormB2L()
B2L.LBUDSPCH = record['LBUDSPCH'.lower()]
B2L.LBUDSALV = record['LBUDSALV'.lower()]
B2L.LBUDSWAL = record['LBUDSWAL'.lower()]
B2L.LBUWrIte = record['LBUWrIte'.lower()]
B2L.LBUDFooD = record['LBUDFooD'.lower()]
B2L.LBUDreSS = record['LBUDreSS'.lower()]
B2L.LBUDHyGN = record['LBUDHyGN'.lower()]
B2L.LBUDtUrN = record['LBUDtUrN'.lower()]
B2L.LBUDFALL = record['LBUDFALL'.lower()]
B2L.LBUDFrZ = record['LBUDFrZ'.lower()]
B2L.LBUDWALK = record['LBUDWALK'.lower()]
B2L.LBUDtreM = record['LBUDtreM'.lower()]
B2L.LBUDSeNS = record['LBUDSeNS'.lower()]
packet.append(B2L)
B3L = lbd_ivp_forms.FormB3L()
B3L.LBUMSPCH = record['LBUMSPCH'.lower()]
B3L.LBUMSPCX = record['LBUMSPCX'.lower()]
B3L.LBUMFACe = record['LBUMFACe'.lower()]
B3L.LBUMFACX = record['LBUMFACX'.lower()]
B3L.LBUMtrFA = record['LBUMtrFA'.lower()]
B3L.LBUtrFAX = record['LBUtrFAX'.lower()]
B3L.LBUMtrrH = record['LBUMtrrH'.lower()]
B3L.LBUtrrHX = record['LBUtrrHX'.lower()]
B3L.LBUMtrLH = record['LBUMtrLH'.lower()]
B3L.LBUtrLHX = record['LBUtrLHX'.lower()]
B3L.LBUMtrrF = record['LBUMtrrF'.lower()]
B3L.LBUtrrFX = record['LBUtrrFX'.lower()]
B3L.LBUMtrLF = record['LBUMtrLF'.lower()]
B3L.LBUtrLFX = record['LBUtrLFX'.lower()]
B3L.LBUMAtrH = record['LBUMAtrH'.lower()]
B3L.LBUAtrHX = record['LBUAtrHX'.lower()]
B3L.LBUMAtLH = record['LBUMAtLH'.lower()]
B3L.LBUAtLHX = record['LBUAtLHX'.lower()]
B3L.LBUMrGNK = record['LBUMrGNK'.lower()]
B3L.LBUrGNKX = record['LBUrGNKX'.lower()]
B3L.LBUMrGrU = record['LBUMrGrU'.lower()]
B3L.LBUrGrUX = record['LBUrGrUX'.lower()]
B3L.LBUMrGLU = record['LBUMrGLU'.lower()]
B3L.LBUrGLUX = record['LBUrGLUX'.lower()]
B3L.LBUMrGrL = record['LBUMrGrL'.lower()]
B3L.LBUrGrLX = record['LBUrGrLX'.lower()]
B3L.LBUMrGLL = record['LBUMrGLL'.lower()]
B3L.LBUrGLLX = record['LBUrGLLX'.lower()]
B3L.LBUMFtrH = record['LBUMFtrH'.lower()]
B3L.LBUFtrHX = record['LBUFtrHX'.lower()]
B3L.LBUMFtLH = record['LBUMFtLH'.lower()]
B3L.LBUFtLHX = record['LBUFtLHX'.lower()]
B3L.LBUMHMrH = record['LBUMHMrH'.lower()]
B3L.LBUHMrHX = record['LBUHMrHX'.lower()]
B3L.LBUMHMLH = record['LBUMHMLH'.lower()]
B3L.LBUHMLHX = record['LBUHMLHX'.lower()]
B3L.LBUMPSrH = record['LBUMPSrH'.lower()]
B3L.LBUPSrHX = record['LBUPSrHX'.lower()]
B3L.LBUMPSLH = record['LBUMPSLH'.lower()]
B3L.LBUPSLHX = record['LBUPSLHX'.lower()]
B3L.LBUMLGrL = record['LBUMLGrL'.lower()]
B3L.LBULGrLX = record['LBULGrLX'.lower()]
B3L.LBUMLGLL = record['LBUMLGLL'.lower()]
B3L.LBULGLLX = record['LBULGLLX'.lower()]
B3L.LBUMrISe = record['LBUMrISe'.lower()]
B3L.LBUMrISX = record['LBUMrISX'.lower()]
B3L.LBUMPoSt = record['LBUMPoSt'.lower()]
B3L.LBUMPoSX = record['LBUMPoSX'.lower()]
B3L.LBUMGAIt = record['LBUMGAIt'.lower()]
B3L.LBUMGAIX = record['LBUMGAIX'.lower()]
B3L.LBUPStBL = record['LBUPStBL'.lower()]
B3L.LBUPStBX = record['LBUPStBX'.lower()]
B3L.LBUMBrAD = record['LBUMBrAD'.lower()]
B3L.LBUMBrAX = record['LBUMBrAX'.lower()]
B3L.LBUMHNyr = record['LBUMHNyr'.lower()]
B3L.LBUMHNyX = record['LBUMHNyX'.lower()]
packet.append(B3L)
B4L = lbd_ivp_forms.FormB4L()
B4L.LBDeLUS = record['LBDeLUS'.lower()]
B4L.LBDHUrt = record['LBDHUrt'.lower()]
B4L.LBDSteAL = record['LBDSteAL'.lower()]
B4L.LBDAFFr = record['LBDAFFr'.lower()]
B4L.LBDGUeSt = record['LBDGUeSt'.lower()]
B4L.LBDIMPoS = record['LBDIMPoS'.lower()]
B4L.LBDHoMe = record['LBDHoMe'.lower()]
B4L.LBDABAND = record['LBDABAND'.lower()]
B4L.LBDPreS = record['LBDPreS'.lower()]
B4L.LBDotHer = record['LBDotHer'.lower()]
B4L.LBDeLFrQ = record['LBDeLFrQ'.lower()]
B4L.LBDeLSeV = record['LBDeLSeV'.lower()]
B4L.LBDeLDSt = record['LBDeLDSt'.lower()]
B4L.LBHALL = record['LBHALL'.lower()]
B4L.LBHVoICe = record['LBHVoICe'.lower()]
B4L.LBHPeoPL = record['LBHPeoPL'.lower()]
B4L.LBHNotPr = record['LBHNotPr'.lower()]
B4L.LBHoDor = record['LBHoDor'.lower()]
B4L.LBHFeeL = record['LBHFeeL'.lower()]
B4L.LBHtASte = record['LBHtASte'.lower()]
B4L.LBHotSeN = record['LBHotSeN'.lower()]
B4L.LBHALFrQ = record['LBHALFrQ'.lower()]
B4L.LBHALSeV = record['LBHALSeV'.lower()]
B4L.LBHALDSt = record['LBHALDSt'.lower()]
B4L.LBANXIet = record['LBANXIet'.lower()]
B4L.LBANeVNt = record['LBANeVNt'.lower()]
B4L.LBANreLX = record['LBANreLX'.lower()]
B4L.LBANBrtH = record['LBANBrtH'.lower()]
B4L.LBANBUtt = record['LBANBUtt'.lower()]
B4L.LBANPLAC = record['LBANPLAC'.lower()]
B4L.LBANSePr = record['LBANSePr'.lower()]
B4L.LBANotHr = record['LBANotHr'.lower()]
B4L.LBANXFrQ = record['LBANXFrQ'.lower()]
B4L.LBANXSeV = record['LBANXSeV'.lower()]
B4L.LBANXDSt = record['LBANXDSt'.lower()]
B4L.LBAPAtHy = record['LBAPAtHy'.lower()]
B4L.LBAPSPNt = record['LBAPSPNt'.lower()]
B4L.LBAPCoNV = record['LBAPCoNV'.lower()]
B4L.LBAPAFF = record['LBAPAFF'.lower()]
B4L.LBAPCHor = record['LBAPCHor'.lower()]
B4L.LBAPINt = record['LBAPINt'.lower()]
B4L.LBAPFAML = record['LBAPFAML'.lower()]
B4L.LBAPINtr = record['LBAPINtr'.lower()]
B4L.LBAPotH = record['LBAPotH'.lower()]
B4L.LBAPAFrQ = record['LBAPAFrQ'.lower()]
B4L.LBAPASeV = record['LBAPASeV'.lower()]
B4L.LBAPADSt = record['LBAPADSt'.lower()]
B4L.LBDoPAM = record['LBDoPAM'.lower()]
B4L.LBDAGe = record['LBDAGe'.lower()]
B4L.LBDDrUG1 = record['LBDDrUG1'.lower()]
B4L.LBDDoSe1 = record['LBDDoSe1'.lower()]
B4L.LBDAGe2 = record['LBDAGe2'.lower()]
B4L.LBDDrUG2 = record['LBDDrUG2'.lower()]
B4L.LBDDoSe2 = record['LBDDoSe2'.lower()]
B4L.LBDeLAGe = record['LBDeLAGe'.lower()]
B4L.LBDeLMeD = record['LBDeLMeD'.lower()]
B4L.LBDeLMD1 = record['LBDeLMD1'.lower()]
B4L.LBDeLMD2 = record['LBDeLMD2'.lower()]
B4L.LBHALAGe = record['LBHALAGe'.lower()]
B4L.LBHALMeD = record['LBHALMeD'.lower()]
B4L.LBHALMD1 = record['LBHALMD1'.lower()]
B4L.LBHALMD2 = record['LBHALMD2'.lower()]
B4L.LBANXAGe = record['LBANXAGe'.lower()]
B4L.LBANXMeD = record['LBANXMeD'.lower()]
B4L.LBANXMD1 = record['LBANXMD1'.lower()]
B4L.LBANXMD2 = record['LBANXMD2'.lower()]
B4L.LBAPAAGe = record['LBAPAAGe'.lower()]
B4L.LBAPAMeD = record['LBAPAMeD'.lower()]
B4L.LBAPAMD1 = record['LBAPAMD1'.lower()]
B4L.LBAPAMD2 = record['LBAPAMD2'.lower()]
packet.append(B4L)
B5L = lbd_ivp_forms.FormB5L()
B5L.LBMLtHrG = record['LBMLtHrG'.lower()]
B5L.LBMSLeeP = record['LBMSLeeP'.lower()]
B5L.LBMDISrG = record['LBMDISrG'.lower()]
B5L.LBMStAre = record['LBMStAre'.lower()]
packet.append(B5L)
B6L = lbd_ivp_forms.FormB6L()
B6L.LBSPCGIM = record['LBSPCGIM'.lower()]
B6L.LBSPDrM = record['LBSPDrM'.lower()]
B6L.LBSPyrS = record['LBSPyrS'.lower()]
B6L.LBSPMoS = record['LBSPMoS'.lower()]
B6L.LBSPINJS = record['LBSPINJS'.lower()]
B6L.LBSPINJP = record['LBSPINJP'.lower()]
B6L.LBSPCHAS = record['LBSPCHAS'.lower()]
B6L.LBSPMoVe = record['LBSPMoVe'.lower()]
B6L.LBSPLeGS = record['LBSPLeGS'.lower()]
B6L.LBSPNerV = record['LBSPNerv'.lower()]
B6L.LBSPUrGL = record['LBSPUrGL'.lower()]
B6L.LBSPSeNS = record['LBSPSeNS'.lower()]
B6L.LBSPWorS = record['LBSPWorS'.lower()]
B6L.LBSPWALK = record['LBSPWALK'.lower()]
B6L.LBSPAWAK = record['LBSPAWAK'.lower()]
B6L.LBSPBrtH = record['LBSPBrtH'.lower()]
B6L.LBSPtrt = record['LBSPtrt'.lower()]
B6L.LBSPCrMP = record['LBSPCrMP'.lower()]
B6L.LBSPALrt = record['LBSPALrt'.lower()]
packet.append(B6L)
B7L = lbd_ivp_forms.FormB7L()
B7L.LBSCLIV = record['LBSCLIV'.lower()]
B7L.LBSCSLP = record['LBSCSLP'.lower()]
B7L.LBSCBeHV = record['LBSCBeHV'.lower()]
B7L.LBSCDrM = record['LBSCDrM'.lower()]
B7L.LBSCyrS = record['LBSCyrS'.lower()]
B7L.LBSCMoS = record['LBSCMoS'.lower()]
B7L.LBSCINJS = record['LBSCINJS'.lower()]
B7L.LBSCINJP = record['LBSCINJP'.lower()]
B7L.LBSCCHAS = record['LBSCCHAS'.lower()]
B7L.LBSCMoVe = record['LBSCMoVe'.lower()]
B7L.LBSCLeGS = record['LBSCLeGS'.lower()]
B7L.LBSCNerV = record['LBSCNerV'.lower()]
B7L.LBSCSeNS = record['LBSCSeNS'.lower()]
B7L.LBSCWorS = record['LBSCWorS'.lower()]
B7L.LBSCWALK = record['LBSCWALK'.lower()]
B7L.LBSCAWAK = record['LBSCAWAK'.lower()]
B7L.LBSCBrtH = record['LBSCBrtH'.lower()]
B7L.LBSCtrt = record['LBSCtrt'.lower()]
B7L.LBSCCrMP = record['LBSCCrMP'.lower()]
B7L.LBSCALrt = record['LBSCALrt'.lower()]
packet.append(B7L)
B8L = lbd_ivp_forms.FormB8L()
B8L.PACoGIMP = record['PACoGIMP'.lower()]
B8L.PANSFALL = record['PANSFALL'.lower()]
B8L.PANSWKoF = record['PANSWKoF'.lower()]
B8L.PANSLyAW = record['PANSLyAW'.lower()]
B8L.PANSWKer = record['PANSWKer'.lower()]
B8L.PANSLttL = record['PANSLttL'.lower()]
B8L.SCPArAte = record['SCPArAte'.lower()]
B8L.PADSUNeX = record['PADSUNeX'.lower()]
B8L.PADSSItP = record['PADSSItP'.lower()]
B8L.PADSWAtV = record['PADSWAtV'.lower()]
B8L.PADStALK = record['PADStALK'.lower()]
B8L.PADSAWDy = record['PADSAWDy'.lower()]
B8L.PADSFLDy = record['PADSFLDy'.lower()]
packet.append(B8L)
B9L = lbd_ivp_forms.FormB9L()
B9L.CoNSFALL = record['CoNSFALL'.lower()]
B9L.CoNSWKoF = record['CoNSWKoF'.lower()]
B9L.CoNSLyAW = record['CoNSLyAW'.lower()]
B9L.CoNSWKer = record['CoNSWKer'.lower()]
B9L.CoNSLttL = record['CoNSLttL'.lower()]
B9L.SCCorAte = record['SCCorAte'.lower()]
B9L.CoDSUNeX = record['CoDSUNeX'.lower()]
B9L.CoDSSItP = record['CoDSSItP'.lower()]
B9L.CoDSWAtV = record['CoDSWAtV'.lower()]
B9L.CoDStALK = record['CoDStALK'.lower()]
B9L.CoDSAWDy = record['CoDSAWDy'.lower()]
B9L.CoDSFLDy = record['CoDSFLDy'.lower()]
B9L.SCCoFrSt = record['SCCoFrSt'.lower()]
B9L.SCCoAGeN = record['SCCoAGeN'.lower()]
B9L.SCCoAGeD = record['SCCoAGeD'.lower()]
B9L.SCCoCoMP = record['SCCoCoMP'.lower()]
B9L.SCCoSCVr = record['SCCoSCVr'.lower()]
B9L.SCCootH = record['SCCootH'.lower()]
B9L.SCCoSCor = record['SCCoSCor'.lower()]
packet.append(B9L)
C1L = lbd_ivp_forms.FormC1L()
C1L.LBNSWorD = record['LBNSWorD'.lower()]
C1L.LBNSCoLr = record['LBNSCoLr'.lower()]
C1L.LBNSCLWD = record['LBNSCLWD'.lower()]
C1L.LBNPFACe = record['LBNPFACe'.lower()]
C1L.LBNPNoIS = record['LBNPNoIS'.lower()]
C1L.LBNPtCor = record['LBNPtCor'.lower()]
C1L.LBNPPArD = record['LBNPPArD'.lower()]
packet.append(C1L)
D1L = lbd_ivp_forms.FormD1L()
D1L.LBCDSCoG = record['LBCDSCoG'.lower()]
D1L.LBCCMeM = record['LBCCMeM'.lower()]
D1L.LBCCLANG = record['LBCCLANG'.lower()]
D1L.LBCCAtt = record['LBCCAtt'.lower()]
D1L.LBCCeXDe = record['LBCCeXDe'.lower()]
D1L.LBCCVIS = record['LBCCVIS'.lower()]
D1L.LBCDSMoV = record['LBCDSMoV'.lower()]
D1L.LBCMBrAD = record['LBCMBrAD'.lower()]
D1L.LBCMrIGD = record['LBCMrIGD'.lower()]
D1L.LBCMrtrM = record['LBCMrtrM'.lower()]
D1L.LBCMPtrM = record['LBCMPtrM'.lower()]
D1L.LBCMAtrM = record['LBCMAtrM'.lower()]
D1L.LBCMMyoC = record['LBCMMyoC'.lower()]
D1L.LBCMGAIt = record['LBCMGAIt'.lower()]
D1L.LBCMPINS = record['LBCMPINS'.lower()]
D1L.LBCDSBeV = record['LBCDSBeV'.lower()]
D1L.LBCBDeP = record['LBCBDeP'.lower()]
D1L.LBCBAPA = record['LBCBAPA'.lower()]
D1L.LBCBANX = record['LBCBANX'.lower()]
D1L.LBCBHALL = record['LBCBHALL'.lower()]
D1L.LBCBDeL = record['LBCBDeL'.lower()]
D1L.LBCDSAUt = record['LBCDSAUt'.lower()]
D1L.LBCAreM = record['LBCAreM'.lower()]
D1L.LBCAAPN = record['LBCAAPN'.lower()]
D1L.LBCALGSL = record['LBCALGSL'.lower()]
D1L.LBCArSLe = record['LBCArSLe'.lower()]
D1L.LBCADtSL = record['LBCADtSL'.lower()]
D1L.LBCACGFL = record['LBCACGFL'.lower()]
D1L.LBCAHyPt = record['LBCAHyPt'.lower()]
D1L.LBCACoNS = record['LBCACoNS'.lower()]
D1L.LBCAHyPS = record['LBCAHyPS'.lower()]
D1L.LBCAFALL = record['LBCAFALL'.lower()]
D1L.LBCASyNC = record['LBCASyNC'.lower()]
D1L.LBCASNAP = record['LBCASNAP'.lower()]
D1L.LBCoGSt = record['LBCoGSt'.lower()]
D1L.LBCoGDX = record['LBCoGDX'.lower()]
packet.append(D1L)
E1L = lbd_ivp_forms.FormE1L()
E1L.LBGLrrK2 = record['LBGLrrK2'.lower()]
E1L.LBGLrKIS = record['LBGLrKIS'.lower()]
E1L.LBGPArK2 = record['LBGPArK2'.lower()]
E1L.LBGPK2IS = record['LBGPK2IS'.lower()]
E1L.LBGPArK7 = record['LBGPArK7'.lower()]
E1L.LBGPK7IS = record['LBGPK7IS'.lower()]
E1L.LBGPINK1 = record['LBGPINK1'.lower()]
E1L.LBGPNKIS = record['LBGPNKIS'.lower()]
E1L.LBGSNCA = record['LBGSNCA'.lower()]
E1L.LBGSNCIS = record['LBGSNCIS'.lower()]
E1L.LBGGBA = record['LBGGBA'.lower()]
E1L.LBGGBAIS = record['LBGGBAIS'.lower()]
E1L.LBGotHr = record['LBGotHr'.lower()]
E1L.LBGotHIS = record['LBGotHIS'.lower()]
E1L.LBGotHX = record['LBGotHX'.lower()]
packet.append(E1L)
E2L = lbd_ivp_forms.FormE2L()
E2L.LBISMrI = record['LBISMrI'.lower()]
E2L.LBISMMo = record['LBISMMo'.lower()]
E2L.LBISMDy = record['LBISMDy'.lower()]
E2L.LBISMyr = record['LBISMyr'.lower()]
E2L.LBISMQAV = record['LBISMQAV'.lower()]
E2L.LBISMHIP = record['LBISMHIP'.lower()]
E2L.LBISMAVL = record['LBISMAVL'.lower()]
E2L.LBISMDCM = record['LBISMDCM'.lower()]
E2L.LBISMFMt = record['LBISMFMt'.lower()]
E2L.LBISMADN = record['LBISMADN'.lower()]
E2L.LBISMVer = record['LBISMVer'.lower()]
E2L.LBISMMAN = record['LBISMMAN'.lower()]
E2L.LBISMoM = record['LBISMoM'.lower()]
E2L.LBISMStr = record['LBISMStr'.lower()]
E2L.LBISMoS = record['LBISMoS'.lower()]
E2L.LBIFPet = record['LBIFPet'.lower()]
E2L.LBIFPMo = record['LBIFPMo'.lower()]
E2L.LBIFPDy = record['LBIFPDy'.lower()]
E2L.LBIFPyr = record['LBIFPyr'.lower()]
E2L.LBIFPQAV = record['LBIFPQAV'.lower()]
E2L.LBIFPoCC = record['LBIFPoCC'.lower()]
E2L.LBIFPtPP = record['LBIFPtPP'.lower()]
E2L.LBIFPISL = record['LBIFPISL'.lower()]
E2L.LBIFPAVL = record['LBIFPAVL'.lower()]
E2L.LBIFPDCM = record['LBIFPDCM'.lower()]
E2L.LBIFPFMt = record['LBIFPFMt'.lower()]
E2L.LBIFPADN = record['LBIFPADN'.lower()]
E2L.LBIFPVer = record['LBIFPVer'.lower()]
E2L.LBIFPMAN = record['LBIFPMAN'.lower()]
E2L.LBIFPoM = record['LBIFPoM'.lower()]
E2L.LBIAPet = record['LBIAPet'.lower()]
E2L.LBIAPMo = record['LBIAPMo'.lower()]
E2L.LBIAPDy = record['LBIAPDy'.lower()]
E2L.LBIAPyr = record['LBIAPyr'.lower()]
E2L.LBIAPQAV = record['LBIAPQAV'.lower()]
E2L.LBIAPAVL = record['LBIAPAVL'.lower()]
E2L.LBIAPDCM = record['LBIAPDCM'.lower()]
E2L.LBIAPFMt = record['LBIAPFMt'.lower()]
E2L.LBIAPLIG = record['LBIAPLIG'.lower()]
E2L.LBIAPoL = record['LBIAPoL'.lower()]
E2L.LBIAPADN = record['LBIAPADN'.lower()]
E2L.LBIAPVer = record['LBIAPVer'.lower()]
E2L.LBIAPMAN = record['LBIAPMAN'.lower()]
E2L.LBIAPoM = record['LBIAPoM'.lower()]
E2L.LBItPet = record['LBItPet'.lower()]
E2L.LBItPMo = record['LBItPMo'.lower()]
E2L.LBItPDy = record['LBItPDy'.lower()]
E2L.LBItPyr = record['LBItPyr'.lower()]
E2L.LBItPQAV = record['LBItPQAV'.lower()]
E2L.LBItPAVL = record['LBItPAVL'.lower()]
E2L.LBItPDCM = record['LBItPDCM'.lower()]
E2L.LBItPFMt = record['LBItPFMt'.lower()]
E2L.LBItPLIG = record['LBItPLIG'.lower()]
E2L.LBItPoL = record['LBItPoL'.lower()]
E2L.LBItPADN = record['LBItPADN'.lower()]
E2L.LBItPVer = record['LBItPVer'.lower()]
E2L.LBItPMAN = record['LBItPMAN'.lower()]
E2L.LBItPoM = record['LBItPoM'.lower()]
E2L.LBIDAtS = record['LBIDAtS'.lower()]
E2L.LBIDSMo = record['LBIDSMo'.lower()]
E2L.LBIDSDy = record['LBIDSDy'.lower()]
E2L.LBIDSyr = record['LBIDSyr'.lower()]
E2L.LBIDSQAV = record['LBIDSQAV'.lower()]
E2L.LBIDSABN = record['LBIDSABN'.lower()]
packet.append(E2L)
E3L = lbd_ivp_forms.FormE3L()
E3L.LBoPoLyS = record['LBoPoLyS'.lower()]
E3L.LBoPoSMo = record['LBoPoSMo'.lower()]
E3L.LBoPoSDy = record['LBoPoSDy'.lower()]
E3L.LBoPoSyr = record['LBoPoSyr'.lower()]
E3L.LBoPoPoS = record['LBoPoPoS'.lower()]
E3L.LBoPoAVL = record['LBoPoAVL'.lower()]
E3L.LBoCMIBG = record['LBoCMIBG'.lower()]
E3L.LBoCMMo = record['LBoCMMo'.lower()]
E3L.LBoCMDy = record['LBoCMDy'.lower()]
E3L.LBoCMyr = record['LBoCMyr'.lower()]
E3L.LBoCMPoS = record['LBoCMPoS'.lower()]
E3L.LBoCMAVL = record['LBoCMAVL'.lower()]
E3L.LBoANoS = record['LBoANoS'.lower()]
E3L.LBoANMo = record['LBoANMo'.lower()]
E3L.LBoANDy = record['LBoANDy'.lower()]
E3L.LBoANyr = record['LBoANyr'.lower()]
E3L.LBoANPoS = record['LBoANPoS'.lower()]
E3L.LBoANAVL = record['LBoANAVL'.lower()]
E3L.LBoANVer = record['LBoANVer'.lower()]
E3L.LBoANotH = record['LBoANotH'.lower()]
E3L.LBoeeG = record['LBOeeG'.lower()]
E3L.LBoeGMo = record['LBoeGMo'.lower()]
E3L.LBoeGDy = record['LBoeGDy'.lower()]
E3L.LBoeGyr = record['LBoeGyr'.lower()]
E3L.LBoeGPoS = record['LBoeGPoS'.lower()]
E3L.LBoeGAVL = record['LBoeGAVL'.lower()]
E3L.LBoMSLt = record['LBoMSLt'.lower()]
E3L.LBoMSMo = record['LBoMSMo'.lower()]
E3L.LBoMSDy = record['LBoMSDy'.lower()]
E3L.LBoMSyr = record['LBoMSyr'.lower()]
E3L.LBoMSPoS = record['LBoMSPoS'.lower()]
E3L.LBoMSAVL = record['LBoMSAVL'.lower()]
E3L.LBotILt = record['LBotILt'.lower()]
E3L.LBotLMo = record['LBotLMo'.lower()]
E3L.LBotLDy = record['LBotLDY'.lower()]
E3L.LBotLyr = record['LBotLyr'.lower()]
E3L.LBotLPoS = record['LBotLPoS'.lower()]
E3L.LBotLAVL = record['LBotLAVL'.lower()]
E3L.LBoQSArt = record['LBoQSArt'.lower()]
E3L.LBoQSMo = record['LBoQSMo'.lower()]
E3L.LBoQSDy = record['LBoQSDy'.lower()]
E3L.LBoQSyr = record['LBoQSyr'.lower()]
E3L.LBoQSPoS = record['LBoQSPoS'.lower()]
E3L.LBoSGAVL = record['LBoSGAVL'.lower()]
E3L.LBotHerM = record['LBotHerM'.lower()]
E3L.LBotHMo = record['LBotHMo'.lower()]
E3L.LBotHDy = record['LBotHDy'.lower()]
E3L.LBotHyr = record['LBotHyr'.lower()]
E3L.LBotHPoS = record['LBotHPoS'.lower()]
E3L.LBotHAVL = record['LBotHAVL'.lower()]
E3L.LBoCGAIt = record['LBoCGAIt'.lower()]
E3L.LBoCGMo = record['LBoCGMo'.lower()]
E3L.LBoCGDy = record['LBoCGDy'.lower()]
E3L.LBoCGyr = record['LBoCGyr'.lower()]
E3L.LBoCGPoS = record['LBoCGPoS'.lower()]
E3L.LBoCGAVL = record['LBoCGAVL'.lower()]
packet.append(E3L)
update_header(record, packet)
return packet
def update_header(record, packet):
for header in packet:
header.PACKET = "IL"
header.FORMID = header.form_name
header.FORMVER = 3
header.ADCID = record['adcid']
header.PTID = record['ptid']
header.VISITMO = record['visitmo']
header.VISITDAY = record['visitday']
header.VISITYR = record['visityr']
header.VISITNUM = record['visitnum']
header.INITIALS = record['initials']
| ctsit/nacculator | nacc/lbd/ivp/builder.py | Python | bsd-2-clause | 21,964 | ["VisIt"] | 076e6029125164b4458ded638046b69079c547873f98d58d3b1c8ce8afabcc5a |
# box
# Copyright 2013-2014 Dipen Patel
# See LICENSE for details.
import urllib
import httplib
import json
from error import ERRORCODES, BoxError
import mimetypes
BOX_API_VERSION = "/2.0"
BOX_API_URL = "api.box.com"
BOX_DOWNLOAD_URL = "dl.boxcloud.com"
BOX_API_UPLOAD_URL = "upload.box.com"
class BoxClient(object):
def __init__(self, access_token=None, timeout=None):
self.access_token = access_token
self.timeout = timeout
def user_info(self, userId="me"):
"""Get information of logged in user.
Args:
- userId : User id to get information
Returns:
returns dictionary of user information
For more details, visit:
http://developers.box.com/docs/#users
"""
return self.request("/users/"+userId)
def get_folders(self, folderId, **args):
""" Get list of folders in given folder including all metadata
Args:
- folderId : Folder id to get list of items
Returns:
- A dictionary containing the metadata of files/folders
For more details, visit:
http://developers.box.com/docs/#folders-folder-object
"""
return self.request("/folders/"+folderId, qs_args=args)
def get_folders_items(self, folderId, **args):
""" Get list of folders in given folder without any other metadata
Args:
- folderId : Folder id to get list of items
Returns:
- A dictionary containing the list of files/folders
For more details, visit:
http://developers.box.com/docs/#folders-retrieve-a-folders-items
"""
return self.request("/folders/"+folderId+'/items', qs_args=args)
def create_folder(self, **post_data):
""" Creates an Empty folder inside specified parent folder
Args:
post_data : Dictionary object containing Folder name and parent Id
e.g. {"name": "New folder", "parent":{"id": "0"}}
Returns:
- A full folder object is returned
For more details, visit:
http://developers.box.com/docs/#folders-create-a-new-folder
"""
return self.request("/folders/", method='POST', post_args=post_data)
def update_folder_info(self, folderId, **post_data):
"""update folder information
Args:
- folderId : folder's id to update information
- post_data : parameter lists to update(in json format)
Returns:
- The updated folder is returned if the name is valid
for more details, visit:
http://developers.box.com/docs/#folders-update-information-about-a-folder
"""
return self.request("/folders/"+folderId, method="PUT", post_args=post_data)
def delete_folder(self, folderId, **qs_args):
"""Delete a folder with given id
Args:
- folderId : Folder id to delete
- qs_args : dictionary object of optional parameter "recursive"
e.g. {"recursive": "true"}
Returns:
- returns a 204 status response if the folder is deleted successfully.
"""
return self.request("/folders/"+folderId, method='DELETE', qs_args=qs_args)
def get_files(self, fileId, **args):
"""Get information about a file with given id
Args:
- fileId : File id to get information
- args : dictionary object of optional parameter
Returns:
- A full file object is returned
For more details, visit:
http://developers.box.com/docs/#files-get
"""
return self.request("/files/"+fileId, qs_args=args)
def download_file(self, fileId, **args):
"""download a file of given fileID
Args:
- args : optional arguments (Version id)
Returns:
- httplib.HTTPResponse that is the result of the request.
close HttpResponse once file is downloaded.
"""
return self.request("/files/"+fileId+"/content", qs_args=args)
def upload_file(self, fileObj, parentId, fileId=None):
""" upload a file to specified folder
Args:
- fileObj : file object
- parentId : folder id where file need to upload
- fileId : id of an existing file in parentId to update (upload a new version)
Returns:
- full file object is returned in json object if the ID is valid
for more details, visit
http://developers.box.com/docs/#files-upload-a-file
"""
return self.request_upload(parentId, fileObj=fileObj, fileId=fileId)
def delete_file(self, fileId):
"""Delete a file with given id
Args:
- fileId : File id to delete
Returns:
- returns a 204 status response if the file is deleted successfully.
for more details, visit
http://developers.box.com/docs/#files-delete-a-file
"""
return self.request("/files/"+fileId, method="DELETE")
def get_file_comments(self, fileId):
"""Get comments of given file id
Args:
- fileId : File id to get comment
Returns:
- A collection of comment objects are returned.
If there are no comments on the file, an empty comments array is returned.
for more details, visit
http://developers.box.com/docs/#files-view-the-comments-on-a-file
"""
return self.request("/files/"+fileId+"/comments")
def get_comments(self, commentId):
"""Get comments with given id
Args:
- commentId : Comment id to get
Returns:
- A full comment object is returned if the ID is valid and if the user has access to the comment.
for more details, visit
http://developers.box.com/docs/#comments-get-information-about-a-comment
"""
return self.request("/comments/"+commentId)
def add_comments(self, **post_data):
"""Add comments to given id
Args:
- post_data : Dictionary object containing type of Comment and message
e.g. {"item": {"type": "file", "id": "FILE_ID"}, "message": "YOUR_MESSAGE"}
Returns:
- The new comment object is returned. Errors may occur if the item id is invalid,
the item type is invalid/unsupported, or if the user does not have access to the item being commented on.
for more details, visit
http://developers.box.com/docs/#comments-add-a-comment-to-an-item
"""
return self.request("/comments/", method="POST", post_args=post_data)
def edit_comment(self, commentId, **post_data):
"""Edit comments to given id
Args:
- commentId : id of comment to modify
- post_data : Dictionary object containing new message
e.g. {"message": "YOUR NEW MESSAGE"}
Returns:
- The full updated comment object is returned
if the ID is valid and if the user has access to the comment.
for more details, visit
http://developers.box.com/docs/#comments-change-a-comments-message
"""
return self.request("/comments/"+commentId, method="PUT", post_args=post_data)
def delete_comment(self, commentId):
"""Delete comment of given id
Args:
- commentId : id of comment to delete
Returns:
- An empty 204 response is returned to confirm deletion of the comment.
Errors can be thrown if the ID is invalid or if the user is not authorized to delete this particular comment.
for more details, visit
http://developers.box.com/docs/#comments-delete-a-comment
"""
return self.request("/comments/"+commentId, method="DELETE")
def search_items(self, **qs_args):
"""Search items in user's box account
Args:
- qs_args : Dictionary object containing search query, limit and offset
e.g. {"query": "football", "limit":1, "offset": 0}
Returns:
- A collection of search results is returned.
If there are no matching search results, the entries array will be empty.
for more details, visit
http://developers.box.com/docs/#search-searching-a-users-account
"""
return self.request("/search/", method="GET", qs_args=qs_args)
def request_upload(self, parentId, method='POST', fileObj=None, fileId=None):
"""An internal method that builds the url, headers, and params for Box API request.
Args:
- path : API endpoint with leading slash
- method : An HTTP method
- qs_args : query sting arguments to send
- post_args : POST data to send
Returns:
- return json or raw response based on API endpoint.
"""
con = httplib.HTTPSConnection(BOX_API_UPLOAD_URL, timeout=self.timeout)
if fileId:
path = '/api/'+BOX_API_VERSION+'/files/'+fileId+'/content'
else:
path = '/api/'+BOX_API_VERSION+'/files/content'
headerValue = 'Bearer %s' % (self.access_token,)
fields = {"filename": fileObj,"parent_id":parentId}
content_type, body = self._encode_multipart_form(fields)
headers = {
"Authorization": headerValue,
"Content-Type": content_type
}
con.request(method, path, body, headers)
response = {}
data = con.getresponse()
if data.status in ERRORCODES:
response["status"] = data.status
response["error"] = data.reason
else:
response = data.read()
data.close()
con.close()
try:
return json.loads(response)
except Exception, e:
try:
return json.dumps(response)
except:
raise BoxError(e)
# based on: http://code.activestate.com/recipes/146306/
def _encode_multipart_form(self, fields):
"""Encode files as 'multipart/form-data'.
Fields are a dict of form name-> value. For files, value should
be a file object.
Returns (content_type, body) ready for httplib.HTTP instance.
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for (key, value) in fields.items():
L.append('--' + BOUNDARY)
if hasattr(value, 'read') and callable(value.read):
filename = getattr(value, 'name')
L.append(('Content-Disposition: form-data;'
'name="%s";'
'filename="%s"') % (key, filename))
L.append('Content-Type: %s' % (mimetypes.guess_type(filename)[0],))
value = value.read()
else:
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
if isinstance(value, unicode):
value = value.encode('ascii')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def request(self, path, method='GET', qs_args=None, post_args=None):
"""An internal method that builds the url, headers, and params for Box API request.
Args:
- path : API endpoint with leading slash
- method : An HTTP method
- qs_args : query string arguments to send
- post_args : POST data to send
Returns:
- return json or raw response based on API endpoint.
"""
qs_args = qs_args or {}
post_data = json.dumps(post_args) if post_args else None
con = httplib.HTTPSConnection(BOX_API_URL, timeout=self.timeout)
path = BOX_API_VERSION+path
url = '%s?%s' % (path, urllib.urlencode(qs_args))
headerValue = 'Bearer %s' % (self.access_token,)
headers = {"Authorization": headerValue}
con.request(method, url, post_data, headers)
response = {}
data = con.getresponse()
if data.status in ERRORCODES:
response["status"] = data.status
response["error"] = data.reason
elif data.status == 201 or data.status == 200:
response = data.read()
elif data.status == 302:
url = data.getheader("location", "")
data.close()
con.close()
con1 = httplib.HTTPSConnection(BOX_DOWNLOAD_URL, timeout=self.timeout)
con1.request(method, url[len("https://"+BOX_DOWNLOAD_URL):])
return con1.getresponse()
data.close()
con.close()
try:
# To parse response returned by Box.com APIs
return json.loads(response)
except:
try:
# To parse response created by this library
return json.dumps(response)
except:
# return raw response
return response
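# Minimal usage sketch (illustrative only, not part of the original module).
# The access token is a placeholder for a valid OAuth2 bearer token; "0" is
# the id of the Box root folder.
if __name__ == "__main__":
    client = BoxClient(access_token="YOUR_ACCESS_TOKEN", timeout=30)
    # List up to ten items in the root folder via GET /folders/0/items.
    root_items = client.get_folders_items("0", limit=10, offset=0)
    print(root_items)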
| dipen30/boxapi | box/client.py | Python | mit | 13,730 | ["VisIt"] | eb07801e83c3f0d43f6b0e970e8f90f2da70f84327a8ab6087a2f512882c877e |
import vtk
def ReadPolyData(filename):
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(filename)
reader.Update()
return reader.GetOutput()
def WritePolyData(input,filename):
writer = vtk.vtkXMLPolyDataWriter()
writer.SetFileName(filename)
writer.SetInputData(input)
writer.Write()
file_path = "/home/ksansom/caseFiles/mri/VWI_proj/case1/vmtk/case1_VCG.ply"
out_path = "/home/ksansom/caseFiles/mri/VWI_proj/case1/vmtk/case1_VCG_smooth.ply"
reader = vtk.vtkPLYReader()
reader.SetFileName(file_path)
reader.Update()
smooth = vtk.vtkSmoothPolyDataFilter()
smooth.SetInputConnection(reader.GetOutputPort())
smooth.SetNumberOfIterations(10)
smooth.BoundarySmoothingOff()
smooth.SetFeatureAngle(120)
smooth.SetEdgeAngle(90)
smooth.SetRelaxationFactor(.05)
writer = vtk.vtkPLYWriter()
writer.SetFileName(out_path)
writer.SetInputConnection(smooth.GetOutputPort())
writer.Write()
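# Illustrative sketch (not part of the original script): the ReadPolyData and
# WritePolyData helpers defined above handle VTK XML PolyData (.vtp) files,
# so the same smoothing pipeline could be wrapped for that format. The file
# paths passed in are hypothetical .vtp files.
def smooth_vtp(in_path, out_path, iterations=10):
    mesh = ReadPolyData(in_path)
    smoother = vtk.vtkSmoothPolyDataFilter()
    smoother.SetInputData(mesh)
    smoother.SetNumberOfIterations(iterations)
    smoother.BoundarySmoothingOff()
    smoother.Update()
    WritePolyData(smoother.GetOutput(), out_path)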
| kayarre/Tools | vtk/smooth_polydata.py | Python | bsd-2-clause | 919 | ["VTK"] | 7e5bb2d564ba0fb724b8a2d36a4b0682f1068abc391599572c3265c55933dd01 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class NcbiRmblastn(AutotoolsPackage):
"""RMBlast search engine for NCBI"""
homepage = "http://www.repeatmasker.org/RMBlast.html"
url = "ftp://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/2.9.0/ncbi-blast-2.9.0+-src.tar.gz"
version('2.9.0', sha256='a390cc2d7a09422759fc178db84de9def822cbe485916bbb2ec0d215dacdc257')
patch('isb-2.9.0+-rmblast-p1.patch', when="@2.9.0")
configure_directory = 'c++'
def configure_args(self):
args = [
"--with-mt",
"--without-debug",
"--without-krb5",
"--without-openssl",
"--with-projects=scripts/projects/rmblastn/project.lst"]
return args
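# Illustrative only (not part of the original package file): with this recipe
# on the Spack repo path, the package would typically be built with a command
# along the lines of
#
#   spack install ncbi-rmblastn@2.9.0
#
# which applies the bundled rmblast patch and runs configure in the c++
# source directory with the arguments returned by configure_args() above.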
| iulian787/spack | var/spack/repos/builtin/packages/ncbi-rmblastn/package.py | Python | lgpl-2.1 | 904 | ["BLAST"] | e38ee9ba300f864277887f1976d42e78ad18c521fbdf2eaac9b1c2d395a191fb |
import os
from subprocess import Popen, PIPE, STDOUT
from ase import Atoms
from ase.calculators.turbomole import Turbomole
# Delete old coord, control, ... files, if exist
for f in ['coord',
'basis',
'energy',
'mos',
'statistics',
'control']:
if os.path.exists(f):
os.remove(f)
atoms = Atoms('H2', positions=[(0, 0, 0), (0, 0, 1.1)])
atoms.set_calculator(Turbomole()) # Writes a coord file as well
# Write all commands for the define command in a string
define_str = '\n\na coord\n*\nno\nb all sto-3g hondo\n*\neht\n\n\n\n*'
# Run define
p = Popen('define', stdout=PIPE, stdin=PIPE, stderr=STDOUT)
stdout = p.communicate(input=define_str)
# Run turbomole
atoms.get_potential_energy()
| askhl/ase | ase/test/turbomole/turbomole_H2.py | Python | gpl-2.0 | 752 | ["ASE", "TURBOMOLE"] | 530d9391f8d868f59c4ae83f1788fb637a2dfcd31a8229ae8dd0df8dcbb3fd01 |
#!/usr/bin/env python
import pysam
import argparse
import sys
import time
import logging
import multiprocessing
DEFAULT_MIN_SOFT_CLIP=20
DEFAULT_MIN_SOFT_CLIP_MAPQ=10
DEFAULT_MIN_SOFT_CLIP_MATE_MAPQ=10
DEFAULT_BAD_MAP_MAX_SOFT_CLIP=50
DEFAULT_BAD_MAP_MIN_MAPQ=10
DEFAULT_BAD_MAP_MIN_MATE_MAPQ=10
DEFAULT_BAD_MAP_MIN_NM=8
def add_options(main_parser):
local_parser = main_parser.add_argument_group("Read extraction options.")
local_parser.add_argument('--min_soft_clip', default=DEFAULT_MIN_SOFT_CLIP,
help="Minimum soft-clipping for a read to be considered heavily soft-clipped", type=int)
local_parser.add_argument('--min_soft_clip_mapq', default=DEFAULT_MIN_SOFT_CLIP_MAPQ,
help="Min mapping quality of a heavily soft-clipped read to be considered for junction-mapping",
type=int)
local_parser.add_argument('--min_soft_clip_mate_mapq', default=DEFAULT_MIN_SOFT_CLIP_MATE_MAPQ,
help="Min mapping quality of the mate of a heavily soft-clipped read to be considered for junction-mapping",
type=int)
local_parser.add_argument('--bad_map_max_soft_clip', default=DEFAULT_BAD_MAP_MAX_SOFT_CLIP,
help="Maximum soft-clip for a read to be considered badly-mapped and, therefore, used for junction-mapping",
type=int)
local_parser.add_argument('--bad_map_min_mapq', default=DEFAULT_BAD_MAP_MIN_MAPQ,
help="Minimum mapping quality of a read to considered badly-mapped", type=int)
local_parser.add_argument('--bad_map_min_nm', default=DEFAULT_BAD_MAP_MIN_NM,
help="Min edit distance for a read to be considered badly mapped", type=int)
local_parser.add_argument('--bad_map_min_mate_mapq', default=DEFAULT_BAD_MAP_MIN_MATE_MAPQ,
help="Minimum mapping quality of the mate of a badly mapped read to be considered for junction-mapping",
type=int)
local_parser.add_argument('--bams', nargs='+', help="BAMs", required=True)
local_parser.add_argument('--chromosome', help="Chromosome to process. Leave unspecified to include all")
local_parser.add_argument('--out', help="Output file. Leave unspecified for stdout.")
def is_good_candidate(aln, min_soft_clip, min_soft_clip_mapq, min_soft_clip_mate_mapq, bad_map_max_soft_clip,
bad_map_min_mapq, bad_map_min_nm, bad_map_min_mate_mapq):
if aln.is_duplicate or aln.is_secondary: return False
if aln.is_unmapped: return True
# some tweaking may be required to ensure the reads in a pair are used consistently
if aln.cigar is None: return False
tags = aln.tags
nm = int(aln.opt("NM"))
xm = int(aln.opt("XM")) if "XM" in tags else 0
mq = int(aln.opt("MQ")) if "MQ" in tags else 30
max_soft_clip = 0
max_del = 0
for (op, length) in aln.cigar:
if op == 4:
max_soft_clip = max(max_soft_clip, length)
elif op == 2:
max_del = max(max_del, length)
if (
max_soft_clip >= min_soft_clip or max_del >= min_soft_clip) and aln.mapq >= min_soft_clip_mapq and xm == 0 and mq >= min_soft_clip_mate_mapq: return True
if (
max_soft_clip <= bad_map_max_soft_clip or max_del <= bad_map_max_soft_clip) and aln.mapq >= bad_map_min_mapq and nm >= bad_map_min_nm and mq >= bad_map_min_mate_mapq: return True
return False
def get_iterator(bam_handle, chromosome):
if chromosome is None:
return bam_handle
if chromosome:
return bam_handle.fetch(chromosome)
# Get the iterator for the reads with no coordinates
bam_header = bam_handle.header
for bam_chr_dict in bam_header['SQ'][::-1]:
chr_name = bam_chr_dict['SN']
chr_length = bam_chr_dict['LN']
if bam_handle.count(chr_name) > 0:
bam_handle.fetch(chr_name)
return bam_handle
bam_handle.reset()
return bam_handle
def print_candidate_reads(bams, chromosome, min_soft_clip=DEFAULT_MIN_SOFT_CLIP, min_soft_clip_mapq=DEFAULT_MIN_SOFT_CLIP_MAPQ, min_soft_clip_mate_mapq=DEFAULT_MIN_SOFT_CLIP_MATE_MAPQ, bad_map_max_soft_clip=DEFAULT_BAD_MAP_MAX_SOFT_CLIP,
bad_map_min_mapq=DEFAULT_BAD_MAP_MIN_MAPQ, bad_map_min_nm=DEFAULT_BAD_MAP_MIN_NM, bad_map_min_mate_mapq=DEFAULT_BAD_MAP_MIN_MATE_MAPQ, outfile=None):
func_logger = logging.getLogger("%s-%s" % (print_candidate_reads.__name__, multiprocessing.current_process()))
start_time = time.time()
outfd = sys.stdout if outfile is None else open(outfile, "w")
readcount = 0
for input_file in bams:
sam_file = pysam.Samfile(input_file, "r" + ("" if input_file.endswith("sam") else "b"))
iterator = get_iterator(sam_file, chromosome)
for aln in iterator:
if not is_good_candidate(aln, min_soft_clip, min_soft_clip_mapq, min_soft_clip_mate_mapq,
bad_map_max_soft_clip, bad_map_min_mapq, bad_map_min_nm,
bad_map_min_mate_mapq): continue
read_id = aln.qname
if aln.is_paired and not aln.mate_is_unmapped:
read_id = read_id + "$" + sam_file.getrname(aln.rnext)
outfd.write("@%s\n%s\n+\n%s\n" % (read_id, aln.seq, aln.qual))
readcount += 1
sam_file.close()
if outfile is not None:
outfd.close()
func_logger.info("Extracted %d reads from BAMs %s for chromosome %s (%g s)" % (readcount, ", ".join(map(str, bams)), str(chromosome), time.time() - start_time))
return readcount
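# Illustrative usage sketch (not part of the original module); the BAM path
# and output file name are hypothetical placeholders.
def _example_extract_candidates(bam_path="sample.bam"):
    # Write unmapped, heavily soft-clipped and badly mapped reads from
    # chromosome "20" to a FASTQ file for later junction mapping, using
    # the default thresholds defined at the top of this module.
    return print_candidate_reads([bam_path], "20", outfile="candidate_reads.fq")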
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Select reads for junction mapping: unmapped reads, heavily soft-clipped reads and badly mapped reads are selected for junction-mapping in later stages",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_options(parser)
args = parser.parse_args()
print_candidate_reads(args.bams, args.chromosome, args.min_soft_clip, args.min_soft_clip_mapq, args.min_soft_clip_mate_mapq, args.bad_map_max_soft_clip,
args.bad_map_min_mapq, args.bad_map_min_nm, args.bad_map_min_mate_mapq, outfile=args.out)
| bioinform/breakseq2 | breakseq2/breakseq_pre.py | Python | bsd-2-clause | 6,295 | ["pysam"] | 24381efa19767dd74ef531ee0105a8420652df83bfc62c21a85cd437ee2dc381 |
from argparse import ArgumentParser, FileType, ArgumentDefaultsHelpFormatter
from csv import DictReader
from io import StringIO
from itertools import groupby
from operator import itemgetter
from tempfile import NamedTemporaryFile
from micall.core.denovo import write_contig_refs
def parse_args():
parser = ArgumentParser(
description='Run a set of contigs through BLAST again.',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('contigs_csv',
type=FileType(),
nargs='?',
default='contigs.csv',
help='contigs to search for')
return parser.parse_args()
def main():
args = parse_args()
fasta_file = NamedTemporaryFile(mode='w', prefix='contigs', suffix='.fasta')
contig_sources = [] # [(sample_name, contig_num, ref_name, contig_size)]
ref_name = None
for sample_name, sample_rows in groupby(DictReader(args.contigs_csv),
itemgetter('sample')):
for contig_num, row in enumerate(sample_rows, 1):
ref_name = row['ref']
header = f'>{sample_name}_{contig_num}-{ref_name}\n'
fasta_file.write(header)
fasta_file.write(row['contig'])
fasta_file.write('\n')
contig_size = len(row['contig'])
contig_sources.append((sample_name,
contig_num,
ref_name,
contig_size))
if __name__ == '__live_coding__' and ref_name != 'unknown':
break
fasta_file.flush()
new_contigs_csv = StringIO()
blast_csv = StringIO()
write_contig_refs(fasta_file.name, new_contigs_csv, blast_csv=blast_csv)
blast_csv.seek(0)
for source_contig_num, contig_rows in groupby(DictReader(blast_csv),
itemgetter('contig_num')):
contig_rows = sorted(contig_rows, key=lambda r: int(r['score']))
sample_name, contig_num, ref_name, contig_size = contig_sources[
int(source_contig_num)-1]
best_blast_hits = [None] * contig_size
for row in contig_rows:
if row['ref_name'] == 'HIV1-CON-XX-Consensus-seed':
# Doesn't tell us about HIV subtype, so skip it.
continue
start = int(row['start'])
end = int(row['end'])
best_blast_hits[start-1:end] = [row['ref_name']] * (end-start+1)
best_subtypes = set()
matches = []
for blast_ref, ref_positions in groupby(best_blast_hits):
match_size = len(list(ref_positions))
if match_size > 100 and blast_ref is not None:
subtype = '-'.join(blast_ref.split('-')[:2])
best_subtypes.add(subtype)
else:
blast_ref = 'other'
matches.append(f'{blast_ref} x {match_size}')
if len(best_subtypes) == 1:
summary, = best_subtypes
elif not best_subtypes:
summary = None
else:
summary = ', '.join(matches)
print(f'{sample_name}, {contig_num}-{ref_name}: {summary}')
main()
| cfe-lab/MiCall | micall/utils/contig_blaster.py | Python | agpl-3.0 | 3,252 | ["BLAST"] | f38f078f2db33eab592ca3c829c84df5b684cbf191219476e07c2374b112a86b |
# $Id: octopus_conf_handler.py 2016-12-17 $
# Author: Coen Meerbeek <coen@buzzardlabs.com>
# Copyright: BuzzardLabs 2016
import splunk.admin as admin
import splunk.entity as en
# import your required python modules
'''
Copyright (C) 2005 - 2010 Splunk Inc. All Rights Reserved.
Description: This skeleton python script handles the parameters in the configuration page.
handleList method: lists configurable parameters in the configuration page
corresponds to handleractions = list in restmap.conf
handleEdit method: controls the parameters and saves the values
corresponds to handleractions = edit in restmap.conf
'''
class ConfigApp(admin.MConfigHandler):
'''
Set up supported arguments
'''
def setup(self):
if self.requestedAction == admin.ACTION_EDIT:
for arg in ['hostname', 'protocol', 'apikey']:
self.supportedArgs.addOptArg(arg)
'''
Read the initial values of the parameters from the custom file
myappsetup.conf, and write them to the setup page.
If the app has never been set up,
uses .../app_name/default/myappsetup.conf.
If app has been set up, looks at
.../local/myappsetup.conf first, then looks at
.../default/myappsetup.conf only if there is no value for a field in
.../local/myappsetup.conf
For boolean fields, may need to switch the true/false setting.
For text fields, if the conf file says None, set to the empty string.
'''
def handleList(self, confInfo):
confDict = self.readConf("octopus")
if None != confDict:
for stanza, settings in confDict.items():
for key, val in settings.items():
if key in ['hostname'] and val in [None, '']:
val = ''
elif key in ['protocol'] and val in [None, '']:
val = ''
elif key in ['apikey'] and val in [None, '']:
val = ''
confInfo[stanza].append(key, val)
'''
After user clicks Save on setup page, take updated parameters,
normalize them, and save them somewhere
'''
def handleEdit(self, confInfo):
name = self.callerArgs.id
args = self.callerArgs
if self.callerArgs.data['hostname'][0] in [None, '']:
self.callerArgs.data['hostname'][0] = ''
if self.callerArgs.data['protocol'][0] in [None, '']:
self.callerArgs.data['protocol'][0] = ''
if self.callerArgs.data['apikey'][0] in [None, '']:
self.callerArgs.data['apikey'][0] = ''
'''
Since we are using a conf file to store parameters,
write them to the [octopus] stanza
in app_name/local/octopus.conf
'''
self.writeConf('octopus', 'octopus', self.callerArgs.data)
# initialize the handler
admin.init(ConfigApp, admin.CONTEXT_NONE)
|
cmeerbeek/splunk-addon-octopus-deploy
|
TA-OctopusNT-Fwd/bin/octopus_conf_handler.py
|
Python
|
mit
| 2,769
|
[
"Octopus"
] |
5f0b1349d75acb7023deea204bba77a2050a0a88662b42167fc83d9425c7fa66
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import espressomd
import numpy as np
from espressomd.interactions import RigidBond
@utx.skipIfMissingFeatures("BOND_CONSTRAINT")
class RigidBondTest(ut.TestCase):
def test(self):
target_acc = 1E-3
tol = 1.2 * target_acc
s = espressomd.System(box_l=[1.0, 1.0, 1.0])
s.box_l = [10, 10, 10]
s.cell_system.skin = 0.4
s.time_step = 0.01
s.thermostat.set_langevin(kT=1, gamma=1, seed=42)
r = RigidBond(r=1.2, ptol=1E-3, vtol=target_acc)
s.bonded_inter.add(r)
for i in range(5):
s.part.add(id=i, pos=(i * 1.2, 0, 0))
if i > 0:
s.part[i].bonds = ((r, i - 1),)
s.integrator.run(5000)
for i in range(1, 5):
d = s.distance(s.part[i], s.part[i - 1])
v_d = s.distance_vec(s.part[i], s.part[i - 1])
self.assertAlmostEqual(d, 1.2, delta=tol)
# Velocity projection on distance vector
vel_proj = np.dot(s.part[i].v - s.part[i - 1].v, v_d) / d
self.assertLess(vel_proj, tol)
if __name__ == "__main__":
ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/rigid_bond.py
|
Python
|
gpl-3.0
| 1,886
|
[
"ESPResSo"
] |
6f78504aafb5157e56cf3beb79510eecff6ad5d34c9c5912eb85ceda39cf6fb5
|
import functools
from typing import List, Any
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import _gaussian_random_matrix
from sklearn.random_projection import _sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.exceptions import DataDimensionalityWarning
all_sparse_random_matrix: List[Any] = [_sparse_random_matrix]
all_dense_random_matrix: List[Any] = [_gaussian_random_matrix]
all_random_matrix = all_sparse_random_matrix + all_dense_random_matrix
all_SparseRandomProjection: List[Any] = [SparseRandomProjection]
all_DenseRandomProjection: List[Any] = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
@pytest.mark.parametrize("n_samples, eps", [
(100, 1.1),
(100, 0.0),
(100, -0.1),
(0, 0.5)
])
def test_invalid_jl_domain(n_samples, eps):
with pytest.raises(ValueError):
johnson_lindenstrauss_min_dim(n_samples, eps=eps)
def test_input_size_jl_min_dim():
with pytest.raises(ValueError):
johnson_lindenstrauss_min_dim(3 * [100], eps=2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
eps=np.full((10, 10), 0.5))
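# Illustrative addition (not part of the original test module): a minimal
# sketch of the Johnson-Lindenstrauss bound on valid input. It only checks
# that the required dimension shrinks as the allowed distortion eps grows,
# which follows from the bound n_components >= 4*log(n_samples)/(eps**2/2 - eps**3/3).
def test_jl_min_dim_decreases_with_eps_example():
    dims = [johnson_lindenstrauss_min_dim(10000, eps=eps)
            for eps in (0.1, 0.2, 0.5, 0.9)]
    # Looser tolerances never require more dimensions than tighter ones.
    assert all(a > b for a, b in zip(dims, dims[1:]))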
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
inputs = [(0, 0), (-1, 1), (1, -1), (1, 0), (-1, 0)]
for n_components, n_features in inputs:
with pytest.raises(ValueError):
random_matrix(n_components, n_features)
def check_size_generated(random_matrix):
inputs = [(1, 5), (5, 1), (5, 5), (1, 1)]
for n_components, n_features in inputs:
assert random_matrix(n_components, n_features).shape == (
n_components, n_features)
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrices should produce a transformation matrix
# with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
with pytest.raises(ValueError):
random_matrix(n_components, n_features, density=density)
@pytest.mark.parametrize("random_matrix", all_random_matrix)
def test_basic_property_of_random_matrix(random_matrix):
# Check basic properties of random matrix generation
check_input_size_random_matrix(random_matrix)
check_size_generated(random_matrix)
check_zero_mean_and_unit_norm(random_matrix)
@pytest.mark.parametrize("random_matrix", all_sparse_random_matrix)
def test_basic_property_of_sparse_random_matrix(random_matrix):
check_input_with_sparse_random_matrix(random_matrix)
random_matrix_dense = functools.partial(random_matrix, density=1.0)
check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
# Check some statistical properties of Gaussian random matrix
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = _gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
# Check some statistical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = _sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert np.sqrt(s) / np.sqrt(n_components) in values
assert - np.sqrt(s) / np.sqrt(n_components) in values
if density == 1.0:
assert np.size(values) == 2
else:
assert 0. in values
assert np.size(values) == 3
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
@pytest.mark.parametrize("density", [1.1, 0, -0.1])
def test_sparse_random_projection_transformer_invalid_density(density):
for RandomProjection in all_SparseRandomProjection:
with pytest.raises(ValueError):
RandomProjection(density=density).fit(data)
@pytest.mark.parametrize("n_components, fit_data", [
('auto', [[0, 1, 2]]), (-10, data)]
)
def test_random_projection_transformer_invalid_input(n_components, fit_data):
for RandomProjection in all_RandomProjection:
with pytest.raises(ValueError):
RandomProjection(n_components=n_components).fit(fit_data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
with pytest.raises(ValueError):
RandomProjection(n_components='auto').transform(data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
with pytest.raises(ValueError, match=expected_msg):
rp.fit(data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert distances_ratio.max() < 1 + eps
assert 1 - eps < distances_ratio.min()
def test_SparseRandomProj_output_representation():
for SparseRandomProj in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProj(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProj(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert rp.n_components == 'auto'
assert rp.n_components_ == 110
if RandomProjection in all_SparseRandomProjection:
assert rp.density == 'auto'
assert_almost_equal(rp.density_, 0.03, 2)
assert rp.components_.shape == (110, n_features)
projected_1 = rp.transform(data)
assert projected_1.shape == (n_samples, 110)
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
with pytest.raises(ValueError):
rp.transform(data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert projected.shape == (n_samples, 100)
assert rp.components_.shape == (100, n_features)
assert rp.components_.nnz < 115 # close to 1% density
assert 85 < rp.components_.nnz # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
with pytest.warns(DataDimensionalityWarning):
RandomProjection(n_components=n_features + 1).fit(data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
def test_johnson_lindenstrauss_min_dim():
"""Test Johnson-Lindenstrauss for small eps.
Regression test for #17111: before #19374, 32-bit systems would fail.
"""
assert johnson_lindenstrauss_min_dim(100, eps=1e-5) == 368416070986
|
kevin-intel/scikit-learn
|
sklearn/tests/test_random_projection.py
|
Python
|
bsd-3-clause
| 13,557
|
[
"Gaussian"
] |
1b4b950dd931d7016a16e6c22bf953efdb669fc66c4fbd7a10a2485555b7412c
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
r"""
Calculating path similarity --- :mod:`MDAnalysis.analysis.psa`
==========================================================================
:Author: Sean Seyler
:Year: 2015
:Copyright: GNU Public License v3
.. versionadded:: 0.10.0
The module contains code to calculate the geometric similarity of trajectories
using path metrics such as the Hausdorff or Fréchet distances
[Seyler2015]_. The path metrics are functions of two paths and return a
nonnegative number, i.e., a distance. Two paths are identical if their distance
is zero, and large distances indicate dissimilarity. Each path metric is a
function of the individual points (e.g., coordinate snapshots) that comprise
each path and, loosely speaking, identify the two points, one per path of a
pair of paths, where the paths deviate the most. The distance between these
points of maximal deviation is measured by the root mean square deviation
(RMSD), i.e., to compute structural similarity.
One typically computes the pairwise similarity for an ensemble of paths to
produce a symmetric distance matrix, which can be clustered to, at a glance,
identify patterns in the trajectory data. To properly analyze a path ensemble,
one must select a suitable reference structure to which all paths (each
conformer in each path) will be universally aligned using the rotations
determined by the best-fit rmsds. Distances between paths and their structures
are then computed directly with no further alignment. This pre-processing step
is necessary to preserve the metric properties of the Hausdorff and Fréchet
metrics; using the best-fit rmsd on a pairwise basis does not generally
preserve the triangle inequality.
.. SeeAlso:: The `PSAnalysisTutorial`_ outlines a typical application of PSA to
a set of trajectories, including doing proper alignment,
performing distance comparisons, and generating heat
map-dendrogram plots from hierarchical clustering.
.. Rubric:: References
.. [Seyler2015] Seyler SL, Kumar A, Thorpe MF, Beckstein O (2015)
Path Similarity Analysis: A Method for Quantifying
Macromolecular Pathways. PLoS Comput Biol 11(10): e1004568.
doi: `10.1371/journal.pcbi.1004568`_
.. _`10.1371/journal.pcbi.1004568`: http://dx.doi.org/10.1371/journal.pcbi.1004568
.. _`PSAnalysisTutorial`: https://github.com/Becksteinlab/PSAnalysisTutorial
Helper functions and variables
------------------------------
The following convenience functions are used by other functions in this module.
.. autofunction:: sqnorm
.. autofunction:: get_msd_matrix
.. autofunction:: get_coord_axes
Classes, methods, and functions
-------------------------------
.. autofunction:: get_path_metric_func
.. autofunction:: hausdorff
.. autofunction:: hausdorff_wavg
.. autofunction:: hausdorff_avg
.. autofunction:: hausdorff_neighbors
.. autofunction:: discrete_frechet
.. autofunction:: dist_mat_to_vec
.. autoclass:: Path
:members:
.. attribute:: u_original
:class:`MDAnalysis.Universe` object with a trajectory
.. attribute:: u_reference
:class:`MDAnalysis.Universe` object containing a reference structure
.. attribute:: ref_select
string, selection for
:meth:`~MDAnalysis.core.groups.AtomGroup.select_atoms` to select frame
from :attr:`Path.u_reference`
.. attribute:: path_select
string, selection for
:meth:`~MDAnalysis.core.groups.AtomGroup.select_atoms` to select atoms
to compose :attr:`Path.path`
.. attribute:: ref_frame
int, frame index to select frame from :attr:`Path.u_reference`
.. attribute:: u_fitted
:class:`MDAnalysis.Universe` object with the fitted trajectory
.. attribute:: path
:class:`numpy.ndarray` object representation of the fitted trajectory
.. autoclass:: PSAPair
.. attribute:: npaths
int, total number of paths in the comparison in which *this*
:class:`PSAPair` was generated
.. attribute:: matrix_id
(int, int), (row, column) indices of the location of *this*
:class:`PSAPair` in the corresponding pairwise distance matrix
.. attribute:: pair_id
int, ID of *this* :class:`PSAPair` (the pair_id:math:`^\text{th}`
comparison) in the distance vector corresponding to the pairwise distance
matrix
.. attribute:: nearest_neighbors
dict, contains the nearest neighbors by frame index and the
nearest neighbor distances for each path in *this* :class:`PSAPair`
.. attribute:: hausdorff_pair
dict, contains the frame indices of the Hausdorff pair for each path in
*this* :class:`PSAPair` and the corresponding (Hausdorff) distance
.. autoclass:: PSAnalysis
:members:
.. attribute:: universes
list of :class:`MDAnalysis.Universe` objects containing trajectories
.. attribute:: u_reference
:class:`MDAnalysis.Universe` object containing a reference structure
.. attribute:: ref_select
string, selection for
:meth:`~MDAnalysis.core.groups.AtomGroup.select_atoms` to select frame
from :attr:`PSAnalysis.u_reference`
.. attribute:: path_select
string, selection for
:meth:`~MDAnalysis.core.groups.AtomGroup.select_atoms` to select atoms
to compose :attr:`Path.path`
.. attribute:: ref_frame
int, frame index to select frame from :attr:`Path.u_reference`
.. attribute:: filename
string, name of file to store calculated distance matrix
(:attr:`PSAnalysis.D`)
.. attribute:: paths
list of :class:`numpy.ndarray` objects representing the set/ensemble of
fitted trajectories
.. attribute:: D
string, name of file to store calculated distance matrix
(:attr:`PSAnalysis.D`)
.. Markup definitions
.. ------------------
..
.. |3Dp| replace:: :math:`N_p \times N \times 3`
.. |2Dp| replace:: :math:`N_p \times (3N)`
.. |3Dq| replace:: :math:`N_q \times N \times 3`
.. |2Dq| replace:: :math:`N_q \times (3N)`
.. |3D| replace:: :math:`N_p\times N\times 3`
.. |2D| replace:: :math:`N_p\times 3N`
.. |Np| replace:: :math:`N_p`
"""
from __future__ import division, absolute_import
import six
from six.moves import range, cPickle
import numpy as np
import warnings,numbers
import MDAnalysis
import MDAnalysis.analysis.align
from MDAnalysis import NoDataError
import os
import logging
logger = logging.getLogger('MDAnalysis.analysis.psa')
def get_path_metric_func(name):
"""Selects a path metric function by name.
:Arguments:
*name*
string, name of path metric
:Returns:
The path metric function specified by *name* (if found).
"""
path_metrics = {
'hausdorff' : hausdorff,
'weighted_average_hausdorff' : hausdorff_wavg,
'average_hausdorff' : hausdorff_avg,
'hausdorff_neighbors' : hausdorff_neighbors,
'discrete_frechet' : discrete_frechet
}
try:
return path_metrics[name]
except KeyError as key:
print("Path metric {0} not found. Valid selections: ".format(key))
for name in path_metrics.keys(): print(" \"{0}\"".format(name))
def sqnorm(v, axis=None):
"""Compute the sum of squares of elements along specified axes.
Parameters
----------
v : numpy.ndarray
coordinates
axis : None / int / tuple (optional)
Axis or axes along which a sum is performed. The default
(*axis* = ``None``) performs a sum over all the dimensions of
the input array. The value of *axis* may be negative, in
which case it counts from the last axis to the zeroth axis.
Returns
-------
float
the sum of the squares of the elements of `v` along `axis`
"""
return np.sum(v*v, axis=axis)
def get_msd_matrix(P, Q, axis=None):
r"""Generate the matrix of pairwise mean-squared deviations between paths.
The MSDs between all pairs of points in `P` and `Q` are
calculated, each pair having a point from `P` and a point from
`Q`.
`P` (`Q`) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time
steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,
:attr:`MDAnalysis.core.groups.AtomGroup.positions`). The pairwise MSD
matrix has dimensions :math:`N_p` by :math:`N_q`.
Parameters
----------
P : numpy.ndarray
the points in the first path
Q : numpy.ndarray
the points in the second path
Returns
-------
msd_matrix : numpy.ndarray
matrix of pairwise MSDs between points in `P` and points
in `Q`
Notes
-----
We calculate the MSD matrix
.. math::
M_{ij} = ||p_i - q_j||^2
where :math:`p_i \in P` and :math:`q_j \in Q`.
"""
return np.asarray([sqnorm(p - Q, axis=axis) for p in P])
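# Illustrative sketch (not part of the original module): cross-check the
# vectorized MSD matrix against the elementwise definition
# M_ij = ||p_i - q_j||**2 on a tiny pair of flattened paths.
def _example_check_msd_matrix():
    rng = np.random.RandomState(0)
    P = rng.rand(4, 6)   # 4 frames, 2 "atoms" -> 6 flattened coordinates
    Q = rng.rand(3, 6)   # 3 frames
    M = get_msd_matrix(P, Q, axis=(1,))
    brute = np.array([[np.sum((p - q) ** 2) for q in Q] for p in P])
    assert np.allclose(M, brute)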
def get_coord_axes(path):
"""Return the number of atoms and the axes corresponding to atoms
and coordinates for a given path.
The `path` is assumed to be a :class:`numpy.ndarray` where the 0th axis
corresponds to a frame (a snapshot of coordinates). The :math:`3N`
(Cartesian) coordinates are assumed to be either:
1. all in the 1st axis, starting with the x,y,z coordinates of the
first atom, followed by the *x*,*y*,*z* coordinates of the 2nd, etc.
2. in the 1st *and* 2nd axis, where the 1st axis indexes the atom
number and the 2nd axis contains the *x*,*y*,*z* coordinates of
each atom.
Parameters
----------
path : numpy.ndarray
representing a path
Returns
-------
(int, (int, ...))
the number of atoms and the axes containing coordinates
"""
path_dimensions = len(path.shape)
if path_dimensions == 3:
N = path.shape[1]
axis = (1,2) # 1st axis: atoms, 2nd axis: x,y,z coords
elif path_dimensions == 2:
# can use mod to check if total # coords divisible by 3
N = path.shape[1] // 3
axis = (1,) # 1st axis: 3N structural coords (x1,y1,z1,...,xN,yN,zN)
else:
err_str = "Path must have 2 or 3 dimensions; the first dimensions (axis"\
+ " 0) must correspond to frames, axis 1 (and axis 2, if" \
+ " present) must contain atomic coordinates."
raise ValueError(err_str)
return N, axis
def hausdorff(P, Q):
r"""Calculate the symmetric Hausdorff distance between two paths.
*P* (*Q*) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time
steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,
:attr:`MDAnalysis.core.groups.AtomGroup.positions`). *P* (*Q*) has
either shape |3Dp| (|3Dq|), or |2Dp| (|2Dq|) in flattened form.
Parameters
----------
P : numpy.ndarray
the points in the first path
Q : numpy.ndarray
the points in the second path
Returns
-------
float
the Hausdorff distance between paths `P` and `Q`
Example
-------
Calculate the Hausdorff distance between two halves of a trajectory:
>>> from MDAnalysis.tests.datafiles import PSF, DCD
>>> u = Universe(PSF,DCD)
>>> mid = len(u.trajectory)//2
>>> ca = u.select_atoms('name CA')
>>> P = numpy.array([
... ca.positions for _ in u.trajectory[:mid:]
... ]) # first half of trajectory
>>> Q = numpy.array([
... ca.positions for _ in u.trajectory[mid::]
... ]) # second half of trajectory
>>> hausdorff(P,Q)
4.7786639840135905
>>> hausdorff(P,Q[::-1]) # hausdorff distance w/ reversed 2nd trajectory
4.7786639840135905
Note that reversing the path does not change the Hausdorff distance.
Notes
-----
The Hausdorff distance is calculated in a brute force manner from the
distance matrix without further optimizations, essentially following
[Huttenlocher1993]_.
.. SeeAlso::
:func:`scipy.spatial.distance.directed_hausdorff` is an optimized
implementation of the early break algorithm of [Taha2015]_; note that
one still has to calculate the *symmetric* Hausdorff distance as
`max(directed_hausdorff(P, Q)[0], directed_hausdorff(Q, P)[0])`.
References
----------
.. [Huttenlocher1993] D. P. Huttenlocher, G. A. Klanderman, and
W. J. Rucklidge. Comparing images using the Hausdorff distance. IEEE
Transactions on Pattern Analysis and Machine Intelligence,
15(9):850–863, 1993.
.. [Taha2015] A. A. Taha and A. Hanbury. An efficient algorithm for
calculating the exact Hausdorff distance. IEEE Transactions On Pattern
Analysis And Machine Intelligence, 37:2153-63, 2015.
"""
N, axis = get_coord_axes(P)
d = get_msd_matrix(P, Q, axis=axis)
return ( max( np.amax(np.amin(d, axis=0)), \
np.amax(np.amin(d, axis=1)) ) / N )**0.5
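# Illustrative sketch (not part of the original module), following the
# SeeAlso note above: for flattened 2D paths the value computed here should
# equal the symmetric Hausdorff distance from SciPy divided by sqrt(N),
# because this module reports a per-atom RMSD rather than a plain Euclidean
# distance. Assumes SciPy >= 0.19 (directed_hausdorff) is available.
def _example_hausdorff_vs_scipy():
    from scipy.spatial.distance import directed_hausdorff
    rng = np.random.RandomState(0)
    natoms = 5
    P = rng.rand(7, 3 * natoms)   # 7 frames, flattened coordinates
    Q = rng.rand(9, 3 * natoms)   # 9 frames
    d_scipy = max(directed_hausdorff(P, Q)[0], directed_hausdorff(Q, P)[0])
    assert np.isclose(hausdorff(P, Q), d_scipy / np.sqrt(natoms))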
def hausdorff_wavg(P, Q):
r"""Calculate the weighted average Hausdorff distance between two paths.
*P* (*Q*) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time
steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,
:attr:`MDAnalysis.core.groups.AtomGroup.positions`). *P* (*Q*) has
either shape |3Dp| (|3Dq|), or |2Dp| (|2Dq|) in flattened form. The nearest
neighbor distances for *P* (to *Q*) and those of *Q* (to *P*) are averaged
individually to get the average nearest neighbor distance for *P* and
likewise for *Q*. These averages are then summed and divided by 2 to get a
measure that gives equal weight to *P* and *Q*.
Parameters
----------
P : numpy.ndarray
the points in the first path
Q : numpy.ndarray
the points in the second path
Returns
-------
float
the weighted average Hausdorff distance between paths `P` and `Q`
Example
-------
>>> from MDAnalysis import Universe
>>> from MDAnalysis.tests.datafiles import PSF, DCD
>>> u = Universe(PSF,DCD)
>>> mid = len(u.trajectory)//2
>>> ca = u.select_atoms('name CA')
>>> P = numpy.array([
... ca.positions for _ in u.trajectory[:mid:]
... ]) # first half of trajectory
>>> Q = numpy.array([
... ca.positions for _ in u.trajectory[mid::]
... ]) # second half of trajectory
>>> hausdorff_wavg(P,Q)
2.5669644353703447
>>> hausdorff_wavg(P,Q[::-1]) # weighted avg hausdorff dist w/ Q reversed
2.5669644353703447
Notes
-----
The weighted average Hausdorff distance is not a true metric (it does not
obey the triangle inequality); see [Seyler2015]_ for further details.
"""
N, axis = get_coord_axes(P)
d = get_msd_matrix(P, Q, axis=axis)
out = 0.5*( np.mean(np.amin(d,axis=0)) + np.mean(np.amin(d,axis=1)) )
return ( out / N )**0.5
def hausdorff_avg(P, Q):
r"""Calculate the average Hausdorff distance between two paths.
*P* (*Q*) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time
steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,
:attr:`MDAnalysis.core.groups.AtomGroup.positions`). *P* (*Q*) has
either shape |3Dp| (|3Dq|), or |2Dp| (|2Dq|) in flattened form. The nearest
neighbor distances for *P* (to *Q*) and those of *Q* (to *P*) are all
averaged together to get a mean nearest neighbor distance. This measure
biases the average toward the path that has more snapshots, whereas weighted
average Hausdorff gives equal weight to both paths.
Parameters
----------
P : numpy.ndarray
the points in the first path
Q : numpy.ndarray
the points in the second path
Returns
-------
float
the average Hausdorff distance between paths `P` and `Q`
Example
-------
>>> from MDAnalysis.tests.datafiles import PSF, DCD
>>> u = Universe(PSF,DCD)
>>> mid = len(u.trajectory)//2
>>> ca = u.select_atoms('name CA')
>>> P = numpy.array([
... ca.positions for _ in u.trajectory[:mid:]
... ]) # first half of trajectory
>>> Q = numpy.array([
... ca.positions for _ in u.trajectory[mid::]
... ]) # second half of trajectory
>>> hausdorff_avg(P,Q)
2.5669646575869005
>>> hausdorff_avg(P,Q[::-1]) # hausdorff distance w/ reversed 2nd trajectory
2.5669646575869005
Notes
-----
The average Hausdorff distance is not a true metric (it does not obey the
triangle inequality); see [Seyler2015]_ for further details.
"""
N, axis = get_coord_axes(P)
d = get_msd_matrix(P, Q, axis=axis)
out = np.mean( np.append( np.amin(d,axis=0), np.amin(d,axis=1) ) )
return ( out / N )**0.5
def hausdorff_neighbors(P, Q):
r"""Find the Hausdorff neighbors of two paths.
*P* (*Q*) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time
steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,
:attr:`MDAnalysis.core.groups.AtomGroup.positions`). *P* (*Q*) has
either shape |3Dp| (|3Dq|), or |2Dp| (|2Dq|) in flattened form.
Parameters
----------
P : numpy.ndarray
the points in the first path
Q : numpy.ndarray
the points in the second path
Returns
-------
dict
dictionary of two pairs of numpy arrays, the first pair (key
"frames") containing the indices of (Hausdorff) nearest
neighbors for `P` and `Q`, respectively, the second (key
"distances") containing (corresponding) nearest neighbor
distances for `P` and `Q`, respectively
Notes
-----
Hausdorff neighbors are those points on the two paths that are separated by
the Hausdorff distance. They are the farthest nearest neighbors and are
maximally different in the sense of the Hausdorff distance [Seyler2015]_.
.. SeeAlso::
:func:`scipy.spatial.distance.directed_hausdorff` can also provide the
Hausdorff neighbors.
"""
N, axis = get_coord_axes(P)
d = get_msd_matrix(P, Q, axis=axis)
nearest_neighbors = {
'frames' : (np.argmin(d, axis=1), np.argmin(d, axis=0)),
'distances' : ((np.amin(d,axis=1)/N)**0.5, (np.amin(d, axis=0)/N)**0.5)
}
return nearest_neighbors
def discrete_frechet(P, Q):
r"""Calculate the discrete Fréchet distance between two paths.
*P* (*Q*) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time
steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,
:attr:`MDAnalysis.core.groups.AtomGroup.positions`). *P* (*Q*) has
either shape |3Dp| (|3Dq|), or |2Dp| (|2Dq|) in flattened form.
Parameters
----------
P : numpy.ndarray
the points in the first path
Q : numpy.ndarray
the points in the second path
Returns
-------
float
the discrete Fréchet distance between paths *P* and *Q*
Example
-------
Calculate the discrete Fréchet distance between two halves of a
trajectory.
>>> u = Universe(PSF,DCD)
>>> mid = len(u.trajectory)//2
>>> ca = u.select_atoms('name CA')
>>> P = np.array([
... ca.positions for _ in u.trajectory[:mid:]
... ]) # first half of trajectory
>>> Q = np.array([
... ca.positions for _ in u.trajectory[mid::]
... ]) # second half of trajectory
>>> discrete_frechet(P,Q)
4.7786639840135905
>>> discrete_frechet(P,Q[::-1]) # frechet distance w/ 2nd trj reversed 2nd
6.8429011177113832
Note that reversing the direction increased the Fréchet distance:
it is sensitive to the direction of the path.
Notes
-----
The discrete Fréchet metric is an approximation to the continuous Fréchet
metric [Frechet1906]_ [Alt1995]_. The calculation of the discrete
Fréchet distance is implemented with the dynamic programming algorithm of
[EiterMannila1994]_ [EiterMannila1997]_.
References
----------
.. [Frechet1906] M. Fréchet. Sur quelques points du calcul
fonctionnel. Rend. Circ. Mat. Palermo, 22(1):1–72, Dec. 1906.
.. [Alt1995] H. Alt and M. Godau. Computing the Fréchet distance between
two polygonal curves. Int J Comput Geometry & Applications,
5(01n02):75–91, 1995. doi: `10.1142/S0218195995000064`_
.. _`10.1142/S0218195995000064`: http://doi.org/10.1142/S0218195995000064
.. [EiterMannila1994] T. Eiter and H. Mannila. Computing discrete Fréchet
distance. Technical Report CD-TR 94/64, Christian Doppler Laboratory for
Expert Systems, Technische Universität Wien, Wien, 1994.
.. [EiterMannila1997] T. Eiter and H. Mannila. Distance measures for point
sets and their computation. Acta Informatica, 34:109–133, 1997. doi: `10.1007/s002360050075`_.
.. _10.1007/s002360050075: http://doi.org/10.1007/s002360050075
"""
N, axis = get_coord_axes(P)
Np, Nq = len(P), len(Q)
d = get_msd_matrix(P, Q, axis=axis)
ca = -np.ones((Np, Nq))
def c(i, j):
"""Compute the coupling distance for two partial paths formed by *P* and
*Q*, where both begin at frame 0 and end (inclusive) at the respective
frame indices :math:`i-1` and :math:`j-1`. The partial path of *P* (*Q*)
up to frame *i* (*j*) is formed by the slicing ``P[0:i]`` (``Q[0:j]``).
:func:`c` is called recursively to compute the coupling distance
between the two full paths *P* and *Q* (i.e., the discrete Frechet
distance) in terms of coupling distances between their partial paths.
:Arguments:
*i*
int, partial path of *P* through final frame *i-1*
*j*
int, partial path of *Q* through final frame *j-1*
:Returns:
float, the coupling distance between partial paths ``P[0:i]`` and
``Q[0:j]``
"""
if ca[i,j] != -1 : return ca[i,j]
if i > 0:
if j > 0: ca[i,j] = max( min(c(i-1,j),c(i,j-1),c(i-1,j-1)), d[i,j] )
else: ca[i,j] = max( c(i-1,0), d[i,0] )
elif j > 0: ca[i,j] = max( c(0,j-1), d[0,j] )
else: ca[i,j] = d[0,0]
return ca[i,j]
return ( c(Np-1, Nq-1) / N )**0.5
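# Illustrative sketch (not part of the original module): a hand-checkable
# discrete Fréchet calculation on two one-atom paths along the x axis. The
# example also shows the direction sensitivity discussed in the docstring.
def _example_discrete_frechet():
    P = np.array([[[0., 0., 0.]], [[1., 0., 0.]], [[2., 0., 0.]]])  # 3 frames
    Q = np.array([[[0., 0., 0.]], [[2., 0., 0.]]])                  # 2 frames
    # Walking both paths forward, no coupling needs to stretch further than 1.
    assert np.isclose(discrete_frechet(P, Q), 1.0)
    # Reversing one path forces a coupling of length 2 at the very start.
    assert np.isclose(discrete_frechet(P, Q[::-1]), 2.0)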
def dist_mat_to_vec(N, i, j):
"""Convert distance matrix indices (in the upper triangle) to the index of
the corresponding distance vector.
This is a convenience function to locate distance matrix elements (and the
pair generating it) in the corresponding distance vector. The column index *j*
should be greater than the row index *i*, corresponding to the upper triangle of the
distance matrix.
Parameters
----------
N : int
size of the distance matrix (of shape *N*-by-*N*)
i : int
row index (starting at 0) of the distance matrix
j : int
column index (starting at 0) of the distance matrix
Returns
-------
int
index (of the matrix element) in the corresponding distance vector
"""
if not (isinstance(N, numbers.Integral) and isinstance(i, numbers.Integral)
        and isinstance(j, numbers.Integral)):
err_str = "N, i, j all must be of type int"
raise ValueError(err_str)
if i < 0 or j < 0 or N < 2:
error_str = "Matrix indices are invalid; i and j must be greater than 0 and N must be greater the 2"
raise ValueError(error_str)
if (j > i and (i > N - 1 or j > N)) or (j < i and (i > N or j > N - 1)):
err_str = "Matrix indices are out of range; i and j must be less than" \
+ " N = {0:d}".format(N)
raise ValueError(err_str)
if j > i:
return (N*i) + j - (i+2)*(i+1)//2
elif j < i:
warn_str = "Column index entered (j = {:d} is smaller than row index" \
+ " (i = {:d}). Using symmetric element in upper triangle of" \
+ " distance matrix instead: i --> j, j --> i"
warnings.warn(warn_str.format(j, i))
return (N*j) + i - (j+2)*(j+1)//2
else:
err_str = "Error in processing matrix indices; i and j must be integers"\
+ " less than integer N = {0:d} such that j >= i+1.".format(N)
raise ValueError(err_str)
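# Illustrative sketch (not part of the original module): enumerate the upper
# triangle of a 5x5 distance matrix row by row and confirm dist_mat_to_vec
# reproduces the running index, e.g. (i, j) = (1, 3) maps to
# (5*1) + 3 - (1+2)*(1+1)//2 = 5.
def _example_dist_mat_to_vec():
    N = 5
    k = 0
    for i in range(N):
        for j in range(i + 1, N):
            assert dist_mat_to_vec(N, i, j) == k
            k += 1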
class Path(object):
"""Represent a path based on a :class:`~MDAnalysis.core.universe.Universe`.
Pre-process a :class:`Universe` object: (1) fit the trajectory to a
reference structure, (2) convert fitted time series to a
:class:`numpy.ndarray` representation of :attr:`Path.path`.
The analysis is performed with :meth:`PSAnalysis.run` and stores the result
in the :class:`numpy.ndarray` distance matrix :attr:`PSAnalysis.D`.
:meth:`PSAnalysis.run` also generates a fitted trajectory and path from
alignment of the original trajectories to a reference structure.
.. versionadded:: 0.9.1
"""
def __init__(self, universe, reference, ref_select='name CA',
path_select='all', ref_frame=0):
"""Setting up trajectory alignment and fitted path generation.
Parameters
----------
universe : Universe
:class:`MDAnalysis.Universe` object containing a trajectory
reference : Universe
reference structure (uses `ref_frame` from the trajectory)
ref_select : str or dict or tuple (optional)
The selection to operate on for rms fitting; can be one of:
1. any valid selection string for
:meth:`~MDAnalysis.core.groups.AtomGroup.select_atoms` that
produces identical selections in *mobile* and *reference*; or
2. a dictionary ``{'mobile':sel1, 'reference':sel2}`` (the
:func:`MDAnalysis.analysis.align.fasta2select` function returns
such a dictionary based on a ClustalW_ or STAMP_ sequence
alignment); or
3. a tuple ``(sel1, sel2)``
When using 2. or 3. with *sel1* and *sel2* then these selections
can also each be a list of selection strings (to generate an
AtomGroup with defined atom order as described under
:ref:`ordered-selections-label`).
ref_frame : int
frame index to select the coordinate frame from
`ref_select.trajectory`
path_select : selection_string
atom selection composing coordinates of (fitted) path; if ``None``
then `path_select` is set to `ref_select` [``None``]
"""
self.u_original = universe
self.u_reference = reference
self.ref_select = ref_select
self.ref_frame = ref_frame
self.path_select = path_select
self.top_name = self.u_original.filename
self.trj_name = self.u_original.trajectory.filename
self.newtrj_name = None
self.u_fitted = None
self.path = None
self.natoms = None
def fit_to_reference(self, filename=None, prefix='', postfix='_fit',
rmsdfile=None, targetdir=os.path.curdir,
mass_weighted=False, tol_mass=0.1):
"""Align each trajectory frame to the reference structure
Parameters
----------
filename : str (optional)
file name for the RMS-fitted trajectory or pdb; defaults to the
original trajectory filename (from :attr:`Path.u_original`) with
*prefix* prepended
prefix : str (optional)
prefix for auto-generating the new output filename
rmsdfile : str (optional)
file name for writing the RMSD time series [``None``]
mass_weighted : bool (optional)
do a mass-weighted RMSD fit, default is ``False``
tol_mass : float (optional)
Reject match if the atomic masses for matched atoms differ by more
than `tol_mass` [0.1]
Returns
-------
Universe
:class:`MDAnalysis.Universe` object containing a fitted trajectory
Notes
-----
Uses :class:`MDAnalysis.analysis.align.AlignTraj` for the fitting.
"""
head, tail = os.path.split(self.trj_name)
oldname, ext = os.path.splitext(tail)
filename = filename or oldname
self.newtrj_name = os.path.join(targetdir, filename + postfix + ext)
self.u_reference.trajectory[self.ref_frame] # select frame from ref traj
aligntrj = MDAnalysis.analysis.align.AlignTraj(self.u_original,
self.u_reference,
select=self.ref_select,
filename=self.newtrj_name,
prefix=prefix,
mass_weighted=mass_weighted,
tol_mass=tol_mass).run()
if rmsdfile is not None:
aligntrj.save(rmsdfile)
return MDAnalysis.Universe(self.top_name, self.newtrj_name)
def to_path(self, fitted=False, select=None, flat=False):
r"""Generates a coordinate time series from the fitted universe
trajectory.
Given a selection of *N* atoms from *select*, the atomic positions for
each frame in the fitted universe (:attr:`Path.u_fitted`) trajectory
(with |Np| total frames) are appended sequentially to form a 3D or 2D
(if *flat* is ``True``) :class:`numpy.ndarray` representation of the
fitted trajectory (with dimensions |3D| or |2D|, respectively).
Parameters
----------
fitted : bool (optional)
construct a :attr:`Path.path` from the :attr:`Path.u_fitted`
trajectory; if ``False`` then :attr:`Path.path` is generated with
the trajectory from :attr:`Path.u_original` [``False``]
select : str (optional)
the selection for constructing the coordinates of each frame in
:attr:`Path.path`; if ``None`` then :attr:`Path.path_select`
is used, else it is overridden by *select* [``None``]
flat : bool (optional)
represent :attr:`Path.path` as a 2D (|2D|) :class:`numpy.ndarray`;
if ``False`` then :attr:`Path.path` is a 3D (|3D|)
:class:`numpy.ndarray` [``False``]
Returns
-------
numpy.ndarray
representing a time series of atomic positions of an
:class:`MDAnalysis.core.groups.AtomGroup` selection from
:attr:`Path.u_fitted.trajectory`
"""
select = select if select is not None else self.path_select
if fitted:
if not isinstance(self.u_fitted, MDAnalysis.Universe):
raise TypeError("Fitted universe not found. Generate a fitted " +
"universe with fit_to_reference() first, or explicitly "+
"set argument \"fitted\" to \"False\" to generate a " +
"path from the original universe.")
u = self.u_fitted
else:
u = self.u_original
frames = u.trajectory
atoms = u.select_atoms(select)
self.natoms = len(atoms)
frames.rewind()
if flat:
return np.array([atoms.positions.flatten() for _ in frames])
else:
return np.array([atoms.positions for _ in frames])
def run(self, align=False, filename=None, postfix='_fit', rmsdfile=None,
targetdir=os.path.curdir, mass_weighted=False, tol_mass=0.1,
flat=False):
r"""Generate a path from a trajectory and reference structure.
As part of the path generation, the trajectory can be superimposed
("aligned") to a reference structure if specified.
This is a convenience method to generate a fitted trajectory from an
inputted universe (:attr:`Path.u_original`) and reference structure
(:attr:`Path.u_reference`). :meth:`Path.fit_to_reference` and
:meth:`Path.to_path` are used consecutively to generate a new universe
(:attr:`Path.u_fitted`) containing the fitted trajectory along with the
corresponding :attr:`Path.path` represented as an
:class:`numpy.ndarray`. The method returns a tuple of the topology name
and new trajectory name, which can be fed directly into an
:class:`MDAnalysis.Universe` object after unpacking the tuple using the
``*`` operator, as in
``MDAnalysis.Universe(*(top_name, newtraj_name))``.
Parameters
----------
align : bool (optional)
Align trajectory to atom selection :attr:`Path.ref_select` of
:attr:`Path.u_reference`. If ``True``, a universe containing an
aligned trajectory is produced with :meth:`Path.fit_to_reference`
[``False``]
filename : str (optional)
filename for the RMS-fitted trajectory or pdb; defaults to the
original trajectory filename (from :attr:`Path.u_original`) with
*prefix* prepended
postfix : str (optional)
suffix for auto-generating the new output filename
rmsdfile : str (optional)
file name for writing the RMSD time series [``None``]
mass_weighted : bool (optional)
do a mass-weighted RMSD fit
tol_mass : float (optional)
Reject match if the atomic masses for matched atoms differ by more
than *tol_mass* [0.1]
flat : bool (optional)
represent :attr:`Path.path` with 2D (|2D|) :class:`numpy.ndarray`;
if ``False`` then :attr:`Path.path` is a 3D (|3D|)
:class:`numpy.ndarray` [``False``]
Returns
-------
topology_trajectory : tuple
A tuple of the topology name and new trajectory name.
"""
if align:
self.u_fitted = self.fit_to_reference( \
filename=filename, postfix=postfix, \
rmsdfile=rmsdfile, targetdir=targetdir, \
mass_weighted=mass_weighted, tol_mass=tol_mass)
self.path = self.to_path(fitted=align, flat=flat)
return self.top_name, self.newtrj_name
def get_num_atoms(self):
"""Return the number of atoms used to construct the :class:`Path`.
Must run :meth:`Path.to_path` prior to calling this method.
Returns
-------
int
the number of atoms in the :class:`Path`
"""
if self.natoms is None:
raise ValueError("No path data; do 'Path.to_path()' first.")
return self.natoms
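# Illustrative usage sketch (not part of the original module): build a Path
# from a trajectory and a reference structure, align it, and reopen the
# fitted trajectory. The three file arguments are user-supplied placeholders.
def _example_path_usage(topology_file, trajectory_file, reference_file):
    u = MDAnalysis.Universe(topology_file, trajectory_file)
    ref = MDAnalysis.Universe(reference_file)
    p = Path(u, ref, ref_select='name CA', path_select='name CA', ref_frame=0)
    # run() aligns the trajectory (align=True) and stores the fitted
    # coordinates in p.path; it returns the topology and fitted-trajectory
    # filenames, which can be unpacked straight into a new Universe.
    top_name, fit_trj_name = p.run(align=True, filename='fitted')
    u_fit = MDAnalysis.Universe(top_name, fit_trj_name)
    return p.path, u_fit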
class PSAPair(object):
"""Generate nearest neighbor and Hausdorff pair information between a pair
of paths from an all-pairs comparison generated by :class:`PSA`.
The nearest neighbors for each path of a pair of paths is generated by
:meth:`PSAPair.compute_nearest_neighbors` and stores the result
in a dictionary (:attr:`nearest_neighbors`): each path has a
:class:`numpy.ndarray` of the frames of its nearest neighbors, and a
:class:`numpy.ndarray` of its nearest neighbor distances
:attr:`PSAnalysis.D`. For example, *nearest_neighbors['frames']* is a pair
of :class:`numpy.ndarray`, the first being the frames of the nearest
neighbors of the first path, *i*, the second being those of the second path,
*j*.
The Hausdorff pair for the pair of paths is found by calling
:meth:`find_hausdorff_pair` (locates the nearest neighbor pair having the
largest overall distance separating them), which stores the result in a
dictionary (:attr:`hausdorff_pair`) containing the frames (indices) of the
pair along with the corresponding (Hausdorff) distance.
*hausdorff_pair['frame']* contains a pair of frames in the first path, *i*,
and the second path, *j*, respectively, that correspond to the Hausdorff
distance between them.
.. versionadded:: 0.11
"""
def __init__(self, npaths, i, j):
"""Set up a :class:`PSAPair` for a pair of paths that are part of a
:class:`PSA` comparison of *npaths* total paths.
Each unique pair of paths compared using :class:`PSA` is related by
their nearest neighbors (and corresponding distances) and the Hausdorff
pair and distance. :class:`PSAPair` is a convenience class for
calculating and encapsulating nearest neighbor and Hausdorff pair
information for one pair of paths.
Given *npaths*, :class:`PSA` performs an all-pairs comparison among all
paths for a total of :math:`\text{npaths}*(\text{npaths}-1)/2` unique
comparisons. If distances between paths are computed, the all-pairs
comparison can be summarized in a symmetric distance matrix whose upper
triangle can be mapped to a corresponding distance vector form in a
one-to-one manner. A particular comparison of a pair of paths in a
given instance of :class:`PSAPair` is thus uniquely identified by the row
and column indices in the distance matrix representation (whether or not
distances are actually computed), or a single ID (index) in the
corresponding distance vector.
Parameters
----------
npaths : int
total number of paths in :class:`PSA` used to generate *this*
:class:`PSAPair`
i : int
row index (starting at 0) of the distance matrix
j : int
column index (starting at 0) of the distance matrix
"""
self.npaths = npaths
self.matrix_idx = (i,j)
self.pair_idx = self._dvec_idx(i,j)
# Set by calling hausdorff_nn
self.nearest_neighbors = {'frames' : None, 'distances' : None}
# Set by self.getHausdorffPair
self.hausdorff_pair = {'frames' : (None, None), 'distance' : None}
def _dvec_idx(self, i, j):
"""Convert distance matrix indices (in the upper triangle) to the index
of the corresponding distance vector.
This is a convenience function to locate distance matrix elements (and
the pair generating it) in the corresponding distance vector. The column
index *j* should be greater than the row index *i*, corresponding to the upper
triangle of the distance matrix.
Parameters
----------
i : int
row index (starting at 0) of the distance matrix
j : int
column index (starting at 0) of the distance matrix
Returns
-------
int
(matrix element) index in the corresponding distance vector
"""
return (self.npaths*i) + j - (i+2)*(i+1)//2
def compute_nearest_neighbors(self, P,Q, N=None):
"""Generates Hausdorff nearest neighbor lists of *frames* (by index) and
*distances* for *this* pair of paths corresponding to distance matrix
indices (*i*,*j*).
:meth:`PSAPair.compute_nearest_neighbors` calls
:func:`hausdorff_neighbors` to populate the dictionary of the nearest
neighbor lists of frames (by index) and distances
(:attr:`PSAPair.nearest_neighbors`). This method must explicitly take as
arguments a pair of paths, *P* and *Q*, where *P* is the
:math:`i^\text{th}` path and *Q* is the :math:`j^\text{th}` path among
the set of *N* total paths in the comparison.
Parameters
----------
P : numpy.ndarray
representing a path
Q : numpy.ndarray
representing a path
N : int
size of the distance matrix (of shape *N*-by-*N*) [``None``]
"""
hn = hausdorff_neighbors(P, Q)
self.nearest_neighbors['frames'] = hn['frames']
self.nearest_neighbors['distances'] = hn['distances']
def find_hausdorff_pair(self):
r"""Find the Hausdorff pair (of frames) for *this* pair of paths.
:meth:`PSAPair.find_hausdorff_pair` requires that
:meth:`PSAPair.compute_nearest_neighbors` be called first to
generate the nearest neighbors (and corresponding distances) for each
path in *this* :class:`PSAPair`. The Hausdorff pair is the nearest
neighbor pair (of snapshots/frames), one in the first path and one in
the second, with the largest separation distance.
"""
if self.nearest_neighbors['distances'] is None:
err_str = "Nearest neighbors have not been calculated yet;" \
+ " run compute_nearest_neighbors() first."
raise NoDataError(err_str)
nn_idx_P, nn_idx_Q = self.nearest_neighbors['frames']
nn_dist_P, nn_dist_Q = self.nearest_neighbors['distances']
max_nn_dist_P = max(nn_dist_P)
max_nn_dist_Q = max(nn_dist_Q)
if max_nn_dist_P > max_nn_dist_Q:
max_nn_idx_P = np.argmax(nn_dist_P)
self.hausdorff_pair['frames'] = max_nn_idx_P, nn_idx_P[max_nn_idx_P]
self.hausdorff_pair['distance'] = max_nn_dist_P
else:
max_nn_idx_Q = np.argmax(nn_dist_Q)
self.hausdorff_pair['frames'] = nn_idx_Q[max_nn_idx_Q], max_nn_idx_Q
self.hausdorff_pair['distance'] = max_nn_dist_Q
def get_nearest_neighbors(self, frames=True, distances=True):
"""Returns the nearest neighbor frame indices, distances, or both, for
each path in *this* :class:`PSAPair`.
:meth:`PSAPair.get_nearest_neighbors` requires that the nearest
neighbors (:attr:`nearest_neighbors`) be initially computed by first
calling :meth:`compute_nearest_neighbors`. At least one of *frames*
or *distances* must be ``True``, or else a ``NoDataError`` is raised.
Parameters
----------
frames : bool
if ``True``, return nearest neighbor frame indices
[``True``]
distances : bool
if ``True``, return nearest neighbor distances [``True``]
Returns
-------
dict or tuple
If both *frames* and *distances* are ``True``, return the entire
dictionary (:attr:`nearest_neighbors`); if only *frames* is
``True``, return a pair of :class:`numpy.ndarray` containing the
indices of the frames (for the pair of paths) of the nearest
neighbors; if only *distances* is ``True``, return a pair of
:class:`numpy.ndarray` of the nearest neighbor distances (for the
pair of paths).
"""
if self.nearest_neighbors['distances'] is None:
err_str = "Nearest neighbors have not been calculated yet;" \
+ " run compute_nearest_neighbors() first."
raise NoDataError(err_str)
if frames:
if distances:
return self.nearest_neighbors
else:
return self.nearest_neighbors['frames']
elif distances:
return self.nearest_neighbors['distances']
else:
err_str = "Need to select Hausdorff pair \"frames\" or" \
+ " \"distances\" or both. \"frames\" and \"distances\" cannot" \
+ " both be set to False."
raise NoDataError(err_str)
def get_hausdorff_pair(self, frames=True, distance=True):
"""Returns the Hausdorff pair of frames indices, the Hausdorff distance,
or both, for the paths in *this* :class:`PSAPair`.
:meth:`PSAPair.get_hausdorff_pair` requires that the Hausdorff pair
(and distance) be initially found by first calling
:meth:`find_hausdorff_pair`. At least one of *frames* or *distance*
must be ``True``, or else a ``NoDataError`` is raised.
Parameters
----------
frames : bool
if ``True``, return the indices of the frames
of the Hausdorff pair [``True``]
distances : bool
if ``True``, return Hausdorff distance [``True``]
Returns
-------
dict or tuple
If both *frames* and *distance* are ``True``, return the entire
dictionary (:attr:`hausdorff_pair`); if only *frames* is
``True``, return a pair of ``int`` containing the indices of the
frames (one index per path) of the Hausdorff pair; if only *distance*
is ``True``, return the Hausdorff distance for this path pair.
"""
if self.hausdorff_pair['distance'] is None:
err_str = "Hausdorff pair has not been calculated yet;" \
+ " run find_hausdorff_pair() first."
raise NoDataError(err_str)
if frames:
if distance:
return self.hausdorff_pair
else:
return self.hausdorff_pair['frames']
elif distance:
return self.hausdorff_pair['distance']
else:
err_str = "Need to select Hausdorff pair \"frames\" or" \
+ " \"distance\" or both. \"frames\" and \"distance\" cannot" \
+ " both be set to False."
raise NoDataError(err_str)
class PSAnalysis(object):
"""Perform Path Similarity Analysis (PSA) on a set of trajectories.
The analysis is performed with :meth:`PSAnalysis.run` and stores the result
in the :class:`numpy.ndarray` distance matrix :attr:`PSAnalysis.D`.
:meth:`PSAnalysis.run` also generates a fitted trajectory and path from
alignment of the original trajectories to a reference structure.
.. versionadded:: 0.8
"""
def __init__(self, universes, reference=None, ref_select='name CA',
ref_frame=0, path_select=None, labels=None,
targetdir=os.path.curdir):
"""Setting up Path Similarity Analysis.
The mutual similarity between all unique pairs of trajectories
are computed using a selected path metric.
Parameters
----------
universes : list
a list of universes (:class:`MDAnalysis.Universe` object), each
containing a trajectory
reference : Universe
reference coordinates; :class:`MDAnalysis.Universe` object; if
``None`` the first time step of the first item in `universes` is used
[``None``]
ref_select : str or dict or tuple
The selection to operate on; can be one of:
1. any valid selection string for
:meth:`~MDAnalysis.core.groups.AtomGroup.select_atoms` that
produces identical selections in *mobile* and *reference*; or
2. a dictionary ``{'mobile':sel1, 'reference':sel2}`` (the
:func:`MDAnalysis.analysis.align.fasta2select` function returns
such a dictionary based on a ClustalW_ or STAMP_ sequence
alignment); or
3. a tuple ``(sel1, sel2)``
When using 2. or 3. with *sel1* and *sel2* then these selections
can also each be a list of selection strings (to generate an
AtomGroup with defined atom order as described under
:ref:`ordered-selections-label`).
mass_weighted : bool
do a mass-weighted RMSD fit [``False``]
tol_mass : float
Reject match if the atomic masses for matched atoms differ by more
than *tol_mass* [0.1]
ref_frame : int
frame index to select frame from *reference* [0]
path_select : str
atom selection composing coordinates of (fitted) path; if ``None``
then *path_select* is set to *ref_select* [``None``]
targetdir : str
output files are saved there; if ``None`` then "./psadata" is
created and used [.]
labels : list
list of strings, names of trajectories to be analyzed
(:class:`MDAnalysis.Universe`); if ``None``, defaults to trajectory
names [``None``]
.. _ClustalW: http://www.clustal.org/
.. _STAMP: http://www.compbio.dundee.ac.uk/manuals/stamp.4.2/
"""
self.universes = universes
self.u_reference = self.universes[0] if reference is None else reference
self.ref_select = ref_select
self.ref_frame = ref_frame
self.path_select = self.ref_select if path_select is None else path_select
if targetdir is None:
try:
targetdir = os.path.join(os.path.curdir, 'psadata')
os.makedirs(targetdir)
except OSError:
if not os.path.isdir(targetdir):
raise
self.targetdir = os.path.realpath(targetdir)
# Set default directory names for storing topology/reference structures,
# fitted trajectories, paths, distance matrices, and plots
self.datadirs = {'fitted_trajs' : '/fitted_trajs',
'paths' : '/paths',
'distance_matrices' : '/distance_matrices',
'plots' : '/plots'}
for dir_name, directory in six.iteritems(self.datadirs):
try:
full_dir_name = os.path.join(self.targetdir, dir_name)
os.makedirs(full_dir_name)
except OSError:
if not os.path.isdir(full_dir_name):
raise
# Keep track of topology, trajectory, and related files
trj_names = []
for i, u in enumerate(self.universes):
head, tail = os.path.split(u.trajectory.filename)
filename, ext = os.path.splitext(tail)
trj_names.append(filename)
self.trj_names = trj_names
self.fit_trj_names = None
self.path_names = None
self.top_name = self.universes[0].filename if len(universes) != 0 else None
self.labels = labels or self.trj_names
# Names of persistence (pickle) files where topology and trajectory
# filenames are stored--should not be modified by user
self._top_pkl = os.path.join(self.targetdir, "psa_top-name.pkl")
self._trjs_pkl = os.path.join(self.targetdir, "psa_orig-traj-names.pkl")
self._fit_trjs_pkl = os.path.join(self.targetdir, "psa_fitted-traj-names.pkl")
self._paths_pkl = os.path.join(self.targetdir, "psa_path-names.pkl")
self._labels_pkl = os.path.join(self.targetdir, "psa_labels.pkl")
# Pickle topology and trajectory filenames for this analysis to curdir
with open(self._top_pkl, 'wb') as output:
cPickle.dump(self.top_name, output)
with open(self._trjs_pkl, 'wb') as output:
cPickle.dump(self.trj_names, output)
with open(self._labels_pkl, 'wb') as output:
cPickle.dump(self.labels, output)
self.natoms = None
self.npaths = None
self.paths = None
self.D = None # pairwise distances
self._HP = None # (distance vector order) list of all Hausdorff pairs
self._NN = None # (distance vector order) list of all nearest neighbors
self._psa_pairs = None # (distance vector order) list of all PSAPairs
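    # Minimal usage sketch (an editorial assumption, not part of the original
    # docs): `u1` and `u2` stand for MDAnalysis Universe objects built from
    # real topology/trajectory files.
    #
    #   psa = PSAnalysis([u1, u2], ref_select='name CA',
    #                    labels=['run1', 'run2'])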
def generate_paths(self, **kwargs):
"""Generate paths, aligning each to reference structure if necessary.
Parameters
----------
align : bool
Align trajectories to atom selection :attr:`PSAnalysis.ref_select`
of :attr:`PSAnalysis.u_reference` [``False``]
filename : str
strings representing base filename for fitted trajectories and
paths [``None``]
infix : str
additional tag string that is inserted into the output filename of
the fitted trajectory files ['']
mass_weighted : bool
do a mass-weighted RMSD fit
tol_mass : float
Reject match if the atomic masses for matched atoms differ by more
than *tol_mass*
ref_frame : int
frame index to select frame from *reference*
flat : bool
represent :attr:`Path.path` as a 2D (|2D|) :class:`numpy.ndarray`;
if ``False`` then :attr:`Path.path` is a 3D (|3D|)
:class:`numpy.ndarray` [``False``]
save : bool
if ``True``, pickle list of names for fitted trajectories
[``True``]
store : bool
            if ``True`` then writes each path (:class:`numpy.ndarray`)
            in :attr:`PSAnalysis.paths` to a numpy ``.npy`` file
            [``False``]
        The fitted trajectories are written to new files in the
        "/fitted_trajs" subdirectory in :attr:`PSAnalysis.targetdir` named
        "filename(*trajectory*)XXX*infix*_psa", where "XXX" is a number between
        000 and 999; the extension of each file is the same as its original.
        Optionally, the paths can also be saved in numpy ``.npy`` format in
        the "/paths" subdirectory in :attr:`PSAnalysis.targetdir` for
persistence and can be accessed as the attribute
:attr:`PSAnalysis.paths`.
"""
align = kwargs.pop('align', False)
filename = kwargs.pop('filename', 'fitted')
infix = kwargs.pop('infix', '')
mass_weighted = kwargs.pop('mass_weighted', False)
        tol_mass = kwargs.pop('tol_mass', 0.1)
ref_frame = kwargs.pop('ref_frame', self.ref_frame)
flat = kwargs.pop('flat', False)
save = kwargs.pop('save', True)
store = kwargs.pop('store', False)
paths = []
fit_trj_names = []
for i, u in enumerate(self.universes):
p = Path(u, self.u_reference, ref_select=self.ref_select, \
path_select=self.path_select, ref_frame=ref_frame)
trj_dir = self.targetdir + self.datadirs['fitted_trajs']
postfix = '{0}{1}{2:03n}'.format(infix, '_psa', i+1)
top_name, fit_trj_name = p.run(align=align, filename=filename, \
postfix=postfix, \
targetdir=trj_dir, \
mass_weighted=mass_weighted, \
tol_mass=tol_mass, flat=flat)
paths.append(p.path)
fit_trj_names.append(fit_trj_name)
self.natoms, axis = get_coord_axes(paths[0])
self.paths = paths
self.npaths = len(paths)
self.fit_trj_names = fit_trj_names
if save:
with open(self._fit_trjs_pkl, 'wb') as output:
cPickle.dump(self.fit_trj_names, output)
if store:
filename = kwargs.pop('filename', None)
self.save_paths(filename=filename)
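    # Sketch (editorial, not original documentation): a typical call aligns
    # each trajectory to the reference selection before building paths.
    #
    #   psa.generate_paths(align=True, save=True, store=True)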
def run(self, **kwargs):
"""Perform path similarity analysis on the trajectories to compute
the distance matrix.
A number of parameters can be changed from the defaults. The
result is stored as the array :attr:`PSAnalysis.D`.
Parameters
----------
metric : str or callable
selection string specifying the path metric to measure pairwise
distances among :attr:`PSAnalysis.paths` or a callable with the
same call signature as :func:`hausdorff`
[``'hausdorff'``]
start : int
`start` and `stop` frame index with `step` size: analyze
``trajectory[start:stop:step]`` [``None``]
stop : int
step : int
store : bool
            if ``True`` then writes :attr:`PSAnalysis.D` to text and
            numpy (``.npy``) files [``True``]
filename : str
string, filename to save :attr:`PSAnalysis.D`
"""
metric = kwargs.pop('metric', 'hausdorff')
start = kwargs.pop('start', None)
stop = kwargs.pop('stop', None)
step = kwargs.pop('step', None)
store = kwargs.pop('store', True)
        if isinstance(metric, str):
metric_func = get_path_metric_func(metric)
else:
metric_func = metric
numpaths = self.npaths
D = np.zeros((numpaths,numpaths))
for i in range(0, numpaths-1):
for j in range(i+1, numpaths):
P = self.paths[i][start:stop:step]
Q = self.paths[j][start:stop:step]
D[i,j] = metric_func(P, Q)
D[j,i] = D[i,j]
self.D = D
if store:
filename = kwargs.pop('filename', str(metric))
self.save_result(filename=filename)
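    # Sketch (editorial assumption): `metric` may also be a callable taking
    # two path arrays and returning a scalar, mirroring the call
    # `metric_func(P, Q)` above. `mean_frame_distance` is a hypothetical
    # user-defined metric, shown only for illustration:
    #
    #   import numpy as np
    #   def mean_frame_distance(P, Q):
    #       n = min(len(P), len(Q))
    #       return float(np.mean(np.abs(P[:n] - Q[:n])))
    #   psa.run(metric=mean_frame_distance)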
def run_pairs_analysis(self, **kwargs):
"""Perform PSA Hausdorff (nearest neighbor) pairs analysis on all unique
pairs of paths in :attr:`PSAnalysis.paths`.
Partial results can be stored in separate lists, where each list is
indexed according to distance vector convention (i.e., element *(i,j)*
in distance matrix representation corresponds to element
        :math:`s = Ni + j - (i+1)(i+2)/2` in distance vector representation, which is
the :math:`s^\text{th}` comparison). For each unique pair of paths, the
nearest neighbors for that pair can be stored in :attr:`NN` and the
Hausdorff pair in :attr:`HP`. :attr:`PP` stores the full information
        of Hausdorff pairs analysis that is available for each pair of paths,
including nearest neighbors lists and the Hausdorff pairs.
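        For example, with :math:`N = 4` paths the pair :math:`(i,j) = (1,2)`
        maps to :math:`s = 4(1) + 2 - (2)(3)/2 = 3`, i.e. the fourth
        comparison in the distance vector.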
Parameters
----------
start : int
`start` and `stop` frame index with `step` size: analyze
``trajectory[start:stop:step]`` [``None``]
stop : int
step : int
neighbors : bool
if ``True``, then stores dictionary of nearest neighbor
frames/distances in :attr:`PSAnalysis.NN` [``False``]
hausdorff_pairs : bool
if ``True``, then stores dictionary of Hausdorff pair
frames/distances in :attr:`PSAnalysis.HP` [``False``]
"""
start = kwargs.pop('start', None)
stop = kwargs.pop('stop', None)
step = kwargs.pop('step', None)
neighbors = kwargs.pop('neighbors', False)
hausdorff_pairs = kwargs.pop('hausdorff_pairs', False)
numpaths = self.npaths
self._NN = [] # list of nearest neighbors pairs
self._HP = [] # list of Hausdorff pairs
self._psa_pairs = [] # list of PSAPairs
for i in range(0, numpaths-1):
for j in range(i+1, numpaths):
pp = PSAPair(i, j, numpaths)
P = self.paths[i][start:stop:step]
Q = self.paths[j][start:stop:step]
pp.compute_nearest_neighbors(P, Q, self.natoms)
pp.find_hausdorff_pair()
self._psa_pairs.append(pp)
if neighbors:
self._NN.append(pp.get_nearest_neighbors())
if hausdorff_pairs:
self._HP.append(pp.get_hausdorff_pair())
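    # Sketch (editorial): with both flags set, per-pair results can be read
    # back through the corresponding properties (in distance vector order):
    #
    #   psa.run_pairs_analysis(neighbors=True, hausdorff_pairs=True)
    #   psa.hausdorff_pairs[0]['distance']     # Hausdorff distance of pair 0
    #   psa.nearest_neighbors[0]['distances']  # per-path NN distance arrays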
def save_result(self, filename=None):
"""Save distance matrix :attr:`PSAnalysis.D` to a numpy compressed npz
file and text file.
The data are saved with :func:`numpy.savez_compressed` and
:func:`numpy.savetxt` in the directory specified by
:attr:`PSAnalysis.targetdir`.
Parameters
----------
filename : str
specifies filename [``None``]
Returns
-------
filename : str
"""
filename = filename or 'psa_distances'
head = self.targetdir + self.datadirs['distance_matrices']
outfile = os.path.join(head, filename)
if self.D is None:
raise NoDataError("Distance matrix has not been calculated yet")
np.save(outfile + '.npy', self.D)
np.savetxt(outfile + '.dat', self.D)
logger.info("Wrote distance matrix to file %r.npz", outfile)
logger.info("Wrote distance matrix to file %r.dat", outfile)
return filename
def save_paths(self, filename=None):
"""Save fitted :attr:`PSAnalysis.paths` to numpy compressed npz files.
The data are saved with :func:`numpy.savez_compressed` in the directory
specified by :attr:`PSAnalysis.targetdir`.
Parameters
----------
filename : str
specifies filename [``None``]
Returns
-------
filename : str
"""
filename = filename or 'path_psa'
head = self.targetdir + self.datadirs['paths']
outfile = os.path.join(head, filename)
if self.paths is None:
raise NoDataError("Paths have not been calculated yet")
path_names = []
for i, path in enumerate(self.paths):
current_outfile = "{0}{1:03n}.npy".format(outfile, i+1)
np.save(current_outfile, self.paths[i])
path_names.append(current_outfile)
logger.info("Wrote path to file %r", current_outfile)
self.path_names = path_names
with open(self._paths_pkl, 'wb') as output:
cPickle.dump(self.path_names, output)
return filename
def load(self):
"""Load fitted paths specified by 'psa_path-names.pkl' in
:attr:`PSAnalysis.targetdir`.
"""
if not os.path.exists(self._paths_pkl):
            raise NoDataError("Fitted trajectories cannot be loaded; save "
                              "file {0} does not exist.".format(self._paths_pkl))
self.path_names = np.load(self._paths_pkl)
self.paths = [np.load(pname) for pname in self.path_names]
if os.path.exists(self._labels_pkl):
self.labels = np.load(self._labels_pkl)
print("Loaded paths from " + self._paths_pkl)
def plot(self, filename=None, linkage='ward', count_sort=False,
distance_sort=False, figsize=4.5, labelsize=12):
"""Plot a clustered distance matrix.
        Uses method *linkage* and plots the corresponding dendrogram. Rows
(and columns) are identified using the list of strings specified by
:attr:`PSAnalysis.labels`.
        If `filename` is supplied then the figure is also written to file (the
        suffix determines the file type, e.g. pdf, png, eps, ...).
Parameters
----------
filename : str
save figure to *filename* [``None``]
linkage : str
name of linkage criterion for clustering [``'ward'``]
count_sort : bool
see :func:`scipy.cluster.hierarchy.dendrogram` [``False``]
distance_sort : bool
see :func:`scipy.cluster.hierarchy.dendrogram` [``False``]
figsize : float
set the vertical size of plot in inches [``4.5``]
labelsize : float
            set the font size for colorbar labels; font size for path labels on
            dendrogram defaults to 4 points smaller [``12``]
"""
from matplotlib.pyplot import figure, colorbar, cm, savefig, clf
if self.D is None:
err_str = "No distance data; do 'PSAnalysis.run(store=True)' first."
raise ValueError(err_str)
npaths = len(self.D)
dist_matrix = self.D
dgram_loc, hmap_loc, cbar_loc = self._get_plot_obj_locs()
aspect_ratio = 1.25
clf()
fig = figure(figsize=(figsize*aspect_ratio, figsize))
ax_hmap = fig.add_axes(hmap_loc)
ax_dgram = fig.add_axes(dgram_loc)
Z, dgram = self.cluster(dist_matrix, \
method=linkage, \
count_sort=count_sort, \
distance_sort=distance_sort)
rowidx = colidx = dgram['leaves'] # get row-wise ordering from clustering
        ax_dgram.invert_yaxis()  # Place origin at upper left (from lower left)
minDist, maxDist = 0, np.max(dist_matrix)
dist_matrix_clus = dist_matrix[rowidx,:]
dist_matrix_clus = dist_matrix_clus[:,colidx]
im = ax_hmap.matshow(dist_matrix_clus, aspect='auto', origin='lower', \
cmap=cm.YlGn, vmin=minDist, vmax=maxDist)
ax_hmap.invert_yaxis() # Place origin at upper left (from lower left)
ax_hmap.locator_params(nbins=npaths)
ax_hmap.set_xticks(np.arange(npaths), minor=True)
ax_hmap.set_yticks(np.arange(npaths), minor=True)
ax_hmap.tick_params(axis='x', which='both', labelleft='off', \
labelright='off', labeltop='on', labelsize=0)
ax_hmap.tick_params(axis='y', which='both', labelleft='on', \
labelright='off', labeltop='off', labelsize=0)
rowlabels = [self.labels[i] for i in rowidx]
collabels = [self.labels[i] for i in colidx]
ax_hmap.set_xticklabels(collabels, rotation='vertical', \
size=(labelsize-4), multialignment='center', minor=True)
ax_hmap.set_yticklabels(rowlabels, rotation='horizontal', \
size=(labelsize-4), multialignment='left', ha='right', \
minor=True)
ax_color = fig.add_axes(cbar_loc)
colorbar(im, cax=ax_color, ticks=np.linspace(minDist, maxDist, 10), \
format="%0.1f")
ax_color.tick_params(labelsize=labelsize)
# Remove major ticks from both heat map axes
for tic in ax_hmap.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
for tic in ax_hmap.yaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
# Remove minor ticks from both heat map axes
for tic in ax_hmap.xaxis.get_minor_ticks():
tic.tick1On = tic.tick2On = False
for tic in ax_hmap.yaxis.get_minor_ticks():
tic.tick1On = tic.tick2On = False
# Remove tickmarks from colorbar
for tic in ax_color.yaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
if filename is not None:
head = self.targetdir + self.datadirs['plots']
outfile = os.path.join(head, filename)
savefig(outfile, dpi=300, bbox_inches='tight')
return Z, dgram, dist_matrix_clus
def plot_annotated_heatmap(self, filename=None, linkage='ward', \
count_sort=False, distance_sort=False, \
figsize=8, annot_size=6.5):
"""Plot a clustered distance matrix.
Uses method `linkage` and plots annotated distances in the matrix. Rows
(and columns) are identified using the list of strings specified by
:attr:`PSAnalysis.labels`.
        If `filename` is supplied then the figure is also written to file (the
        suffix determines the file type, e.g. pdf, png, eps, ...).
Parameters
----------
filename : str
save figure to *filename* [``None``]
linkage : str
name of linkage criterion for clustering [``'ward'``]
count_sort : bool
see :func:`scipy.cluster.hierarchy.dendrogram` [``False``]
distance_sort : bool
see :func:`scipy.cluster.hierarchy.dendrogram` [``False``]
figsize : float
            set the vertical size of plot in inches [``8``]
annot_size : float
font size of annotation labels on heat map [``6.5``]
"""
from matplotlib.pyplot import figure, colorbar, cm, savefig, clf
try:
import seaborn.apionly as sns
except ImportError:
raise ImportError(
"""ERROR --- The seaborn package cannot be found!
The seaborn API could not be imported. Please install it first.
You can try installing with pip directly from the
internet:
pip install seaborn
Alternatively, download the package from
http://pypi.python.org/pypi/seaborn/
and install in the usual manner.
"""
)
if self.D is None:
err_str = "No distance data; do 'PSAnalysis.run(store=True)' first."
raise ValueError(err_str)
dist_matrix = self.D
Z, dgram = self.cluster(dist_matrix, \
method=linkage, \
count_sort=count_sort, \
distance_sort=distance_sort, \
no_plot=True)
rowidx = colidx = dgram['leaves'] # get row-wise ordering from clustering
dist_matrix_clus = dist_matrix[rowidx,:]
dist_matrix_clus = dist_matrix_clus[:,colidx]
clf()
aspect_ratio = 1.25
fig = figure(figsize=(figsize*aspect_ratio, figsize))
ax_hmap = fig.add_subplot(111)
ax_hmap = sns.heatmap(dist_matrix_clus, \
linewidths=0.25, cmap=cm.YlGn, annot=True, fmt='3.1f', \
square=True, xticklabels=rowidx, yticklabels=colidx, \
annot_kws={"size": 7}, ax=ax_hmap)
# Remove major ticks from both heat map axes
for tic in ax_hmap.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
for tic in ax_hmap.yaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
# Remove minor ticks from both heat map axes
for tic in ax_hmap.xaxis.get_minor_ticks():
tic.tick1On = tic.tick2On = False
for tic in ax_hmap.yaxis.get_minor_ticks():
tic.tick1On = tic.tick2On = False
if filename is not None:
head = self.targetdir + self.datadirs['plots']
outfile = os.path.join(head, filename)
savefig(outfile, dpi=600, bbox_inches='tight')
return Z, dgram, dist_matrix_clus
def plot_nearest_neighbors(self, filename=None, idx=0, \
labels=('Path 1', 'Path 2'), figsize=4.5, \
multiplot=False, aspect_ratio=1.75, \
labelsize=12):
"""Plot nearest neighbor distances as a function of normalized frame
number.
The frame number is mapped to the interval *[0, 1]*.
        If `filename` is supplied then the figure is also written to file (the
        suffix determines the file type, e.g. pdf, png, eps, ...).
Parameters
----------
filename : str
save figure to *filename* [``None``]
idx : int
index of path (pair) comparison to plot [``0``]
labels : (str, str)
pair of names to label nearest neighbor distance
curves [``('Path 1', 'Path 2')``]
figsize : float
set the vertical size of plot in inches [``4.5``]
multiplot : bool
set to ``True`` to enable plotting multiple nearest
neighbor distances on the same figure [``False``]
aspect_ratio : float
set the ratio of width to height of the plot [``1.75``]
labelsize : float
            set the font size for axis and tick labels [``12``]
"""
from matplotlib.pyplot import figure, savefig, tight_layout, clf, show
try:
import seaborn.apionly as sns
except ImportError:
raise ImportError(
"""ERROR --- The seaborn package cannot be found!
The seaborn API could not be imported. Please install it first.
You can try installing with pip directly from the
internet:
pip install seaborn
Alternatively, download the package from
http://pypi.python.org/pypi/seaborn/
and install in the usual manner.
"""
)
colors = sns.xkcd_palette(["cherry", "windows blue"])
if self._NN is None:
err_str = ("No nearest neighbor data; run "
"'PSAnalysis.run_nearest_neighbors()' first.")
raise ValueError(err_str)
sns.set_style('whitegrid')
if not multiplot:
clf()
fig = figure(figsize=(figsize*aspect_ratio, figsize))
ax = fig.add_subplot(111)
nn_dist_P, nn_dist_Q = self._NN[idx]['distances']
frames_P = len(nn_dist_P)
frames_Q = len(nn_dist_Q)
progress_P = np.asarray(range(frames_P))/(1.0*frames_P)
progress_Q = np.asarray(range(frames_Q))/(1.0*frames_Q)
ax.plot(progress_P, nn_dist_P, color=colors[0], lw=1.5, label=labels[0])
ax.plot(progress_Q, nn_dist_Q, color=colors[1], lw=1.5, label=labels[1])
ax.legend()
        ax.set_xlabel(r'(normalized) progress by frame number',
                      fontsize=labelsize)
        ax.set_ylabel(r'nearest neighbor rmsd ($\AA$)', fontsize=labelsize)
        ax.tick_params(axis='both', which='major', labelsize=labelsize, pad=4)
sns.despine(bottom=True, left=True, ax=ax)
tight_layout()
if filename is not None:
head = self.targetdir + self.datadirs['plots']
outfile = os.path.join(head, filename)
savefig(outfile, dpi=300, bbox_inches='tight')
show()
def cluster(self, distArray, method='ward', count_sort=False, \
distance_sort=False, no_plot=False, no_labels=True, \
color_threshold=4):
"""Cluster trajectories and optionally plot the dendrogram.
Parameters
----------
method : str
name of linkage criterion for clustering [``'ward'``]
no_plot : bool
if ``True``, do not render the dendrogram [``False``]
no_labels : bool
if ``True`` then do not label dendrogram [``True``]
color_threshold : float
For brevity, let t be the color_threshold. Colors all the
descendent links below a cluster node k the same color if k is
the first node below the cut threshold t. All links connecting
nodes with distances greater than or equal to the threshold are
colored blue. If t is less than or equal to zero, all nodes are
            colored blue. If color_threshold is None or 'default',
            corresponding with MATLAB(TM) behavior, the threshold is set to
            0.7*max(Z[:,2]). [``4``]
Returns
-------
        (Z, dgram) : tuple
            the linkage matrix from :func:`scipy.cluster.hierarchy.linkage`
            and the dendrogram dictionary, whose ``'leaves'`` entry gives the
            row-wise order of the objects after clustering
"""
import matplotlib
from scipy.cluster.hierarchy import linkage, dendrogram
matplotlib.rcParams['lines.linewidth'] = 0.5
Z = linkage(distArray, method=method)
dgram = dendrogram(Z, no_labels=no_labels, orientation='left', \
count_sort=count_sort, distance_sort=distance_sort, \
no_plot=no_plot, color_threshold=color_threshold)
return Z, dgram
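    # Editorial note (assumption): scipy's `linkage` expects a condensed
    # distance vector; passing the square matrix, as plot() does, treats the
    # rows as observation vectors and newer SciPy versions warn about it.
    # A caller can condense the matrix first:
    #
    #   from scipy.spatial.distance import squareform
    #   Z, dgram = psa.cluster(squareform(psa.D), method='ward')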
def _get_plot_obj_locs(self):
"""Find and return coordinates for dendrogram, heat map, and colorbar.
Returns
-------
tuple
tuple of coordinates for placing the dendrogram, heat map, and
colorbar in the plot.
"""
plot_xstart = 0.04
plot_ystart = 0.04
label_margin = 0.155
dgram_height = 0.2 # dendrogram heights(s)
hmap_xstart = plot_xstart + dgram_height + label_margin
# Set locations for dendrogram(s), matrix, and colorbar
hmap_height = 0.8
hmap_width = 0.6
dgram_loc = [plot_xstart, plot_ystart, dgram_height, hmap_height]
cbar_width = 0.02
cbar_xstart = hmap_xstart + hmap_width + 0.01
cbar_loc = [cbar_xstart, plot_ystart, cbar_width, hmap_height]
hmap_loc = [hmap_xstart, plot_ystart, hmap_width, hmap_height]
return dgram_loc, hmap_loc, cbar_loc
def get_num_atoms(self):
"""Return the number of atoms used to construct the :class:`Path` instances in
:class:`PSA`.
.. note::
Must run :meth:`PSAnalysis.generate_paths` prior to calling this
method.
Returns
-------
int
            the number of atoms in the :class:`Path` instances of :class:`PSA`
"""
if self.natoms is None:
err_str = "No path data; do 'PSAnalysis.generate_paths()' first."
raise ValueError(err_str)
return self.natoms
def get_num_paths(self):
"""Return the number of paths in :class:`PSA`.
.. note::
Must run :meth:`PSAnalysis.generate_paths` prior to calling this
method.
Returns
-------
int
the number of paths in :class:`PSA`
"""
if self.npaths is None:
err_str = "No path data; do 'PSAnalysis.generate_paths()' first."
raise ValueError(err_str)
return self.npaths
def get_paths(self):
"""Return the paths in :class:`PSA`.
.. note::
Must run :meth:`PSAnalysis.generate_paths` prior to calling this
method.
Returns
-------
list
list of :class:`numpy.ndarray` representations of paths in
:class:`PSA`
"""
if self.paths is None:
err_str = "No path data; do 'PSAnalysis.generate_paths()' first."
raise ValueError(err_str)
return self.paths
def get_pairwise_distances(self, vectorform=False):
"""Return the distance matrix (or vector) of pairwise path distances.
.. note::
Must run :meth:`PSAnalysis.run` with ``store=True`` prior to
calling this method.
Parameters
----------
vectorform : bool
if ``True``, return the distance vector instead [``False``]
Returns
-------
numpy.ndarray
representation of the distance matrix (or vector)
"""
if self.D is None:
err_str = "No distance data; do 'PSAnalysis.run(store=True)' first."
raise ValueError(err_str)
if vectorform:
from scipy.spatial.distance import squareform
return squareform(self.D)
else:
return self.D
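    # Sketch (editorial): the two forms round-trip through
    # scipy.spatial.distance.squareform:
    #
    #   v = psa.get_pairwise_distances(vectorform=True)  # length N*(N-1)/2
    #   D = squareform(v)                                # back to N x N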
@property
def psa_pairs(self):
"""The list of :class:`PSAPair` instances for each pair of paths.
:attr:`psa_pairs` is a list of all :class:`PSAPair` objects (in
distance vector order). The elements of a :class:`PSAPair` are pairs of
paths that have been compared using
:meth:`PSAnalysis.run_pairs_analysis`. Each :class:`PSAPair` contains
nearest neighbor and Hausdorff pair information specific to a pair of
paths. The nearest neighbor frames and distances for a :class:`PSAPair`
can be accessed in the nearest neighbor dictionary using the keys
'frames' and 'distances', respectively. E.g.,
:attr:`PSAPair.nearest_neighbors['distances']` returns a *pair* of
:class:`numpy.ndarray` corresponding to the nearest neighbor distances
for each path. Similarly, Hausdorff pair information can be accessed
using :attr:`PSAPair.hausdorff_pair` with the keys 'frames' and
'distance'.
.. note::
Must run :meth:`PSAnalysis.run_pairs_analysis` prior to calling this
method.
"""
if self._psa_pairs is None:
err_str = "No nearest neighbors data; do" \
+ " 'PSAnalysis.run_pairs_analysis()' first."
raise ValueError(err_str)
return self._psa_pairs
@property
def hausdorff_pairs(self):
"""The Hausdorff pair for each (unique) pairs of paths.
This attribute contains a list of Hausdorff pair information (in
distance vector order), where each element is a dictionary containing
the pair of frames and the (Hausdorff) distance between a pair of
paths. See :meth:`PSAnalysis.psa_pairs` and
:attr:`PSAPair.hausdorff_pair` for more information about accessing
Hausdorff pair data.
.. note::
Must run :meth:`PSAnalysis.run_pairs_analysis` with
``hausdorff_pairs=True`` prior to calling this method.
"""
if self._HP is None:
err_str = "No Hausdorff pairs data; do " \
+ "'PSAnalysis.run_pairs_analysis(hausdorff_pairs=True)' " \
+ "first."
raise ValueError(err_str)
return self._HP
@property
def nearest_neighbors(self):
"""The nearest neighbors for each (unique) pair of paths.
This attribute contains a list of nearest neighbor information (in
distance vector order), where each element is a dictionary containing
the nearest neighbor frames and distances between a pair of paths. See
:meth:`PSAnalysis.psa_pairs` and :attr:`PSAPair.nearest_neighbors` for
more information about accessing nearest neighbor data.
.. note::
Must run :meth:`PSAnalysis.run_pairs_analysis` with
``neighbors=True`` prior to calling this method.
"""
if self._NN is None:
err_str = "No nearest neighbors data; do" \
+ " 'PSAnalysis.run_pairs_analysis(neighbors=True)' first."
raise ValueError(err_str)
return self._NN
|
kain88-de/mdanalysis
|
package/MDAnalysis/analysis/psa.py
|
Python
|
gpl-2.0
| 83,800
|
[
"MDAnalysis"
] |
f981f67f978815fd3f8b3154e1506bcd7a8bb3c381c7098f34852747b1b1a4ab
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from importlib import import_module
from qiime2.plugin import (Plugin, Bool, Int, Str, Choices, Range, List, Set,
Visualization, Metadata, MetadataColumn,
Categorical, Numeric, TypeMatch)
from .format import (
IntSequenceFormat,
IntSequenceFormatV2,
IntSequenceMultiFileDirectoryFormat,
MappingFormat,
SingleIntFormat,
IntSequenceDirectoryFormat,
IntSequenceV2DirectoryFormat,
MappingDirectoryFormat,
FourIntsDirectoryFormat,
RedundantSingleIntDirectoryFormat,
UnimportableFormat,
UnimportableDirectoryFormat,
EchoFormat,
EchoDirectoryFormat,
Cephalapod,
CephalapodDirectoryFormat,
)
from .type import (IntSequence1, IntSequence2, IntSequence3, Mapping, FourInts,
SingleInt, Kennel, Dog, Cat, C1, C2, C3, Foo, Bar, Baz,
AscIntSequence, Squid, Octopus, Cuttlefish)
from .method import (concatenate_ints, split_ints, merge_mappings,
identity_with_metadata, identity_with_metadata_column,
identity_with_categorical_metadata_column,
identity_with_numeric_metadata_column,
identity_with_optional_metadata,
identity_with_optional_metadata_column,
params_only_method, no_input_method, deprecated_method,
optional_artifacts_method, long_description_method,
docstring_order_method, variadic_input_method,
unioned_primitives, type_match_list_and_set)
from .visualizer import (most_common_viz, mapping_viz, params_only_viz,
no_input_viz)
from .pipeline import (parameter_only_pipeline, typical_pipeline,
optional_artifact_pipeline, visualizer_only_pipeline,
pipelines_in_pipeline, pointless_pipeline,
failing_pipeline)
from ..cite import Citations
from .examples import (concatenate_ints_simple, concatenate_ints_complex,
typical_pipeline_simple, typical_pipeline_complex,
comments_only, identity_with_metadata_simple,
identity_with_metadata_merging,
identity_with_metadata_column_get_mdc,
variadic_input_simple, optional_inputs,
comments_only_factory,
)
citations = Citations.load('citations.bib', package='qiime2.core.testing')
dummy_plugin = Plugin(
name='dummy-plugin',
description='Description of dummy plugin.',
short_description='Dummy plugin for testing.',
version='0.0.0-dev',
website='https://github.com/qiime2/qiime2',
package='qiime2.core.testing',
user_support_text='For help, see https://qiime2.org',
citations=[citations['unger1998does'], citations['berry1997flying']]
)
import_module('qiime2.core.testing.transformer')
import_module('qiime2.core.testing.validator')
# Register semantic types
dummy_plugin.register_semantic_types(IntSequence1, IntSequence2, IntSequence3,
Mapping, FourInts, Kennel, Dog, Cat,
SingleInt, C1, C2, C3, Foo, Bar, Baz,
AscIntSequence, Squid, Octopus,
Cuttlefish)
# Register formats
dummy_plugin.register_formats(
IntSequenceFormatV2, MappingFormat, IntSequenceV2DirectoryFormat,
IntSequenceMultiFileDirectoryFormat, MappingDirectoryFormat,
EchoDirectoryFormat, EchoFormat, Cephalapod, CephalapodDirectoryFormat)
dummy_plugin.register_formats(
FourIntsDirectoryFormat, UnimportableDirectoryFormat, UnimportableFormat,
citations=[citations['baerheim1994effect']])
dummy_plugin.register_views(
int, IntSequenceFormat, IntSequenceDirectoryFormat,
SingleIntFormat, RedundantSingleIntDirectoryFormat,
citations=[citations['mayer2012walking']])
dummy_plugin.register_semantic_type_to_format(
IntSequence1,
artifact_format=IntSequenceDirectoryFormat
)
dummy_plugin.register_semantic_type_to_format(
IntSequence2,
artifact_format=IntSequenceV2DirectoryFormat
)
dummy_plugin.register_semantic_type_to_format(
IntSequence3,
artifact_format=IntSequenceMultiFileDirectoryFormat
)
dummy_plugin.register_semantic_type_to_format(
Mapping,
artifact_format=MappingDirectoryFormat
)
dummy_plugin.register_semantic_type_to_format(
FourInts,
artifact_format=FourIntsDirectoryFormat
)
dummy_plugin.register_semantic_type_to_format(
SingleInt,
artifact_format=RedundantSingleIntDirectoryFormat
)
dummy_plugin.register_semantic_type_to_format(
Kennel[Dog | Cat],
artifact_format=MappingDirectoryFormat
)
dummy_plugin.register_semantic_type_to_format(
C3[C1[Foo | Bar | Baz] | Foo | Bar | Baz,
C1[Foo | Bar | Baz] | Foo | Bar | Baz,
C1[Foo | Bar | Baz] | Foo | Bar | Baz]
| C2[Foo | Bar | Baz, Foo | Bar | Baz]
| C1[Foo | Bar | Baz | C2[Foo | Bar | Baz, Foo | Bar | Baz]]
| Foo
| Bar
| Baz,
artifact_format=EchoDirectoryFormat)
dummy_plugin.register_semantic_type_to_format(
AscIntSequence,
artifact_format=IntSequenceDirectoryFormat)
dummy_plugin.register_semantic_type_to_format(
Squid | Octopus | Cuttlefish,
artifact_format=CephalapodDirectoryFormat)
# TODO add an optional parameter to this method when they are supported
dummy_plugin.methods.register_function(
function=concatenate_ints,
inputs={
'ints1': IntSequence1 | IntSequence2,
'ints2': IntSequence1,
'ints3': IntSequence2
},
parameters={
'int1': Int,
'int2': Int
},
outputs=[
('concatenated_ints', IntSequence1)
],
name='Concatenate integers',
description='This method concatenates integers into'
' a single sequence in the order they are provided.',
citations=[citations['baerheim1994effect']],
examples={'concatenate_ints_simple': concatenate_ints_simple,
'concatenate_ints_complex': concatenate_ints_complex,
'comments_only': comments_only,
# execute factory to make a closure to test pickling
'comments_only_factory': comments_only_factory(),
},
)
T = TypeMatch([IntSequence1, IntSequence2])
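# Editorial note: TypeMatch binds the outputs to whichever member type the
# input matched, so an IntSequence1 input yields IntSequence1 outputs (and
# likewise for IntSequence2).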
dummy_plugin.methods.register_function(
function=split_ints,
inputs={
'ints': T
},
parameters={},
outputs=[
('left', T),
('right', T)
],
name='Split sequence of integers in half',
description='This method splits a sequence of integers in half, returning '
'the two halves (left and right). If the input sequence\'s '
'length is not evenly divisible by 2, the right half will '
'have one more element than the left.',
citations=[
citations['witcombe2006sword'], citations['reimers2012response']]
)
dummy_plugin.methods.register_function(
function=merge_mappings,
inputs={
'mapping1': Mapping,
'mapping2': Mapping
},
input_descriptions={
'mapping1': 'Mapping object to be merged'
},
parameters={},
outputs=[
('merged_mapping', Mapping)
],
output_descriptions={
'merged_mapping': 'Resulting merged Mapping object'},
name='Merge mappings',
description='This method merges two mappings into a single new mapping. '
'If a key is shared between mappings and the values differ, '
'an error will be raised.'
)
dummy_plugin.methods.register_function(
function=identity_with_metadata,
inputs={
'ints': IntSequence1 | IntSequence2
},
parameters={
'metadata': Metadata
},
outputs=[
('out', IntSequence1)
],
name='Identity',
description='This method does nothing, but takes metadata',
examples={
'identity_with_metadata_simple': identity_with_metadata_simple,
'identity_with_metadata_merging': identity_with_metadata_merging},
)
dummy_plugin.methods.register_function(
function=long_description_method,
inputs={
'mapping1': Mapping
},
input_descriptions={
'mapping1': ("This is a very long description. If asked about its "
"length, I would have to say it is greater than 79 "
"characters.")
},
parameters={
'name': Str,
'age': Int
},
parameter_descriptions={
'name': ("This is a very long description. If asked about its length,"
" I would have to say it is greater than 79 characters.")
},
outputs=[
('out', Mapping)
],
output_descriptions={
'out': ("This is a very long description. If asked about its length,"
" I would have to say it is greater than 79 characters.")
},
name="Long Description",
description=("This is a very long description. If asked about its length,"
" I would have to say it is greater than 79 characters.")
)
dummy_plugin.methods.register_function(
function=docstring_order_method,
inputs={
'req_input': Mapping,
'opt_input': Mapping
},
input_descriptions={
'req_input': "This should show up first.",
'opt_input': "This should show up third."
},
parameters={
'req_param': Str,
'opt_param': Int
},
parameter_descriptions={
'req_param': "This should show up second.",
'opt_param': "This should show up fourth."
},
outputs=[
('out', Mapping)
],
output_descriptions={
'out': "This should show up last, in it's own section."
},
name="Docstring Order",
description=("Tests whether inputs and parameters are rendered in "
"signature order")
)
dummy_plugin.methods.register_function(
function=identity_with_metadata_column,
inputs={
'ints': IntSequence1 | IntSequence2
},
parameters={
'metadata': MetadataColumn[Categorical | Numeric]
},
outputs=[
('out', IntSequence1)
],
name='Identity',
description='This method does nothing, '
'but takes a generic metadata column',
examples={
'identity_with_metadata_column_get_mdc':
identity_with_metadata_column_get_mdc,
},
)
dummy_plugin.methods.register_function(
function=identity_with_categorical_metadata_column,
inputs={
'ints': IntSequence1 | IntSequence2
},
parameters={
'metadata': MetadataColumn[Categorical]
},
outputs=[
('out', IntSequence1)
],
name='Identity',
description='This method does nothing, but takes a categorical metadata '
'column'
)
dummy_plugin.methods.register_function(
function=identity_with_numeric_metadata_column,
inputs={
'ints': IntSequence1 | IntSequence2
},
parameters={
'metadata': MetadataColumn[Numeric]
},
outputs=[
('out', IntSequence1)
],
name='Identity',
description='This method does nothing, but takes a numeric metadata column'
)
dummy_plugin.methods.register_function(
function=identity_with_optional_metadata,
inputs={
'ints': IntSequence1 | IntSequence2
},
parameters={
'metadata': Metadata
},
outputs=[
('out', IntSequence1)
],
name='Identity',
description='This method does nothing, but takes optional metadata'
)
dummy_plugin.methods.register_function(
function=identity_with_optional_metadata_column,
inputs={
'ints': IntSequence1 | IntSequence2
},
parameters={
'metadata': MetadataColumn[Numeric | Categorical]
},
outputs=[
('out', IntSequence1)
],
name='Identity',
description='This method does nothing, but takes an optional generic '
'metadata column'
)
dummy_plugin.methods.register_function(
function=params_only_method,
inputs={},
parameters={
'name': Str,
'age': Int
},
outputs=[
('out', Mapping)
],
name='Parameters only method',
description='This method only accepts parameters.',
)
dummy_plugin.methods.register_function(
function=unioned_primitives,
inputs={},
parameters={
'foo': Int % Range(1, None) | Str % Choices(['auto_foo']),
'bar': Int % Range(1, None) | Str % Choices(['auto_bar']),
},
outputs=[
('out', Mapping)
],
name='Unioned primitive parameter',
description='This method has a unioned primitive parameter'
)
dummy_plugin.methods.register_function(
function=no_input_method,
inputs={},
parameters={},
outputs=[
('out', Mapping)
],
name='No input method',
description='This method does not accept any type of input.'
)
dummy_plugin.methods.register_function(
function=deprecated_method,
inputs={},
parameters={},
outputs=[
('out', Mapping)
],
name='A deprecated method',
description='This deprecated method does not accept any type of input.',
deprecated=True,
)
dummy_plugin.methods.register_function(
function=optional_artifacts_method,
inputs={
'ints': IntSequence1,
'optional1': IntSequence1,
'optional2': IntSequence1 | IntSequence2
},
parameters={
'num1': Int,
'num2': Int
},
outputs=[
('output', IntSequence1)
],
name='Optional artifacts method',
description='This method declares optional artifacts and concatenates '
'whatever integers are supplied as input.',
examples={'optional_inputs': optional_inputs},
)
dummy_plugin.methods.register_function(
function=variadic_input_method,
inputs={
'ints': List[IntSequence1 | IntSequence2],
'int_set': Set[SingleInt]
},
parameters={
'nums': Set[Int],
'opt_nums': List[Int % Range(10, 20)]
},
outputs=[
('output', IntSequence1)
],
name='Test variadic inputs',
description='This method concatenates all of its variadic inputs',
input_descriptions={
'ints': 'A list of int artifacts',
'int_set': 'A set of int artifacts'
},
parameter_descriptions={
'nums': 'A set of ints',
'opt_nums': 'An optional list of ints'
},
output_descriptions={
'output': 'All of the above mashed together'
},
examples={'variadic_input_simple': variadic_input_simple},
)
T = TypeMatch([IntSequence1, IntSequence2])
dummy_plugin.methods.register_function(
function=type_match_list_and_set,
inputs={
'ints': T
},
parameters={
'strs1': List[Str],
'strs2': Set[Str]
},
outputs=[
('output', T)
],
name='TypeMatch with list and set params',
description='Just a method with a TypeMatch and list/set params',
input_descriptions={
'ints': 'An int artifact'
},
parameter_descriptions={
'strs1': 'A list of strings',
'strs2': 'A set of strings'
},
output_descriptions={
'output': '[0]'
}
)
dummy_plugin.visualizers.register_function(
function=params_only_viz,
inputs={},
parameters={
'name': Str,
'age': Int % Range(0, None)
},
name='Parameters only viz',
description='This visualizer only accepts parameters.'
)
dummy_plugin.visualizers.register_function(
function=no_input_viz,
inputs={},
parameters={},
name='No input viz',
description='This visualizer does not accept any type of input.'
)
dummy_plugin.visualizers.register_function(
function=most_common_viz,
inputs={
'ints': IntSequence1 | IntSequence2
},
parameters={},
name='Visualize most common integers',
description='This visualizer produces HTML and TSV outputs containing the '
'input sequence of integers ordered from most- to '
'least-frequently occurring, along with their respective '
'frequencies.',
citations=[citations['barbeito1967microbiological']]
)
# TODO add optional parameters to this method when they are supported
dummy_plugin.visualizers.register_function(
function=mapping_viz,
inputs={
'mapping1': Mapping,
'mapping2': Mapping
},
parameters={
'key_label': Str,
'value_label': Str
},
name='Visualize two mappings',
description='This visualizer produces an HTML visualization of two '
'key-value mappings, each sorted in alphabetical order by key.'
)
dummy_plugin.pipelines.register_function(
function=parameter_only_pipeline,
inputs={},
parameters={
'int1': Int,
'int2': Int,
'metadata': Metadata
},
outputs=[
('foo', IntSequence2),
('bar', IntSequence1)
],
name='Do multiple things',
description='This pipeline only accepts parameters',
parameter_descriptions={
'int1': 'An integer, the first one in fact',
'int2': 'An integer, the second one',
'metadata': 'Very little is done with this'
},
output_descriptions={
'foo': 'Foo - "The Integers of 2"',
        'bar': 'Bar - "What a sequence"'
},
)
dummy_plugin.pipelines.register_function(
function=typical_pipeline,
inputs={
'int_sequence': IntSequence1,
'mapping': Mapping
},
parameters={
'do_extra_thing': Bool,
'add': Int
},
outputs=[
('out_map', Mapping),
('left', IntSequence1),
('right', IntSequence1),
('left_viz', Visualization),
('right_viz', Visualization)
],
input_descriptions={
'int_sequence': 'A sequence of ints',
'mapping': 'A map to a number other than 42 will fail'
},
parameter_descriptions={
'do_extra_thing': 'Increment `left` by `add` if true',
'add': 'Unused if `do_extra_thing` is false'
},
output_descriptions={
'out_map': 'Same as input',
'left': 'Left side of `int_sequence` unless `do_extra_thing`',
'right': 'Right side of `int_sequence`',
'left_viz': '`left` visualized',
'right_viz': '`right` visualized'
},
name='A typical pipeline with the potential to raise an error',
description='Waste some time shuffling data around for no reason',
citations=citations, # ALL of them.
examples={'typical_pipeline_simple': typical_pipeline_simple,
'typical_pipeline_complex': typical_pipeline_complex},
)
dummy_plugin.pipelines.register_function(
function=optional_artifact_pipeline,
inputs={
'int_sequence': IntSequence1,
'single_int': SingleInt
},
parameters={},
outputs=[
('ints', IntSequence1)
],
input_descriptions={
'int_sequence': 'Some integers',
'single_int': 'An integer'
},
output_descriptions={
'ints': 'More integers'
},
name='Do stuff normally, but override this one step sometimes',
description='Creates its own single_int, unless provided'
)
dummy_plugin.pipelines.register_function(
function=visualizer_only_pipeline,
inputs={
'mapping': Mapping
},
parameters={},
outputs=[
('viz1', Visualization),
('viz2', Visualization)
],
input_descriptions={
'mapping': 'A mapping to look at twice'
},
output_descriptions={
'viz1': 'The no input viz',
        'viz2': 'Our `mapping` seen through the lens of "foo" *and* "bar"'
},
name='Visualize many things',
description='Looks at both nothing and a mapping'
)
dummy_plugin.pipelines.register_function(
function=pipelines_in_pipeline,
inputs={
'int_sequence': IntSequence1,
'mapping': Mapping
},
parameters={},
outputs=[
('int1', SingleInt),
('out_map', Mapping),
('left', IntSequence1),
('right', IntSequence1),
('left_viz', Visualization),
('right_viz', Visualization),
('viz1', Visualization),
('viz2', Visualization)
],
name='Do a great many things',
description=('Mapping is chained from typical_pipeline into '
'visualizer_only_pipeline')
)
dummy_plugin.pipelines.register_function(
function=pointless_pipeline,
inputs={},
parameters={},
outputs=[('random_int', SingleInt)],
name='Get an integer',
description='Integer was chosen to be 4 by a random dice roll'
)
dummy_plugin.pipelines.register_function(
function=failing_pipeline,
inputs={
'int_sequence': IntSequence1
},
parameters={
'break_from': Str % Choices(
{'arity', 'return-view', 'type', 'method', 'internal', 'no-plugin',
'no-action'})
},
outputs=[('mapping', Mapping)],
name='Test different ways of failing',
description=('This is useful to make sure all of the intermediate stuff is'
' cleaned up the way it should be.')
)
import_module('qiime2.core.testing.mapped')
|
qiime2/qiime2
|
qiime2/core/testing/plugin.py
|
Python
|
bsd-3-clause
| 21,565
|
[
"Octopus"
] |
fa9468976d648a7cb8db49c09e497c3c5e09d7139a51072e53cab21516486a68
|
#!/usr/bin/env python
# Functions:
# parse_indexes
#
# indexes_matrix
# select_cols_str
# select_cols_substr
# add_column
# copy_column
# add_desc_for_gmx
#
# add_header_line
# fill_empty_headers
# remove_header_line
# reorder_headers_alphabetical
# upper_headers
# lower_headers
# hash_headers
# remove_duplicate_headers
# rename_duplicate_headers
# rename_header
# rename_header_i
# append_to_headers
# prepend_to_headers
# replace_header
# replace_header_re
#
# strip_all_annots
# upper_annots
# lower_annots
# set_value_if_empty
# set_value_if_not_empty
# set_value_if_other_annot_equals
# set_value_if_other_annot_not_empty
# copy_value_if_empty
# copy_value_if_empty_header
# copy_value_if_empty_same_header
# copy_value_if_empty_same_header_all
# replace_whole_annot
# replace_annots
# prepend_to_annots
# apply_re_to_annots
# merge_annots
# merge_annots_to_new_col
# merge_annots_to_new_col_skip_empty
# split_annots
# split_annots_and_take_elem
# split_chr_start_end
#
# tcga_relabel_patient_barcodes
# tcga_label_patient_barcodes
# tcga_label_by_tissue_type
#
# _add_annots
# _subtract_annots
# _divide_annots
# _calc_two_annots
#
# select_if_annot_is
# select_if_annot_startswith
#
# flip01_matrix
# all_same
# min_annots
# max_annots
# add_to
# multiply_by
# normalize_to_max
# log_base
# neg_log_base
# add_two_annots
# subtract_two_annots
# divide_two_annots
# divide_many_annots
# average_same_header
# round_annots
#
# vcf_standardize
# vcf_remove_bad_coords
# vcf_remove_multicalls
# vcf_extract_format_values
# vcf_extract_info_values
# vcf_split_AD
# vcf_calc_vaf
#
# subtract_two_bed_lists
# subtract_value_from_bed_list
def parse_indexes(MATRIX, indexes_str, allow_duplicates=False,
check_range=True):
# Takes 1-based indexes and returns a list of 0-based indexes.
#
# Example inputs:
# 5
# 1,5,10
# 1-99,215-300
# Sample,2-5 # Also takes headers.
import re
from genomicode import parselib
max_index = len(MATRIX.headers)
indexes_str = indexes_str.replace("END", str(max_index))
# Replace headers with 1-based indexes.
parts = indexes_str.split(",")
for i in range(len(parts)):
if re.search("[^0-9-]", parts[i]):
# Returns 0-based index.
p = MATRIX.normalize_header_i(parts[i])
parts[i] = str(p+1) # want 1-based index
indexes_str = ",".join(parts)
I = []
for s, e in parselib.parse_ranges(indexes_str):
assert s >= 1
if check_range:
assert s <= len(MATRIX.headers), "Out of range: %d/%d" % (
s, len(MATRIX.headers))
s, e = s - 1, min(e, max_index)
I.extend(range(s, e))
if not allow_duplicates:
# Remove duplicated indexes. Need to preserve order.
nodup = []
for i in I:
if i not in nodup:
nodup.append(i)
I = nodup
return I
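# Hedged example (editorial): for a MATRIX whose headers are
# ["Sample", "A", "B", "C", "D"],
#   parse_indexes(MATRIX, "Sample,3-4")  ->  [0, 2, 3]
# (0-based, order preserved, duplicates dropped unless allow_duplicates).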
def indexes_matrix(MATRIX, indexes_list):
# indexes is a list of strings indicating indexes. Parse this and
# return a submatrix consisting of just those indexes.
from genomicode import AnnotationMatrix
if not indexes_list:
return MATRIX
I = []
for indexes in indexes_list:
x = parse_indexes(MATRIX, indexes, allow_duplicates=True)
I.extend(x)
x = AnnotationMatrix.colslice(MATRIX, I)
return x
def select_cols_str(MATRIX, cols_str):
# cols_str is a list of the names of the headers to keep.
if not cols_str:
return MATRIX
from genomicode import AnnotationMatrix
I = []
for i, h in enumerate(MATRIX.headers):
found = False
for s in cols_str:
if h == s:
found = True
if found:
I.append(i)
return AnnotationMatrix.colslice(MATRIX, I)
def select_cols_substr(MATRIX, cols_substr):
# cols_substr is a list of the substrings of the headers to keep.
if not cols_substr:
return MATRIX
from genomicode import AnnotationMatrix
I = []
for i, h in enumerate(MATRIX.headers):
found = False
for s in cols_substr:
if h.find(s) >= 0:
found = True
if found:
I.append(i)
return AnnotationMatrix.colslice(MATRIX, I)
def select_if_annot_is(MATRIX, args):
if not args:
return MATRIX
from genomicode import AnnotationMatrix
jobs = []
for arg in args:
x = arg.split(",")
assert len(x) == 2, "Format: <header>,<value>"
header, value = x
jobs.append((header, value))
I = range(MATRIX.num_annots())
for x in jobs:
header, value = x
annots = MATRIX[header]
x = [i for (i, x) in enumerate(annots) if x == value]
x = {}.fromkeys(x)
I = [i for i in I if i in x]
MATRIX_s = AnnotationMatrix.rowslice(MATRIX, I)
return MATRIX_s
def select_if_annot_startswith(MATRIX, arg):
if not arg:
return MATRIX
from genomicode import AnnotationMatrix
x = arg.split(",")
assert len(x) == 2, "Format: <header>,<value>"
header, value = x
annots = MATRIX[header]
I = [i for (i, x) in enumerate(annots) if x.startswith(value)]
MATRIX_s = AnnotationMatrix.rowslice(MATRIX, I)
return MATRIX_s
def flip01_matrix(MATRIX, indexes):
if not indexes:
return MATRIX
I = parse_indexes(MATRIX, indexes)
MATRIX = MATRIX.copy()
for i in I:
assert i >= 0 and i < len(MATRIX.headers_h)
header_h = MATRIX.headers_h[i]
annots = MATRIX.header2annots[header_h]
for j in range(len(annots)):
if annots[j].strip() == "0":
annots[j] = "1"
elif annots[j].strip() == "1":
annots[j] = "0"
MATRIX.header2annots[header_h] = annots
return MATRIX
def fill_empty_headers(MATRIX, fill_headers):
if not fill_headers:
return MATRIX
from genomicode import AnnotationMatrix
headers = MATRIX.headers[:]
for i in range(len(headers)):
if headers[i].strip():
continue
j = 0
while True:
x = "H%03d" % j
if x not in headers:
break
j += 1
headers[i] = x
return AnnotationMatrix.replace_headers(MATRIX, headers)
def reorder_headers_alphabetical(MATRIX, reorder_headers):
if not reorder_headers:
return MATRIX
from genomicode import jmath
from genomicode import AnnotationMatrix
O = jmath.order(MATRIX.headers)
headers = [MATRIX.headers[i] for i in O]
headers_h = [MATRIX.headers_h[i] for i in O]
M = AnnotationMatrix.AnnotationMatrix(
headers, headers_h, MATRIX.header2annots)
return M
def upper_headers(MATRIX, upper_headers):
if not upper_headers:
return MATRIX
from genomicode import AnnotationMatrix
# Convert to the upper case name. Need to be careful because may
# cause duplicates.
headers = [x.upper() for x in MATRIX.headers]
return AnnotationMatrix.replace_headers(MATRIX, headers)
def lower_headers(MATRIX, lower_headers):
if not lower_headers:
return MATRIX
from genomicode import AnnotationMatrix
# Convert to the lower case name. Need to be careful because may
# cause duplicates.
headers = [x.lower() for x in MATRIX.headers]
return AnnotationMatrix.replace_headers(MATRIX, headers)
def hash_headers(MATRIX, hash_headers):
if not hash_headers:
return MATRIX
from genomicode import hashlib
from genomicode import AnnotationMatrix
# Hash each name. Need to be careful because may cause
# duplicates.
headers = [hashlib.hash_var(x) for x in MATRIX.headers]
return AnnotationMatrix.replace_headers(MATRIX, headers)
def add_header_line(filename, header_list, is_csv=False):
    # header_list is a list of comma-separated header strings.
from genomicode import AnnotationMatrix
from genomicode import filelib
from genomicode import jmath
delimiter = "\t"
if is_csv:
delimiter = ","
X = [x for x in filelib.read_cols(filename, delimiter=delimiter)]
# Check the dimensions of the matrix.
assert X, "empty matrix"
for i in range(len(X)):
assert len(X[i]) == len(X[0])
# Make each row an annotation.
X = jmath.transpose(X)
header_str = ",".join(header_list)
x = header_str.split(",")
assert len(x) >= len(X), "Matrix has %d columns, but %d headers given." % (
len(X), len(x))
# If there are more headers than columns, then fill the rest with
# blanks.
headers = x
headers_h = AnnotationMatrix.uniquify_headers(headers)
header2annots = {}
for i, header_h in enumerate(headers_h):
header_h = headers_h[i]
annots = [""] * len(X[0])
if i < len(X):
annots = X[i]
header2annots[header_h] = annots
return AnnotationMatrix.AnnotationMatrix(headers, headers_h, header2annots)
def remove_header_line(filename, read_as_csv):
from genomicode import AnnotationMatrix
from genomicode import jmath
MATRIX = AnnotationMatrix.read(filename, read_as_csv)
matrix = []
for header_h in MATRIX.headers_h:
x = MATRIX.header2annots[header_h]
matrix.append(x)
# Transpose the matrix.
matrix = jmath.transpose(matrix)
for x in matrix:
print "\t".join(map(str, x))
def remove_duplicate_headers(MATRIX, remove_dups):
if not remove_dups:
return MATRIX
from genomicode import AnnotationMatrix
I = []
seen = {}
for i, h in enumerate(MATRIX.headers):
if h in seen:
continue
seen[h] = 1
I.append(i)
return AnnotationMatrix.colslice(MATRIX, I)
def rename_duplicate_headers(MATRIX, rename_dups):
if not rename_dups:
return MATRIX
from genomicode import AnnotationMatrix
name2I = {} # name -> list of indexes
for i, name in enumerate(MATRIX.headers):
if name not in name2I:
name2I[name] = []
name2I[name].append(i)
nodup = MATRIX.headers[:]
for (name, I) in name2I.iteritems():
if len(I) < 2:
continue
for i in range(len(I)):
nodup[I[i]] = "%s (%d)" % (name, i+1)
x = AnnotationMatrix.replace_headers(MATRIX, nodup)
return x
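# Hedged example (editorial): headers ["A", "B", "A"] are renamed to
# ["A (1)", "B", "A (2)"].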
def rename_header(MATRIX, rename_list):
# rename_list is list of strings in format of: <from>,<to>.
if not rename_list:
return MATRIX
from genomicode import AnnotationMatrix
from genomicode import parselib
rename_all = [] # list of (from_str, to_str)
for rename_str in rename_list:
x = rename_str.split(",")
if len(x) > 2 or len(x) == 1:
x = rename_str.split(";")
assert len(x) == 2, "format should be: <from>,<to>"
from_str, to_str = x
rename_all.append((from_str, to_str))
for from_str, to_str in rename_all:
h = MATRIX.normalize_header(from_str)
x = parselib.pretty_list(MATRIX.headers, max_items=5)
assert h, "%s not a header:\n%s" % (from_str, x)
#assert from_str in MATRIX.headers, "%s not a header" % from_str
#assert from_str in MATRIX.header2annots, "%s not a unique header" % \
# from_str
convert = {}
for from_str, to_str in rename_all:
assert from_str not in convert, "dup: %s" % from_str
convert[from_str] = to_str
# Convert to the new names.
headers = [convert.get(x, x) for x in MATRIX.headers]
x = AnnotationMatrix.replace_headers(MATRIX, headers)
return x
def rename_header_i(MATRIX, rename_list):
# rename_list is list of strings in format of: <index>,<to>.
if not rename_list:
return MATRIX
from genomicode import AnnotationMatrix
rename_all = [] # list of (0-based index, to_str)
for rename_str in rename_list:
x = rename_str.split(",")
assert len(x) == 2, "format should be: <from>,<to>"
index, to_str = x
index = int(index)
assert index >= 1 and index <= len(MATRIX.headers)
index -= 1
rename_all.append((index, to_str))
# Convert to the new names.
headers = MATRIX.headers[:]
for index, to_str in rename_all:
headers[index] = to_str
x = AnnotationMatrix.replace_headers(MATRIX, headers)
return x
def replace_header(MATRIX, replace_list):
# replace_list is list of strings in format of: <from>,<to>.
if not replace_list:
return MATRIX
from genomicode import AnnotationMatrix
replace_all = [] # list of (from_str, to_str)
for replace_str in replace_list:
x = replace_str.split(",")
assert len(x) == 2, "format should be: <from>,<to>"
from_str, to_str = x
replace_all.append((from_str, to_str))
# Convert to the new names.
headers = MATRIX.headers[:]
for from_str, to_str in replace_all:
for i in range(len(headers)):
x = headers[i]
x = x.replace(from_str, to_str)
headers[i] = x
x = AnnotationMatrix.replace_headers(MATRIX, headers)
return x
def replace_header_re(MATRIX, replace_list):
# replace_list is list of strings in format of: <from re>,<to>.
import re
if not replace_list:
return MATRIX
from genomicode import AnnotationMatrix
replace_all = [] # list of (from_re_str, to_str)
for replace_str in replace_list:
x = replace_str.split(",")
assert len(x) == 2, "format should be: <from>,<to>"
from_str, to_str = x
replace_all.append((from_str, to_str))
# Convert to the new names.
headers = MATRIX.headers[:]
for from_str, to_str in replace_all:
for i in range(len(headers)):
x = headers[i]
m = re.search(from_str, x)
if m:
x = x[:m.start(0)] + to_str + x[m.end(0):]
#x = x.replace(m.group(0), to_str)
headers[i] = x
x = AnnotationMatrix.replace_headers(MATRIX, headers)
return x
def append_to_headers(MATRIX, append_to_headers):
# append_to_headers is list of strings in format of: <indexes>;<postfix>.
if not append_to_headers:
return MATRIX
from genomicode import AnnotationMatrix
append_all = [] # list of (list of 0-based indexes, postfix)
for x in append_to_headers:
x = x.split(";", 1)
assert len(x) == 2
        indexes_str, postfix = x
        indexes = parse_indexes(MATRIX, indexes_str)
        for i in indexes:
            assert i >= 0 and i < len(MATRIX.headers)
        append_all.append((indexes, postfix))
    headers = MATRIX.headers[:]
    for indexes, postfix in append_all:
        for i in indexes:
            headers[i] = "%s%s" % (headers[i], postfix)
return AnnotationMatrix.replace_headers(MATRIX, headers)
def prepend_to_headers(MATRIX, prepend_to_headers):
# prepend_to_headers is list of strings in format of: <indexes>;<prefix>.
if not prepend_to_headers:
return MATRIX
from genomicode import AnnotationMatrix
prepend_all = [] # list of (list of 0-based indexes, prefix)
for x in prepend_to_headers:
x = x.split(";", 1)
assert len(x) == 2
indexes_str, prefix = x
indexes = parse_indexes(MATRIX, indexes_str)
for i in indexes:
assert i >= 0 and i < len(MATRIX.headers)
prepend_all.append((indexes, prefix))
headers = MATRIX.headers[:]
for indexes, prefix in prepend_all:
for i in indexes:
headers[i] = "%s%s" % (prefix, headers[i])
x = AnnotationMatrix.replace_headers(MATRIX, headers)
return x
def add_column(MATRIX, add_column):
# add_column is list of strings in format of: <index>,<header>,<default>.
if not add_column:
return MATRIX
from genomicode import AnnotationMatrix
num_annots = None
for annots in MATRIX.header2annots.itervalues():
if num_annots is None:
num_annots = len(annots)
assert num_annots == len(annots)
add_all = [] # list of (0-based index, header, default_value)
last_index = -1
for x in add_column:
x = x.split(",", 2)
assert len(x) == 3, "Format should be: <index>,<header>,<value>"
index, header, default_value = x
if index == "END":
x = max(last_index+1, MATRIX.num_headers())
index = x + 1
index = int(index) - 1
last_index = index
add_all.append((index, header, default_value))
# Since the hashed header names might change, keep track of the
# indexes for each header.
h_indexes = [("OLD", i) for i in range(len(MATRIX.headers))]
for i, x in enumerate(add_all):
index, header, default_value = x
assert index >= 0 and index <= len(h_indexes)
h_indexes.insert(index, ("NEW", i))
headers = []
for (which_one, i) in h_indexes:
if which_one == "OLD":
headers.append(MATRIX.headers[i])
elif which_one == "NEW":
index, header, default_value = add_all[i]
headers.append(header)
else:
raise AssertionError
headers_h = AnnotationMatrix.uniquify_headers(headers)
header2annots = {}
for i_new, (which_one, i_old) in enumerate(h_indexes):
if which_one == "OLD":
old_header_h = MATRIX.headers_h[i_old]
new_header_h = headers_h[i_new]
header2annots[new_header_h] = MATRIX.header2annots[old_header_h]
elif which_one == "NEW":
index, header, default_value = add_all[i_old]
annots = [default_value] * num_annots
new_header_h = headers_h[i_new]
header2annots[new_header_h] = annots
else:
raise AssertionError
return AnnotationMatrix.AnnotationMatrix(headers, headers_h, header2annots)
def add_uid_column(MATRIX, arg):
# Format: <index>,<header>,<prefix>
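# A hypothetical example: "END,UID,sample_" appends a last column named
# "UID" with zero-padded IDs sample_00, sample_01, ... (the width depends
# on the number of rows).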
if not arg:
return MATRIX
import math
from genomicode import AnnotationMatrix
num_annots = MATRIX.num_annots()
x = arg.split(",", 2)
assert len(x) == 3, "Format should be: <index>,<header>,<prefix>"
index, header, prefix = x
if index == "END":
index = MATRIX.num_headers() + 1
index = int(index) - 1
assert index >= 0 and index <= MATRIX.num_headers()
ndigits = int(math.ceil(math.log(num_annots, 10)))
ndigits = max(ndigits, 1) # math.log(1, 10) is 0
uid_annots = ["%s%0*d" % (prefix, ndigits, i) for i in range(num_annots)]
headers = MATRIX.headers[:]
all_annots = [MATRIX.header2annots[x] for x in MATRIX.headers_h]
headers.insert(index, header)
all_annots.insert(index, uid_annots)
return AnnotationMatrix.create_from_annotations(headers, all_annots)
def stratify_by_rank(MATRIX, args):
# Format: <index>;<breakpoints>
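# A hypothetical example (header name is illustrative): "Age;0.25,0.50,0.75"
# adds an "Age Groups" column with quartile-based group labels.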
if not args:
return MATRIX
from genomicode import AnnotationMatrix
import analyze_clinical_outcome as aco
jobs = []
for arg in args:
x = arg.split(";")
if len(x) != 2:
x = arg.split(",")
assert len(x) == 2, "format should be: <index>;<breakpoints>"
header, breakpoints = x
index = MATRIX.normalize_header_i(header, index_base1=True)
breakpoints = aco.parse_rank_cutoffs(breakpoints)
x = index, breakpoints
jobs.append(x)
for x in jobs:
index, cutoffs = x
h = MATRIX.headers_h[index]
annots = MATRIX.header2annots[h]
I = [i for i, x in enumerate(annots) if x.strip()]
scores = [float(annots[i]) for i in I]
groups = aco.discretize_by_value(scores, cutoffs)
assert len(groups) == len(scores)
new_header = "%s Groups" % MATRIX.headers[index]
new_annots = [""] * len(annots)
for i, oi in enumerate(I):
new_annots[oi] = groups[i]
headers = MATRIX.headers + [new_header]
x = [MATRIX.header2annots[x] for x in MATRIX.headers_h]
all_annots = x + [new_annots]
MATRIX = AnnotationMatrix.create_from_annotations(headers, all_annots)
return MATRIX
def copy_column(MATRIX, copy_column):
# copy_column is a list of: <index>,<new_header>.
if not copy_column:
return MATRIX
from genomicode import AnnotationMatrix
jobs = [] # list of (0-based index, new_header)
for x in copy_column:
x = x.split(",", 1)
assert len(x) == 2
x, new_header = x
index = MATRIX.normalize_header_i(x, index_base1=True)
assert index is not None, "Unknown header: %s" % x
#index = int(index)
#assert index >= 1 and index <= len(MATRIX.headers)
#index -= 1 # convert to 0-based
x = index, new_header
jobs.append(x)
headers = MATRIX.headers[:]
all_annots = [MATRIX.header2annots[x] for x in MATRIX.headers_h]
for x in jobs:
index, new_header = x
headers.append(new_header)
all_annots.append(all_annots[index][:])
headers_h = AnnotationMatrix.uniquify_headers(headers)
assert len(headers_h) == len(all_annots)
header2annots = {}
for (header_h, annots) in zip(headers_h, all_annots):
header2annots[header_h] = annots
return AnnotationMatrix.AnnotationMatrix(headers, headers_h, header2annots)
def add_desc_for_gmx(MATRIX, arg):
if not arg:
return MATRIX
MATRIX = MATRIX.copy()
for h, annots in MATRIX.header2annots.iteritems():
annots.insert(0, "na")
return MATRIX
def set_value_if_empty(MATRIX, params):
# list of strings in format of: <indexes 1-based>,<value>
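# A hypothetical example: "3-5,NA" fills every blank cell in columns
# 3 through 5 with "NA".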
if not params:
return MATRIX
jobs = [] # list of (index 0-based, value)
for x in params:
x = x.split(",")
assert len(x) == 2, "format should be: <index 1-based>,<value>"
indexes, value = x
I = parse_indexes(MATRIX, indexes)
for i in I:
jobs.append((i, value))
MATRIX = MATRIX.copy()
for x in jobs:
index, value = x
h = MATRIX.headers_h[index]
# Change the annotations in place.
annots = MATRIX.header2annots[h]
for i in range(len(annots)):
if not annots[i]:
annots[i] = value
return MATRIX
def set_value_if_not_empty(MATRIX, params):
# list of strings in format of: <indexes 1-based>,<value>
if not params:
return MATRIX
jobs = [] # list of (index 0-based, value)
for x in params:
x = x.split(",")
assert len(x) == 2, "format should be: <index 1-based>,<value>"
indexes, value = x
I = parse_indexes(MATRIX, indexes)
for i in I:
jobs.append((i, value))
MATRIX = MATRIX.copy()
for x in jobs:
index, value = x
h = MATRIX.headers_h[index]
# Change the annotations in place.
annots = MATRIX.header2annots[h]
for i in range(len(annots)):
if annots[i].strip():
annots[i] = value
return MATRIX
def set_value_if_other_annot_equals(MATRIX, args):
# Format: <this_index 1-based>,<this_value>,<other_index>,<other_value>
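# A hypothetical example (header names are illustrative):
#   "Call;germline;Filter;PASS" sets the "Call" column to "germline"
#   in every row where the "Filter" column equals "PASS".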
if not args:
return MATRIX
from genomicode import AnnotationMatrix
# list of (this index 0-based, this value, other_index, other_value)
jobs = []
for arg in args:
x = arg.split(";")
if len(x) != 4:
x = arg.split(",")
assert len(x) == 4, "format should be: " + \
"<this_index 1-based>,<this_value>,<other_index>,<other_value>"
this_h, this_value, other_h, other_value = x
this_index = MATRIX.normalize_header_i(this_h, index_base1=True)
other_index = MATRIX.normalize_header_i(other_h, index_base1=True)
assert this_index is not None, "Could not find header: %s" % this_h
assert other_index is not None, "Could not find header: %s" % other_h
x = this_index, this_value, other_index, other_value
jobs.append(x)
MATRIX = MATRIX.copy()
for x in jobs:
this_index, this_value, other_index, other_value = x
this_h = MATRIX.headers_h[this_index]
other_h = MATRIX.headers_h[other_index]
# Change the annotations in place.
this_annots = MATRIX.header2annots[this_h]
other_annots = MATRIX.header2annots[other_h]
for i in range(len(this_annots)):
if other_annots[i] == other_value:
this_annots[i] = this_value
return MATRIX
def set_value_if_other_annot_not_empty(MATRIX, args):
# Format: <this_index 1-based>,<this_value>,<other_indexes>
if not args:
return MATRIX
from genomicode import AnnotationMatrix
# list of (this index 0-based, this value, other_indexes 0-based)
jobs = []
for arg in args:
x = arg.split(";")
if len(x) != 3:
x = arg.split(",")
assert len(x) == 3, "format should be: " + \
"<this_index 1-based>,<this_value>,<other_indexes>"
this_h, this_value, other_i = x
this_index = MATRIX.normalize_header_i(this_h, index_base1=True)
other_indexes = parse_indexes(MATRIX, other_i)
assert this_index is not None, "Could not find header: %s" % this_h
assert other_indexes, "Could not find indexes: %s" % other_i
x = this_index, this_value, other_indexes
jobs.append(x)
MATRIX = MATRIX.copy()
for x in jobs:
this_index, this_value, other_indexes = x
# Change the annotations in place.
this_h = MATRIX.headers_h[this_index]
this_annots = MATRIX.header2annots[this_h]
for other_index in other_indexes:
other_h = MATRIX.headers_h[other_index]
other_annots = MATRIX.header2annots[other_h]
for i in range(len(this_annots)):
if other_annots[i].strip():
this_annots[i] = this_value
return MATRIX
def copy_value_if_empty(MATRIX, copy_values):
# copy_values is list of strings in format of: <dst>,<src 1>[,<src 2>...].
if not copy_values:
return MATRIX
copy_indexes = [] # list of (dst, src1 [, src 2...]). 0-based
for copy_value in copy_values:
x = copy_value.split(",")
assert len(x) >= 2, "format should be: <dst>,<src 1>[, <src 2>...]"
x = [int(x) for x in x]
for i in range(len(x)):
# Should be 1-based indexes.
assert x[i] >= 1 and x[i] <= len(MATRIX.headers)
# Convert to 0-based indexes.
x = [x-1 for x in x]
copy_indexes.append(tuple(x))
MATRIX = MATRIX.copy()
for indexes in copy_indexes:
i_dst = indexes[0]
header_dst = MATRIX.headers_h[i_dst]
for i_src in indexes[1:]:
header_src = MATRIX.headers_h[i_src]
# Change the annotations in place.
annots_dst = MATRIX.header2annots[header_dst]
annots_src = MATRIX.header2annots[header_src]
for i in range(len(annots_dst)):
if not annots_dst[i].strip():
annots_dst[i] = annots_src[i]
return MATRIX
def copy_value_if_empty_header(MATRIX, copy_values):
# copy_values is list of strings in format of: <dst header>,<src 1>[,<src 2>...].
if not copy_values:
return MATRIX
copy_indexes = [] # list of (dst, src1 [, src 2...]). 0-based
for copy_value in copy_values:
headers = copy_value.split(",")
assert len(headers) >= 2, \
"format should be: <dst>,<src 1>[, <src 2>...]"
indexes = []
for header in headers:
i = [i for i in range(len(MATRIX.headers))
if header == MATRIX.headers[i]]
assert i, "Header not found: %s" % header
assert len(i) == 1, "Header duplicated: %s" % header
i = i[0]
indexes.append(i)
copy_indexes.append(tuple(indexes))
MATRIX = MATRIX.copy()
for indexes in copy_indexes:
i_dst = indexes[0]
header_dst = MATRIX.headers_h[i_dst]
for i_src in indexes[1:]:
header_src = MATRIX.headers_h[i_src]
# Change the annotations in place.
annots_dst = MATRIX.header2annots[header_dst]
annots_src = MATRIX.header2annots[header_src]
for i in range(len(annots_dst)):
if not annots_dst[i].strip():
annots_dst[i] = annots_src[i]
return MATRIX
def copy_value_if_empty_same_header(MATRIX, copy_values):
# copy_values is list of header names.
if not copy_values:
return MATRIX
copy_indexes = [] # list of list of indexes. 0-based
for copy_value in copy_values:
indexes = [i for i in range(len(MATRIX.headers))
if copy_value == MATRIX.headers[i]]
assert indexes, "Header not found: %s" % copy_value
assert len(indexes) > 1, "Header only found once: %s" % copy_value
copy_indexes.append(indexes)
MATRIX = MATRIX.copy()
# Clean up the data.
all_indexes = []
for I in copy_indexes:
all_indexes.extend(I)
all_indexes = {}.fromkeys(all_indexes)
for i in all_indexes:
header = MATRIX.headers_h[i]
x = MATRIX.header2annots[header]
x = [x.strip() for x in x]
MATRIX.header2annots[header] = x
for indexes in copy_indexes:
all_headers = [MATRIX.headers_h[i] for i in indexes]
all_annots = [MATRIX.header2annots[x] for x in all_headers]
for i_dst, annots_dst in enumerate(all_annots):
I = [i for (i, x) in enumerate(annots_dst) if not x]
for k in I:
for i_src, annots_src in enumerate(all_annots):
if i_src == i_dst:
continue
if annots_src[k]:
annots_dst[k] = annots_src[k]
break
for header, annots in zip(all_headers, all_annots):
MATRIX.header2annots[header] = annots
return MATRIX
def copy_value_if_empty_same_header_all(MATRIX, copy_values):
# copy_values is boolean
if not copy_values:
return MATRIX
dup = []
seen = {}
for h in MATRIX.headers:
if h in seen:
dup.append(h)
seen[h] = 1
dup = {}.fromkeys(dup)
return copy_value_if_empty_same_header(MATRIX, dup)
def strip_all_annots(MATRIX, strip):
if not strip:
return MATRIX
from genomicode import AnnotationMatrix
header2annots = {}
for header_h, annots in MATRIX.header2annots.iteritems():
annots = [x.strip() for x in annots]
header2annots[header_h] = annots
return AnnotationMatrix.AnnotationMatrix(
MATRIX.headers, MATRIX.headers_h, header2annots)
def upper_annots(MATRIX, upper):
if not upper:
return MATRIX
from genomicode import AnnotationMatrix
I = parse_indexes(MATRIX, upper)
header2annots = MATRIX.header2annots.copy()
for i in I:
assert i >= 0 and i < len(MATRIX.headers_h)
header_h = MATRIX.headers_h[i]
annots = MATRIX.header2annots[header_h]
annots = [x.upper() for x in annots]
header2annots[header_h] = annots
return AnnotationMatrix.AnnotationMatrix(
MATRIX.headers, MATRIX.headers_h, header2annots)
def lower_annots(MATRIX, lower):
if not lower:
return MATRIX
from genomicode import AnnotationMatrix
I = parse_indexes(MATRIX, lower)
header2annots = MATRIX.header2annots.copy()
for i in I:
assert i >= 0 and i < len(MATRIX.headers_h)
header_h = MATRIX.headers_h[i]
annots = MATRIX.header2annots[header_h]
annots = [x.lower() for x in annots]
header2annots[header_h] = annots
return AnnotationMatrix.AnnotationMatrix(
MATRIX.headers, MATRIX.headers_h, header2annots)
def replace_annot(MATRIX, replace_annot):
# list of strings in format of: <indexes 1-based>;<src>;<dst>
if not replace_annot:
return MATRIX
replace_all = [] # list of (indexes 0-based, src, dst)
for replace in replace_annot:
x = replace.split(";")
assert len(x) == 3, "format should be: <indexes>;<src>;<dst>"
indexes_str, src, dst = x
indexes = parse_indexes(MATRIX, indexes_str)
for index in indexes:
replace_all.append((index, src, dst))
MATRIX = MATRIX.copy()
for x in replace_all:
index, src, dst = x
h = MATRIX.headers_h[index]
annots = MATRIX.header2annots[h]
for i in range(len(annots)):
# Change the annotations in place.
annots[i] = annots[i].replace(src, dst)
return MATRIX
def replace_whole_annot(MATRIX, replace_annot):
# list of strings in format of: <indexes 1-based>;<src>;<dst>
if not replace_annot:
return MATRIX
replace_all = [] # list of (indexes 0-based, src, dst)
for replace in replace_annot:
x = replace.split(";")
assert len(x) == 3, "format should be: <indexes>;<src>;<dst>"
indexes_str, src, dst = x
indexes = parse_indexes(MATRIX, indexes_str)
#index = int(index)
## Should be 1-based.
#assert index >= 1 and index <= len(MATRIX.headers)
## Convert to 0-based.
#index -= 1
for index in indexes:
replace_all.append((index, src, dst))
MATRIX = MATRIX.copy()
for x in replace_all:
index, src, dst = x
h = MATRIX.headers_h[index]
annots = MATRIX.header2annots[h]
for i in range(len(annots)):
# Change the annotations in place.
if annots[i] == src:
annots[i] = dst
return MATRIX
def rename_duplicate_annot(MATRIX, args):
# <indexes>
if not args:
return MATRIX
indexes = parse_indexes(MATRIX, args)
MATRIX = MATRIX.copy()
for index in indexes:
h = MATRIX.headers_h[index]
annots = MATRIX.header2annots[h]
name2I = {} # name -> list of indexes
for i, name in enumerate(annots):
if name not in name2I:
name2I[name] = []
name2I[name].append(i)
nodup = annots[:]
for (name, I) in name2I.iteritems():
if len(I) < 2:
continue
for i in range(len(I)):
nodup[I[i]] = "%s_%d" % (name, i+1)
MATRIX.header2annots[h] = nodup
return MATRIX
def prepend_to_annots(MATRIX, prepend_annot):
# list of strings in format of: <indexes 1-based>;<text to prepend>
if not prepend_annot:
return MATRIX
prepend_all = [] # list of (index 0-based, src, dst)
for prepend in prepend_annot:
x = prepend.split(";")
assert len(x) == 2, "format should be: <indexes>;<text>"
indexes_str, text = x
indexes = parse_indexes(MATRIX, indexes_str)
for index in indexes:
prepend_all.append((index, text))
MATRIX = MATRIX.copy()
for x in prepend_all:
index, text = x
h = MATRIX.headers_h[index]
annots = MATRIX.header2annots[h]
for i in range(len(annots)):
# Change the annotations in place.
annots[i] = "%s%s" % (text, annots[i])
return MATRIX
def apply_re_to_annots(MATRIX, apply_annots):
# list of strings in format of: <indexes 1-based>;<regular expression>
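# A hypothetical example: "1;^(\S+)" keeps only the first
# whitespace-delimited token (regex group 1) in column 1; rows that
# do not match are left unchanged.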
import re
if not apply_annots:
return MATRIX
apply_all = [] # list of (index 0-based, regex)
for apply_ in apply_annots:
x = apply_.split(";")
assert len(x) == 2, "format should be: <indexes>;<regex>"
indexes_str, regex = x
indexes = parse_indexes(MATRIX, indexes_str)
for index in indexes:
apply_all.append((index, regex))
MATRIX = MATRIX.copy()
for x in apply_all:
index, regex = x
h = MATRIX.headers_h[index]
annots = MATRIX.header2annots[h]
for i in range(len(annots)):
# Change the annotations in place.
m = re.search(regex, annots[i])
if m:
annots[i] = m.group(1)
return MATRIX
def merge_annots(MATRIX, merge_annots):
# list of strings in format of:
# <src indexes 1-based>;<dst index 1-based>;<char>
if not merge_annots:
return MATRIX
merge_all = [] # list of (src indexes 0-based, dst index 0-based, char)
for merge in merge_annots:
x = merge.split(";")
assert len(x) == 3, \
"format should be: <src indexes>;<dst index>;<char>"
src_indexes_str, dst_indexes_str, merge_char = x
src_indexes = parse_indexes(MATRIX, src_indexes_str)
dst_indexes = parse_indexes(MATRIX, dst_indexes_str)
assert len(dst_indexes) == 1
dst_index = dst_indexes[0]
merge_all.append((src_indexes, dst_index, merge_char))
MATRIX = MATRIX.copy()
for x in merge_all:
src_indexes, dst_index, merge_char = x
src_annots = []
for i in src_indexes:
h = MATRIX.headers_h[i]
x = MATRIX.header2annots[h]
src_annots.append(x)
# Change MATRIX in place.
h = MATRIX.headers_h[dst_index]
dst_annots = MATRIX.header2annots[h]
for i in range(len(dst_annots)):
x = [x[i] for x in src_annots]
merged = merge_char.join(x)
dst_annots[i] = merged
return MATRIX
def merge_annots_to_new_col(MATRIX, merge_annots):
# list of strings in format of:
# <src indexes 1-based>;<dst col name>;<char>
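# A hypothetical example (column name is illustrative): "1,2;Location;:"
# joins columns 1 and 2 with ":" into a new column named "Location".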
if not merge_annots:
return MATRIX
from genomicode import AnnotationMatrix
jobs = [] # list of (src indexes 0-based, dst_name, char)
for fmt in merge_annots:
x = fmt.split(";")
assert len(x) == 3, \
"format should be: <src indexes>;<dst name>;<char>. Got %s" % \
fmt
src_indexes_str, dst_name, merge_char = x
src_indexes = parse_indexes(MATRIX, src_indexes_str)
jobs.append((src_indexes, dst_name, merge_char))
headers = MATRIX.headers[:]
all_annots = [MATRIX.header2annots[x] for x in MATRIX.headers_h]
for x in jobs:
src_indexes, dst_name, merge_char = x
src_annots = []
for i in src_indexes:
h = MATRIX.headers_h[i]
x = MATRIX.header2annots[h]
src_annots.append(x)
dst_annots = [""] * MATRIX.num_annots()
for i in range(len(dst_annots)):
x = [x[i] for x in src_annots]
x = merge_char.join(x)
dst_annots[i] = x
headers.append(dst_name)
all_annots.append(dst_annots)
return AnnotationMatrix.create_from_annotations(headers, all_annots)
def merge_annots_to_new_col_skip_empty(MATRIX, merge_annots):
# list of strings in format of:
# <src indexes 1-based>;<dst col name>;<char>
if not merge_annots:
return MATRIX
from genomicode import AnnotationMatrix
jobs = [] # list of (src indexes 0-based, dst_name, char)
for fmt in merge_annots:
x = fmt.split(";")
assert len(x) == 3, \
"format should be: <src indexes>;<dst name>;<char>. Got %s" % \
fmt
src_indexes_str, dst_name, merge_char = x
src_indexes = parse_indexes(MATRIX, src_indexes_str)
jobs.append((src_indexes, dst_name, merge_char))
headers = MATRIX.headers[:]
all_annots = [MATRIX.header2annots[x] for x in MATRIX.headers_h]
for x in jobs:
src_indexes, dst_name, merge_char = x
src_annots = []
for i in src_indexes:
h = MATRIX.headers_h[i]
x = MATRIX.header2annots[h]
src_annots.append(x)
dst_annots = [""] * MATRIX.num_annots()
for i in range(len(dst_annots)):
x = [x[i] for x in src_annots]
x = [x for x in x if x] # only if not empty
x = merge_char.join(x)
dst_annots[i] = x
headers.append(dst_name)
all_annots.append(dst_annots)
return AnnotationMatrix.create_from_annotations(headers, all_annots)
def split_annots(MATRIX, split_annots):
# list of strings in format of:
# <src index>;<dst indexes>;<split char>
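# A hypothetical example: "3;4,5;:" splits column 3 on ":" and writes
# the pieces into the existing columns 4 and 5.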
if not split_annots:
return MATRIX
jobs = [] # list of (src index 0-based, dst indexes 0-based, char)
for x in split_annots:
x = x.split(";")
assert len(x) == 3, \
"format should be: <src index>;<dst indexes>;<char>"
src_index_str, dst_indexes_str, split_char = x
src_indexes = parse_indexes(MATRIX, src_index_str)
dst_indexes = parse_indexes(MATRIX, dst_indexes_str)
assert len(src_indexes) == 1
src_index = src_indexes[0]
jobs.append((src_index, dst_indexes, split_char))
MATRIX = MATRIX.copy()
for x in jobs:
src_index, dst_indexes, split_char = x
h = MATRIX.headers_h[src_index]
src_annots = MATRIX.header2annots[h]
split_annots = [x.split(split_char) for x in src_annots]
for i in range(len(split_annots)):
#assert len(split_annots[i]) == len(dst_indexes), \
# "split/dst_indexes mismatch: %d %s %s" % (
# i, split_annots[i], len(dst_indexes))
assert len(split_annots[i]) <= len(dst_indexes), \
"split/dst_indexes mismatch: %d %s %s" % (
i, split_annots[i], len(dst_indexes))
for i in range(len(dst_indexes)):
h = MATRIX.headers_h[dst_indexes[i]]
dst_annots = MATRIX.header2annots[h]
assert len(split_annots) == len(dst_annots)
for j in range(len(split_annots)):
# change in place
if i < len(split_annots[j]):
dst_annots[j] = split_annots[j][i]
return MATRIX
def split_annots_and_take_elem(MATRIX, args):
if not args:
return MATRIX
from genomicode import AnnotationMatrix
jobs = [] # list of (src index 0-based, char, elem 0-based, dst header)
for arg in args:
x = arg.split(";")
if len(x) != 4:
x = arg.split(",")
assert len(x) == 4, (
"Format should be: "
"<src index>;<split_char>;<element index>;<new header>")
src_index_str, split_char, elem_index_str, new_header = x
src_index = MATRIX.normalize_header_i(src_index_str, index_base1=True)
assert src_index is not None, "Unknown header: %s" % src_index_str
elem_index = int(elem_index_str)
assert elem_index >= 1
elem_index -= 1
x = src_index, split_char, elem_index, new_header
jobs.append(x)
for x in jobs:
src_index, split_char, elem_index, new_header = x
h = MATRIX.headers_h[src_index]
src_annots = MATRIX.header2annots[h]
dst_annots = []
for annot in src_annots:
split_annots = annot.split(split_char)
ann = ""
if len(split_annots) > elem_index:
ann = split_annots[elem_index]
dst_annots.append(ann)
assert len(dst_annots) == len(src_annots)
headers = MATRIX.headers + [new_header]
x = [MATRIX.header2annots[x] for x in MATRIX.headers_h]
all_annots = x + [dst_annots]
assert len(headers) == len(all_annots)
MATRIX = AnnotationMatrix.create_from_annotations(headers, all_annots)
return MATRIX
def split_chr_start_end(MATRIX, arg):
# list of strings in format of: <header>
if not arg:
return MATRIX
from genomicode import AnnotationMatrix
jobs = [] # list of (index 0-based,)
for x in arg:
i = MATRIX.normalize_header_i(x, index_base1=True)
assert i is not None, "Unknown header: %s" % x
jobs.append((i,))
headers = MATRIX.headers[:]
all_annots = [MATRIX.header2annots[x] for x in MATRIX.headers_h]
for x in jobs:
index, = x
h = MATRIX.headers_h[index]
annots = MATRIX.header2annots[h]
all_chrom = [""] * len(annots)
all_start = [""] * len(annots)
all_end = [""] * len(annots)
for i, x in enumerate(annots):
# chr1:320117-320142
x = x.strip()
if not x:
continue
x = x.split(":")
assert len(x) == 2, "Bad format: %s" % annots[i]
chrom, pos = x
x = pos.split("-")
assert len(x) == 2, "Bad format: %s" % annots[i]
start, end = x
start, end = int(start), int(end)
assert end >= start
all_chrom[i] = chrom
all_start[i] = start
all_end[i] = end
h = MATRIX.headers[index]
x1 = "%s chr" % h
x2 = "%s start" % h
x3 = "%s end" % h
headers.extend([x1, x2, x3])
all_annots.extend([all_chrom, all_start, all_end])
return AnnotationMatrix.create_from_annotations(headers, all_annots)
def tcga_relabel_patient_barcodes(MATRIX, arg):
# string that should be <header> or <1-based index>
if not arg:
return MATRIX
from genomicode import AnnotationMatrix
import slice_matrix
arg = [arg]
jobs = [] # list of (index 0-based,)
for x in arg:
i = MATRIX.normalize_header_i(x, index_base1=True)
assert i is not None, "Unknown header: %s" % x
jobs.append((i,))
MATRIX = MATRIX.copy()
for x in jobs:
index, = x
h = MATRIX.headers_h[index]
annots = MATRIX.header2annots[h]
# Change the annotations in place.
for i, barcode in enumerate(annots):
try:
x = slice_matrix._parse_tcga_barcode(barcode)
except AssertionError, x:
# Keep all samples that don't look like a TCGA barcode.
if str(x).startswith("Invalid barcode"):
pass
else:
raise
else:
barcode = x[0]
annots[i] = barcode
return MATRIX
def tcga_label_patient_barcodes(MATRIX, arg):
# string that should be <src header>,<dst header>
if not arg:
return MATRIX
from genomicode import AnnotationMatrix
import slice_matrix
MATRIX = MATRIX.copy()
x = arg.split(",")
assert len(x) == 2, "Format: <src header>,<dst header>"
src_header, dst_header = x
i_src = MATRIX.normalize_header_i(src_header, index_base1=True)
assert i_src is not None, "Missing header: %s" % src_header
i_dst = MATRIX.normalize_header_i(dst_header, index_base1=True)
if i_dst is None:
# Create this header.
headers = MATRIX.headers[:]
all_annots = [MATRIX.header2annots[x] for x in MATRIX.headers_h]
headers.append(dst_header)
x = [""] * MATRIX.num_annots()
all_annots.append(x)
MATRIX = AnnotationMatrix.create_from_annotations(
headers, all_annots, headerlines=MATRIX.headerlines)
i_dst = MATRIX.normalize_header_i(dst_header, index_base1=True)
assert i_dst is not None, "Missing header: %s" % dst_header
h_src = MATRIX.headers_h[i_src]
h_dst = MATRIX.headers_h[i_dst]
src_annots = MATRIX.header2annots[h_src]
dst_annots = MATRIX.header2annots[h_dst]
# Change the annotations in place.
for i, barcode in enumerate(src_annots):
try:
x = slice_matrix._parse_tcga_barcode(barcode)
except AssertionError, x:
# Keep all samples that don't look like a TCGA barcode.
if str(x).startswith("Invalid barcode"):
pass
else:
raise
else:
barcode = x[0]
dst_annots[i] = barcode
return MATRIX
def tcga_label_by_tissue_type(MATRIX, arg):
# string that should be <src header>,<dst header>
if not arg:
return MATRIX
from genomicode import AnnotationMatrix
import slice_matrix
MATRIX = MATRIX.copy()
x = arg.split(",")
assert len(x) == 2, "Format: <src header>,<dst header>"
src_header, dst_header = x
i_src = MATRIX.normalize_header_i(src_header, index_base1=True)
assert i_src is not None, "Missing header: %s" % src_header
i_dst = MATRIX.normalize_header_i(dst_header, index_base1=True)
if i_dst is None:
# Create this header.
headers = MATRIX.headers[:]
all_annots = [MATRIX.header2annots[x] for x in MATRIX.headers_h]
headers.append(dst_header)
x = [""] * MATRIX.num_annots()
all_annots.append(x)
MATRIX = AnnotationMatrix.create_from_annotations(
headers, all_annots, headerlines=MATRIX.headerlines)
i_dst = MATRIX.normalize_header_i(dst_header, index_base1=True)
assert i_dst is not None, "Missing header: %s" % dst_header
h_src = MATRIX.headers_h[i_src]
h_dst = MATRIX.headers_h[i_dst]
src_annots = MATRIX.header2annots[h_src]
dst_annots = MATRIX.header2annots[h_dst]
# Change the annotations in place.
for i, barcode in enumerate(src_annots):
x = slice_matrix._parse_tcga_tissue_type(barcode)
if x is None:
x = barcode
dst_annots[i] = x
return MATRIX
def _add_annots(a1, a2):
return a1 + a2
def _subtract_annots(a1, a2):
return a1 - a2
def _divide_annots(a1, a2):
num, den = a1, a2
if abs(den) < 1E-50:
return ""
return num / den
def _calc_two_annots(MATRIX, calc_annots, calc_fn):
# calc_annots is a list of <annot 1>,<annot 2>,<dest>. Each are
# 1-based indexes. Returns a Matrix with the calculation applied.
if not calc_annots:
return MATRIX
to_calc = [] # list of (i1, i2, i_dest); 0-based
for ca in calc_annots:
x = ca.split(",")
assert len(x) == 3, "format should be: <annot1>,<annot2>,<dest>"
i_1, i_2, i_dest = x
i_1, i_2, i_dest = int(i_1), int(i_2), int(i_dest)
# Convert to 0-based index.
i_1, i_2, i_dest = i_1-1, i_2-1, i_dest-1
assert i_1 >= 0 and i_1 < len(MATRIX.headers)
assert i_2 >= 0 and i_2 < len(MATRIX.headers)
assert i_dest >= 0 and i_dest < len(MATRIX.headers)
x = i_1, i_2, i_dest
to_calc.append(x)
MATRIX = MATRIX.copy()
for (i_1, i_2, i_dest) in to_calc:
h_1 = MATRIX.headers_h[i_1]
h_2 = MATRIX.headers_h[i_2]
h_dest = MATRIX.headers_h[i_dest]
annots_1 = MATRIX.header2annots[h_1]
annots_2 = MATRIX.header2annots[h_2]
assert len(annots_1) == len(annots_2)
annots_dest = [""] * len(annots_1)
for i in range(len(annots_1)):
a1 = annots_1[i]
a2 = annots_2[i]
if not a1.strip() or not a2.strip():
continue
a1 = float(a1)
a2 = float(a2)
annots_dest[i] = calc_fn(a1, a2)
MATRIX.header2annots[h_dest] = annots_dest
return MATRIX
def all_same(MATRIX, all_same):
# format: <indexes 1-based>;<dest index>
if not all_same:
return MATRIX
x = all_same.split(";")
assert len(x) == 2, "format should be: <indexes>;<index dest>"
indexes_str, dst_i = x
indexes = parse_indexes(MATRIX, indexes_str)
dst_i = int(dst_i)
assert dst_i >= 1 and dst_i <= MATRIX.num_headers()
dst_i -= 1
MATRIX = MATRIX.copy()
annot_matrix = [] # indexes x annot matrix
for i in indexes:
h = MATRIX.headers_h[i]
x = MATRIX.header2annots[h]
annot_matrix.append(x)
same_annot = [1] * MATRIX.num_annots()
for i in range(MATRIX.num_annots()):
# See if all annot_matrix[i] is same.
same = True
for j in range(1, len(annot_matrix)):
if annot_matrix[j][i] != annot_matrix[0][i]:
same = False
if not same:
same_annot[i] = 0
h = MATRIX.headers_h[dst_i]
MATRIX.header2annots[h] = same_annot
return MATRIX
def min_annots(MATRIX, min_annots):
# format: <indexes 1-based>;<dest index>
from genomicode import jmath
if not min_annots:
return MATRIX
x = min_annots.split(";")
assert len(x) == 2, "format should be: <indexes>;<index dest>"
indexes_str, dst_i = x
indexes = parse_indexes(MATRIX, indexes_str)
dst_i = int(dst_i)
assert dst_i >= 1 and dst_i <= MATRIX.num_headers()
dst_i -= 1
MATRIX = MATRIX.copy()
annot_matrix = [] # indexes x annot matrix
for i in indexes:
h = MATRIX.headers_h[i]
x = MATRIX.header2annots[h]
x = map(float, x)
annot_matrix.append(x)
mins = jmath.min(annot_matrix, byrow=False)
assert len(mins) == MATRIX.num_annots()
h = MATRIX.headers_h[dst_i]
MATRIX.header2annots[h] = mins
return MATRIX
def max_annots(MATRIX, max_annots):
# format: <indexes 1-based>;<dest index>
from genomicode import jmath
if not max_annots:
return MATRIX
x = max_annots.split(";")
assert len(x) == 2, "format should be: <indexes>;<index dest>"
indexes_str, dst_i = x
indexes = parse_indexes(MATRIX, indexes_str)
dst_i = int(dst_i)
assert dst_i >= 1 and dst_i <= MATRIX.num_headers()
dst_i -= 1
MATRIX = MATRIX.copy()
annot_matrix = [] # indexes x annot matrix
for i in indexes:
h = MATRIX.headers_h[i]
x = MATRIX.header2annots[h]
x = map(float, x)
annot_matrix.append(x)
maxes = jmath.max(annot_matrix, byrow=False)
assert len(maxes) == MATRIX.num_annots()
DELTA = 1E-5
all_int = True
for x in maxes:
if abs(int(round(x))-x) > DELTA:
all_int = False
break
if all_int:
maxes = [int(x) for x in maxes]
h = MATRIX.headers_h[dst_i]
MATRIX.header2annots[h] = maxes
return MATRIX
def add_to(MATRIX, add_to):
# format: list of <header or index 1-based>,<number>
if not add_to:
return MATRIX
jobs = [] # list of (0-based index, number)
for x in add_to:
x = x.split(",")
assert len(x) == 2, "format should be: <index>,<number>"
header, number = x
index = MATRIX.normalize_header_i(header, index_base1=True)
assert index is not None, "Unknown header or index: %s" % header
number = _int_or_float(number)
x = index, number
jobs.append(x)
MATRIX = MATRIX.copy()
for x in jobs:
index, number = x
assert index < len(MATRIX.headers_h)
h = MATRIX.headers_h[index]
annots = MATRIX.header2annots[h]
for i in range(len(annots)):
x = _int_or_float(annots[i])
x = x + number
annots[i] = str(x)
return MATRIX
def multiply_by(MATRIX, multiply_by):
# format: list of <index 1-based>,<number>
if not multiply_by:
return MATRIX
jobs = [] # list of (0-based index, number)
for x in multiply_by:
x = x.split(",")
assert len(x) == 2, "format should be: <index>,<number>"
index, number = x
index = int(index)
number = float(number)
x = index-1, number
jobs.append(x)
MATRIX = MATRIX.copy()
for x in jobs:
index, number = x
assert index < len(MATRIX.headers_h)
h = MATRIX.headers_h[index]
annots = MATRIX.header2annots[h]
for i in range(len(annots)):
x = float(annots[i])
x = x * number
annots[i] = str(x)
return MATRIX
def normalize_to_max(MATRIX, args):
# format: list of <header>
if not args:
return MATRIX
jobs = [] # list of headers
for x in args:
h = MATRIX.normalize_header(x, index_base1=True)
assert h is not None, "Unknown header: %s" % x
jobs.append(h)
MATRIX = MATRIX.copy()
for x in jobs:
header = x
annots = MATRIX.header2annots[header]
annots = [float(x) for x in annots]
norm = max(annots)
if not norm:
continue
for i in range(len(annots)):
x = float(annots[i])/norm
annots[i] = str(x)
MATRIX.header2annots[header] = annots
return MATRIX
def log_base(MATRIX, log_base):
# format: list of <index 1-based>,<base>
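# A hypothetical example: "4,2" replaces column 4 with the log base 2
# of its values (values are floored at 1E-100 before taking the log).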
if not log_base:
return MATRIX
import math
jobs = [] # list of (0-based index, base)
for x in log_base:
x = x.split(",")
assert len(x) == 2, "format should be: <index>,<base>"
index, base = x
index = int(index)
base = float(base)
x = index-1, base
jobs.append(x)
MIN = 1E-100
MATRIX = MATRIX.copy()
for x in jobs:
index, base = x
assert index < len(MATRIX.headers_h)
h = MATRIX.headers_h[index]
annots = MATRIX.header2annots[h]
for i in range(len(annots)):
x = float(annots[i])
x = max(x, MIN)
x = math.log(x, base)
annots[i] = str(x)
return MATRIX
def neg_log_base(MATRIX, log_base):
# format: list of <index 1-based>,<base>
if not log_base:
return MATRIX
import math
jobs = [] # list of (0-based index, base)
for x in log_base:
x = x.split(",")
assert len(x) == 2, "format should be: <index>,<base>"
index, base = x
index = int(index)
base = float(base)
x = index-1, base
jobs.append(x)
MIN = 1E-100
MATRIX = MATRIX.copy()
for x in jobs:
index, base = x
assert index < len(MATRIX.headers_h)
h = MATRIX.headers_h[index]
annots = MATRIX.header2annots[h]
for i in range(len(annots)):
x = float(annots[i])
x = max(x, MIN)
x = math.log(x, base)
x = x * -1
annots[i] = str(x)
return MATRIX
def add_two_annots(MATRIX, add_annots):
# Format: list of <annot 1>,<annot 2>,<dest>. Each are 1-based
# indexes. dest is annot1 + annot2.
return _calc_two_annots(MATRIX, add_annots, _add_annots)
def subtract_two_annots(MATRIX, subtract_annots):
# Format: list of <annot 1>,<annot 2>,<dest>. Each are 1-based
# indexes. dest is annot1 - annot2.
return _calc_two_annots(MATRIX, subtract_annots, _subtract_annots)
def divide_two_annots(MATRIX, divide_annots):
# Format: list of <numerator>,<denominator>,<dest>. Each are 1-based
# indexes.
return _calc_two_annots(MATRIX, divide_annots, _divide_annots)
def divide_many_annots(MATRIX, divide_annots):
# Format: list of <numerator indexes>;<denominator index>. Each
# are 1-based indexes.
if not divide_annots:
return MATRIX
divide_all = [] # list of (list of 0-based indexes, 0-based index)
for x in divide_annots:
x = x.split(";")
assert len(x) == 2
x1, x2 = x
indexes1 = parse_indexes(MATRIX, x1)
indexes2 = parse_indexes(MATRIX, x2)
assert len(indexes2) == 1
for i in indexes1 + indexes2:
assert i >= 0 and i < len(MATRIX.headers)
divide_all.append((indexes1, indexes2[0]))
MATRIX = MATRIX.copy()
for x in divide_all:
num_indexes, den_index = x
for i in range(MATRIX.num_annots()):
header_h = MATRIX.headers_h[den_index]
annots = MATRIX.header2annots[header_h]
den = float(annots[i])
for index in num_indexes:
header_h = MATRIX.headers_h[index]
annots = MATRIX.header2annots[header_h]
num = float(annots[i])
annots[i] = num / den
return MATRIX
def average_same_header(MATRIX, average):
if not average:
return MATRIX
from genomicode import jmath
from genomicode import AnnotationMatrix
# Make a list of all the duplicate headers.
header2I = {} # header -> list of indexes
for i, header in enumerate(MATRIX.headers):
if header not in header2I:
header2I[header] = []
header2I[header].append(i)
# Now make the new matrix.
headers = []
header2annots = {}
for header in MATRIX.headers:
if header in header2annots:
continue
I = header2I[header]
MATRIX_I = []
for i in I:
h = MATRIX.headers_h[i]
x = MATRIX.header2annots[h]
MATRIX_I.append(x)
if len(MATRIX_I) == 1:
x = MATRIX_I[0]
else:
for i in range(len(MATRIX_I)):
MATRIX_I[i] = [float(x) for x in MATRIX_I[i]]
x = jmath.mean(MATRIX_I, byrow=0)
headers.append(header)
header2annots[header] = x
return AnnotationMatrix.AnnotationMatrix(headers, headers, header2annots)
def round_annots(MATRIX, round_annots):
# Format: list of <index>. 1-based indexes.
if not round_annots:
return MATRIX
indexes = [] # list of 0-based indexes
for x in round_annots:
I = parse_indexes(MATRIX, x)
for i in I:
assert i >= 0 and i < len(MATRIX.headers)
indexes.extend(I)
indexes = sorted({}.fromkeys(indexes))
MATRIX = MATRIX.copy()
for index in indexes:
header_h = MATRIX.headers_h[index]
x = MATRIX.header2annots[header_h]
x = [int(round(float(x))) for x in x]
MATRIX.header2annots[header_h] = x
return MATRIX
def convert_percent_to_decimal(MATRIX, convert):
# Format: list of <index>. 1-based indexes.
if not convert:
return MATRIX
indexes = [] # list of 0-based indexes
for x in convert:
I = parse_indexes(MATRIX, x)
for i in I:
assert i >= 0 and i < len(MATRIX.headers)
indexes.extend(I)
indexes = sorted({}.fromkeys(indexes))
MATRIX = MATRIX.copy()
for index in indexes:
header_h = MATRIX.headers_h[index]
annots = MATRIX.header2annots[header_h]
for i in range(len(annots)):
x = annots[i]
x = x.strip()
if not x:
continue
if x.endswith("%"):
x = x[:-1]
x = float(x) / 100
annots[i] = x
MATRIX.header2annots[header_h] = annots
return MATRIX
## def _header_or_index(MATRIX, header):
## # header may be either a header or a 1-based index. Return the
## # hashed header.
## if not header:
## return None
## if header in MATRIX.headers:
## i = MATRIX.headers.index(header)
## return MATRIX.headers_h[i]
## if header in MATRIX.headers_h:
## return header
## header_i = None
## try:
## header_i = int(header)
## except ValueError, x:
## pass
## if header_i is not None:
## assert header_i >= 1 and header_i <= len(MATRIX.headers)
## i = header_i - 1
## return MATRIX.headers_h[i]
## raise AssertionError, "Unknown header: %s" % header
def vcf_standardize(MATRIX, vcf_standardize):
if not vcf_standardize:
return MATRIX
from genomicode import AnnotationMatrix
from genomicode import vcflib
# Format: <info_header>,<format_header>[,<genotype_header>]
x = vcf_standardize.split(",")
assert len(x) >= 2, \
"Format: <info_header>,<format_header>,<genotype_header>"
info_header, format_header = x[:2]
genotype_headers = x[2:]
info_header_n = MATRIX.normalize_header(info_header)
format_header_n = MATRIX.normalize_header(format_header)
assert info_header_n, "Missing header: %s" % info_header
assert format_header_n, "Missing header: %s" % format_header
if not genotype_headers:
# Find the genotype headers at the end of the file.
i1 = MATRIX.headers_h.index(info_header_n)
i2 = MATRIX.headers_h.index(format_header_n)
i_start = max(i1, i2) + 1
assert i_start < len(MATRIX.headers), "No columns at end of file."
for i in range(len(MATRIX.headers)-1, i_start-1, -1):
# See if every row is either blank or contains some colons.
h = MATRIX.headers_h[i]
annots = MATRIX.header2annots[h]
x = [x for x in annots if not x.strip() or x.find(":") >= 0]
if len(x) != len(annots):
break
i_geno = i+1
genotype_headers = MATRIX.headers[i_geno:]
assert genotype_headers, "No genotype headers found."
# Create a VCF object.
samples = genotype_headers
# Parse the info line.
x = MATRIX.header2annots[info_header_n]
more_info = [vcflib._parse_info_dict(x) for x in x]
# Parse the genotype data.
format_strings = MATRIX.header2annots[format_header_n]
genotypes = {}
for sample in genotype_headers:
genotype_strings = MATRIX[sample]
geno_dicts = [
vcflib._parse_genotype_dict(fs, gs)
for (fs, gs) in zip(format_strings, genotype_strings)]
genotypes[sample] = geno_dicts
vcf = vcflib.VCFFile(MATRIX, samples, more_info, genotypes)
CHROM = "chrom"
START = "start"
END = "end"
GENE = "gene"
GENE_ID = "entrez_gene_id"
FUNC = "func"
EXONICFUNC = "exonicfunc"
AACHANGE = "aachange"
NUM_REF = "num_ref"
NUM_ALT = "num_alt"
TOTAL = "total_reads"
VAF = "vaf"
CALL = "call"
# If I can't find these, then just fill with blank spaces.
# This can happen if the file is not annotated.
IGNORE_IF_MISSING = [GENE, GENE_ID, FUNC, EXONICFUNC, AACHANGE]
# List of tuples:
# - header name
# - list of possible original headers
COMMON_COLUMNS = [
(CHROM, ["chrom", "contig", "Chr", "CHROM", "#CHROM"]),
(START, ["start", "position", "Start", "POS", "pos"]),
(END, ["end", "End"]),
("ref_allele", ["ref_allele", "Ref", "REF"]),
("alt_allele", ["alt_allele", "Alt", "ALT"]),
(GENE, ["gene", "Gene", "Gene.refGene"]),
(GENE_ID, ["entrez_gene_id", "Entrez_Gene_Id"]),
(FUNC, ["func", "Func", "Func.refGene"]),
(EXONICFUNC, ["exonicfunc", "ExonicFunc", "ExonicFunc.refGene"]),
(AACHANGE, ["aachange", "AAChange", "AAChange.refGene"]),
]
# Sample specific columns.
SPECIFIC_COLUMNS = [
(NUM_REF, ["num_ref", "t_ref_count"], "num_ref"),
(NUM_ALT, ["num_alt", "t_alt_count"], "num_alt"),
(TOTAL, ["total_reads"], "total_reads"),
(VAF, ["vaf"], "vaf"),
(CALL, [], "call"),
]
headers = []
header2annots = {} # should contain no duplicates
missing = []
# Set the common columns.
for (dst_header, src_headers) in COMMON_COLUMNS:
header_i = None
for h in src_headers:
if h in MATRIX.headers:
header_i = MATRIX.headers.index(h)
break
headers.append(dst_header)
if header_i is None:
missing.append(dst_header)
continue
assert dst_header not in header2annots
h = MATRIX.headers_h[header_i]
annots = MATRIX.header2annots[h]
header2annots[dst_header] = annots
# Set the sample-specific columns.
for sample in genotype_headers:
info_list = [
vcflib.parse_info(vcf, sample, i)
for i in range(MATRIX.num_annots())]
for (dst_header, src_headers, info_member) in SPECIFIC_COLUMNS:
if len(genotype_headers) > 1:
dst_header = "%s %s" % (sample, dst_header)
# If there is only one sample, look for the src_headers.
if len(genotype_headers) == 1:
header_i = None
for h in src_headers:
if h in MATRIX.headers:
header_i = MATRIX.headers.index(h)
break
if header_i is not None:
headers.append(dst_header)
assert dst_header not in header2annots
h = MATRIX.headers_h[header_i]
annots = MATRIX.header2annots[h]
header2annots[dst_header] = annots
continue
# Pull the information out of the info_list.
headers.append(dst_header)
assert dst_header not in header2annots
x = [getattr(x, info_member) for x in info_list]
x = [vcflib._fmt_vcf_value(x) for x in x]
header2annots[dst_header] = x
# If I can't find "end", and I could find the "start", then make
# it the same as start.
assert START not in missing
if END in missing:
header2annots[END] = header2annots[START][:]
missing.pop(missing.index(END))
# Ignore missing headers.
for header in IGNORE_IF_MISSING:
if header not in missing:
continue
missing.pop(missing.index(header))
annots = [""] * MATRIX.num_annots()
header2annots[header] = annots
# Make sure nothing is missing.
assert not missing, "Not found: %s" % ", ".join(map(str, missing))
all_annots = [header2annots.get(x) for x in headers]
# Clean up all annots.
for i in range(len(all_annots)):
for j in range(len(all_annots[i])):
if all_annots[i][j] is None:
all_annots[i][j] = ""
all_annots[i][j] = str(all_annots[i][j]).strip()
return AnnotationMatrix.create_from_annotations(headers, all_annots)
def vcf_remove_bad_coords(MATRIX, vcf_remove_bad_coords):
if not vcf_remove_bad_coords:
return MATRIX
# Column names must be standardized.
START = "start"
END = "end"
assert START in MATRIX.headers_h, "VCF must have standardized names"
assert END in MATRIX.headers_h, "VCF must have standardized names"
start_annots = MATRIX.header2annots[START]
end_annots = MATRIX.header2annots[END]
start_annots = [x.lower() for x in start_annots]
end_annots = [x.lower() for x in end_annots]
bad_indexes = {}
for i in range(len(start_annots)):
s, e = start_annots[i], end_annots[i]
if s.find("e") >= 0:
bad_indexes[i] = 1
elif e.find("e") >= 0:
bad_indexes[i] = 1
if not bad_indexes:
return MATRIX
MATRIX = MATRIX.copy()
for h, annots in MATRIX.header2annots.iteritems():
annots = [x for (i, x) in enumerate(annots) if i not in bad_indexes]
MATRIX.header2annots[h] = annots
return MATRIX
def vcf_remove_multicalls(MATRIX, vcf_remove_multicalls):
if not vcf_remove_multicalls:
return MATRIX
# Column names must be standardized.
CHROM = "chrom"
START = "start"
END = "end"
TOTAL = "total_reads"
assert CHROM in MATRIX.headers_h, "VCF must have standardized names"
assert START in MATRIX.headers_h, "VCF must have standardized names"
assert END in MATRIX.headers_h, "VCF must have standardized names"
chrom_annots = MATRIX.header2annots[CHROM]
start_annots = MATRIX.header2annots[START]
end_annots = MATRIX.header2annots[END]
total_annots = MATRIX.header2annots[TOTAL]
start_annots = [int(x) for x in start_annots]
end_annots = [int(x) for x in end_annots]
# Find the duplicates.
loc2indexes = {} # (chrom, start, end) -> list of indexes
for i in range(len(chrom_annots)):
chrom, start, end = chrom_annots[i], start_annots[i], end_annots[i]
x = chrom, start, end
if x not in loc2indexes:
loc2indexes[x] = []
loc2indexes[x].append(i)
# Find rows to discard.
bad_indexes = {}
for (loc, indexes) in loc2indexes.iteritems():
if len(indexes) < 2:
continue
# If there are duplicates, choose the best one.
most_i = most_reads = None
for i in indexes:
if most_reads is None or total_annots[i] > most_reads:
most_reads = total_annots[i]
most_i = i
assert most_i is not None
for i in indexes:
if i != most_i:
bad_indexes[i] = 1
MATRIX = MATRIX.copy()
# Edit MATRIX in place.
for h, annots in MATRIX.header2annots.iteritems():
annots = [x for (i, x) in enumerate(annots) if i not in bad_indexes]
MATRIX.header2annots[h] = annots
return MATRIX
def vcf_extract_format_values(MATRIX, vcf_format):
# Format: <format header>,<values header>,<value>[,value].
if not vcf_format:
return MATRIX
from genomicode import AnnotationMatrix
x = vcf_format.split(",")
x = [x.strip() for x in x]
assert len(x) >= 3, "Format: <header>,<header>,<value>[,<value>...]"
f_header = x[0]
v_header = x[1]
value_headers = x[2:]
assert f_header in MATRIX.headers, "Missing header: %s" % f_header
assert v_header in MATRIX.headers, "Missing header: %s" % v_header
# Assume no duplicates. Just use the first one.
h_f = MATRIX.headers_h[MATRIX.headers.index(f_header)]
h_v = MATRIX.headers_h[MATRIX.headers.index(v_header)]
annots_f = MATRIX.header2annots[h_f] # list of strings
annots_v = MATRIX.header2annots[h_v] # list of strings
assert len(annots_f) == len(annots_v)
# Parse out the annotations into a matrix.
annots_f = [x.split(":") for x in annots_f]
annots_v = [x.split(":") for x in annots_v]
headers = MATRIX.headers[:]
all_annots = [MATRIX.header2annots[x] for x in MATRIX.headers_h]
for value_header in value_headers:
values = [""] * len(annots_f)
for i in range(len(annots_f)):
fmt = annots_f[i]
vals = annots_v[i]
# 1. Sometimes len(vals) < len(fmt).
# GT:GQ:SDP:DP:RD:AD:FREQ:PVAL:RBQ:ABQ:RDF:RDR:ADF:ADR
# ./.:.:1
# 2. Sometimes the value_header is missing in one line.
# GT:GQ:PL (but AD in every other line)
#assert len(fmt) == len(vals)
if value_header not in fmt:
continue
#assert value_header in fmt, \
# "Missing value for: %s %s" % (value_header, annots_f[i])
j = fmt.index(value_header)
if j < len(vals):
values[i] = vals[j]
headers.append(value_header)
all_annots.append(values)
headers_h = AnnotationMatrix.uniquify_headers(headers)
assert len(headers_h) == len(all_annots)
header2annots = {}
for (header_h, annots) in zip(headers_h, all_annots):
header2annots[header_h] = annots
return AnnotationMatrix.AnnotationMatrix(headers, headers_h, header2annots)
def vcf_extract_info_values(MATRIX, vcf_info):
# Format: <format header>,<value>[,value].
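# A hypothetical example: "INFO,DP,AF" parses the semicolon-delimited
# key=value pairs in the "INFO" column and appends new "DP" and "AF"
# columns with the corresponding values.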
if not vcf_info:
return MATRIX
from genomicode import AnnotationMatrix
x = vcf_info.split(",")
x = [x.strip() for x in x]
assert len(x) >= 2, "Format: <header>,<value>[,<value>...]"
i_header = x[0]
value_names = x[1:]
assert i_header in MATRIX.headers, "Missing header: %s" % i_header
# Assume no duplicates. Just use the first one.
# Parse out the annotations.
h_i = MATRIX.headers_h[MATRIX.headers.index(i_header)]
# list of strings. <name>=<value>[;<name>=<value>]
annots_str = MATRIX.header2annots[h_i] # list of strings
# list of dicts. <name>=<value>
annots_dict = [] # list of dicts
for x in annots_str:
x = x.split(";")
d = {}
for x in x:
x = x.split("=")
assert len(x) == 2
key, value = x
d[key] = value
annots_dict.append(d)
headers = MATRIX.headers[:]
all_annots = [MATRIX.header2annots[x] for x in MATRIX.headers_h]
for value_name in value_names:
values = [d.get(value_name, "") for d in annots_dict]
headers.append(value_name)
all_annots.append(values)
return AnnotationMatrix.create_from_annotations(headers, all_annots)
def vcf_split_AD(MATRIX, split_annots):
# list of strings in format of:
# <src index>;<dst indexes>
if not split_annots:
return MATRIX
jobs = [] # list of (src index 0-based, dst indexes 0-based, char)
for x in split_annots:
x = x.split(";")
assert len(x) == 2, \
"format should be: <src index>;<dst indexes>"
src_index_str, dst_indexes_str = x
src_indexes = parse_indexes(MATRIX, src_index_str)
dst_indexes = parse_indexes(MATRIX, dst_indexes_str)
assert len(src_indexes) == 1
src_index = src_indexes[0]
split_char = ","
jobs.append((src_index, dst_indexes, split_char))
MATRIX = MATRIX.copy()
for x in jobs:
src_index, dst_indexes, split_char = x
h = MATRIX.headers_h[src_index]
src_annots = MATRIX.header2annots[h]
split_annots = [x.split(split_char) for x in src_annots]
for i in range(len(split_annots)):
if len(split_annots[i]) == len(dst_indexes):
continue
# If there are only 2 dst_indexes, they should refer REF
# and ALT alleles. In this case, just add everything up
# into the ALT allele.
if len(dst_indexes) == 2:
x0 = split_annots[i][0]
x1 = sum(map(int, split_annots[i][1:]))
split_annots[i] = [x0, x1]
assert len(split_annots[i]) == len(dst_indexes), \
"split/dst_indexes mismatch: %d %s %s" % (
i, split_annots[i], len(dst_indexes))
for i in range(len(dst_indexes)):
h = MATRIX.headers_h[dst_indexes[i]]
dst_annots = MATRIX.header2annots[h]
assert len(split_annots) == len(dst_annots)
for j in range(len(split_annots)):
# change in place
dst_annots[j] = split_annots[j][i]
return MATRIX
def vcf_calc_vaf(MATRIX, calc_vaf):
# List of: <ref index>,<alt index>,<vaf index>
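# A hypothetical example: "5,6,7" computes alt/(ref+alt) from columns
# 5 (ref reads) and 6 (alt reads) and writes it into existing column 7.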
if not calc_vaf:
return MATRIX
jobs = []
for x in calc_vaf:
x = x.split(",")
assert len(x) == 3, \
"format should be: <ref index>,<alt index>,<vaf index>"
x1, x2, x3 = x
x1 = parse_indexes(MATRIX, x1)
x2 = parse_indexes(MATRIX, x2)
x3 = parse_indexes(MATRIX, x3)
assert len(x1) == 1, x1
assert len(x2) == 1, x2
assert len(x3) == 1, x3
ref_index, alt_index, vaf_index = x1[0], x2[0], x3[0]
x = ref_index, alt_index, vaf_index
jobs.append(x)
MATRIX = MATRIX.copy()
for x in jobs:
ref_index, alt_index, vaf_index = x
ref_h = MATRIX.headers_h[ref_index]
alt_h = MATRIX.headers_h[alt_index]
vaf_h = MATRIX.headers_h[vaf_index]
ref_annots = MATRIX.header2annots[ref_h]
alt_annots = MATRIX.header2annots[alt_h]
vaf_annots = MATRIX.header2annots[vaf_h]
# Change MATRIX in place.
for i in range(len(ref_annots)):
r = ref_annots[i].strip()
a = alt_annots[i].strip()
if not r or not a:
continue
r, a = int(r), int(a)
total = r+a
if not total:
continue
vaf_annots[i] = a / float(total)
return MATRIX
def subtract_two_bed_lists(MATRIX, subtract_two_bed_lists):
# Format: <annot 1>,<annot 2>,<dest>. Each are 1-based
# indexes. <annot 1> is comma-separated list of numbers. May end
# in an extra comma.
if not subtract_two_bed_lists:
return MATRIX
# same as calcBlocksizes, but order of annots is reversed. Should
# we keep both?
x = subtract_two_bed_lists.split(",")
assert len(x) == 3, "format should be: <annot1>,<annot2>,<dest>"
i_1, i_2, i_dest = x
i_1, i_2, i_dest = int(i_1), int(i_2), int(i_dest)
# Convert to 0-based index.
i_1, i_2, i_dest = i_1-1, i_2-1, i_dest-1
assert i_1 >= 0 and i_1 < len(MATRIX.headers)
assert i_2 >= 0 and i_2 < len(MATRIX.headers)
assert i_dest >= 0 and i_dest < len(MATRIX.headers)
MATRIX = MATRIX.copy()
h_1 = MATRIX.headers_h[i_1]
h_2 = MATRIX.headers_h[i_2]
h_dest = MATRIX.headers_h[i_dest]
annots_1 = MATRIX.header2annots[h_1]
annots_2 = MATRIX.header2annots[h_2]
assert len(annots_1) == len(annots_2)
annots_dest = [""] * len(annots_1)
for i in range(len(annots_1)):
a1 = annots_1[i]
a2 = annots_2[i]
if not a1.strip() or not a2.strip():
continue
ends_with_comma = False
a1 = a1.split(",")
a2 = a2.split(",")
assert len(a1) == len(a2), "Unequal lengths"
if a1[-1] == "" or a2[-1] == "":
ends_with_comma = True
if ends_with_comma:
assert a1[-1] == ""
assert a2[-1] == ""
a1 = a1[:-1]
a2 = a2[:-1]
a1 = [int(x) for x in a1]
a2 = [int(x) for x in a2]
d = [(a1[j]-a2[j]) for j in range(len(a1))]
d = ",".join(map(str, d))
if ends_with_comma:
d = d + ","
annots_dest[i] = d
MATRIX.header2annots[h_dest] = annots_dest
return MATRIX
def subtract_value_from_bed_list(MATRIX, subtract_value_from_bed_list):
# Format: <annot 1>,<annot 2>,<dest>. Each are 1-based
# indexes. <annot 1> is comma-separated list of numbers. May end
# in an extra comma. <annot 2> is single value.
if not subtract_value_from_bed_list:
return MATRIX
x = subtract_value_from_bed_list.split(",")
assert len(x) == 3, "format should be: <annot1>,<annot2>,<dest>"
i_1, i_2, i_dest = x
i_1, i_2, i_dest = int(i_1), int(i_2), int(i_dest)
# Convert to 0-based index.
i_1, i_2, i_dest = i_1-1, i_2-1, i_dest-1
assert i_1 >= 0 and i_1 < len(MATRIX.headers)
assert i_2 >= 0 and i_2 < len(MATRIX.headers)
assert i_dest >= 0 and i_dest < len(MATRIX.headers)
MATRIX = MATRIX.copy()
h_1 = MATRIX.headers_h[i_1]
h_2 = MATRIX.headers_h[i_2]
h_dest = MATRIX.headers_h[i_dest]
annots_1 = MATRIX.header2annots[h_1]
annots_2 = MATRIX.header2annots[h_2]
assert len(annots_1) == len(annots_2)
annots_dest = [""] * len(annots_1)
for i in range(len(annots_1)):
a1 = annots_1[i]
a2 = annots_2[i]
if not a1.strip() or not a2.strip():
continue
ends_with_comma = False
a1 = a1.split(",")
if a1[-1] == "":
ends_with_comma = True
a1 = a1[:-1]
a1 = [int(x) for x in a1]
a2 = int(a2)
d = [x-a2 for x in a1]
d = ",".join(map(str, d))
if ends_with_comma:
d = d + ","
annots_dest[i] = d
MATRIX.header2annots[h_dest] = annots_dest
return MATRIX
## def calc_blockSizes(MATRIX, calc_blockSizes):
## # Format: <annot 1>,<annot 2>,<dest>. Each are 1-based
## # indexes. <annot 1> is comma-separated list of numbers. May end
## # in an extra comma.
## if not calc_blockSizes:
## return MATRIX
## x = calc_blockSizes.split(",")
## assert len(x) == 3, "format should be: <annot1>,<annot2>,<dest>"
## i_1, i_2, i_dest = x
## i_1, i_2, i_dest = int(i_1), int(i_2), int(i_dest)
## # Convert to 0-based index.
## i_1, i_2, i_dest = i_1-1, i_2-1, i_dest-1
## assert i_1 >= 0 and i_1 < len(MATRIX.headers)
## assert i_2 >= 0 and i_2 < len(MATRIX.headers)
## assert i_dest >= 0 and i_dest < len(MATRIX.headers)
## MATRIX = MATRIX.copy()
## h_1 = MATRIX.headers_h[i_1]
## h_2 = MATRIX.headers_h[i_2]
## h_dest = MATRIX.headers_h[i_dest]
## annots_1 = MATRIX.header2annots[h_1]
## annots_2 = MATRIX.header2annots[h_2]
## assert len(annots_1) == len(annots_2)
## annots_dest = [""] * len(annots_1)
## for i in range(len(annots_1)):
## a1 = annots_1[i]
## a2 = annots_2[i]
## if not a1.strip() or not a2.strip():
## continue
## ends_with_comma = False
## a1 = a1.split(",")
## a2 = a2.split(",")
## if a1[-1] == "" or a2[-1] == "":
## ends_with_comma = True
## if ends_with_comma:
## assert a1[-1] == ""
## assert a2[-1] == ""
## a1 = a1[:-1]
## a2 = a2[:-1]
## a1 = [int(x) for x in a1]
## a2 = [int(x) for x in a2]
## d = [(a2[j]-a1[j]) for j in range(len(a1))]
## d = ",".join(map(str, d))
## if ends_with_comma:
## d = d + ","
## annots_dest[i] = d
## MATRIX.header2annots[h_dest] = annots_dest
## return MATRIX
def _int_or_float(x):
EPS = 1E-10
x1 = float(x)
try:
x2 = int(x)
except ValueError, x:
return x1
x = x1
if (x1-x2) < EPS:
x = x2
return x
FILENAME = None # for debugging
def main():
global FILENAME
import sys
import argparse
from genomicode import AnnotationMatrix
from genomicode import SimpleVariantMatrix
parser = argparse.ArgumentParser(
description="Perform operations on an annotation file.")
parser.add_argument("filename", nargs=1, help="Annotation file.")
parser.add_argument(
"--read_as_csv", action="store_true",
help="Read as a CSV file.")
parser.add_argument(
"--write_as_csv", action="store_true",
help="Write out as a CSV file.")
parser.add_argument(
"--read_as_svm", action="store_true",
help="Read as a simple variant matrix.")
#parser.add_argument(
# "--clean_svm_headers", action="store_true",
# help="Whether to clean up redundancies in SVM headers.")
parser.add_argument(
"--ignore_lines_startswith",
help="Ignore lines that starts with this string. "
'E.g. --ignore_lines_starswith "##" will ignore headers in VCF files.')
group = parser.add_argument_group(title="Matrix operations")
group.add_argument(
"--indexes", "--cut", dest="indexes", default=[], action="append",
help="Select only these indexes from the file e.g. 1-5,8 "
"(1-based, inclusive). (MULTI)")
group.add_argument(
"--select_cols_str", default=[], action="append",
help="Select the columns whose header contains matches this string. "
"(MULTI)")
group.add_argument(
"--select_cols_substr", default=[], action="append",
help="Select the columns whose header contains this substring. "
"(MULTI)")
group.add_argument(
"--add_column", default=[], action="append",
help="Add one or more columns. "
"Format: <index>,<header>,<default value>. The column will be "
"added before <index> (1-based). If <index> is 1, this will be "
'the new first column. If <index> is "END", this will be '
"the last column. (MULTI)")
group.add_argument(
"--copy_column", default=[], action="append",
help="Copy a column. Format: <old_header_or_index>,<new_header>. "
"(MULTI)")
group.add_argument(
"--add_desc_for_gmx", action="store_true",
help='Add "na" to each column to turn this into a GMX geneset file.')
group.add_argument(
"--add_uid_column",
help="Add a column that contains unique IDs. "
"Format: <index>,<header>,<prefix>. The column will be "
"added before <index> (1-based). If <index> is 1, this will be "
'the new first column. If <index> is "END", this will be '
"the last column. Unique IDs will be <prefix><num>.")
group.add_argument(
"--stratify_by_rank", action="append",
help="Stratify a column based on ranks and add a new column with "
"the groupings. Format: <index>;<breakpoints>. "
"Example of <breakpoints> is 0.25,0.50,0.75. Default 0.50. (MULTI)")
group = parser.add_argument_group(title="Changing headers")
group.add_argument(
"--add_header_line", default=[], action="append",
help="Add a header line to a file with no headers. "
"Format: <header1>[,<header2>...]. (MULTI)")
group.add_argument(
"--fill_empty_headers", action="store_true",
help="If the header line contains some blanks, fill them in with "
"defaults.")
group.add_argument(
"--remove_header_line", action="store_true",
help="Remove the header line from the file.")
group.add_argument(
"--reorder_headers_alphabetical", action="store_true",
help="Change the order of the headers.")
group.add_argument(
"--upper_headers", action="store_true",
help="Make headers upper case.")
group.add_argument(
"--lower_headers", action="store_true",
help="Make headers lower case.")
group.add_argument(
"--hash_headers", action="store_true",
help="Hash the names of the headers.")
group.add_argument(
"--remove_duplicate_headers", action="store_true",
help="If a matrix contains columns with the same header, "
"keep only the first column.")
group.add_argument(
"--rename_duplicate_headers", action="store_true",
help="Make all the headers unique.")
group.add_argument(
"--rename_header", default=[], action="append",
help="Rename a header. Format: <from>,<to>. "
"<from> will be replaced with <to>. "
"If there are already commas in the header names, can use ; instead. "
"(MULTI)")
group.add_argument(
"--rename_header_i", default=[], action="append",
help="Rename a header. Format: <index>,<to>. "
"<index> is a 1-based column index. (MULTI)")
group.add_argument(
"--append_to_headers", default=[], action="append",
help="Append text to one or more headers. "
"Format: <indexes>;<text_to_append>. (MULTI)")
group.add_argument(
"--prepend_to_headers", default=[], action="append",
help="Prepend text to one or more headers. "
"Format: <indexes>;<text_to_prepend>. (MULTI)")
group.add_argument(
"--replace_header", default=[], action="append",
help="Replace a (sub)string with another in all headers. "
"Format: <from>,<to>. <from> will be replaced with <to>. (MULTI)")
group.add_argument(
"--replace_header_re", default=[], action="append",
help="Like replace_header, but <from> can be a regular expression. "
"Format: <from>,<to>. <from> will be replaced with <to>. (MULTI)")
group = parser.add_argument_group(title="Changing Annotations")
group.add_argument(
"--strip_all_annots", action="store_true",
help="Get rid of spaces around each of the annotations.")
group.add_argument(
"--upper_annots",
help="Convert annotations to upper case. Format: 1-based indexes.")
group.add_argument(
"--lower_annots",
help="Convert annotations to lower case. Format: 1-based indexes.")
group.add_argument(
"--set_value_if_empty", default=[], action="append",
help="If an annotation is empty, set with this value. "
"Format: <index 1-based>,<value>. (MULTI)")
group.add_argument(
"--set_value_if_not_empty", default=[], action="append",
help="If an annotation is not empty, set with this value. "
"Format: <index 1-based>,<value>. (MULTI)")
group.add_argument(
"--set_value_if_other_annot_equals", default=[], action="append",
help="If the annotation of another column is a specific value, "
"then set this annotation with this value. Format: "
"<this_index 1-based>,<this_value>,<other_index>,<other_value>. "
"(MULTI)")
group.add_argument(
"--set_value_if_other_annot_not_empty", default=[], action="append",
help="If the annotation of another column is not empty, "
"then set this annotation with this value. Format: "
"<this_index 1-based>,<this_value>,<other_index(es)>. "
"(MULTI)")
group.add_argument(
"--copy_value_if_empty", default=[], action="append",
help="If the dest column is empty, copy the value from the src "
"columns. "
"Format: <dest col>,<src col 1>[, <src col 2>...]. Columns "
"are given as 1-based indexes. (MULTI)")
group.add_argument(
"--copy_value_if_empty_header", default=[], action="append",
help="Fill empty annotations with values from other columns "
"with this header. Gets the value from the left-most non-empty "
"column with the same header. "
"Format: <dest header>,<src header 1>[, <src header 2>...]. (MULTI)")
group.add_argument(
"--copy_value_if_empty_same_header", default=[], action="append",
help="Fill empty annotations with values from other columns "
"that share this header. Gets the value from the left-most non-empty "
"column with the same header. (MULTI)")
group.add_argument(
"--copy_value_if_empty_same_header_all", action="store_true",
help="Fill empty annotations with values from other columns "
"that share the same header. Do for all columns that share the same "
"header.")
group.add_argument(
"--rename_annot", default=[], action="append",
help="Replace one whole annotation (not a substring) with another. "
"Format: <indexes>;<src>;<dst>. (MULTI)")
group.add_argument(
"--replace_annot", default=[], action="append",
help="Replace a substring of an annotation with another substring. "
"Format: <indexes>;<src>;<dst>. (MULTI)")
group.add_argument(
"--rename_duplicate_annot",
help="If an annotation is duplicated, then rename them with unique "
"names. Format: <indexes>")
group.add_argument(
"--prepend_to_annots", default=[], action="append",
help="Prepend text to the values in one or more columns. "
"Format: <indexes>;<text_to_prepend>. (MULTI)")
group.add_argument(
"--apply_re_to_annots", default=[], action="append",
help="Apply a regular expression to annots. "
"Format: <indexes>;<regular expression>. (MULTI)")
group.add_argument(
"--merge_annots", default=[], action="append",
help="Merge a multiple annotations into one string. "
"Format: <src indexes>;<dst index>;<merge char>. (MULTI)")
group.add_argument(
"--merge_annots_to_new_col", default=[], action="append",
help="Merge a multiple annotations into one string. "
"Format: <src indexes>;<dst name>;<merge char>. (MULTI)")
group.add_argument(
"--merge_annots_to_new_col_skip_empty", default=[], action="append",
help="Merge a multiple annotations into one string. "
"Ignores annotations that are blank. "
"Format: <src indexes>;<dst name>;<merge char>. (MULTI)")
group.add_argument(
"--split_annots", default=[], action="append",
help="Split an annotation across columns. "
"Format: <src index>;<dst indexes>;<split char>. "
"There should be at least one dst index for each item split. (MULTI)")
group.add_argument(
"--split_annots_and_take_elem", default=[], action="append",
help="Split an annotation, take one element, and put into new column. "
"Format: <src index>;<split_char>;<element index>;<new header>. "
"For example, suppose the annotation has the syntax: "
'"NM_004091 // E2F2 // E2F transcription factor 2". '
'Then "<index>;//;1;RefSeq ID" will split this by "//", pull out '
"the first element, and put it into a new column with header "
'"RefSeq ID". (MULTI)')
group.add_argument(
"--split_chr_start_end", default=[], action="append",
help='Split a chromosome location string (e.g. "chr1:320117-320142") '
"into separate colummns: <chrom> <start> <end>. "
"Format: <header>. <header> may be the name of the header, "
"or a 1-based index. (MULTI)")
group = parser.add_argument_group(title="TCGA barcode operations")
group.add_argument(
"--tcga_relabel_patient_barcodes",
help="Simplify barcodes to just the patient information. "
"Format: <header>. <header> may be the name of the header, "
"or a 1-based index. Will change this column in place.")
group.add_argument(
"--tcga_label_patient_barcodes",
help="Simplify barcodes to just the patient information. "
"Format: <src header>,<dst header>. <src header> may be the name "
"of the header, or a 1-based index. Will save the results to "
"<dst header>. If <dst header> doesn't exist, will create it.")
group.add_argument(
"--tcga_label_by_tissue_type",
help="Label PRIMARY, RECURRENT, METASTATIC, ADDITIONAL_METASTATIC, "
"NORMAL_BLOOD, or NORMAL_SOLID. "
"Format: <src header>,<dst header>. <src header> may be the name "
"of the header, or a 1-based index. Will save the results to "
"<dst header>. If <dst header> doesn't exist, will create it.")
group = parser.add_argument_group(title="Select by annotation")
group.add_argument(
"--select_if_annot_is", action="append",
help="Keep the rows where an annotation is a specific value. "
"Format: <header>,<value>. (MULTI)")
group.add_argument(
"--select_if_annot_startswith",
help="Keep the rows where an annotation starts with a specific value."
" Format: <header>,<value>")
group = parser.add_argument_group(title="Mathematical Operations")
group.add_argument(
"--flip01",
help="Flip 0's to 1's and 1's to 0's. "
"Format: indexes of columns to flip.")
group.add_argument(
"--all_same",
help="Sets a 0 or 1 depending on whether the values in <indexes> "
"are all the same. "
"Format: <indexes>;<index dest>. All indexes should be 1-based.")
group.add_argument(
"--min_annots",
help="Calculate the minimum value across a set of annotations. "
"Format: <indexes>;<index dest>. All indexes should be 1-based.")
group.add_argument(
"--max_annots",
help="Calculate the maximum value across a set of annotations. "
"Format: <indexes>;<index dest>. All indexes should be 1-based.")
group.add_argument(
"--add_to", default=[], action="append",
help="Add a number to a column. "
"Format: <header>,<number>. "
"Header can be the name of the header or a 1-based index. (MULTI)")
group.add_argument(
"--multiply_by", default=[], action="append",
help="Multiply a column by a number. "
"Format: <index>,<number>. "
"All indexes should be 1-based. (MULTI)")
group.add_argument(
"--normalize_to_max", default=[], action="append",
help="Normalize all values in this column to the maximum value. "
"Format: <name>. (MULTI)")
group.add_argument(
"--log_base", default=[], action="append",
help="Log a column with a specific base. "
"Format: <index>,<base>. "
"All indexes should be 1-based. (MULTI)")
group.add_argument(
"--neg_log_base", default=[], action="append",
help="Log a column with a specific base and multiply by -1. "
"Format: <index>,<base>. "
"All indexes should be 1-based. (MULTI)")
group.add_argument(
"--add_two_annots", default=[], action="append",
help="Add column 1 to column 2 and save to a third column. "
"Format: <index 1>,<index 2>,<index dest>. "
"All indexes should be 1-based. (MULTI)")
group.add_argument(
"--subtract_two_annots", default=[], action="append",
help="Subtract column 2 from column 1 and save to a third column. "
"Format: <index 1>,<index 2>,<index dest>. "
"<index dest> = <index 1> - <index 2>. "
"All indexes should be 1-based. (MULTI)")
group.add_argument(
"--divide_two_annots", default=[], action="append",
help="Divide one column by another and save to a third column. "
"Format: <index numerator>,<index denominator>,<index dest>. "
"All indexes should be 1-based. (MULTI)")
group.add_argument(
"--divide_many_annots", default=[], action="append",
help="Divide a list of columns (in place) by another. "
"Format: <indexes numerator>;<index denominator>. "
"All indexes should be 1-based. (MULTI)")
group.add_argument(
"--average_same_header", action="store_true",
help="Average the annotations that have the same header.")
group.add_argument(
"--round", default=[], action="append",
help="Round the values of a column to integers. "
"Format: <index>. All indexes should be 1-based. (MULTI)")
group.add_argument(
"--convert_percent_to_decimal", default=[], action="append",
help='Remove "%%" (if necessary) and divide by 100. '
"Format: <index>. All indexes should be 1-based. (MULTI)")
group = parser.add_argument_group(title="VCF files")
group.add_argument(
"--vcf_standardize",
help="Take a VCF file (from IACS, Platypus, or GATK) and put into "
"a standard format. "
"Format:<info_header>,<format_header>[,<genotype_header>]. "
"<genotype_header> is a list of optional headers for the genotype "
"information. If not given, will use the last-most columns in "
"the file.")
group.add_argument(
"--vcf_remove_bad_coords", action="store_true",
help="Somve VCF files contain bad start or end positions, "
"e.g. 8e+07. Maybe been through Excel? Remove them.")
group.add_argument(
"--vcf_remove_multicalls", action="store_true",
help="Take a VCF file in standard format and make sure there is "
"only one call per variant.")
group.add_argument(
"--vcf_extract_format_values",
help="Take a VCF file and extract the values from the corresponding "
"format column. "
"Format: <format header>,<values header>,<value>[,value]. "
"Example: FORMAT,Sample1,RD,AD,DP,FREQ")
group.add_argument(
"--vcf_extract_info_values",
help="Take a VCF file and extract the values from the INFO "
"column. Creates new columns."
"Format: <INFO header>,<value>[,value]. "
"Example: INFO,TC,TR")
group.add_argument(
"--vcf_split_AD", default=[], action="append",
help="Split the AD value across columns. "
"Format: <src index>;<dst indexes>. "
"There should be at least one dst index for each item split.")
group.add_argument(
"--vcf_calc_vaf", default=[], action="append",
help="Calculate the variant allele frequency. "
"Format: <ref index>,<alt index>,<vaf index>. (MULTI)")
group = parser.add_argument_group(title="Application-Specific Stuff")
## group.add_argument(
## "--calc_blockSizes",
## help="For BED files, calculate blockSizes from blockStarts and "
## "blockEnds. "
## "Format: <blockStarts index>,<blockEnds index>,<index dest>. "
## "All indexes should be 1-based.")
group.add_argument(
"--subtract_two_bed_lists",
help="For BED files, subtract column 2 (comma-separated values) "
"from column 1 (comma-separated values) and save to a third "
"column (comma-separated values). "
"Format: <index 1>,<index 2>,<index dest>. "
"<index dest> = <index 1> - <index 2>. "
"All indexes should be 1-based.")
group.add_argument(
"--subtract_value_from_bed_list",
help="For BED files, subtract column 2 (one value) "
"from column 1 (comma-separated values) and save to a third "
"column (comma-separated values). "
"Format: <index 1>,<index 2>,<index dest>. "
"<index dest> = <index 1> - <index 2>. "
"All indexes should be 1-based.")
args = parser.parse_args()
assert len(args.filename) == 1
FILENAME = args.filename[0]
assert not (args.read_as_csv and args.read_as_svm)
# Do operations that do not take a matrix.
if args.add_header_line:
assert not args.read_as_svm
MATRIX = add_header_line(
args.filename[0], args.add_header_line, args.read_as_csv)
elif args.remove_header_line:
assert not args.read_as_svm
remove_header_line(args.filename[0], args.read_as_csv)
sys.exit(0)
elif args.read_as_svm:
assert not args.ignore_lines_startswith
MATRIX = SimpleVariantMatrix.read_as_am(args.filename[0])
else:
# Read the matrix.
MATRIX = AnnotationMatrix.read(
args.filename[0], args.read_as_csv, args.ignore_lines_startswith)
# Perform operations.
MATRIX = indexes_matrix(MATRIX, args.indexes)
MATRIX = select_cols_str(MATRIX, args.select_cols_str)
MATRIX = select_cols_substr(MATRIX, args.select_cols_substr)
MATRIX = add_column(MATRIX, args.add_column)
MATRIX = copy_column(MATRIX, args.copy_column)
MATRIX = add_desc_for_gmx(MATRIX, args.add_desc_for_gmx)
MATRIX = add_uid_column(MATRIX, args.add_uid_column)
MATRIX = stratify_by_rank(MATRIX, args.stratify_by_rank)
# Changing the headers.
MATRIX = fill_empty_headers(MATRIX, args.fill_empty_headers)
MATRIX = reorder_headers_alphabetical(
MATRIX, args.reorder_headers_alphabetical)
MATRIX = upper_headers(MATRIX, args.upper_headers)
MATRIX = lower_headers(MATRIX, args.lower_headers)
MATRIX = hash_headers(MATRIX, args.hash_headers)
MATRIX = remove_duplicate_headers(MATRIX, args.remove_duplicate_headers)
MATRIX = rename_duplicate_headers(MATRIX, args.rename_duplicate_headers)
MATRIX = rename_header(MATRIX, args.rename_header)
MATRIX = rename_header_i(MATRIX, args.rename_header_i)
MATRIX = replace_header(MATRIX, args.replace_header)
MATRIX = replace_header_re(MATRIX, args.replace_header_re)
MATRIX = append_to_headers(MATRIX, args.append_to_headers)
MATRIX = prepend_to_headers(MATRIX, args.prepend_to_headers)
# Changing the values.
MATRIX = strip_all_annots(MATRIX, args.strip_all_annots)
MATRIX = upper_annots(MATRIX, args.upper_annots)
MATRIX = lower_annots(MATRIX, args.lower_annots)
MATRIX = set_value_if_empty(MATRIX, args.set_value_if_empty)
MATRIX = set_value_if_not_empty(MATRIX, args.set_value_if_not_empty)
MATRIX = set_value_if_other_annot_equals(
MATRIX, args.set_value_if_other_annot_equals)
MATRIX = set_value_if_other_annot_not_empty(
MATRIX, args.set_value_if_other_annot_not_empty)
MATRIX = copy_value_if_empty(MATRIX, args.copy_value_if_empty)
MATRIX = copy_value_if_empty_header(
MATRIX, args.copy_value_if_empty_header)
MATRIX = copy_value_if_empty_same_header(
MATRIX, args.copy_value_if_empty_same_header)
MATRIX = copy_value_if_empty_same_header_all(
MATRIX, args.copy_value_if_empty_same_header_all)
MATRIX = replace_annot(MATRIX, args.replace_annot)
MATRIX = replace_whole_annot(MATRIX, args.rename_annot)
MATRIX = rename_duplicate_annot(MATRIX, args.rename_duplicate_annot)
MATRIX = prepend_to_annots(MATRIX, args.prepend_to_annots)
MATRIX = apply_re_to_annots(MATRIX, args.apply_re_to_annots)
MATRIX = merge_annots(MATRIX, args.merge_annots)
MATRIX = merge_annots_to_new_col(MATRIX, args.merge_annots_to_new_col)
MATRIX = merge_annots_to_new_col_skip_empty(
MATRIX, args.merge_annots_to_new_col_skip_empty)
MATRIX = split_annots(MATRIX, args.split_annots)
MATRIX = split_annots_and_take_elem(
MATRIX, args.split_annots_and_take_elem)
MATRIX = split_chr_start_end(MATRIX, args.split_chr_start_end)
# TCGA stuff
MATRIX = tcga_relabel_patient_barcodes(
MATRIX, args.tcga_relabel_patient_barcodes)
MATRIX = tcga_label_patient_barcodes(
MATRIX, args.tcga_label_patient_barcodes)
MATRIX = tcga_label_by_tissue_type(MATRIX, args.tcga_label_by_tissue_type)
# Selection by annotation.
MATRIX = select_if_annot_is(MATRIX, args.select_if_annot_is)
MATRIX = select_if_annot_startswith(
MATRIX, args.select_if_annot_startswith)
# Math operations.
MATRIX = flip01_matrix(MATRIX, args.flip01)
MATRIX = all_same(MATRIX, args.all_same)
MATRIX = min_annots(MATRIX, args.min_annots)
MATRIX = max_annots(MATRIX, args.max_annots)
MATRIX = log_base(MATRIX, args.log_base)
MATRIX = neg_log_base(MATRIX, args.neg_log_base)
MATRIX = add_to(MATRIX, args.add_to)
MATRIX = multiply_by(MATRIX, args.multiply_by)
MATRIX = normalize_to_max(MATRIX, args.normalize_to_max)
MATRIX = add_two_annots(MATRIX, args.add_two_annots)
MATRIX = subtract_two_annots(MATRIX, args.subtract_two_annots)
MATRIX = divide_two_annots(MATRIX, args.divide_two_annots)
MATRIX = divide_many_annots(MATRIX, args.divide_many_annots)
MATRIX = average_same_header(MATRIX, args.average_same_header)
MATRIX = round_annots(MATRIX, args.round)
MATRIX = convert_percent_to_decimal(
MATRIX, args.convert_percent_to_decimal)
# VCF
MATRIX = vcf_standardize(MATRIX, args.vcf_standardize)
MATRIX = vcf_remove_bad_coords(MATRIX, args.vcf_remove_bad_coords)
MATRIX = vcf_remove_multicalls(MATRIX, args.vcf_remove_multicalls)
MATRIX = vcf_extract_format_values(MATRIX, args.vcf_extract_format_values)
MATRIX = vcf_extract_info_values(MATRIX, args.vcf_extract_info_values)
MATRIX = vcf_split_AD(MATRIX, args.vcf_split_AD)
MATRIX = vcf_calc_vaf(MATRIX, args.vcf_calc_vaf)
# Application-specific stuff
#MATRIX = calc_blockSizes(MATRIX, args.calc_blockSizes)
MATRIX = subtract_two_bed_lists(MATRIX, args.subtract_two_bed_lists)
MATRIX = subtract_value_from_bed_list(
MATRIX, args.subtract_value_from_bed_list)
# Write the matrix back out.
delim = None
if args.write_as_csv:
delim = ","
AnnotationMatrix.write(sys.stdout, MATRIX, delim=delim)
if __name__ == '__main__':
main()
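# Example invocation (hypothetical file name; output goes to stdout, so
# redirect it to save the sliced matrix):
#   python slice_annot.py annotations.txt --cut 1-5,8 --upper_headers > out.txt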
|
jefftc/changlab
|
scripts/slice_annot.py
|
Python
|
mit
| 110,648
|
[
"ADF"
] |
63a379fadc816b2c74e12914809de696131d68ada7c10e1717dcf7d30852279f
|
import argparse
import torch
import pickle
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# Training settings
parser = argparse.ArgumentParser(description='PyTorch semi-supervised MNIST')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
help='input batch size for training (default: 100)')
parser.add_argument('--epochs', type=int, default=500, metavar='N',
                    help='number of epochs to train (default: 500)')
args = parser.parse_args()
cuda = torch.cuda.is_available()
seed = 10
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
n_classes = 10
z_dim = 2
X_dim = 784
y_dim = 10
train_batch_size = args.batch_size
valid_batch_size = args.batch_size
N = 1000
epochs = args.epochs
##################################
# Load data and create Data loaders
##################################
def load_data(data_path='../data/'):
print('loading data!')
trainset_labeled = pickle.load(open(data_path + "train_labeled.p", "rb"))
trainset_unlabeled = pickle.load(open(data_path + "train_unlabeled.p", "rb"))
# Set -1 as labels for unlabeled data
trainset_unlabeled.train_labels = torch.from_numpy(np.array([-1] * 47000))
validset = pickle.load(open(data_path + "validation.p", "rb"))
train_labeled_loader = torch.utils.data.DataLoader(trainset_labeled,
batch_size=train_batch_size,
shuffle=True, **kwargs)
train_unlabeled_loader = torch.utils.data.DataLoader(trainset_unlabeled,
batch_size=train_batch_size,
shuffle=True, **kwargs)
valid_loader = torch.utils.data.DataLoader(validset, batch_size=valid_batch_size, shuffle=True)
return train_labeled_loader, train_unlabeled_loader, valid_loader
##################################
# Define Networks
##################################
# Encoder
class Q_net(nn.Module):
def __init__(self):
super(Q_net, self).__init__()
self.lin1 = nn.Linear(X_dim, N)
self.lin2 = nn.Linear(N, N)
# Gaussian code (z)
self.lin3gauss = nn.Linear(N, z_dim)
def forward(self, x):
x = F.dropout(self.lin1(x), p=0.2, training=self.training)
x = F.relu(x)
x = F.dropout(self.lin2(x), p=0.2, training=self.training)
x = F.relu(x)
xgauss = self.lin3gauss(x)
return xgauss
# Decoder
class P_net(nn.Module):
def __init__(self):
super(P_net, self).__init__()
self.lin1 = nn.Linear(z_dim + n_classes, N)
self.lin2 = nn.Linear(N, N)
self.lin3 = nn.Linear(N, X_dim)
def forward(self, x):
x = self.lin1(x)
x = F.dropout(x, p=0.2, training=self.training)
x = F.relu(x)
x = self.lin2(x)
x = F.dropout(x, p=0.2, training=self.training)
x = self.lin3(x)
return F.sigmoid(x)
class D_net_gauss(nn.Module):
def __init__(self):
super(D_net_gauss, self).__init__()
self.lin1 = nn.Linear(z_dim, N)
self.lin2 = nn.Linear(N, N)
self.lin3 = nn.Linear(N, 1)
def forward(self, x):
x = F.dropout(self.lin1(x), p=0.2, training=self.training)
x = F.relu(x)
x = F.dropout(self.lin2(x), p=0.2, training=self.training)
x = F.relu(x)
return F.sigmoid(self.lin3(x))
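# Shape sketch (following the constants above: X_dim=784, z_dim=2,
# n_classes=10; the batch size of 100 is just for illustration):
#   z = Q(X)                                # X: (100, 784) -> z: (100, 2)
#   x_rec = P(torch.cat((y_onehot, z), 1))  # y_onehot: (100, 10) -> x_rec: (100, 784)
#   p = D_gauss(z)                          # z: (100, 2) -> p: (100, 1), prob. of prior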
####################
# Utility functions
####################
def save_model(model, filename):
print('Best model so far, saving it...')
torch.save(model.state_dict(), filename)
def report_loss(epoch, D_loss_gauss, G_loss, recon_loss):
'''
Print loss
'''
print('Epoch-{}; D_loss_gauss: {:.4}; G_loss: {:.4}; recon_loss: {:.4}'.format(epoch,
D_loss_gauss.data[0],
G_loss.data[0],
recon_loss.data[0]))
def create_latent(Q, loader):
'''
Creates the latent representation for the samples in loader
return:
z_values: numpy array with the latent representations
labels: the labels corresponding to the latent representations
'''
Q.eval()
labels = []
for batch_idx, (X, target) in enumerate(loader):
X = X * 0.3081 + 0.1307
        # Flatten the images so Q (which expects X_dim-long inputs) can
        # process them.
        X.resize_(X.size(0), X_dim)
X, target = Variable(X), Variable(target)
labels.extend(target.data.tolist())
if cuda:
X, target = X.cuda(), target.cuda()
# Reconstruction phase
z_sample = Q(X)
if batch_idx > 0:
z_values = np.concatenate((z_values, np.array(z_sample.data.tolist())))
else:
z_values = np.array(z_sample.data.tolist())
labels = np.array(labels)
return z_values, labels
def get_categorical(labels, n_classes=10):
cat = np.array(labels.data.tolist())
cat = np.eye(n_classes)[cat].astype('float32')
cat = torch.from_numpy(cat)
return Variable(cat)
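# For instance (a minimal sketch; labels is assumed to wrap a LongTensor of
# class indexes):
#   y = get_categorical(Variable(torch.LongTensor([0, 2])), n_classes=3)
#   y.data  ->  [[1., 0., 0.],
#                [0., 0., 1.]]   (float32 one-hot rows)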
####################
# Train procedure
####################
def train(P, Q, D_gauss, P_decoder, Q_encoder, Q_generator, D_gauss_solver, data_loader):
'''
Train procedure for one epoch.
'''
TINY = 1e-15
# Set the networks in train mode (apply dropout when needed)
Q.train()
P.train()
D_gauss.train()
    # Loop through the data loader one batch at a time.
    # The batch size has to be a divisor of the dataset size, or the final
    # partial batch will have the wrong shape for the resize_ call below.
for X, target in data_loader:
        # Load batch and undo the standard MNIST normalization
        # (mean 0.1307, std 0.3081) so samples lie roughly in [0, 1]
X = X * 0.3081 + 0.1307
X.resize_(train_batch_size, X_dim)
X, target = Variable(X), Variable(target)
if cuda:
X, target = X.cuda(), target.cuda()
# Init gradients
P.zero_grad()
Q.zero_grad()
D_gauss.zero_grad()
#######################
# Reconstruction phase
#######################
z_gauss = Q(X)
z_cat = get_categorical(target, n_classes=10)
if cuda:
z_cat = z_cat.cuda()
z_sample = torch.cat((z_cat, z_gauss), 1)
X_sample = P(z_sample)
recon_loss = F.binary_cross_entropy(X_sample + TINY, X.resize(train_batch_size, X_dim) + TINY)
recon_loss.backward()
P_decoder.step()
Q_encoder.step()
P.zero_grad()
Q.zero_grad()
D_gauss.zero_grad()
#######################
# Regularization phase
#######################
# Discriminator
Q.eval()
z_real_gauss = Variable(torch.randn(train_batch_size, z_dim) * 5.)
if cuda:
z_real_gauss = z_real_gauss.cuda()
z_fake_gauss = Q(X)
D_real_gauss = D_gauss(z_real_gauss)
D_fake_gauss = D_gauss(z_fake_gauss)
D_loss = -torch.mean(torch.log(D_real_gauss + TINY) + torch.log(1 - D_fake_gauss + TINY))
D_loss.backward()
D_gauss_solver.step()
P.zero_grad()
Q.zero_grad()
D_gauss.zero_grad()
# Generator
Q.train()
z_fake_gauss = Q(X)
D_fake_gauss = D_gauss(z_fake_gauss)
G_loss = -torch.mean(torch.log(D_fake_gauss + TINY))
G_loss.backward()
Q_generator.step()
P.zero_grad()
Q.zero_grad()
D_gauss.zero_grad()
return D_loss, G_loss, recon_loss
def generate_model(train_labeled_loader, train_unlabeled_loader, valid_loader):
torch.manual_seed(10)
if cuda:
Q = Q_net().cuda()
P = P_net().cuda()
D_gauss = D_net_gauss().cuda()
else:
Q = Q_net()
P = P_net()
D_gauss = D_net_gauss()
# Set learning rates
gen_lr = 0.0001
reg_lr = 0.00005
# Set optimizators
P_decoder = optim.Adam(P.parameters(), lr=gen_lr)
Q_encoder = optim.Adam(Q.parameters(), lr=gen_lr)
Q_generator = optim.Adam(Q.parameters(), lr=reg_lr)
D_gauss_solver = optim.Adam(D_gauss.parameters(), lr=reg_lr)
    for epoch in range(epochs):
        # Train on the labeled training data (the original passed valid_loader
        # here, which would have trained on the validation set).
        D_loss_gauss, G_loss, recon_loss = train(P, Q, D_gauss, P_decoder, Q_encoder,
                                                 Q_generator,
                                                 D_gauss_solver,
                                                 train_labeled_loader)
        if epoch % 10 == 0:
            report_loss(epoch, D_loss_gauss, G_loss, recon_loss)
    return Q, P
if __name__ == '__main__':
train_labeled_loader, train_unlabeled_loader, valid_loader = load_data()
Q, P = generate_model(train_labeled_loader, train_unlabeled_loader, valid_loader)
|
fducau/AAE_pytorch
|
script/aae_supervised.py
|
Python
|
gpl-3.0
| 8,911
|
[
"Gaussian"
] |
ca1dc18f30633253478f48f3bc12bb68c97148cc694afd56d3d95df4b48cd465
|
"""Deal with Motifs or Signatures allowing ambiguity in the sequences.
This class contains Schema which deal with Motifs and Signatures at
a higher level, by introducing `don't care` (ambiguity) symbols into
the sequences. For instance, you could combine the following Motifs:
'GATC', 'GATG', 'GATA', 'GATT'
as all falling under a schema like 'GAT*', where the star indicates a
character can be anything. This helps us condense a whole ton of
motifs or signatures.
"""
# standard modules
import random
import string
import re
# biopython
from Bio import Alphabet
from Bio.Seq import MutableSeq
# neural network libraries
from Pattern import PatternRepository
# genetic algorithm libraries
from Bio.GA import Organism
from Bio.GA.Evolver import GenerationEvolver
from Bio.GA.Mutation.Simple import SinglePositionMutation
from Bio.GA.Crossover.Point import SinglePointCrossover
from Bio.GA.Repair.Stabilizing import AmbiguousRepair
from Bio.GA.Selection.Tournament import TournamentSelection
from Bio.GA.Selection.Diversity import DiversitySelection
class Schema:
"""Deal with motifs that have ambiguity characters in it.
This motif class allows specific ambiguity characters and tries to
speed up finding motifs using regular expressions.
This is likely to be a replacement for the Schema representation,
since it allows multiple ambiguity characters to be used.
"""
def __init__(self, ambiguity_info):
"""Initialize with ambiguity information.
Arguments:
o ambiguity_info - A dictionary which maps letters in the motifs to
the ambiguous characters which they might represent. For example,
{'R' : 'AG'} specifies that Rs in the motif can match a A or a G.
All letters in the motif must be represented in the ambiguity_info
dictionary.
"""
self._ambiguity_info = ambiguity_info
# a cache of all encoded motifs
self._motif_cache = {}
def encode_motif(self, motif):
"""Encode the passed motif as a regular expression pattern object.
Arguments:
o motif - The motif we want to encode. This should be a string.
Returns:
A compiled regular expression pattern object that can be used
for searching strings.
"""
regexp_string = ""
for motif_letter in motif:
try:
letter_matches = self._ambiguity_info[motif_letter]
except KeyError:
raise KeyError("No match information for letter %s"
% motif_letter)
if len(letter_matches) > 1:
regexp_match = "[" + letter_matches + "]"
elif len(letter_matches) == 1:
regexp_match = letter_matches
else:
raise ValueError("Unexpected match information %s"
% letter_matches)
regexp_string += regexp_match
return re.compile(regexp_string)
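    # Example (a sketch using a hypothetical ambiguity map): with
    # ambiguity_info = {'G': 'G', 'A': 'A', 'T': 'T', 'C': 'C', 'R': 'AG'},
    # encode_motif('GARC') compiles the pattern 'GA[AG]C', which matches
    # 'GAAC' and 'GAGC' but not 'GATC'.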
def find_ambiguous(self, motif):
"""Return the location of ambiguous items in the motif.
This just checks through the motif and compares each letter
against the ambiguity information. If a letter stands for multiple
items, it is ambiguous.
"""
ambig_positions = []
for motif_letter_pos in range(len(motif)):
motif_letter = motif[motif_letter_pos]
try:
letter_matches = self._ambiguity_info[motif_letter]
except KeyError:
raise KeyError("No match information for letter %s"
% motif_letter)
if len(letter_matches) > 1:
ambig_positions.append(motif_letter_pos)
return ambig_positions
def num_ambiguous(self, motif):
"""Return the number of ambiguous letters in a given motif.
"""
ambig_positions = self.find_ambiguous(motif)
return len(ambig_positions)
def find_matches(self, motif, query):
"""Return all non-overlapping motif matches in the query string.
This utilizes the regular expression findall function, and will
        return a list of all non-overlapping occurrences in query that
match the ambiguous motif.
"""
try:
motif_pattern = self._motif_cache[motif]
except KeyError:
motif_pattern = self.encode_motif(motif)
self._motif_cache[motif] = motif_pattern
return motif_pattern.findall(query)
def num_matches(self, motif, query):
"""Find the number of non-overlapping times motif occurs in query.
"""
all_matches = self.find_matches(motif, query)
return len(all_matches)
def all_unambiguous(self):
"""Return a listing of all unambiguous letters allowed in motifs.
"""
all_letters = self._ambiguity_info.keys()
all_letters.sort()
unambig_letters = []
for letter in all_letters:
possible_matches = self._ambiguity_info[letter]
if len(possible_matches) == 1:
unambig_letters.append(letter)
return unambig_letters
# --- helper classes and functions for the default SchemaFinder
# -- Alphabets
class SchemaDNAAlphabet(Alphabet.Alphabet):
"""Alphabet of a simple Schema for DNA sequences.
This defines a simple alphabet for DNA sequences that has a single
character which can match any other character.
o G,A,T,C - The standard unambiguous DNA alphabet.
o * - Any letter
"""
letters = ["G", "A", "T", "C", "*"]
alphabet_matches = {"G" : "G",
"A" : "A",
"T" : "T",
"C" : "C",
"*" : "GATC"}
# -- GA schema finder
class GeneticAlgorithmFinder:
"""Find schemas using a genetic algorithm approach.
This approach to finding schema uses Genetic Algorithms to evolve
a set of schema and find the best schema for a specific set of
records.
The 'default' finder searches for ambiguous DNA elements. This
can be overridden easily by creating a GeneticAlgorithmFinder
with a different alphabet.
"""
def __init__(self, alphabet = SchemaDNAAlphabet()):
"""Initialize a finder to get schemas using Genetic Algorithms.
Arguments:
o alphabet -- The alphabet which specifies the contents of the
schemas we'll be generating. This alphabet must contain the
attribute 'alphabet_matches', which is a dictionary specifying
the potential ambiguities of each letter in the alphabet. These
ambiguities will be used in building up the schema.
"""
self.alphabet = alphabet
self.initial_population = 500
self.min_generations = 10
self._set_up_genetic_algorithm()
def _set_up_genetic_algorithm(self):
"""Overrideable function to set up the genetic algorithm parameters.
        This function's sole job is to set up the different genetic
        algorithm functionality. Since this can be quite complicated, this
        allows customizability of all of the parameters. If you want to
customize specially, you can inherit from this class and override
this function.
"""
self.motif_generator = RandomMotifGenerator(self.alphabet)
self.mutator = SinglePositionMutation(mutation_rate = 0.1)
self.crossover = SinglePointCrossover(crossover_prob = 0.25)
self.repair = AmbiguousRepair(Schema(self.alphabet.alphabet_matches),
4)
self.base_selector = TournamentSelection(self.mutator, self.crossover,
self.repair, 2)
self.selector = DiversitySelection(self.base_selector,
self.motif_generator.random_motif)
def find_schemas(self, fitness, num_schemas):
"""Find the given number of unique schemas using a genetic algorithm
Arguments:
o fitness - A callable object (ie. function) which will evaluate
the fitness of a motif.
o num_schemas - The number of unique schemas with good fitness
that we want to generate.
"""
start_population = \
Organism.function_population(self.motif_generator.random_motif,
self.initial_population,
fitness)
finisher = SimpleFinisher(num_schemas, self.min_generations)
# set up the evolver and do the evolution
evolver = GenerationEvolver(start_population, self.selector)
evolved_pop = evolver.evolve(finisher.is_finished)
# convert the evolved population into a PatternRepository
schema_info = {}
for org in evolved_pop:
# convert the Genome from a MutableSeq to a Seq so that
# the schemas are just strings (and not array("c")s)
seq_genome = org.genome.toseq()
schema_info[seq_genome.data] = org.fitness
return PatternRepository(schema_info)
# -- fitness classes
class DifferentialSchemaFitness:
"""Calculate fitness for schemas that differentiate between sequences.
"""
def __init__(self, positive_seqs, negative_seqs, schema_evaluator):
"""Initialize with different sequences to evaluate
Arguments:
        o positive_seqs - A list of SeqRecord objects which are the 'positive'
        sequences -- the ones we want to select for.
        o negative_seqs - A list of SeqRecord objects which are the 'negative'
        sequences that we want to avoid selecting.
        o schema_evaluator - A Schema instance which can be used to
        find motif matches in sequences.
"""
self._pos_seqs = positive_seqs
self._neg_seqs = negative_seqs
self._schema_eval = schema_evaluator
def calculate_fitness(self, genome):
"""Calculate the fitness for a given schema.
        Fitness is specified by the number of occurrences of the schema in
        the positive sequences minus the number of occurrences in the
negative examples.
This fitness is then modified by multiplying by the length of the
schema and then dividing by the number of ambiguous characters in
the schema. This helps select for schema which are longer and have
less redundancy.
"""
# convert the genome into a string
seq_motif = genome.toseq()
motif = seq_motif.data
# get the counts in the positive examples
num_pos = 0
for seq_record in self._pos_seqs:
cur_counts = self._schema_eval.num_matches(motif,
seq_record.seq.data)
num_pos += cur_counts
# get the counts in the negative examples
num_neg = 0
for seq_record in self._neg_seqs:
cur_counts = self._schema_eval.num_matches(motif,
seq_record.seq.data)
num_neg += cur_counts
num_ambiguous = self._schema_eval.num_ambiguous(motif)
# weight the ambiguous stuff more highly
num_ambiguous = pow(2.0, num_ambiguous)
# increment num ambiguous to prevent division by zero errors.
num_ambiguous += 1
motif_size = len(motif)
motif_size = motif_size * 4.0
discerning_power = num_pos - num_neg
diff = (discerning_power * motif_size) / float(num_ambiguous)
return diff
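# Worked example of the fitness above (hypothetical counts): with 6 positive
# matches, 1 negative match, a 13-letter schema and 2 ambiguous positions:
#   num_ambiguous = 2.0 ** 2 + 1 = 5.0
#   diff = ((6 - 1) * (13 * 4.0)) / 5.0 = 52.0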
class MostCountSchemaFitness:
"""Calculate a fitness giving weight to schemas that match many times.
This fitness function tries to maximize schemas which are found many
times in a group of sequences.
"""
def __init__(self, seq_records, schema_evaluator):
"""Initialize with sequences to evaluate.
Arguments:
o seq_records -- A set of SeqRecord objects which we use to
calculate the fitness.
        o schema_evaluator - A Schema instance which can be used to
        find motif matches in sequences.
"""
self._records = seq_records
self._evaluator = schema_evaluator
def calculate_fitness(self, genome):
"""Calculate the fitness of a genome based on schema matches.
This bases the fitness of a genome completely on the number of times
it matches in the set of seq_records. Matching more times gives a
better fitness
"""
# convert the genome into a string
seq_motif = genome.toseq()
motif = seq_motif.data
# find the number of times the genome matches
num_times = 0
for seq_record in self._records:
cur_counts = self._evaluator.num_matches(motif,
seq_record.seq.data)
num_times += cur_counts
return num_times
# -- Helper classes
class RandomMotifGenerator:
"""Generate a random motif within given parameters.
"""
def __init__(self, alphabet, min_size = 12, max_size = 17):
"""Initialize with the motif parameters.
Arguments:
o alphabet - An alphabet specifying what letters can be inserted in
a motif.
o min_size, max_size - Specify the range of sizes for motifs.
"""
self._alphabet = alphabet
self._min_size = min_size
self._max_size = max_size
def random_motif(self):
"""Create a random motif within the given parameters.
This returns a single motif string with letters from the given
        alphabet. The size of the motif will be randomly chosen between
        min_size (inclusive) and max_size (exclusive).
"""
motif_size = random.randrange(self._min_size, self._max_size)
motif = ""
for letter_num in range(motif_size):
cur_letter = random.choice(self._alphabet.letters)
motif += cur_letter
return MutableSeq(motif, self._alphabet)
class SimpleFinisher:
"""Determine when we are done evolving motifs.
This takes the very simple approach of halting evolution when the
GA has proceeded for a specified number of generations and has
a given number of unique schema with positive fitness.
"""
def __init__(self, num_schemas, min_generations = 100):
"""Initialize the finisher with its parameters.
Arguments:
o num_schemas -- the number of useful (positive fitness) schemas
        we want to generate
o min_generations -- The minimum number of generations to allow
the GA to proceed.
"""
self.num_generations = 0
self.num_schemas = num_schemas
self.min_generations = min_generations
def is_finished(self, organisms):
"""Determine when we can stop evolving the population.
"""
self.num_generations += 1
# print "generation %s" % self.num_generations
if self.num_generations >= self.min_generations:
all_seqs = []
for org in organisms:
if org.fitness > 0:
if org.genome not in all_seqs:
all_seqs.append(org.genome)
if len(all_seqs) >= self.num_schemas:
return 1
return 0
# ---
class SchemaFinder:
"""Find schema in a set of sequences using a genetic algorithm approach.
Finding good schemas is very difficult because it takes forever to
    enumerate all of the potential schemas. This finder uses a genetic
    algorithm approach to evolve good schemas which match many times in
a set of sequences.
The default implementation of the finder is ready to find schemas
in a set of DNA sequences, but the finder can be customized to deal
with any type of data.
"""
def __init__(self, num_schemas = 100,
schema_finder = GeneticAlgorithmFinder()):
self.num_schemas = num_schemas
self._finder = schema_finder
self.evaluator = Schema(self._finder.alphabet.alphabet_matches)
def find(self, seq_records):
"""Find well-represented schemas in the given set of SeqRecords.
"""
fitness_evaluator = MostCountSchemaFitness(seq_records,
self.evaluator)
return self._finder.find_schemas(fitness_evaluator.calculate_fitness,
self.num_schemas)
def find_differences(self, first_records, second_records):
"""Find schemas which differentiate between the two sets of SeqRecords.
"""
fitness_evaluator = DifferentialSchemaFitness(first_records,
second_records,
self.evaluator)
return self._finder.find_schemas(fitness_evaluator.calculate_fitness,
self.num_schemas)
class SchemaCoder:
"""Convert a sequence into a representation of ambiguous motifs (schemas).
This takes a sequence, and returns the number of times specified
motifs are found in the sequence. This lets you represent a sequence
as just a count of (possibly ambiguous) motifs.
"""
def __init__(self, schemas, ambiguous_converter):
"""Initialize the coder to convert sequences
Arguments:
        o schemas - A list of all of the schemas we want to search for
in input sequences.
        o ambiguous_converter - A Schema instance which can be
        used to convert motifs into regular expressions for searching.
"""
self._schemas = schemas
self._converter = ambiguous_converter
def representation(self, sequence):
"""Represent the given input sequence as a bunch of motif counts.
Arguments:
o sequence - A Bio.Seq object we are going to represent as schemas.
This takes the sequence, searches for the motifs within it, and then
        returns counts specifying the relative number of times each motif
        was found. The frequencies are in the order the original motifs were
passed into the initializer.
"""
schema_counts = []
for schema in self._schemas:
num_counts = self._converter.num_matches(schema, sequence.data)
schema_counts.append(num_counts)
# normalize the counts to go between zero and one
min_count = 0
max_count = max(schema_counts)
# only normalize if we've actually found something, otherwise
# we'll just return 0 for everything
if max_count > 0:
for count_num in range(len(schema_counts)):
schema_counts[count_num] = (float(schema_counts[count_num]) -
float(min_count)) / float(max_count)
return schema_counts
def matches_schema(pattern, schema, ambiguity_character = '*'):
"""Determine whether or not the given pattern matches the schema.
Arguments:
o pattern - A string representing the pattern we want to check for
matching. This pattern can contain ambiguity characters (which are
assumed to be the same as those in the schema).
o schema - A string schema with ambiguity characters.
o ambiguity_character - The character used for ambiguity in the schema.
"""
if len(pattern) != len(schema):
return 0
# check each position, and return a non match if the schema and pattern
# are non ambiguous and don't match
for pos in range(len(pattern)):
if (schema[pos] != ambiguity_character and
pattern[pos] != ambiguity_character and
pattern[pos] != schema[pos]):
return 0
return 1
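# For instance:
#   matches_schema('GATC', 'GA*C')  ->  1
#   matches_schema('GGTC', 'GA*C')  ->  0
#   matches_schema('GA*C', 'GATC')  ->  1  (ambiguity in the pattern matches too)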
class SchemaFactory:
"""Generate Schema from inputs of Motifs or Signatures.
"""
def __init__(self, ambiguity_symbol = '*'):
"""Initialize the SchemaFactory
Arguments:
o ambiguity_symbol -- The symbol to use when specifying that
a position is arbitrary.
"""
self._ambiguity_symbol = ambiguity_symbol
def from_motifs(self, motif_repository, motif_percent, num_ambiguous):
"""Generate schema from a list of motifs.
Arguments:
o motif_repository - A MotifRepository class that has all of the
motifs we want to convert to Schema.
o motif_percent - The percentage of motifs in the motif bank which
        should be matched. We'll try to create schema that match this
percentage of motifs.
o num_ambiguous - The number of ambiguous characters to include
in each schema. The positions of these ambiguous characters will
be randomly selected.
"""
# get all of the motifs we can deal with
all_motifs = motif_repository.get_top_percentage(motif_percent)
# start building up schemas
schema_info = {}
# continue until we've built schema matching the desired percentage
# of motifs
total_count = self._get_num_motifs(motif_repository, all_motifs)
matched_count = 0
assert total_count > 0, "Expected to have motifs to match"
while (float(matched_count) / float(total_count)) < motif_percent:
new_schema, matching_motifs = \
self._get_unique_schema(schema_info.keys(),
all_motifs, num_ambiguous)
# get the number of counts for the new schema and clean up
# the motif list
schema_counts = 0
for motif in matching_motifs:
# get the counts for the motif
schema_counts += motif_repository.count(motif)
# remove the motif from the motif list since it is already
# represented by this schema
all_motifs.remove(motif)
            # add the schema info
schema_info[new_schema] = schema_counts
matched_count += schema_counts
# print "percentage:", float(matched_count) / float(total_count)
return PatternRepository(schema_info)
def _get_num_motifs(self, repository, motif_list):
"""Return the number of motif counts for the list of motifs.
"""
motif_count = 0
for motif in motif_list:
motif_count += repository.count(motif)
return motif_count
def _get_unique_schema(self, cur_schemas, motif_list, num_ambiguous):
"""Retrieve a unique schema from a motif.
We don't want to end up with schema that match the same thing,
since this could lead to ambiguous results, and be messy. This
tries to create schema, and checks that they do not match any
currently existing schema.
"""
# create a schema starting with a random motif
# we'll keep doing this until we get a completely new schema that
# doesn't match any old schema
num_tries = 0
while 1:
# pick a motif to work from and make a schema from it
cur_motif = random.choice(motif_list)
num_tries += 1
new_schema, matching_motifs = \
self._schema_from_motif(cur_motif, motif_list,
num_ambiguous)
has_match = 0
for old_schema in cur_schemas:
if matches_schema(new_schema, old_schema,
self._ambiguity_symbol):
has_match = 1
# if the schema doesn't match any other schema we've got
# a good one
if not(has_match):
break
# check for big loops in which we can't find a new schema
assert num_tries < 150, \
"Could not generate schema in %s tries from %s with %s" \
% (num_tries, motif_list, cur_schemas)
return new_schema, matching_motifs
def _schema_from_motif(self, motif, motif_list, num_ambiguous):
"""Create a schema from a given starting motif.
Arguments:
o motif - A motif with the pattern we will start from.
        o motif_list - The total motifs we have to match to.
o num_ambiguous - The number of ambiguous characters that should
be present in the schema.
Returns:
o A string representing the newly generated schema.
o A list of all of the motifs in motif_list that match the schema.
"""
assert motif in motif_list, \
"Expected starting motif present in remaining motifs."
# convert random positions in the motif to ambiguous characters
# convert the motif into a list of characters so we can manipulate it
new_schema_list = list(motif)
for add_ambiguous in range(num_ambiguous):
# add an ambiguous position in a new place in the motif
while 1:
ambig_pos = random.choice(range(len(new_schema_list)))
# only add a position if it isn't already ambiguous
# otherwise, we'll try again
if new_schema_list[ambig_pos] != self._ambiguity_symbol:
new_schema_list[ambig_pos] = self._ambiguity_symbol
break
# convert the schema back to a string
new_schema = string.join(new_schema_list, '')
# get the motifs that the schema matches
matched_motifs = []
for motif in motif_list:
if matches_schema(motif, new_schema, self._ambiguity_symbol):
matched_motifs.append(motif)
return new_schema, matched_motifs
def from_signatures(self, signature_repository, num_ambiguous):
raise NotImplementedError("Still need to code this.")
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/NeuralNetwork/Gene/Schema.py
|
Python
|
apache-2.0
| 26,134
|
[
"Biopython"
] |
12fdf797ef5c7010f1d0ae43530385fa3832ffc3d5ba09dd3677f7ada53829d5
|
"""This module adds support to easily import and export NumPy
(http://numpy.scipy.org) arrays into/out of VTK arrays. The code is
loosely based on TVTK (https://svn.enthought.com/enthought/wiki/TVTK).
This code depends on an addition to the VTK data arrays made by Berk
Geveci to make it support Python's buffer protocol (on Feb. 15, 2008).
The main functionality of this module is provided by the two functions:
numpy_to_vtk,
vtk_to_numpy.
Caveats:
--------
- Bit arrays in general do not have a numpy equivalent and are not
supported. Char arrays are also not easy to handle and might not
work as you expect. Patches welcome.
- You need to make sure you hold a reference to a Numpy array you want
to import into VTK. If not you'll get a segfault (in the best case).
The same holds in reverse when you convert a VTK array to a numpy
array -- don't delete the VTK array.
Created by Prabhu Ramachandran in Feb. 2008.
"""
import vtk
import numpy
# Useful constants for VTK arrays.
VTK_ID_TYPE_SIZE = vtk.vtkIdTypeArray().GetDataTypeSize()
if VTK_ID_TYPE_SIZE == 4:
ID_TYPE_CODE = numpy.int32
elif VTK_ID_TYPE_SIZE == 8:
ID_TYPE_CODE = numpy.int64
VTK_LONG_TYPE_SIZE = vtk.vtkLongArray().GetDataTypeSize()
if VTK_LONG_TYPE_SIZE == 4:
LONG_TYPE_CODE = numpy.int32
ULONG_TYPE_CODE = numpy.uint32
elif VTK_LONG_TYPE_SIZE == 8:
LONG_TYPE_CODE = numpy.int64
ULONG_TYPE_CODE = numpy.uint64
def get_vtk_array_type(numpy_array_type):
"""Returns a VTK typecode given a numpy array."""
# This is a Mapping from numpy array types to VTK array types.
_np_vtk = {numpy.character:vtk.VTK_UNSIGNED_CHAR,
numpy.uint8:vtk.VTK_UNSIGNED_CHAR,
numpy.uint16:vtk.VTK_UNSIGNED_SHORT,
numpy.uint32:vtk.VTK_UNSIGNED_INT,
numpy.uint64:vtk.VTK_UNSIGNED_LONG_LONG,
numpy.int8:vtk.VTK_CHAR,
numpy.int16:vtk.VTK_SHORT,
numpy.int32:vtk.VTK_INT,
numpy.int64:vtk.VTK_LONG_LONG,
numpy.float32:vtk.VTK_FLOAT,
numpy.float64:vtk.VTK_DOUBLE,
numpy.complex64:vtk.VTK_FLOAT,
numpy.complex128:vtk.VTK_DOUBLE}
for key, vtk_type in _np_vtk.items():
if numpy_array_type == key or \
numpy.issubdtype(numpy_array_type, key) or \
numpy_array_type == numpy.dtype(key):
return vtk_type
raise TypeError(
'Could not find a suitable VTK type for %s' % (str(numpy_array_type)))
def get_vtk_to_numpy_typemap():
"""Returns the VTK array type to numpy array type mapping."""
_vtk_np = {vtk.VTK_BIT:numpy.bool,
vtk.VTK_CHAR:numpy.int8,
vtk.VTK_UNSIGNED_CHAR:numpy.uint8,
vtk.VTK_SHORT:numpy.int16,
vtk.VTK_UNSIGNED_SHORT:numpy.uint16,
vtk.VTK_INT:numpy.int32,
vtk.VTK_UNSIGNED_INT:numpy.uint32,
vtk.VTK_LONG:LONG_TYPE_CODE,
vtk.VTK_LONG_LONG:numpy.int64,
vtk.VTK_UNSIGNED_LONG:ULONG_TYPE_CODE,
vtk.VTK_UNSIGNED_LONG_LONG:numpy.uint64,
vtk.VTK_ID_TYPE:ID_TYPE_CODE,
vtk.VTK_FLOAT:numpy.float32,
vtk.VTK_DOUBLE:numpy.float64}
return _vtk_np
def get_numpy_array_type(vtk_array_type):
"""Returns a numpy array typecode given a VTK array type."""
return get_vtk_to_numpy_typemap()[vtk_array_type]
def create_vtk_array(vtk_arr_type):
"""Internal function used to create a VTK data array from another
VTK array given the VTK array type.
"""
return vtk.vtkDataArray.CreateDataArray(vtk_arr_type)
def numpy_to_vtk(num_array, deep=0, array_type=None):
"""Converts a contiguous real numpy Array to a VTK array object.
This function only works for real arrays that are contiguous.
Complex arrays are NOT handled. It also works for multi-component
arrays. However, only 1, and 2 dimensional arrays are supported.
This function is very efficient, so large arrays should not be a
problem.
If the second argument is set to 1, the array is deep-copied from
    numpy. This is not as efficient as the default behavior
(shallow copy) and uses more memory but detaches the two arrays
such that the numpy array can be released.
    WARNING: You must maintain a reference to the passed numpy array. If
    the numpy data is gc'd, VTK will point to garbage, which will in
    the best case give you a segfault.
Parameters
----------
- num_array : a contiguous 1D or 2D, real numpy array.
"""
z = numpy.asarray(num_array)
shape = z.shape
assert z.flags.contiguous, 'Only contiguous arrays are supported.'
assert len(shape) < 3, \
"Only arrays of dimensionality 2 or lower are allowed!"
assert not numpy.issubdtype(z.dtype, complex), \
"Complex numpy arrays cannot be converted to vtk arrays."\
"Use real() or imag() to get a component of the array before"\
" passing it to vtk."
# First create an array of the right type by using the typecode.
if array_type:
vtk_typecode = array_type
else:
vtk_typecode = get_vtk_array_type(z.dtype)
result_array = create_vtk_array(vtk_typecode)
    # Fix up the shape in case it's empty or scalar.
    if len(shape) == 0:
        shape = (0,)
# Find the shape and set number of components.
if len(shape) == 1:
result_array.SetNumberOfComponents(1)
else:
result_array.SetNumberOfComponents(shape[1])
result_array.SetNumberOfTuples(shape[0])
# Ravel the array appropriately.
arr_dtype = get_numpy_array_type(vtk_typecode)
if numpy.issubdtype(z.dtype, arr_dtype) or \
z.dtype == numpy.dtype(arr_dtype):
z_flat = numpy.ravel(z)
else:
z_flat = numpy.ravel(z).astype(arr_dtype)
# z_flat is now a standalone object with no references from the caller.
# As such, it will drop out of this scope and cause memory issues if we
# do not deep copy its data.
deep = 1
# Point the VTK array to the numpy data. The last argument (1)
# tells the array not to deallocate.
result_array.SetVoidArray(z_flat, len(z_flat), 1)
if deep:
copy = result_array.NewInstance()
copy.DeepCopy(result_array)
result_array = copy
return result_array
def numpy_to_vtkIdTypeArray(num_array, deep=0):
isize = vtk.vtkIdTypeArray().GetDataTypeSize()
dtype = num_array.dtype
if isize == 4:
if dtype != numpy.int32:
raise ValueError(
'Expecting a numpy.int32 array, got %s instead.' % (str(dtype)))
else:
if dtype != numpy.int64:
raise ValueError(
'Expecting a numpy.int64 array, got %s instead.' % (str(dtype)))
return numpy_to_vtk(num_array, deep, vtk.VTK_ID_TYPE)
def vtk_to_numpy(vtk_array):
"""Converts a VTK data array to a numpy array.
Given a subclass of vtkDataArray, this function returns an
appropriate numpy array containing the same data -- it actually
points to the same data.
WARNING: This does not work for bit arrays.
Parameters
----------
- vtk_array : `vtkDataArray`
The VTK data array to be converted.
"""
typ = vtk_array.GetDataType()
    assert typ in get_vtk_to_numpy_typemap().keys(), \
        "Unsupported array type %s" % typ
assert typ != vtk.VTK_BIT, 'Bit arrays are not supported.'
shape = vtk_array.GetNumberOfTuples(), \
vtk_array.GetNumberOfComponents()
# Get the data via the buffer interface
dtype = get_numpy_array_type(typ)
try:
result = numpy.frombuffer(vtk_array, dtype=dtype)
except ValueError:
# http://mail.scipy.org/pipermail/numpy-tickets/2011-August/005859.html
# numpy 1.5.1 (and maybe earlier) has a bug where if frombuffer is
# called with an empty buffer, it throws ValueError exception. This
# handles that issue.
if shape[0] == 0:
# create an empty array with the given shape.
result = numpy.empty(shape, dtype=dtype)
else:
raise
if shape[1] == 1:
shape = (shape[0], )
try:
result.shape = shape
except ValueError:
if shape[0] == 0:
# Refer to https://github.com/numpy/numpy/issues/2536 .
# For empty array, reshape fails. Create the empty array explicitly
# if that happens.
result = numpy.empty(shape, dtype=dtype)
else: raise
return result
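# Round-trip sketch (a minimal example, not part of the original module):
#   a = numpy.array([[1.0, 2.0], [3.0, 4.0]], dtype=numpy.float32)
#   va = numpy_to_vtk(a)   # vtkFloatArray with 2 tuples x 2 components
#   b = vtk_to_numpy(va)   # numpy array of shape (2, 2), dtype float32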
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Wrapping/Python/vtk/util/numpy_support.py
|
Python
|
bsd-3-clause
| 8,928
|
[
"VTK"
] |
b04829d049ac33fcede1d36612d043cdaaabde8be04789e96ff7d47c3b7cdba8
|
#!/usr/bin/env python
################################################################################
#
# qe_extractor.py
#
# Pulls all sorts of information from a QE output file and writes to standard
# output, e.g. the command "qe_extractor.py INPUTFILE homo" uses the number of
# electrons and the output KS eigenvalues to print the KS homo.
#
################################################################################
#
# Copyright 2015 Kane O'Donnell
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
#
# NOTES
#
# 1. A list of allowed commands appears early in the code below. Multiple commands
# lead to multiple output lines in the same order.
#
# 2. Output is simply "printed" (to stdout), so redirect to a file or a variable
# if you need the value.
#
# 3. The output isn't "safe", e.g. the code will give you a homo if you ask for
# one even if the input is a metal. That's by design, because the target use
# for my own work considers cases where smearing has been used for an insulating
# system to help convergence (small bandgap) but the homo and lumo are needed.
# Quantum Espresso is pretty silly about this case (again, by design) and only
# reports the (non-physical) fermi level.
#
# 4. Energy outputs are in eV, because Ry and Hartree are insane.
#
# 5. The output isn't (yet) clever about geometry steps - you might get an output for
# every single SCF cycle, or you might not, depending on the command.
#
################################################################################
from __future__ import division
import argparse
import sys
import os.path
from math import floor
Ry2eV = 13.605698066
SMALL = 1.0e-6 # small floating point number for equality comparisons
DEBUG = 0
valid_commands = ["homo", \
"lumo", \
"num_atoms", \
"num_electrons", \
"num_bands", \
"num_kpoints", \
"total_energy", \
"total_ae_energy", \
"efermi" ]
def get_eigs_from_string(str):
""" This is string.split() tweaked to address a bug in Quantum Espresso's formatted fortran
output where sometimes it prints two negative floats without a space e.g. -130.3940-120.6023.
In these cases, split() doesn't work directly. """
eigs = []
bits = str.split()
for b in bits:
if b != '':
try:
tmpf = float(b)
eigs.append(tmpf)
except ValueError:
negs = b.split('-')
eigs += [-1 * float(c) for c in negs if c != ''] # This is a bit obscure I know...
return eigs
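# Illustrative example (added): the helper above copes with the QE formatting
# quirk where two negative eigenvalues are printed with no separating space, e.g.
#   get_eigs_from_string("  -130.3940-120.6023   5.2317")
# returns [-130.394, -120.6023, 5.2317]. Values that parse as ordinary floats
# are passed through unchanged.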
parser = argparse.ArgumentParser(description="Extract information from QE(PWSCF) output file and print to stdout.")
parser.add_argument('inputfile', help="Quantum Espresso pw.x output file for input.")
parser.add_argument('commands', nargs="+", help="Parameters to be extracted.")
args = parser.parse_args()
# Check we have valid commands
for c in args.commands:
if c not in valid_commands:
print "ERROR: command %s is not valid, see source file for a list of possible commands." % (c)
# Some of the commands are easy, others require more complex parsing. Deal with all the easy ones
# first.
f = open(args.inputfile, 'r')
lines = f.readlines()
f.close()
found_fermi = False
found_homo = False
found_lumo = False
found_ae_energy = False
output_text = {}
for l in lines:
if "number of atoms/cell =" in l:
if "num_atoms" in args.commands:
output_text["num_atoms"] = l.split()[4]
if "number of electrons =" in l:
if "num_electrons" in args.commands:
output_text["num_electrons"] = l.split()[4]
nelec = float(l.split()[4])
if "number of Kohn-Sham states=" in l:
if "num_bands" in args.commands:
output_text["num_bands"] = l.split()[4]
nband = int(l.split()[4])
if "number of k points=" in l:
if "num_kpoints" in args.commands:
output_text["num_kpoints"] = l.split()[4]
nkpt = int(l.split()[4])
if "! total energy =" in l:
if "total_energy" in args.commands:
output_text["total_energy"] = float(l.split()[4]) * Ry2eV
if "total all-electron energy =" in l:
if "total_ae_energy" in args.commands:
output_text["total_ae_energy"] = float(l.split()[4]) * Ry2eV
found_ae_energy = True
if "highest occupied, lowest unoccupied level (ev):" in l:
# This might not be present - opportunistic!
homo = float(l.split()[6])
found_homo = True
lumo = float(l.split()[7])
found_lumo = True
if "homo" in args.commands:
output_text["homo"] = homo
if "lumo" in args.commands:
output_text["lumo"] = lumo
if "the Fermi energy is" in l:
efermi = float(l.split()[4])
found_fermi = True
if "efermi" in args.commands:
output_text["efermi"] = efermi
if "highest occupied level (ev):" in l:
homo = float(l.split()[4])
found_homo = True
if "homo" in args.commands:
output_text["homo"] = homo
# If PAW potentials aren't used, an all-electron energy won't be reported so if the user
# asked for one, give an error.
if "total_ae_energy" in args.commands and not found_ae_energy:
print "ERROR - All-electron energy not found. Check calculation used PAW and that it finished correctly."
# Ok, now for the slightly trickier ones - homo, lumo and fermi_level. First, QE might actually
# give us values, which we picked up earlier. If not, we need to do a bit more work.
if ("homo" in args.commands and found_homo is False) or \
("lumo" in args.commands and found_lumo is False) or \
("efermi" in args.commands and found_fermi is False):
# Lots of things to worry about here and we have to loop a lot. For performance, find
# the important section of the file.
for i,l in enumerate(lines):
if "End of self-consistent calculation" in l:
istart = i
if "convergence has been achieved in" in l:
iend = i
has_spin = False
for i,l in enumerate(lines[istart:iend]):
if "SPIN UP" in l:
has_spin = True
istart = i
if "SPIN DOWN" in l:
iend = i
# Find the k-point block locations
ks = []
for i,l in enumerate(lines[istart:iend]):
if "k =" in l:
if DEBUG:
print l
ks.append(i+istart + 1)
if DEBUG:
print "K-point indices are:"
print ks
# Add the iend value to act as an endpoint for the
# eigenvalue search.
ks.append(iend)
# For each k, look for eigenvalues until we have enough.
eigsk = []
for i in range(len(ks)-1):
eigs = []
for l in lines[ks[i]+1:ks[i+1]]:
# Now - pay attention! There is a bug in the output of espresso that means split() might
# not work here (two negative eigenvalues can be printed with no separating space).
# This is handled by the get_eigs_from_string() helper defined at the top of the file.
eigs += get_eigs_from_string(l)
if DEBUG:
print "Current length of eigs is %d, num_bands is %d." %(len(eigs), nband)
if len(eigs) == nband:
eigsk.append(eigs)
break
# Now, use number of electrons to figure out where the homo is.
max_occ = -1e6
min_unocc = 1e6
idx_homo = int(floor(nelec / 2)) - 1 # The -1 is because we have 0-based indices in python.
for ek in eigsk:
if ek[idx_homo] > max_occ:
max_occ = ek[idx_homo]
if ek[idx_homo + 1] < min_unocc:
min_unocc = ek[idx_homo + 1]
if not found_homo:
homo = max_occ
output_text["homo"] = homo
if not found_lumo:
lumo = min_unocc
output_text["lumo"] = lumo
if not found_fermi:
efermi = (homo + lumo) / 2
output_text["efermi"] = efermi
# Print output in the order requested.
for c in args.commands:
print output_text[c]
|
kaneod/physics
|
python/qe_extractor.py
|
Python
|
gpl-3.0
| 8,488
|
[
"ESPResSo",
"Quantum ESPRESSO"
] |
6bcae61c994939915867671d118ee729cde78b72206d9295a23535f671f0d8b9
|
import numpy as np
from astropy.io import fits
from astropy.io import ascii
from glob import glob
import pdb
import os
def collate(path, jobnum, name, destination, optthin=0, clob=0, high=0, noextinct = 0, noangle = 0, nowall = 0, nophot = 0, noscatt = 1):
"""
collate.py
PURPOSE:
Organizes and stores flux and parameters from the D'Alessio
disk/optically thin dust models and jobfiles in a fits
file with a header.
CALLING SEQUENCE:
collate(path, jobnum, name, destination, [optthin=1], [clob=1], [high = 1], [noextinct = 1], [noangle = 1], [nowall = 1], [nophot = 1], [noscatt = 0])
INPUTS:
path: String of with path to location of jobfiles and model result
files. Both MUST be in the same location!
jobnum: String or integer giving the job number used at the end of the label.
name: String of the name of the object
destination: String of the path where you want the fits file to be
sent after it's made
OPTIONAL KEYWORDS
optthin: Set this value to 1 (or True) to run the optically thin dust
version of collate instead of the normal disk code. This will
also place a tag in the header.
clob: Set this value to 1 (or True) to overwrite a currently existing
fits file from a previous run.
high: Set this value to 1 (or True) if your job number is 4 digits long.
nowall: Set this value to 1 (or True) if you do NOT want to include a wall file
noangle: Set this value to 1 (or True) if you do NOT want to include a disk file
NOTE: You cannot perform the self-extinction correction without the angle file. If this is set to 1, then
the noextinct keyword will also be set to 1 automatically.
nophot: Set this value to 1 (or True) if you do NOT want to include a photosphere file
noextinct: Set this value to 1 (or True) if you do NOT want to apply extinction
to the inner wall and photosphere.
noscatt: !!!!! NOTE: THIS IS SET TO 1 BY DEFAULT !!!!!
Set this value to 1 (or True) if you do NOT want to include the scattered light file.
Set this value to 0 (or False) if you DO want to include the scattered light file
EXAMPLES:
To collate a single model run for the object 'myobject' under the
job number '001', use the following commands:
from collate import collate
path = 'Some/path/on/the/cluster/where/your/model/file/is/located/'
name = 'myobject'
dest = 'where/I/want/my/collated/file/to/go/'
modelnum = 1
collate(path, modelnum, name, dest)
Note that:
modelnum = '001' will also work.
collate.py cannot handle multiple models at once, and currently needs to be
run in a loop. An example run with 100 optically thin dust models would
look something like this:
from collate import collate
path = 'Some/path/on/the/cluster/where/your/model/files/are/located/'
name = 'myobject'
dest = 'where/I/want/my/collated/files/to/go/'
for i in range(100):
collate(path, i+1, name, dest, optthin = 1)
NOTES:
For the most current version of collate and EDGE, please visit the github repository:
https://github.com/danfeldman90/EDGE
Collate corrects the flux from the star and the inner wall for extinction from
the outer disk.
Label ends for model results should be of the form objectname_001,
For disk models, the job file name convention is job001
For optically thin dust, the job file name convention is job_optthin001
amax in the optthin model did not originally have an s after it. It is changed in
the header file to have the s to be consistent with the disk models.
MODIFICATION HISTORY
Connor Robinson, 12, Nov, 2015, Added parsing for MDOTSTAR in edge
Connor Robinson, 6 Aug 2015, Added error handling, the FAILED key in the header, and the failCheck and head functions
Connor Robinson, 30 July 2015, Added scattered light + ability to turn off components of the model
Connor Robinson, 24 July 2015, Added extinction from the outer disk + flag to turn it off
Connor Robinson, 23 July 2015, Updated documentation and added usage examples
Dan Feldman, 19 July 2015, added numCheck() and high kwarg to handle integer jobnums
Dan Feldman, 25 June 2015, Improved readability.
Connor Robinson, Dan Feldman, 24 June 2015, Finished all current functionality for use
Connor Robinson 26 May 2015, Began work on optically thin disk code
Connor Robinson, Dan Feldman, 22 May 2015, Wrote disk code in python
Connor Robinson 3, Mar, 2015, Added the /nounderscore and /photnum flags
Connor Robinson 6 Nov, 2014 First version uploaded to cluster
"""
# Convert jobnum into a string:
if type(jobnum) == int:
jobnum = numCheck(jobnum, high=high)
# If working with optically thin models
if optthin:
#Read in file
job = 'job_optthin'+jobnum
try:
f = open(path+job, 'r')
except IOError:
print('MISSING JOB NUMBER '+jobnum+', RETURNING...')
return
jobf = f.read()
f.close()
#Define what variables to record
sdparam = (['TSTAR', 'RSTAR', 'DISTANCIA', 'MUI', 'ROUT', 'RIN', 'TAUMIN', 'POWER',
'FUDGEORG', 'FUDGETROI', 'FRACSIL', 'FRACENT', 'FRACFORST', 'FRACAMC',
'AMAXS'])
dparam = np.zeros(len(sdparam), dtype = float)
#Read in the data associated with this model
dataarr = np.array([])
file = glob(path+'fort16*'+name+'*'+jobnum)
failed = 0
size = 0
miss = 0
try:
size = os.path.getsize(file[0])
except IndexError:
print("WARNING IN JOB "+jobnum+": MISSING FORT16 FILE (OPTICALLY THIN DUST MODEL), ADDED 'FAILED' TAG TO HEADER")
failed = True
miss = 1
if miss != 1 and size == 0:
print("WARNING IN JOB "+jobnum+": EMPTY FORT16 FILE (OPTICALLY THIN DUST MODEL), ADDED FAILED TAG TO HEADER")
failed = True
if failed == False:
data = ascii.read(file[0])
#Combine data into a single array to be consistent with the previous version of collate
if size !=0:
dataarr = np.concatenate((dataarr, data['col1']))
dataarr = np.concatenate((dataarr, data['col3']))
#If the file is missing/empty, add an empty array to collated file
if failed != 0:
dataarr = np.array([])
#Convert anything that can't be read as a float into a nan
tempdata = np.zeros(len(dataarr))
floaterr = 0
if failed == 0:
for i, value in enumerate(dataarr):
try:
tempdata[i] = float(dataarr[i]) #dataarr[i].astype(float)
except ValueError:
floaterr = 1
tempdata[i] = float('nan')
if floaterr == 1:
print('WARNING IN JOB '+jobnum+': FILES CONTAIN FLOAT OVERFLOW/UNDERFLOW ERRORS, THESE VALUES HAVE BEEN SET TO NAN')
axis_count = 2; #One axis for flux, one for wavelength
dataarr = np.reshape(tempdata, (axis_count, len(tempdata)/axis_count))
#Make an HDU object to contain header/data
hdu = fits.PrimaryHDU(dataarr)
#Parse variables according to convention in job file
for ind, param in enumerate(sdparam):
#Handles the case of AMAXS which is formatted slightly differently
if param == 'AMAXS':
for num in range(10):
if jobf.split("lamax='amax")[num].split("\n")[-1][0] == 's':
samax = jobf.split("lamax='amax")[num+1].split("'")[0]
if samax == '1mm':
hdu.header.set(param, 1000.)
else:
hdu.header.set(param, float(samax.replace('p', '.')))
#Handle the rest of the variables
else:
paramold = param
if param == 'DISTANCIA':
param = 'DISTANCE' #Reduce the amount of Spanish here
elif param == 'FUDGETROI':
param = 'FUDGETRO'
elif param == 'FRACFORST':
param = 'FRACFORS'
hdu.header.set(param, float(jobf.split("set "+paramold+"='")[1].split("'")[0]))
hdu.header.set('OBJNAME', name)
hdu.header.set('JOBNUM', jobnum)
hdu.header.set('OPTTHIN', 1)
hdu.header.set('WLAXIS', 0)
hdu.header.set('LFLAXIS',1)
if failed == 1:
hdu.header.set('Failed', 1)
hdu.writeto(destination+name+'_OTD_'+jobnum+'.fits', clobber = clob)
if nowall == 1 or noangle == 1 or nophot == 1:
print("WARNING IN JOB "+jobnum+": KEYWORDS THAT HAVE NO AFFECT ON OPTICALLY THIN DUST HAVE BEEN USED (NOPHOT, NOWALL, NOANGLE)")
# If working with job models start here
elif optthin == 0 or optthin == 'False':
#read in file
job = 'job'+jobnum
try:
f = open(path+job, 'r')
except IOError:
print('MISSING JOB FILE '+jobnum+', RETURNING...')
return
jobf = f.read()
f.close()
#Check to see if the name + jobnum matches up with the labelend; if it doesn't, return
labelend = jobf.split("set labelend='")[1].split("'")[0]
if labelend != name+'_'+jobnum:
print('NAME IS NOT THE SAME AS THE NAME IN JOB '+jobnum+' LABELEND: '+labelend+', RETURNING...')
return
#Define what variables to record
sparam = (['MSTAR', 'TSTAR', 'RSTAR', 'DISTANCIA','MDOT', 'MDOTSTAR','ALPHA', 'MUI', 'RDISK',
'AMAXS', 'EPS', 'WLCUT_ANGLE', 'WLCUT_SCATT', 'NSILCOMPOUNDS', 'SILTOTABUN',
'AMORPFRAC_OLIVINE', 'AMORPFRAC_PYROXENE', 'FORSTERITE_FRAC', 'ENSTATITE_FRAC',
'TEMP', 'ALTINH', 'TSHOCK'])
dparam = np.zeros(len(sparam), dtype = float)
#Parse variables according to convention in the job file
for ind, param in enumerate(sparam):
if param == 'AMAXS':
num_amax = 10 #Number of choices for AMAX, including the case where amax can be 1mm (1000 microns)
for num in range(num_amax):
if jobf.split("AMAXS='")[num+1].split("\n")[1][0] == '#':
continue
elif jobf.split("AMAXS='")[num+1].split("\n")[1][0] == 's':
dparam[ind] = float(jobf.split(param+"='")[num+1].split("'")[0])
elif dparam[ind] == 0. and num == num_amax-1:
dparam[ind] = 1000. #HANDLES THE CASE THAT MM SIZED DUST GRAINS EXIST IN JOBFILE
elif param == 'EPS':
for num in range(7):
if jobf.split("EPS='")[num+1].split("\n")[1][0] == '#' and num != 7:
continue
elif jobf.split("EPS='")[num+1].split("\n")[1][0] == 's':
dparam[ind] = float(jobf.split(param+"='")[num+1].split("'")[0])
else:
raise IOError('COLLATE FAILED ON EPSILON VALUE. FIX JOB FILE '+jobnum)
elif param == 'TEMP' or param == 'TSHOCK':
try:
dparam[ind] = float(jobf.split(param+"=")[1].split(".")[0])
except ValueError:
raise ValueError('COLLATE: MISSING . AFTER '+param+' VALUE, GO FIX IN JOB FILE ' +jobnum)
elif param == 'ALTINH':
try:
dparam[ind] = float(jobf.split(param+"=")[1].split(" ")[0])
except ValueError:
raise ValueError('COLLATE MISSING SPACE [ ] AFTER ALTINH VALUE, GO FIX IN JOB FILE '+jobnum)
elif param == 'MDOTSTAR':
#MDOTSTAR is often set to $MDOT, but could also be set to a number
#If it is the same as MDOT/not there, grab the value of MDOT
try:
#Parse by " MDOTSTAR=' ", if it's a value will pick it out, if it's not there/$MDOT will throw value error.
dparam[ind] = float(jobf.split(param+"='")[1].split("'")[0])
except IndexError:
dparam[ind] = dparam[sparam.index("MDOT")]
try:
nomdotstar = jobf.split(param+"=")[1]
except IndexError:
print('WARNING IN JOB '+jobnum+ ': NO VALUE FOR MDOTSTAR IN JOBFILE, ASSUMING MDOTSTAR = MDOT')
else:
dparam[ind] = float(jobf.split(param+"='")[1].split("'")[0])
#Rename header labels that are too long
sparam[sparam.index('AMORPFRAC_OLIVINE')] = 'AMORF_OL'
sparam[sparam.index('AMORPFRAC_PYROXENE')] = 'AMORF_PY'
sparam[sparam.index('WLCUT_ANGLE')] = 'WLCUT_AN'
sparam[sparam.index('WLCUT_SCATT')] = 'WLCUT_SC'
sparam[sparam.index('NSILCOMPOUNDS')] = 'NSILCOMP'
sparam[sparam.index('SILTOTABUN')] = 'SILTOTAB'
sparam[sparam.index('FORSTERITE_FRAC')] = 'FORSTERI'
sparam[sparam.index('ENSTATITE_FRAC')] = 'ENSTATIT'
#Reduce the amount of Spanish here
sparam[sparam.index('DISTANCIA')] = 'DISTANCE'
#Read in data from outputs (if the no____ flags are not set)
#set up empty array to accept data, column names and axis number
dataarr = np.array([])
axis = {'WLAXIS':0}
axis_count = 1 #Starts at 1, axis 0 reserved for wavelength information
#Read in arrays and manage axis information
#Also handles errors for missing/empty files
failed = False;
size = 0
miss = 0
if nophot == 0:
photfile = glob(path+'Phot*'+jobnum)
try:
size = os.path.getsize(photfile[0])
except IndexError:
print("WARNING IN JOB "+jobnum+": MISSING PHOTOSPHERE FILE, ADDED 'FAILED' TAG TO HEADER. NOPHOT SET TO 1")
nophot = 1
failed = True
miss = 1
if miss != 1 and size != 0:
phot = ascii.read(photfile[0])
axis['PHOTAXIS'] = axis_count
dataarr = np.concatenate((dataarr, phot['col1']))
dataarr = np.concatenate((dataarr, phot['col2']))
axis_count += 1
elif miss != 1 and size == 0:
print("WARNING IN JOB "+jobnum+": PHOT FILE EMPTY, ADDED 'FAILED' TAG TO HEADER. NOPHOT SET TO 1")
nophot = 1
failed = True
elif nophot != 1 and nophot != 0:
raise IOError('COLLATE: INVALID INPUT FOR NOPHOT KEYWORD, SHOULD BE 1 OR 0')
size = 0
miss = 0
if nowall == 0:
wallfile = glob(path+'fort17*'+name+'_'+jobnum)
try:
size = os.path.getsize(wallfile[0])
except IndexError:
print("WARNING IN JOB "+jobnum+": MISSING FORT17 (WALL) FILE, ADDED 'FAILED' TAG TO HEADER. NOWALL SET TO 1")
nowall = 1
failed = True
miss = 1
if miss != 1 and size != 0:
wall = ascii.read(wallfile[0], data_start = 9)
axis['WALLAXIS'] = axis_count
#If the photosphere was not run, then grab wavelength information from wall file
if nophot != 0:
dataarr = np.concatenate((dataarr, wall['col1']))
dataarr = np.concatenate((dataarr, wall['col2']))
axis_count += 1
elif miss != 1 and size == 0:
print("WARNING IN JOB "+jobnum+": FORT17 (WALL) FILE EMPTY, ADDED 'FAILED' TAG TO HEADER. NOWALL SET TO 1")
failed = True
nowall = 1
elif nowall != 1 and nowall != 0:
raise IOError('COLLATE: INVALID INPUT FOR NOWALL KEYWORD, SHOULD BE 1 OR 0')
miss = 0
size = 0
if noangle == 0:
anglefile = glob(path+'angle*'+name+'_'+jobnum+'*')
try:
size = os.path.getsize(anglefile[0])
except IndexError:
print("WARNING IN JOB "+jobnum+": MISSING ANGLE (DISK) FILE, ADDED 'FAILED' TAG TO HEADER. NOANGLE SET TO 1")
noangle = 1
failed = True
miss = 1
if miss != 1 and size != 0:
angle = ascii.read(anglefile[0], data_start = 1)
axis['ANGAXIS'] = axis_count
#If the photosphere was not run, and the wall was not run then grab wavelength information from angle file
if nophot != 0 and nowall != 0:
dataarr = np.concatenate((dataarr, angle['col1']))
dataarr = np.concatenate((dataarr, angle['col4']))
axis_count += 1
elif miss != 1 and size == 0:
print("WARNING IN JOB "+jobnum+": ANGLE (DISK) FILE EMPTY, ADDED 'FAILED' TAG TO HEADER. NOANGLE SET TO 1")
failed = True
noangle = 1
elif noangle != 1 and noangle != 0:
raise IOError('COLLATE: INVALID INPUT FOR NOANGLE KEYWORD, SHOULD BE 1 OR 0')
miss = 0
size = 0
if noscatt == 0:
scattfile = glob(path+'scatt*'+name+'_'+jobnum+'*')
try:
size = os.path.getsize(scattfile[0])
except IndexError:
print("WARNING IN JOB "+jobnum+": MISSING SCATT FILE, ADDED 'FAILED' TAG TO HEADER. NOSCATT SET TO 1")
noscatt = 1
failed = True
miss = 1
if miss != 1 and size > 100:
scatt = ascii.read(scattfile[0], data_start = 1)
axis['SCATAXIS'] = axis_count
#If the photosphere, wall and disk were not run, then grab wavelength information from scatt file
if nophot != 0 and nowall != 0 and noangle != 0:
dataarr = np.concatenate((dataarr, scatt['col1']))
dataarr = np.concatenate((dataarr, scatt['col4']))
axis_count += 1
elif miss != 1 and size == 0 or miss != 1 and size < 100:
print("WARNING IN JOB "+jobnum+": SCATT FILE EMPTY, ADDED 'FAILED' TAG TO HEADER. NOSCATT SET TO 1")
failed = True
noscatt = 1
elif noscatt != 1 and noscatt != 0:
raise IOError('COLLATE: INVALID INPUT FOR NOSCATT KEYWORD, SHOULD BE 1 OR 0')
if noextinct == 0:
if noangle != 0:
print("WARNING IN JOB "+jobnum+": ANGLE (DISK) FILE "+jobnum+" REQUIRED FOR EXTINCTION FROM DISK. ADDED 'FAILED' TAG TO HEADER, NOEXTINCT SET TO 1")
failed = 1
noextinct = 1
else:
dataarr = np.concatenate((dataarr, angle['col6']))
axis['EXTAXIS'] = axis_count
axis_count += 1
elif noextinct != 1 and noextinct != 0:
raise IOError('COLLATE: INVALID INPUT FOR NOANGLE KEYWORD, SHOULD BE 1 OR 0')
#if data has values that overflow/underflow float type, replace them with NaN dataarr = tempdata
tempdata = np.zeros(len(dataarr))
floaterr = 0
for i, value in enumerate(dataarr):
try:
tempdata[i] = float(dataarr[i]) #dataarr[i].astype(float)
except ValueError:
floaterr = 1
tempdata[i] = float('nan')
if floaterr == 1:
print('WARNING IN JOB '+jobnum+': FILES CONTAIN FLOAT OVERFLOW/UNDERFLOW ERRORS, THESE VALUES HAVE BEEN SET TO NAN')
dataarr = tempdata
#Put data array into the standard form for EDGE
dataarr = np.reshape(dataarr, (axis_count, len(dataarr)/axis_count))
if noextinct == 0:
if nophot == 0:
dataarr[axis['PHOTAXIS'],:] *=np.exp((-1)*dataarr[axis['EXTAXIS'],:])
if nowall == 0:
dataarr[axis['WALLAXIS'],:] *=np.exp((-1)*dataarr[axis['EXTAXIS'],:])
#Create the header and add parameters
hdu = fits.PrimaryHDU(dataarr)
#Add a few misc tags to the header
hdu.header.set('OBJNAME', name)
hdu.header.set('JOBNUM', jobnum)
for i, param in enumerate(sparam):
hdu.header.set(param, dparam[i])
if nowall != 1:
hdu.header.set('RIN', float(np.loadtxt(glob(path+'rin*'+name+'_'+jobnum)[0])))
#Create tags in the header that match up each column to the data enclosed]
for naxis in axis:
hdu.header.set(naxis, axis[naxis])
#Add a tag to the header if the noextinct flag is on
if noextinct == 1:
hdu.header.set('NOEXT', 1)
#Add FAILED tag to header if any of the model elements were not found
if failed == 1:
hdu.header.set('FAILED', 1)
#Write header to fits file
hdu.writeto(destination+name+'_'+jobnum+'.fits', clobber = clob)
# If you don't give a valid input for the optthin keyword, raise an error
else:
raise IOError('COLLATE: INVALID INPUT FOR OPTTHIN KEYWORD, SHOULD BE 1 OR 0')
return
def numCheck(num, high=0):
"""
Takes a number between 0 and 9999 and converts it into a 3 or 4 digit string. E.g., 2 --> '002', 12 --> '012'
INPUT
num: A number between 0 and 9999. If this is a float, it will still work, but it will chop off the decimal.
high: BOOLEAN -- if True (1), output is forced to be a 4 digit string regardless of the number.
OUTPUT
numstr: A string of 3 or 4 digits, where leading zeroes fill in any spaces.
"""
if num > 9999 or num < 0:
raise ValueError('Number too small/large for string handling!')
if num > 999 or high == 1:
numstr = '%04d' % num
else:
numstr = '%03d' % num
return numstr
def failCheck(name, path = '', jobnum = 'all', high = 0, optthin = 0):
"""
Opens up each header, checks if 'FAILED' tag = 1 and records the job number in a list if it is
INPUTS:
name: String of the name of object
OPTIONAL INPUTS:
path: Path to the collated file. Default is the current directory
jobnum: Job number of object. Can be either a string or an int. If it's not set, failCheck
will return ALL collated jobs that failed in the path directory
KEYWORDS:
optthin: Set this to 1 if the collated file is an optically thin dust file
high: Set this to 1 if the jobnum has 4 digits.
OUTPUT
Returns a list of failed jobs. If none are found, array will be empty.
"""
opt = ''
if optthin == 1:
opt = 'OTD_'
#Set up wildcards depending on number formatting
if high == 0:
wildhigh = '???'
if high == 1:
wildhigh = '????'
if jobnum == 'all':
if optthin == 1:
files = glob(path+name+'_'+opt+'*.fits')
if optthin == 0:
files = glob(path+name+'_'+wildhigh+'.fits')
failed = []
for file in files:
HDU = fits.open(file)
nofail = 0
try:
HDU[0].header['Failed'] == 1
except KeyError:
nofail = 1
if nofail != 1:
failed.append(file)
if jobnum != 'all':
if type(jobnum) == int:
jobnum = numCheck(jobnum, high = high)
failed = []
nofail = 0
file = glob(path+name+'_'+opt+jobnum+'.fits')
try:
HDU = fits.open(file[0])
except IndexError:
print('NO FILE MATCHING THOSE CRITERIA COULD BE FOUND, RETURNING...')
return
try:
HDU[0].header['Failed'] == 1
except KeyError:
nofail = 1
if nofail != 1:
failed = [file[0]]
return failed
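# Illustrative usage (added): list every collated optically thin model of
# 'myobject' in the current directory whose header carries the FAILED tag:
#
#   bad = failCheck('myobject', path='./', optthin=1)
#
# bad is a list of matching file names, e.g. ['./myobject_OTD_004.fits'].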
def head(name, jobnum, path='', optthin = 0, high = 0):
"""
prints out the contents of the header of a collated file
INPUTS:
name: String of the name of object
jobnum: Job number of object. Can be either a string or an int
OPTIONAL INPUTS:
path: Path to the collated file. Default is the current directory
KEYWORDS:
optthin: Set this to 1 if the collated file is an optically thin dust file
high: Set this to 1 if the jobnum has 4 digits.
OUTPUTS:
Prints the contents of the header to the terminal. Returns nothing else.
"""
if type(jobnum) == int:
jobnum = numCheck(jobnum, high = high)
if optthin == 1:
otd = 'OTD_'
else:
otd = ''
file = path+name+'_'+otd+jobnum+'.fits'
HDU = fits.open(file)
print(repr(HDU[0].header))
|
danfeldman90/EDGE
|
collate.py
|
Python
|
mit
| 28,085
|
[
"VisIt"
] |
3b675186035fe616a62d831e6dde446c9594379dcdacb34619fab208ffdec0ee
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Blat(Package):
"""BLAT (BLAST-like alignment tool) is a pairwise sequence
alignment algorithm."""
homepage = "https://genome.ucsc.edu/FAQ/FAQblat.html"
url = "https://users.soe.ucsc.edu/~kent/src/blatSrc35.zip"
version('35', sha256='06d9bcf114ec4a4b21fef0540a0532556b6602322a5a2b33f159dc939ae53620')
depends_on('libpng')
def setup_build_environment(self, env):
env.set('MACHTYPE', 'x86_64')
def install(self, spec, prefix):
filter_file('CC=.*', 'CC={0}'.format(spack_cc), 'inc/common.mk')
mkdirp(prefix.bin)
make("BINDIR=%s" % prefix.bin)
|
iulian787/spack
|
var/spack/repos/builtin/packages/blat/package.py
|
Python
|
lgpl-2.1
| 842
|
[
"BLAST"
] |
8009f3d99d50afff6dc3f9bd3ce7221dabf809af4e9f3caf1da461bf6d68f81f
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
Created on Jun 9, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jun 9, 2012"
import unittest
import os
from pymatgen.matproj.rest import MPRester, MPRestError
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure, Composition
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
from pymatgen.phasediagram.pdmaker import PhaseDiagram
from pymatgen.phasediagram.pdanalyzer import PDAnalyzer
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
@unittest.skipIf("MAPI_KEY" not in os.environ,
"MAPI_KEY environment variable not set.")
class MPResterTest(unittest.TestCase):
def setUp(self):
self.rester = MPRester()
def test_get_data(self):
props = ["energy", "energy_per_atom", "formation_energy_per_atom",
"nsites", "unit_cell_formula", "pretty_formula", "is_hubbard",
"elements", "nelements", "e_above_hull", "hubbards",
"is_compatible", "task_ids",
"density", "icsd_ids", "total_magnetization"]
# unicode literals (u'...') were reintroduced in Python 3.3
expected_vals = [-191.33812137, -6.833504334642858, -2.551358929370749,
28, {k: v for k, v in {'P': 4, 'Fe': 4, 'O': 16, 'Li': 4}.items()},
"LiFePO4", True, ['Li', 'O', 'P', 'Fe'], 4, 0.0,
{k: v for k, v in {'Fe': 5.3, 'Li': 0.0, 'O': 0.0, 'P': 0.0}.items()}, True,
['mp-540081', 'mp-601412', 'mp-19017'],
3.4662026991351147,
[159107, 154117, 160776, 99860, 181272, 166815,
260571, 92198, 165000, 155580, 38209, 161479, 153699,
260569, 260570, 200155, 260572, 181341, 181342,
72545, 56291, 97764, 162282, 155635],
16.0002716]
for (i, prop) in enumerate(props):
if prop not in ['hubbards', 'unit_cell_formula', 'elements',
'icsd_ids', 'task_ids']:
val = self.rester.get_data("mp-19017", prop=prop)[0][prop]
self.assertAlmostEqual(expected_vals[i], val)
elif prop in ["elements", "icsd_ids", "task_ids"]:
self.assertEqual(set(expected_vals[i]),
set(self.rester.get_data("mp-19017",
prop=prop)[0][prop]))
else:
self.assertEqual(expected_vals[i],
self.rester.get_data("mp-19017",
prop=prop)[0][prop])
props = ['structure', 'initial_structure', 'final_structure', 'entry']
for prop in props:
obj = self.rester.get_data("mp-19017", prop=prop)[0][prop]
if prop.endswith("structure"):
self.assertIsInstance(obj, Structure)
elif prop == "entry":
obj = self.rester.get_data("mp-19017", prop=prop)[0][prop]
self.assertIsInstance(obj, ComputedEntry)
#Test chemsys search
data = self.rester.get_data('Fe-Li-O', prop='unit_cell_formula')
self.assertTrue(len(data) > 1)
elements = {Element("Li"), Element("Fe"), Element("O")}
for d in data:
self.assertTrue(
set(Composition(d['unit_cell_formula']).elements).issubset(
elements))
self.assertRaises(MPRestError, self.rester.get_data, "Fe2O3",
"badmethod")
def test_get_materials_id_from_task_id(self):
self.assertEqual(self.rester.get_materials_id_from_task_id(
"mp-540081"), "mp-19017")
def test_get_entries_in_chemsys(self):
syms = ["Li", "Fe", "O"]
all_entries = self.rester.get_entries_in_chemsys(syms, False)
entries = self.rester.get_entries_in_chemsys(syms)
self.assertTrue(len(entries) <= len(all_entries))
elements = set([Element(sym) for sym in syms])
for e in entries:
self.assertIsInstance(e, ComputedEntry)
self.assertTrue(set(e.composition.elements).issubset(elements))
def test_get_structure_by_material_id(self):
s1 = self.rester.get_structure_by_material_id("mp-1")
self.assertEqual(s1.formula, "Cs1")
def test_get_entry_by_material_id(self):
e = self.rester.get_entry_by_material_id("mp-19017")
self.assertIsInstance(e, ComputedEntry)
self.assertEqual(e.composition.reduced_formula, "LiFePO4")
def test_query(self):
criteria = {'elements': {'$in': ['Li', 'Na', 'K'], '$all': ['O']}}
props = ['pretty_formula', 'energy']
data = self.rester.query(criteria=criteria, properties=props)
self.assertTrue(len(data) > 6)
data = self.rester.query(criteria="*2O", properties=props)
self.assertGreaterEqual(len(data), 52)
self.assertIn("Li2O", (d["pretty_formula"] for d in data))
def test_get_exp_thermo_data(self):
data = self.rester.get_exp_thermo_data("Fe2O3")
self.assertTrue(len(data) > 0)
for d in data:
self.assertEqual(d.formula, "Fe2O3")
def test_get_dos_by_id(self):
dos = self.rester.get_dos_by_material_id("mp-2254")
self.assertIsInstance(dos, CompleteDos)
def test_get_bandstructure_by_material_id(self):
bs = self.rester.get_bandstructure_by_material_id("mp-2254")
self.assertIsInstance(bs, BandStructureSymmLine)
def test_get_structures(self):
structs = self.rester.get_structures("Mn3O4")
self.assertTrue(len(structs) > 0)
def test_get_entries(self):
entries = self.rester.get_entries("TiO2")
self.assertTrue(len(entries) > 1)
for e in entries:
self.assertEqual(e.composition.reduced_formula, "TiO2")
entries = self.rester.get_entries("TiO2", inc_structure="final")
self.assertTrue(len(entries) > 1)
for e in entries:
self.assertEqual(e.structure.composition.reduced_formula, "TiO2")
def test_get_exp_entry(self):
entry = self.rester.get_exp_entry("Fe2O3")
self.assertEqual(entry.energy, -825.5)
def test_submit_query_delete_snl(self):
s = Structure([[5, 0, 0], [0, 5, 0], [0, 0, 5]], ["Fe"], [[0, 0, 0]])
# d = self.rester.submit_snl(
# [s, s], remarks=["unittest"],
# authors="Test User <test@materialsproject.com>")
# self.assertEqual(len(d), 2)
# data = self.rester.query_snl({"about.remarks": "unittest"})
# self.assertEqual(len(data), 2)
# snlids = [d["_id"] for d in data]
# self.rester.delete_snl(snlids)
# data = self.rester.query_snl({"about.remarks": "unittest"})
# self.assertEqual(len(data), 0)
def test_get_stability(self):
entries = self.rester.get_entries_in_chemsys(["Fe", "O"])
modified_entries = []
for entry in entries:
# Create modified entries with energies that are 0.01eV higher
# than the corresponding entries.
if entry.composition.reduced_formula == "Fe2O3":
modified_entries.append(
ComputedEntry(entry.composition,
entry.uncorrected_energy + 0.01,
parameters=entry.parameters,
entry_id="mod_{}".format(entry.entry_id)))
rest_ehulls = self.rester.get_stability(modified_entries)
all_entries = entries + modified_entries
compat = MaterialsProjectCompatibility()
all_entries = compat.process_entries(all_entries)
pd = PhaseDiagram(all_entries)
a = PDAnalyzer(pd)
for e in all_entries:
if str(e.entry_id).startswith("mod"):
for d in rest_ehulls:
if d["entry_id"] == e.entry_id:
data = d
break
self.assertAlmostEqual(a.get_e_above_hull(e),
data["e_above_hull"])
def test_get_reaction(self):
rxn = self.rester.get_reaction(["Li", "O"], ["Li2O"])
self.assertIn("Li2O", rxn["Experimental_references"])
def test_parse_criteria(self):
crit = MPRester.parse_criteria("mp-1234 Li-*")
self.assertIn("Li-O", crit["$or"][1]["chemsys"]["$in"])
self.assertIn({"task_id": "mp-1234"}, crit["$or"])
crit = MPRester.parse_criteria("Li2*")
self.assertIn("Li2O", crit["pretty_formula"]["$in"])
self.assertIn("Li2I", crit["pretty_formula"]["$in"])
self.assertIn("CsLi2", crit["pretty_formula"]["$in"])
crit = MPRester.parse_criteria("Li-*-*")
self.assertIn("Li-Re-Ru", crit["chemsys"]["$in"])
self.assertNotIn("Li-Li", crit["chemsys"]["$in"])
comps = MPRester.parse_criteria("**O3")["pretty_formula"]["$in"]
for c in comps:
self.assertEqual(len(Composition(c)), 3)
#Let's test some invalid symbols
self.assertRaises(KeyError, MPRester.parse_criteria, "li-fe")
self.assertRaises(KeyError, MPRester.parse_criteria, "LO2")
if __name__ == "__main__":
unittest.main()
|
yanikou19/pymatgen
|
pymatgen/matproj/tests/test_rest.py
|
Python
|
mit
| 9,825
|
[
"pymatgen"
] |
5fe1a641d342c1455feec49d1771ffade26f706aaf3ac4ef26159581f3c6609f
|
#-----------------------------------------------------------------------------
# Copyright (c) 2010-2012 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import functools
import sys
import time
from threading import Thread
from unittest import TestCase
import zmq
from zmq.utils import jsonapi
try:
import gevent
from zmq import green as gzmq
have_gevent = True
except ImportError:
have_gevent = False
try:
from unittest import SkipTest
except ImportError:
try:
from nose import SkipTest
except ImportError:
class SkipTest(Exception):
pass
PYPY = 'PyPy' in sys.version
#-----------------------------------------------------------------------------
# skip decorators (directly from unittest)
#-----------------------------------------------------------------------------
_id = lambda x: x
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
@functools.wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skip_if(condition, reason="Skipped"):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
skip_pypy = skip_if(PYPY, "Doesn't work on PyPy")
#-----------------------------------------------------------------------------
# Base test class
#-----------------------------------------------------------------------------
class BaseZMQTestCase(TestCase):
green = False
@property
def Context(self):
if self.green:
return gzmq.Context
else:
return zmq.Context
def socket(self, socket_type):
s = self.context.socket(socket_type)
self.sockets.append(s)
return s
def setUp(self):
if self.green and not have_gevent:
raise SkipTest("requires gevent")
self.context = self.Context.instance()
self.sockets = []
def tearDown(self):
contexts = set([self.context])
while self.sockets:
sock = self.sockets.pop()
contexts.add(sock.context) # in case additional contexts are created
sock.close(0)
for ctx in contexts:
t = Thread(target=ctx.term)
t.daemon = True
t.start()
t.join(timeout=2)
if t.is_alive():
# reset Context.instance, so the failure to term doesn't corrupt subsequent tests
zmq.sugar.context.Context._instance = None
raise RuntimeError("context could not terminate, open sockets likely remain in test")
def create_bound_pair(self, type1=zmq.PAIR, type2=zmq.PAIR, interface='tcp://127.0.0.1'):
"""Create a bound socket pair using a random port."""
s1 = self.context.socket(type1)
s1.setsockopt(zmq.LINGER, 0)
port = s1.bind_to_random_port(interface)
s2 = self.context.socket(type2)
s2.setsockopt(zmq.LINGER, 0)
s2.connect('%s:%s' % (interface, port))
self.sockets.extend([s1,s2])
return s1, s2
def ping_pong(self, s1, s2, msg):
s1.send(msg)
msg2 = s2.recv()
s2.send(msg2)
msg3 = s1.recv()
return msg3
def ping_pong_json(self, s1, s2, o):
if jsonapi.jsonmod is None:
raise SkipTest("No json library")
s1.send_json(o)
o2 = s2.recv_json()
s2.send_json(o2)
o3 = s1.recv_json()
return o3
def ping_pong_pyobj(self, s1, s2, o):
s1.send_pyobj(o)
o2 = s2.recv_pyobj()
s2.send_pyobj(o2)
o3 = s1.recv_pyobj()
return o3
def assertRaisesErrno(self, errno, func, *args, **kwargs):
try:
func(*args, **kwargs)
except zmq.ZMQError as e:
self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \
got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno)))
else:
self.fail("Function did not raise any error")
def _select_recv(self, multipart, socket, **kwargs):
"""call recv[_multipart] in a way that raises if there is nothing to receive"""
if zmq.zmq_version_info() >= (3,1,0):
# zmq 3.1 has a bug, where poll can return false positives,
# so we wait a little bit just in case
# See LIBZMQ-280 on JIRA
time.sleep(0.1)
r,w,x = zmq.select([socket], [], [], timeout=5)
assert len(r) > 0, "Should have received a message"
kwargs['flags'] = zmq.DONTWAIT | kwargs.get('flags', 0)
recv = socket.recv_multipart if multipart else socket.recv
return recv(**kwargs)
def recv(self, socket, **kwargs):
"""call recv in a way that raises if there is nothing to receive"""
return self._select_recv(False, socket, **kwargs)
def recv_multipart(self, socket, **kwargs):
"""call recv_multipart in a way that raises if there is nothing to receive"""
return self._select_recv(True, socket, **kwargs)
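# Illustrative sketch (added; not an actual pyzmq test): a hypothetical test
# case built on the helpers above, using create_bound_pair() and ping_pong().
#
#   class ExamplePairTest(BaseZMQTestCase):
#       def test_ping_pong(self):
#           s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
#           self.assertEqual(self.ping_pong(s1, s2, b'ping'), b'ping')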
class PollZMQTestCase(BaseZMQTestCase):
pass
class GreenTest:
"""Mixin for making green versions of test classes"""
green = True
def assertRaisesErrno(self, errno, func, *args, **kwargs):
if errno == zmq.EAGAIN:
raise SkipTest("Skipping because we're green.")
try:
func(*args, **kwargs)
except zmq.ZMQError:
e = sys.exc_info()[1]
self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \
got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno)))
else:
self.fail("Function did not raise any error")
def tearDown(self):
contexts = set([self.context])
while self.sockets:
sock = self.sockets.pop()
contexts.add(sock.context) # in case additional contexts are created
sock.close()
try:
gevent.joinall([gevent.spawn(ctx.term) for ctx in contexts], timeout=2, raise_error=True)
except gevent.Timeout:
raise RuntimeError("context could not terminate, open sockets likely remain in test")
def skip_green(self):
raise SkipTest("Skipping because we are green")
def skip_green(f):
def skipping_test(self, *args, **kwargs):
if self.green:
raise SkipTest("Skipping because we are green")
else:
return f(self, *args, **kwargs)
return skipping_test
|
ellisonbg/pyzmq
|
zmq/tests/__init__.py
|
Python
|
lgpl-3.0
| 7,203
|
[
"Brian"
] |
515a38277390a0c3e61f83ad2bde503ceed2a24f03f10832b4d57fd459899ed3
|
#!/usr/bin/python
########################################################################
# File : DIRACbenchmark.py
# Author : Andrew McNab
########################################################################
""" DIRAC Benchmark 2012 by Ricardo Graciani, and wrapper functions to
run multiple copies in parallel by Andrew McNab.
This file (DIRACbenchmark.py) is intended to be the ultimate upstream
shared by different users of the DIRAC Benchmark 2012 (DB12). The
canonical version can be found at https://github.com/DIRACGrid/DB12
This script can either be imported or run from the command line:
./DIRACbenchmark.py NUMBER
where NUMBER gives the number of benchmark processes to run in parallel.
Run ./DIRACbenchmark.py help to see more options.
"""
import os
import sys
import random
import urllib
import multiprocessing
version = '00.04 DB12'
def singleDiracBenchmark( iterations = 1, measuredCopies = None ):
""" Get Normalized Power of one CPU in DIRAC Benchmark 2012 units (DB12)
"""
# This number of iterations corresponds to 1kHS2k.seconds, i.e. 250 HS06 seconds
n = int( 1000 * 1000 * 12.5 )
calib = 250.0
m = long( 0 )
m2 = long( 0 )
p = 0
p2 = 0
# Do one iteration extra to allow CPUs with variable speed (we ignore zeroth iteration)
# Do one or more extra iterations to avoid tail effects when copies run in parallel
i = 0
while (i <= iterations) or (measuredCopies is not None and measuredCopies.value > 0):
if i == 1:
start = os.times()
# Now the iterations
for _j in xrange( n ):
t = random.normalvariate( 10, 1 )
m += t
m2 += t * t
p += t
p2 += t * t
if i == iterations:
end = os.times()
if measuredCopies is not None:
# Reduce the total of running copies by one
measuredCopies.value -= 1
i += 1
cput = sum( end[:4] ) - sum( start[:4] )
wall = end[4] - start[4]
if not cput:
return None
# Return DIRAC-compatible values
return { 'CPU' : cput, 'WALL' : wall, 'NORM' : calib * iterations / cput, 'UNIT' : 'DB12' }
def singleDiracBenchmarkProcess( resultObject, iterations = 1, measuredCopies = None ):
""" Run singleDiracBenchmark() in a multiprocessing friendly way
"""
benchmarkResult = singleDiracBenchmark( iterations = iterations, measuredCopies = measuredCopies )
if not benchmarkResult or 'NORM' not in benchmarkResult:
return None
# This makes it easy to use with multiprocessing.Process
resultObject.value = benchmarkResult['NORM']
def multipleDiracBenchmark( copies = 1, iterations = 1, extraIteration = False ):
""" Run multiple copies of the DIRAC Benchmark in parallel
"""
processes = []
results = []
if extraIteration:
# If true, then we run one or more extra iterations in each
# copy until the number still being measured is zero.
measuredCopies = multiprocessing.Value('i', copies)
else:
measuredCopies = None
# Set up all the subprocesses
for i in range( copies ):
results.append( multiprocessing.Value('d', 0.0) )
processes.append( multiprocessing.Process( target = singleDiracBenchmarkProcess, args = ( results[i], iterations, measuredCopies ) ) )
# Start them all off at the same time
for p in processes:
p.start()
# Wait for them all to finish
for p in processes:
p.join()
raw = []
product = 1.0
for result in results:
raw.append( result.value )
product *= result.value
raw.sort()
# Return the list of raw results and various averages
return { 'raw' : raw,
'copies' : copies,
'sum' : sum(raw),
'arithmetic_mean' : sum(raw)/copies,
'geometric_mean' : product ** (1.0 / copies),
'median' : raw[(copies-1) / 2] }
def wholenodeDiracBenchmark( copies = None, iterations = 1, extraIteration = False ):
""" Run as many copies as needed to occupy the whole machine
"""
# Try $MACHINEFEATURES first if not given by caller
if copies is None and 'MACHINEFEATURES' in os.environ:
try:
copies = int( urllib.urlopen( os.environ['MACHINEFEATURES'] + '/total_cpu' ).read() )
except:
pass
# If not given by caller or $MACHINEFEATURES/total_cpu then just count CPUs
if copies is None:
try:
copies = multiprocessing.cpu_count()
except:
copies = 1
return multipleDiracBenchmark( copies = copies, iterations = iterations, extraIteration = extraIteration )
def jobslotDiracBenchmark( copies = None, iterations = 1, extraIteration = False ):
""" Run as many copies as needed to occupy the job slot
"""
# Try $JOBFEATURES first if not given by caller
if copies is None and 'JOBFEATURES' in os.environ:
try:
copies = int( urllib.urlopen( os.environ['JOBFEATURES'] + '/allocated_cpu' ).read() )
except:
pass
# If not given by caller or $JOBFEATURES/allocated_cpu then just run one copy
if copies is None:
copies = 1
return multipleDiracBenchmark( copies = copies, iterations = iterations, extraIteration = extraIteration )
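# Illustrative usage sketch (added; not part of the upstream DB12 script):
# running four parallel copies from another Python program and reading the
# averaged results, using only the functions defined above.
def _exampleDiracBenchmarkUsage():
    result = multipleDiracBenchmark( copies = 4, iterations = 1, extraIteration = False )
    # 'raw' holds the per-copy DB12 scores; the dictionary also carries the
    # sum, arithmetic_mean, geometric_mean and median computed above.
    return result['geometric_mean'], result['raw']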
#
# If we run as a command
#
if __name__ == "__main__":
helpString = """DIRACbenchmark.py [--iterations ITERATIONS] [--extra-iteration]
[COPIES|single|wholenode|jobslot|version|help]
Uses the functions within DIRACbenchmark.py to run the DB12 benchmark from the
command line.
By default one benchmarking iteration is run, in addition to the initial
iteration which DB12 runs and ignores to avoid ramp-up effects at the start.
The number of benchmarking iterations can be increased using the --iterations
option. Additional iterations which are also ignored can be added with the
--extra-iteration option to avoid tail effects. In this case copies which
finish early run additional iterations until all the measurements finish.
The COPIES (ie an integer) argument causes multiple copies of the benchmark to
be run in parallel. The tokens "wholenode", "jobslot" and "single" can be
given instead to use $MACHINEFEATURES/total_cpu, $JOBFEATURES/allocated_cpu,
or 1 as the number of copies respectively. If $MACHINEFEATURES/total_cpu is
not available, then the number of (logical) processors visible to the
operating system is used.
Unless the token "single" is used, the script prints the following results to
two lines on stdout:
COPIES SUM ARITHMETIC-MEAN GEOMETRIC-MEAN MEDIAN
RAW-RESULTS
The tokens "version" and "help" print information about the script.
The source code of DIRACbenchmark.py provides examples of how the functions
within DIRACbenchmark.py can be used by other Python programs.
DIRACbenchmark.py is distributed from https://github.com/DIRACGrid/DB12
"""
copies = None
iterations = 1
extraIteration = False
for arg in sys.argv[1:]:
if arg.startswith('--iterations='):
iterations = int(arg[13:])
elif arg == '--extra-iteration':
extraIteration = True
elif arg == '--help' or arg == 'help':
print helpString
sys.exit(0)
elif not arg.startswith('--'):
copies = arg
if copies == 'version':
print version
sys.exit(0)
if copies is None or copies == 'single':
print singleDiracBenchmark()['NORM']
sys.exit(0)
if copies == 'wholenode':
result = wholenodeDiracBenchmark( iterations = iterations, extraIteration = extraIteration )
print result['copies'],result['sum'],result['arithmetic_mean'],result['geometric_mean'],result['median']
print ' '.join([str(i) for i in result['raw']])
sys.exit(0)
if copies == 'jobslot':
result = jobslotDiracBenchmark( iterations = iterations, extraIteration = extraIteration )
print result['copies'],result['sum'],result['arithmetic_mean'],result['geometric_mean'],result['median']
print ' '.join([str(i) for i in result['raw']])
sys.exit(0)
result = multipleDiracBenchmark( copies = int(copies), iterations = iterations, extraIteration = extraIteration )
print result['copies'],result['sum'],result['arithmetic_mean'],result['geometric_mean'],result['median']
print ' '.join([str(i) for i in result['raw']])
sys.exit(0)
|
hgiemza/DIRAC
|
WorkloadManagementSystem/Client/DIRACbenchmark.py
|
Python
|
gpl-3.0
| 8,236
|
[
"DIRAC"
] |
c125e2a3b376d1f4572b0faf03fd502e5c25da13bd92100928478bfd30ac9bb8
|
"""
============= DON'T MODIFY THIS FILE ============
This is the boilerplate default configuration file.
Changes and additions to settings should be done in
/bp_content/themes/<YOUR_THEME>/config/ rather than this config.
"""
import os
config = {
# webapp2 sessions
'webapp2_extras.sessions': {'secret_key': '_PUT_KEY_HERE_YOUR_SECRET_KEY_'},
# webapp2 authentication
'webapp2_extras.auth': {'user_model': 'bp_includes.models.User',
'cookie_name': 'session_name'},
# jinja2 templates
'webapp2_extras.jinja2': {'template_path': ['bp_admin/templates', 'bp_content/themes/%s/templates' % os.environ['theme']],
'environment_args': {'extensions': ['jinja2.ext.i18n']}},
# application name
'app_name': "Google App Engine Boilerplate",
# the default language code for the application.
# should match whatever language the site uses when i18n is disabled
'app_lang': 'en',
# Locale code = <language>_<territory> (ie 'en_US')
# to pick locale codes see http://cldr.unicode.org/index/cldr-spec/picking-the-right-language-code
# also see http://www.sil.org/iso639-3/codes.asp
# Language codes defined under iso 639-1 http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# Territory codes defined under iso 3166-1 alpha-2 http://en.wikipedia.org/wiki/ISO_3166-1
# disable i18n if locales array is empty or None
'locales': ['en_US', 'es_ES', 'it_IT', 'zh_CN', 'id_ID', 'fr_FR', 'de_DE', 'ru_RU', 'pt_BR', 'cs_CZ','vi_VN','nl_NL'],
# contact page email settings
'contact_sender': "SENDER_EMAIL_HERE",
'contact_recipient': "RECIPIENT_EMAIL_HERE",
# Password AES Encryption Parameters
# aes_key must be only 16 (*AES-128*), 24 (*AES-192*), or 32 (*AES-256*) bytes (characters) long.
'aes_key': "12_24_32_BYTES_KEY_FOR_PASSWORDS",
'salt': "_PUT_SALT_HERE_TO_SHA512_PASSWORDS_",
# get your own consumer key and consumer secret by registering at https://dev.twitter.com/apps
# callback url must be: http://[YOUR DOMAIN]/login/twitter/complete
'twitter_consumer_key': 'TWITTER_CONSUMER_KEY',
'twitter_consumer_secret': 'TWITTER_CONSUMER_SECRET',
#Facebook Login
# get your own consumer key and consumer secret by registering at https://developers.facebook.com/apps
#Very Important: set the site_url= your domain in the application settings in the facebook app settings page
# callback url must be: http://[YOUR DOMAIN]/login/facebook/complete
'fb_api_key': 'FACEBOOK_API_KEY',
'fb_secret': 'FACEBOOK_SECRET',
#Linkedin Login
#Get you own api key and secret from https://www.linkedin.com/secure/developer
'linkedin_api': 'LINKEDIN_API',
'linkedin_secret': 'LINKEDIN_SECRET',
# Github login
# Register apps here: https://github.com/settings/applications/new
'github_server': 'github.com',
'github_redirect_uri': 'http://www.example.com/social_login/github/complete',
'github_client_id': 'GITHUB_CLIENT_ID',
'github_client_secret': 'GITHUB_CLIENT_SECRET',
# get your own recaptcha keys by registering at http://www.google.com/recaptcha/
'captcha_public_key': "CAPTCHA_PUBLIC_KEY",
'captcha_private_key': "CAPTCHA_PRIVATE_KEY",
# Use a complete Google Analytics code, no just the Tracking ID
'google_analytics_code': "",
# add status codes and templates used to catch and display errors
# if a status code is not listed here it will use the default app engine
# stacktrace error page or browser error page
'error_templates': {
403: 'errors/default_error.html',
404: 'errors/default_error.html',
500: 'errors/default_error.html',
},
# Enable Federated login (OpenID and OAuth)
# Google App Engine Settings must be set to Authentication Options: Federated Login
'enable_federated_login': True,
# jinja2 base layout template
'base_layout': 'base.html',
# send error emails to developers
'send_mail_developer': False,
# fellas' list
'developers': (
('Santa Klauss', 'snowypal@northpole.com'),
),
# If true, it will write in datastore a log of every email sent
'log_email': True,
# If true, it will write in datastore a log of every visit
'log_visit': True,
# ----> ADD MORE CONFIGURATION OPTIONS HERE <----
} # end config
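# Illustrative example (added): a theme-level config in
# bp_content/themes/<YOUR_THEME>/config/ typically redefines only the keys it
# needs to override (hypothetical values shown):
#
#   config = {
#       'app_name': "My Site",
#       'contact_sender': "noreply@example.com",
#       'contact_recipient': "admin@example.com",
#   }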
|
joshainglis/sa-tools
|
bp_includes/config.py
|
Python
|
lgpl-3.0
| 4,390
|
[
"VisIt"
] |
33046706804d80ee531fe91c04182144e054f492f1aae3c6b754a961f612e64b
|
""" Write ccData object to file """
from __future__ import print_function
from os.path import join
from qcl import templates
from qcl import periodictable as pt
def xyzfile(ccdata, fname, append=False):
"""xyzfile"""
if append:
permission = 'a'
else:
permission = 'w'
with open(fname, permission) as handle:
handle.write(_xyzfile(ccdata))
def _xyzfile(ccdata):
"""xyzfile string"""
string = ''
string += str(len(ccdata.atomnos)) + '\n'
if hasattr(ccdata, 'comment'):
string += ccdata.comment
else:
string += '\n'
atomnos = [pt.Element[x] for x in ccdata.atomnos]
atomcoords = ccdata.atomcoords[-1]
if not type(atomcoords) is list:
atomcoords = [x.tolist() for x in atomcoords]
for i in range(len(atomcoords)):
atomcoords[i].insert(0, atomnos[i])
for atom in atomcoords:
string += ' {0} {1:10.8f} {2:10.8f} {3:10.8f}\n'.format(*atom)
return string
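# Illustrative example (added): for a hypothetical ccdata object describing a
# water molecule (atomnos = [8, 1, 1] plus a final atomcoords frame),
# xyzfile(ccdata, 'water.xyz') writes a standard XYZ file of the form
#
#   3
#   <comment line>
#    O 0.00000000 0.11779700 0.00000000
#    H 0.75545000 -0.47118800 0.00000000
#    H -0.75545000 -0.47118800 0.00000000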
def inputfiles(ccdatas, templatefiles, path='./', indexed=False):
""" Write multiple inpfiles for multiple templates and ccdatas
indexed assumes the ccdata object has a filename that starts with a number
"""
for ccdata in ccdatas:
if indexed:
index = ccdata.filename.split('.')[0]
else:
index = str(ccdatas.index(ccdata))
for templatefile in templatefiles:
inpfile = join(path, index)
inpfile = inpfile + '.' + templatefile
inputfile(ccdata, templatefile, inpfile)
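# Illustrative usage (added): writing a Q-Chem and a MOPAC input for each
# parsed ccdata object, assuming 'opt.qcm' and 'opt.mop' are template names
# known to qcl.templates (hypothetical names):
#
#   inputfiles(ccdatas, ['opt.qcm', 'opt.mop'], path='./inputs/')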
def inputfile(ccdata, templatefile, inpfile=None):
"""Generic write ccdata + templatefile to inpfile"""
if templates.exists(templatefile):
if type(ccdata) is list \
and 'fsm' in templatefile \
and '.qcm' in templatefile:
string = _qchemfsminputfile(ccdata, templatefile, inpfile)
elif '.mop' in templatefile:
string = _mopacinputfile(ccdata, templatefile, inpfile)
elif '.qcm' in templatefile:
string = _qcheminputfile(ccdata, templatefile, inpfile)
else:
print(templatefile, "failed -not a valid extension")
return
if inpfile:
with open(inpfile, 'w') as handle:
handle.write(string)
else:
return string
def _qcheminputfile(ccdata, templatefile, inpfile):
"""
Generate input file from geometry (list of lines) depending on job type
:ccdata: ccData object
:templatefile: templatefile - tells us which template file to use
:inpfile: OUTPUT - expects a path/to/inputfile to write inpfile
"""
string = ''
if hasattr(ccdata, 'charge'):
charge = ccdata.charge
else:
charge = 0
if hasattr(ccdata, 'mult'):
mult = ccdata.mult
else:
print('Multiplicity not found, set to 1 by default')
mult = 1
# $molecule
string += '$molecule\n'
string += '{0} {1}\n'.format(charge, mult)
# Geometry (Maybe a cleaner way to do this..)
atomnos = [pt.Element[x] for x in ccdata.atomnos]
atomcoords = ccdata.atomcoords[-1]
if not type(atomcoords) is list:
atomcoords = atomcoords.tolist()
for i in range(len(atomcoords)):
atomcoords[i].insert(0, atomnos[i])
for atom in atomcoords:
string += ' {0} {1:10.8f} {2:10.8f} {3:10.8f}\n'.format(*atom)
string += '$end\n\n'
# $end
# $rem
with open(templates.get(templatefile), 'r') as templatehandle:
templatelines = [x for x in templatehandle.readlines()]
for line in templatelines:
string += line
# $end
return string
def _qchemfsminputfile(ccdatas, templatefile, inpfile):
"""
Temporary fix for the need of a different input format for
frozen string method
"""
string = ''
# fsm assertions
    if len(ccdatas) != 2:
        raise ValueError('fsm input requires exactly two ccdata objects')
ccdata = ccdatas[0]
if hasattr(ccdata, 'charge'):
charge = ccdata.charge
else:
print("Charge not found, set to 0 by default")
charge = 0
if hasattr(ccdata, 'mult'):
mult = ccdata.mult
else:
print("Multiplicity not found, set to 1 by default")
mult = 1
# $molecule
string += '$molecule\n'
string += '{0} {1}\n'.format(charge, mult)
# Geometry (Maybe a cleaner way to do this..)
atomnos = [pt.Element[x] for x in ccdata.atomnos]
atomcoords = ccdata.atomcoords[-1]
    if not isinstance(atomcoords, list):
atomcoords = [x.tolist() for x in atomcoords]
for i in range(len(atomcoords)):
atomcoords[i].insert(0, atomnos[i])
for atom in atomcoords:
string += ' {0} {1:10.8f} {2:10.8f} {3:10.8f}\n'.format(*atom)
string += '******\n'
ccdata = ccdatas[1]
# Geometry (Maybe a cleaner way to do this..)
atomnos = [pt.Element[x] for x in ccdata.atomnos]
atomcoords = ccdata.atomcoords[-1]
    if not isinstance(atomcoords, list):
atomcoords = [x.tolist() for x in atomcoords]
for i in range(len(atomcoords)):
atomcoords[i].insert(0, atomnos[i])
for atom in atomcoords:
string += ' {0} {1:10.8f} {2:10.8f} {3:10.8f}\n'.format(*atom)
string += '$end\n\n'
# $end
# $rem
with open(templates.get(templatefile), 'r') as templatehandle:
template = [x for x in templatehandle.readlines()]
for line in template:
string += line
# $end
return string
def _mopacinputfile(ccdata, templatefile, inpfile):
"""
Generate input file from geometry (list of lines) depending on job type
:ccdata: ccData object
    :templatefile: templatefile - tells us which template file to use
    :inpfile: OUTPUT - expects a path/to/inputfile to write inpfile
"""
mopacmult = {1: 'SINGLET',
2: 'DOUBLET',
3: 'TRIPLET',
4: 'QUARTET',
5: 'QUINTET',
6: 'SEXTET',
7: 'SEPTET',
8: 'OCTET',
9: 'NONET'
}
string = ''
attributes = ccdata.getattributes()
with open(templates.get(templatefile), 'r') as templatehandle:
template = [x for x in templatehandle.readlines()]
# We assume first line is input commands
template[0] = template[0].rstrip('\n')
template[0] += ' CHARGE={0} {1}\n'.format(ccdata.charge,
mopacmult[ccdata.mult])
for line in template:
string += line
# Maybe some day I will write something meaningful here
string += 'comment line 1\n'
string += 'comment line 2\n'
# The MOPAC input is basically an xyz file
# Geometry (Maybe a cleaner way to do this..)
atomnos = [pt.Element[x] for x in attributes['atomnos']]
atomcoords = ccdata.atomcoords[-1]
    if not isinstance(atomcoords, list):
atomcoords = [x.tolist() for x in atomcoords]
for i in range(len(atomcoords)):
atomcoords[i].insert(0, atomnos[i])
for atom in atomcoords:
string += ' {0} {1:10.8f} {2:10.8f} {3:10.8f}\n'.format(*atom)
return string
|
ben-albrecht/qcl
|
qcl/write.py
|
Python
|
mit
| 7,255
|
[
"MOPAC"
] |
ccfe6fba9e7985a07f3e6de1eab20ab93f671a3c6ddcd27e343e82cefa9313c7
|
"""
basic support for running library as script
"""
import os
import os.path as op
import shutil
import signal
import sys
import logging
from httplib import HTTPSConnection
from urllib import urlencode
from socket import gethostname
from subprocess import PIPE, call
from optparse import OptionParser as OptionP, OptionGroup, SUPPRESS_HELP
os.environ["LC_ALL"] = "C"
class ActionDispatcher (object):
"""
This class will be invoked
a) when either a directory is run via __main__, listing all SCRIPTs
b) when a script is run directly, listing all ACTIONs
This is controlled through the meta variable, which is automatically
determined in get_meta().
"""
def __init__(self, actions):
self.actions = actions
if not actions:
actions = [(None, None)]
self.valid_actions, self.action_helps = zip(*actions)
def get_meta(self):
args = splitall(sys.argv[0])[-3:]
args[-1] = args[-1].replace(".py", "")
meta = "SCRIPT" if args[-1] == "__main__" else "ACTION"
return meta, args
def print_help(self):
meta, args = self.get_meta()
if meta == "SCRIPT":
args[-1] = meta
else:
args[-1] += " " + meta
help = "Usage:\n python -m {0}\n\n\n".format(".".join(args))
help += "Available {0}s:\n".format(meta)
max_action_len = max(len(action) for action, ah in self.actions)
for action, action_help in sorted(self.actions):
action = action.rjust(max_action_len + 4)
help += " | ".join((action, action_help[0].upper() + \
action_help[1:])) + '\n'
sys.stderr.write(help)
sys.exit(1)
def dispatch(self, globals):
from difflib import get_close_matches
meta = "ACTION" # function is only invoked for listing ACTIONs
if len(sys.argv) == 1:
self.print_help()
action = sys.argv[1]
        if action not in self.valid_actions:
print >> sys.stderr, "[error] {0} not a valid {1}\n".format(action, meta)
alt = get_close_matches(action, self.valid_actions)
print >> sys.stderr, "Did you mean one of these?\n\t{0}\n".\
format(", ".join(alt))
self.print_help()
globals[action](sys.argv[2:])
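# Usage sketch (hedged; the action table is illustrative). A script builds
# the dispatcher from (name, help) pairs and hands it the module globals so
# that each ACTION name resolves to a module-level function; main() further
# below does exactly this.
def _example_dispatch():
    actions = (
        ('report', 'print a short report'),
    )
    dispatcher = ActionDispatcher(actions)
    dispatcher.dispatch(globals())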
class OptionParser (OptionP):
def __init__(self, doc):
OptionP.__init__(self, doc)
def parse_args(self, args=None):
dests = set()
ol = []
for g in [self] + self.option_groups:
ol += g.option_list
for o in ol:
if o.dest in dests:
continue
self.add_help_from_choices(o)
dests.add(o.dest)
return OptionP.parse_args(self, args)
def add_help_from_choices(self, o):
from jcvi.utils.natsort import natsorted
if o.help == SUPPRESS_HELP:
return
default_tag = "%default"
help_pf = o.help[:1].upper() + o.help[1:]
if "[" in help_pf:
help_pf = help_pf.rsplit("[", 1)[0]
help_pf = help_pf.strip()
if o.type == "choice":
if o.default is None:
default_tag = "guess"
ctext = "|".join(natsorted(o.choices))
if len(ctext) > 100:
ctext = ctext[:100] + " ... "
choice_text = "must be one of {0}".format(ctext)
o.help = "{0}, {1} [default: {2}]".format(help_pf,
choice_text, default_tag)
else:
o.help = help_pf
if o.default is None:
default_tag = "disabled"
if o.get_opt_string() != "--help" and o.action != "store_false":
o.help += " [default: {0}]".format(default_tag)
def set_grid(self):
"""
Add --grid options for command line programs
"""
self.add_option("--grid", dest="grid",
default=False, action="store_true",
help="Run on the grid [default: %default]")
def set_grid_opts(self, array=False, vcode="99999"):
queue_choices = ("default", "fast", "medium", "himem")
valid_pcodes = popen("qconf -sprjl", debug=False).read().strip().split("\n")
valid_pcodes.append(vcode)
group = OptionGroup(self, "Grid parameters")
group.add_option("-P", dest="pcode", default=vcode, choices=valid_pcodes,
help="Specify accounting project code [default: %default]")
group.add_option("-l", dest="queue", default="default", choices=queue_choices,
help="Name of the queue [default: %default]")
group.add_option("-t", dest="threaded", default=None, type="int",
help="Append '-pe threaded N' [default: %default]")
if array:
group.add_option("-c", dest="concurrency", type="int",
help="Append task concurrency limit '-tc N'")
group.add_option("-d", dest="outdir", default=".",
help="Specify directory to store grid output/error files")
group.add_option("-N", dest="name", default=None,
help="Specify descriptive name for the job [default: %default]")
group.add_option("-H", dest="hold_jid", default=None,
help="Define the job dependency list [default: %default]")
self.add_option_group(group)
def set_table(self, sep=",", align=False):
group = OptionGroup(self, "Table formatting")
group.add_option("--sep", default=sep, help="Separator")
if align:
group.add_option("--noalign", dest="align", default=True,
action="store_false", help="Cell alignment")
else:
group.add_option("--align", default=False,
action="store_true", help="Cell alignment")
self.add_option_group(group)
def set_params(self, dest=None):
"""
Add --params options for given command line programs
"""
dest_prog = "to {0}".format(dest) if dest else ""
self.add_option("--params", dest="extra", default="",
help="Extra parameters to pass {0}".format(dest_prog) + \
" (these WILL NOT be validated) [default: %default]")
def set_outfile(self, outfile="stdout"):
"""
Add --outfile options to print out to filename.
"""
self.add_option("-o", "--outfile", default=outfile,
help="Outfile name [default: %default]")
def set_email(self):
"""
Add --email option to specify an email address
"""
self.add_option("--email", default=get_email_address(),
help='Specify an email address [default: "%default"]')
def set_tmpdir(self, tmpdir=None):
"""
Add --temporary_directory option to specify unix `sort` tmpdir
"""
self.add_option("-T", "--tmpdir", default=tmpdir,
help="Use temp directory instead of $TMP [default: %default]")
def set_cpus(self, cpus=0):
"""
Add --cpus options to specify how many threads to use.
"""
from multiprocessing import cpu_count
max_cpus = cpu_count()
if not 0 < cpus < max_cpus:
cpus = max_cpus
self.add_option("--cpus", default=cpus, type="int",
help="Number of CPUs to use, 0=unlimited [default: %default]")
def set_db_opts(self, dbname="mta4", credentials=True):
"""
Add db connection specific attributes
"""
from jcvi.utils.db import valid_dbconn, get_profile
self.add_option("--db", default=dbname, dest="dbname",
help="Specify name of database to query [default: %default]")
self.add_option("--connector", default="Sybase", dest="dbconn",
choices=valid_dbconn.keys(), help="Specify database connector [default: %default]")
hostname, username, password = get_profile()
if credentials:
self.add_option("--hostname", default=hostname,
help="Specify hostname [default: %default]")
self.add_option("--username", default=username,
help="Username to connect to database [default: %default]")
self.add_option("--password", default=password,
help="Password to connect to database [default: %default]")
self.add_option("--port", type="int",
help="Specify port number [default: %default]")
def set_stripnames(self, default=True):
if default:
self.add_option("--no_strip_names", dest="strip_names",
action="store_false", default=True,
help="do not strip alternative splicing "
"(e.g. At5g06540.1 -> At5g06540)")
else:
self.add_option("--strip_names",
action="store_true", default=False,
help="strip alternative splicing "
"(e.g. At5g06540.1 -> At5g06540)")
def set_fixchrnames(self, orgn="medicago"):
self.add_option("--fixchrname", default=orgn, dest="fix_chr_name",
help="Fix quirky chromosome names [default: %default]")
def set_SO_opts(self):
verifySO_choices = ("verify", "resolve:prefix", "resolve:suffix")
self.add_option("--verifySO", choices=verifySO_choices,
help="Verify validity of GFF3 feature type against the SO; " + \
"`resolve` will try to converge towards a valid SO " + \
"term by removing elements from the feature type " + \
"string by splitting at underscores. Example: " + \
"`mRNA_TE_gene` resolves to `mRNA` using 'resolve:prefix'")
def set_beds(self):
self.add_option("--qbed", help="Path to qbed")
self.add_option("--sbed", help="Path to sbed")
def set_sam_options(self, extra=True, bowtie=False):
self.add_option("--sam", dest="bam", default=True, action="store_false",
help="Write to SAM file instead of BAM")
self.add_option("--uniq", default=False, action="store_true",
help="Keep only uniquely mapped [default: %default]")
if bowtie:
self.add_option("--mapped", default=False, action="store_true",
help="Keep mapped reads [default: %default]")
self.add_option("--unmapped", default=False, action="store_true",
help="Keep unmapped reads [default: %default]")
if extra:
self.set_cpus()
self.set_params()
def set_mingap(self, default=100):
self.add_option("--mingap", default=default, type="int",
help="Minimum size of gaps [default: %default]")
def set_align(self, pctid=None, hitlen=None, pctcov=None, evalue=None, \
compreh_pctid=None, compreh_pctcov=None, intron=None, bpsplice=None):
if pctid is not None:
self.add_option("--pctid", default=pctid, type="int",
help="Sequence percent identity [default: %default]")
if hitlen is not None:
self.add_option("--hitlen", default=hitlen, type="int",
help="Minimum overlap length [default: %default]")
if pctcov is not None:
self.add_option("--pctcov", default=pctcov, type="int",
help="Percentage coverage cutoff [default: %default]")
if evalue is not None:
self.add_option("--evalue", default=evalue, type="float",
help="E-value cutoff [default: %default]")
if compreh_pctid is not None:
self.add_option("--compreh_pctid", default=pctid, type="int",
help="Sequence percent identity cutoff used to " + \
"build PASA comprehensive transcriptome [default: %default]")
if compreh_pctcov is not None:
self.add_option("--compreh_pctcov", default=compreh_pctcov, \
type="int", help="Percent coverage cutoff used to " + \
"build PASA comprehensive transcriptome [default: %default]")
if intron is not None:
self.add_option("--intron", default=intron, type="int",
help="Maximum intron length used for mapping " + \
"[default: %default]")
if bpsplice is not None:
self.add_option("--bpsplice", default=bpsplice, type="int",
help="Number of bp of perfect splice boundary " + \
"[default: %default]")
def set_image_options(self, args=None, figsize="6x6", dpi=300,
format="pdf", font="Helvetica", palette="deep",
style="darkgrid", cmap="jet"):
"""
Add image format options for given command line programs.
"""
from jcvi.graphics.base import ImageOptions, setup_theme
allowed_format = ("emf", "eps", "pdf", "png", "ps", \
"raw", "rgba", "svg", "svgz")
allowed_fonts = ("Helvetica", "Palatino", "Schoolbook", "Arial")
allowed_styles = ("darkgrid", "whitegrid", "dark", "white", "ticks")
allowed_diverge = ("BrBG", "PiYG", "PRGn", "PuOr", "RdBu", \
"RdGy", "RdYlBu", "RdYlGn", "Spectral")
group = OptionGroup(self, "Image options")
self.add_option_group(group)
group.add_option("--figsize", default=figsize,
help="Figure size `width`x`height` in inches [default: %default]")
group.add_option("--dpi", default=dpi, type="int",
help="Physical dot density (dots per inch) [default: %default]")
group.add_option("--format", default=format, choices=allowed_format,
help="Generate image of format [default: %default]")
group.add_option("--font", default=font, choices=allowed_fonts,
help="Font name")
group.add_option("--style", default=style, choices=allowed_styles,
help="Axes background")
group.add_option("--diverge", default="PiYG", choices=allowed_diverge,
help="Contrasting color scheme")
group.add_option("--cmap", default=cmap, help="Use this color map")
if args is None:
args = sys.argv[1:]
opts, args = self.parse_args(args)
assert opts.dpi > 0
assert "x" in opts.figsize
setup_theme(style=opts.style, font=opts.font)
return opts, args, ImageOptions(opts)
def set_depth(self, depth=50):
self.add_option("--depth", default=depth, type="int",
help="Desired depth [default: %default]")
def set_rclip(self, rclip=0):
self.add_option("--rclip", default=rclip, type="int",
help="Pair ID is derived from rstrip N chars [default: %default]")
def set_cutoff(self, cutoff=0):
self.add_option("--cutoff", default=cutoff, type="int",
help="Distance to call valid links between mates")
def set_mateorientation(self, mateorientation=None):
self.add_option("--mateorientation", default=mateorientation,
choices=("++", "--", "+-", "-+"),
help="Use only certain mate orientations [default: %default]")
def set_mates(self, rclip=0, cutoff=0, mateorientation=None):
self.set_rclip(rclip=rclip)
self.set_cutoff(cutoff=cutoff)
self.set_mateorientation(mateorientation=mateorientation)
def set_bedpe(self):
self.add_option("--rc", default=False, action="store_true",
help="Reverse complement the reads before alignment")
self.add_option("--minlen", default=2000, type="int",
help="Minimum insert size")
self.add_option("--maxlen", default=8000, type="int",
help="Maximum insert size")
def set_pairs(self):
"""
%prog pairs <blastfile|samfile|casfile|bedfile|posmapfile>
Report how many paired ends mapped, avg distance between paired ends, etc.
Paired reads must have the same prefix, use --rclip to remove trailing
part, e.g. /1, /2, or .f, .r, default behavior is to truncate until last
char.
"""
self.set_usage(self.set_pairs.__doc__)
self.add_option("--pairsfile", default=None,
help="Write valid pairs to pairsfile [default: %default]")
self.add_option("--nrows", default=200000, type="int",
help="Only use the first n lines [default: %default]")
self.set_mates()
self.add_option("--pdf", default=False, action="store_true",
help="Print PDF instead ASCII histogram [default: %default]")
self.add_option("--bins", default=20, type="int",
help="Number of bins in the histogram [default: %default]")
self.add_option("--distmode", default="ss", choices=("ss", "ee"),
help="Distance mode between paired reads, ss is outer distance, " \
"ee is inner distance [default: %default]")
def set_sep(self, sep='\t', help="Separator in the tabfile", multiple=False):
if multiple:
help += ", multiple values allowed"
self.add_option("--sep", default=sep,
help="{0} [default: '%default']".format(help))
def set_firstN(self, firstN=100000):
self.add_option("--firstN", default=firstN, type="int",
help="Use only the first N reads [default: %default]")
def set_tag(self, tag=False, specify_tag=False):
if not specify_tag:
self.add_option("--tag", default=tag, action="store_true",
help="Add tag (/1, /2) to the read name")
else:
tag_choices = ["/1", "/2"]
self.add_option("--tag", default=None, choices=tag_choices,
help="Specify tag to be added to read name")
def set_phred(self, phred=None):
phdchoices = ("33", "64")
self.add_option("--phred", default=phred, choices=phdchoices,
help="Phred score offset {0} [default: guess]".format(phdchoices))
def set_size(self, size=0):
self.add_option("--size", default=size, type="int",
help="Insert mean size, stdev assumed to be 20% around mean")
def set_trinity_opts(self, gg=False):
self.set_home("trinity")
self.set_cpus()
self.set_params(dest="Trinity")
topts = OptionGroup(self, "General Trinity options")
self.add_option_group(topts)
topts.add_option("--JM", default="100G", type="str",
help="Jellyfish memory allocation [default: %default]")
topts.add_option("--min_contig_length", default=90, type="int",
help="Minimum assembled contig length to report" + \
" [default: %default]")
topts.add_option("--bflyGCThreads", default=None, type="int",
help="Threads for garbage collection [default: %default]")
topts.add_option("--grid_conf_file", default="$TRINITY_HOME/htc_conf/JCVI_SGE.0611.conf", \
type="str", help="Configuration file for supported compute farms" + \
" [default: %default]")
ggopts = OptionGroup(self, "Genome-guided Trinity options")
self.add_option_group(ggopts)
ggopts.add_option("--use_bam", default=None, type="str",
help="provide coord-sorted bam file as starting point" + \
" [default: %default]")
ggopts.add_option("--max_intron", default=2000, type="int",
help="maximum allowed intron length [default: %default]")
ggopts.add_option("--gg_cpu", default=None, type="int",
help="set number of threads for individual GG-Trinity" + \
" commands. if not defined, inherits from `--cpu`" + \
" [default: %default]")
def set_pasa_opts(self, action="assemble"):
self.set_home("pasa")
if action == "assemble":
self.set_home("tgi")
self.add_option("--clean", default=False, action="store_true",
help="Clean transcripts using tgi seqclean [default: %default]")
self.set_align(pctid=95, pctcov=90, intron=2000, bpsplice=3)
self.add_option("--aligners", default="blat,gmap",
help="Specify splice aligners to use for mapping [default: %default]")
self.add_option("--fl_accs", default=None, type="str",
help="File containing list of FL-cDNA accessions [default: %default]")
self.set_cpus()
self.add_option("--compreh", default=False, action="store_true",
help="Run comprehensive transcriptome assembly [default: %default]")
self.set_align(compreh_pctid=95, compreh_pctcov=30)
self.add_option("--prefix", default="compreh_init_build", type="str",
help="Prefix for compreh_trans output file names [default: %default]")
elif action == "compare":
self.add_option("--annots_gff3", default=None, type="str",
help="Reference annotation to load and compare against" + \
" [default: %default]")
genetic_code = ["universal", "Euplotes", "Tetrahymena", "Candida", "Acetabularia"]
self.add_option("--genetic_code", default="universal", choices=genetic_code,
help="Choose translation table [default: %default]")
self.add_option("--pctovl", default=50, type="int",
help="Minimum pct overlap between gene and FL assembly " + \
"[default: %default]")
self.add_option("--pct_coding", default=50, type="int",
help="Minimum pct of cDNA sequence to be protein coding " + \
"[default: %default]")
self.add_option("--orf_size", default=0, type="int",
help="Minimum size of ORF encoded protein [default: %default]")
self.add_option("--utr_exons", default=2, type="int",
help="Maximum number of UTR exons [default: %default]")
self.add_option("--pctlen_FL", default=70, type="int",
help="Minimum protein length for comparisons involving " + \
"FL assemblies [default: %default]")
self.add_option("--pctlen_nonFL", default=70, type="int",
help="Minimum protein length for comparisons involving " + \
"non-FL assemblies [default: %default]")
self.add_option("--pctid_prot", default=70, type="int",
help="Minimum pctid allowed for protein pairwise comparison" + \
"[default: %default]")
self.add_option("--pct_aln", default=70, type="int",
help="Minimum pct of shorter protein length aligning to " + \
"update protein or isoform [default: %default]")
self.add_option("--pctovl_gene", default=80, type="int",
help="Minimum pct overlap among genome span of the ORF of " + \
"each overlapping gene to allow merging [default: %default]")
self.add_option("--stompovl", default="", action="store_true",
help="Ignore alignment results, only consider genome span of ORF" + \
"[default: %default]")
self.add_option("--trust_FL", default="", action="store_true",
help="Trust FL-status of cDNA [default: %default]")
def set_annot_reformat_opts(self):
self.add_option("--pad0", default=6, type="int",
help="Pad gene identifiers with 0 [default: %default]")
self.add_option("--prefix", default="Medtr",
help="Genome prefix [default: %default]")
self.add_option("--uc", default=False, action="store_true",
help="Toggle gene identifier upper case" \
+ " [default: %default]")
def set_home(self, prog, default=None):
tag = "--{0}_home".format(prog)
default = default or {"amos": "~/code/amos-code",
"trinity": "~/export/trinityrnaseq",
"cdhit": "~/export/cd-hit-v4.6.1-2012-08-27",
"maker": "~/export/maker",
"pasa": "~/export/PASA",
"gmes": "~/export/gmes",
"gt": "~/export/genometools",
"sspace": "~/export/SSPACE-BASIC-2.0_linux-x86_64",
"gapfiller": "~/export/GapFiller_v1-11_linux-x86_64",
"pbjelly": "/usr/local/projects/MTG4/PacBio/PBJelly_12.9.14/",
"khmer": "~/export/khmer",
"tassel": "/usr/local/projects/MTG4/packages/tassel",
"tgi": "/usr/local/projects/tgi/bin",
"eddyyeh": "/home/shared/scripts/eddyyeh",
"fiona": "~/export/fiona-0.2.0-Linux-x86_64",
"fermi": "~/export/fermi",
}.get(prog, None)
if default is None: # Last attempt at guessing the path
try:
default = op.dirname(which(prog))
except:
default = None
help = "Home directory for {0} [default: %default]".format(prog.upper())
self.add_option(tag, default=default, help=help)
def set_aligner(self, aligner="bowtie"):
valid_aligners = ("clc", "bowtie", "bwa")
self.add_option("--aligner", default=aligner, choices=valid_aligners,
help="Use aligner [default: %default]")
def set_verbose(self, help="Print detailed reports"):
self.add_option("--verbose", default=False, action="store_true", help=help)
def ConfigSectionMap(Config, section):
"""
Read a specific section from a ConfigParser() object and return
a dict() of all key-value pairs in that section
"""
cfg = {}
options = Config.options(section)
for option in options:
try:
cfg[option] = Config.get(section, option)
if cfg[option] == -1:
logging.debug("skip: {0}".format(option))
except:
logging.debug("exception on {0}!".format(option))
cfg[option] = None
return cfg
def get_abs_path(link_name):
source = link_name
if op.islink(source):
source = os.readlink(source)
else:
source = op.basename(source)
link_dir = op.dirname(link_name)
source = op.normpath(op.join(link_dir, source))
source = op.abspath(source)
if source == link_name:
return source
else:
return get_abs_path(source)
datadir = get_abs_path(op.join(op.dirname(__file__), '../utils/data'))
def splitall(path):
allparts = []
while True:
path, p1 = op.split(path)
if not p1:
break
allparts.append(p1)
allparts = allparts[::-1]
return allparts
def get_module_docstring(filepath):
"Get module-level docstring of Python module at filepath, e.g. 'path/to/file.py'."
co = compile(open(filepath).read(), filepath, 'exec')
if co.co_consts and isinstance(co.co_consts[0], basestring):
docstring = co.co_consts[0]
else:
docstring = None
return docstring
def dmain(mainfile):
cwd = op.dirname(mainfile)
pyscripts = glob(op.join(cwd, "*.py"))
actions = []
for ps in sorted(pyscripts):
action = op.basename(ps).replace(".py", "")
if action[0] == "_": # hidden namespace
continue
pd = get_module_docstring(ps)
action_help = [x.rstrip(":.,\n") for x in pd.splitlines(True) \
if len(x.strip()) > 10 and x[0] != '%'][0] \
if pd else "no docstring found"
actions.append((action, action_help))
a = ActionDispatcher(actions)
a.print_help()
def backup(filename):
if op.exists(filename):
bakname = filename + ".bak"
logging.debug("Backup `{0}` to `{1}`".format(filename, bakname))
sh("mv {0} {1}".format(filename, bakname))
return bakname
def getusername():
from getpass import getuser
return getuser()
def getdomainname():
from socket import getfqdn
return ".".join(str(x) for x in getfqdn().split(".")[1:])
def sh(cmd, grid=False, infile=None, outfile=None, errfile=None,
append=False, background=False, threaded=None, log=True,
grid_opts=None, shell="/bin/bash"):
"""
simple wrapper for system calls
"""
if not cmd:
return 1
if grid:
from jcvi.apps.grid import GridProcess
pr = GridProcess(cmd, infile=infile, outfile=outfile, errfile=errfile,
threaded=threaded, grid_opts=grid_opts)
pr.start()
return pr.jobid
else:
if infile:
cat = "cat"
if infile.endswith(".gz"):
cat = "zcat"
cmd = "{0} {1} |".format(cat, infile) + cmd
if outfile and outfile != "stdout":
if outfile.endswith(".gz"):
cmd += " | gzip"
tag = ">"
if append:
tag = ">>"
cmd += " {0}{1}".format(tag, outfile)
if errfile:
if errfile == outfile:
errfile = "&1"
cmd += " 2>{0}".format(errfile)
if background:
cmd += " &"
if log:
logging.debug(cmd)
return call(cmd, shell=True, executable=shell)
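# Usage sketch (hedged; the file names are hypothetical). With infile and
# outfile set, sh() wraps the command in a shell pipeline roughly equal to
# "zcat data.txt.gz | sort -k1,1 | gzip > sorted.txt.gz":
def _example_sh():
    sh("sort -k1,1", infile="data.txt.gz", outfile="sorted.txt.gz")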
def Popen(cmd, stdin=None, stdout=PIPE, debug=False, shell="/bin/bash"):
"""
Capture the cmd stdout output to a file handle.
"""
from subprocess import Popen as P
if debug:
logging.debug(cmd)
# See: <https://blog.nelhage.com/2010/02/a-very-subtle-bug/>
proc = P(cmd, bufsize=1, stdin=stdin, stdout=stdout, \
shell=True, executable=shell,
preexec_fn=lambda: signal.signal(signal.SIGPIPE,
signal.SIG_DFL))
return proc
def popen(cmd, debug=True, shell="/bin/bash"):
return Popen(cmd, debug=debug, shell=shell).stdout
def is_exe(fpath):
return op.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
"""
Emulates the unix which command.
>>> which("cat")
"/bin/cat"
>>> which("nosuchprogram")
"""
fpath, fname = op.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = op.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def glob(pathname, pattern=None):
"""
Wraps around glob.glob(), but return a sorted list.
"""
import glob as gl
if pattern:
pathname = op.join(pathname, pattern)
return sorted(gl.glob(pathname))
def iglob(pathname, *patterns):
"""
Allow multiple file formats. For example:
>>> iglob("apps", "*.py", "*.pyc")
"""
from itertools import chain
it = chain.from_iterable(glob(pathname, pattern) for pattern in patterns)
return sorted(list(it))
def mkdir(dirname, overwrite=False):
"""
Wraps around os.mkdir(), but checks for existence first.
"""
if op.isdir(dirname):
if overwrite:
shutil.rmtree(dirname)
os.mkdir(dirname)
logging.debug("Overwrite folder `{0}`.".format(dirname))
else:
return False # Nothing is changed
else:
try:
os.mkdir(dirname)
except:
os.makedirs(dirname)
logging.debug("`{0}` not found. Creating new.".format(dirname))
return True
def is_newer_file(a, b):
"""
Check if the file a is newer than file b
"""
if not (op.exists(a) and op.exists(b)):
return False
am = os.stat(a).st_mtime
bm = os.stat(b).st_mtime
return am > bm
def parse_multi_values(param):
values = None
if param:
if op.isfile(param):
values = list(set(x.strip() for x in open(param)))
else:
values = list(set(param.split(",")))
return values
def listify(a):
return a if (isinstance(a, list) or isinstance(a, tuple)) else [a]
def last_updated(a):
"""
Check the time since file was last updated.
"""
import time
return time.time() - op.getmtime(a)
def need_update(a, b):
"""
Check if file a is newer than file b and decide whether or not to update
file b. Can generalize to two lists.
"""
a = listify(a)
b = listify(b)
return any((not op.exists(x)) for x in b) or \
all((os.stat(x).st_size == 0 for x in b)) or \
any(is_newer_file(x, y) for x in a for y in b)
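# Usage sketch (hedged; the paths are hypothetical): rebuild a derived file
# only when the source is newer, or the target is missing or empty.
def _example_need_update():
    if need_update("genome.fasta", "genome.fasta.flat"):
        sh("fold -w 60 genome.fasta > genome.fasta.flat")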
def get_today():
"""
Returns the date in 2010-07-14 format
"""
from datetime import date
return str(date.today())
def ls_ftp(dir):
from urlparse import urlparse
from ftplib import FTP, error_perm
o = urlparse(dir)
ftp = FTP(o.netloc)
ftp.login()
ftp.cwd(o.path)
files = []
try:
files = ftp.nlst()
except error_perm, resp:
if str(resp) == "550 No files found":
print "no files in this directory"
else:
raise
return files
def download(url, filename=None, debug=True, cookies=None):
from urlparse import urlsplit
from subprocess import CalledProcessError
from jcvi.formats.base import FileShredder
scheme, netloc, path, query, fragment = urlsplit(url)
filename = filename or op.basename(path)
filename = filename.strip()
if not filename:
filename = "index.html"
if op.exists(filename):
if debug:
msg = "File `{0}` exists. Download skipped.".format(filename)
logging.error(msg)
else:
from jcvi.utils.ez_setup import get_best_downloader
downloader = get_best_downloader()
try:
downloader(url, filename, cookies=cookies)
except (CalledProcessError, KeyboardInterrupt) as e:
print >> sys.stderr, e
FileShredder([filename])
return filename
def getfilesize(filename, ratio=None):
rawsize = op.getsize(filename)
if not filename.endswith(".gz"):
return rawsize
import struct
fo = open(filename, 'rb')
fo.seek(-4, 2)
r = fo.read()
fo.close()
size = struct.unpack('<I', r)[0]
# This is only ISIZE, which is the UNCOMPRESSED modulo 2 ** 32
if ratio is None:
return size
# Heuristic
heuristicsize = rawsize / ratio
while size < heuristicsize:
size += 2 ** 32
if size > 2 ** 32:
logging.warn(\
"Gzip file estimated uncompressed size: {0}.".format(size))
return size
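# Worked example (hedged values): gzip stores only ISIZE, the uncompressed
# size modulo 2**32. With rawsize = 10 GiB and ratio = 0.25, the heuristic
# target rawsize / ratio is 40 GiB, so an ISIZE of 1 GiB gets bumped by ten
# multiples of 2**32 to roughly 41 GiB before being returned.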
def debug():
"""
Turn on the debugging
"""
from jcvi.apps.console import magenta, yellow
format = yellow("%(asctime)s [%(module)s]")
format += magenta(" %(message)s")
logging.basicConfig(level=logging.DEBUG,
format=format,
datefmt="%H:%M:%S")
debug()
def main():
actions = (
('less', 'enhance the unix `less` command'),
('timestamp', 'record timestamps for all files in the current folder'),
('expand', 'move files in subfolders into the current folder'),
('touch', 'recover timestamps for files in the current folder'),
('mdownload', 'multiple download a list of files'),
('waitpid', 'wait for a PID to finish and then perform desired action'),
('notify', 'send an email/push notification'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def mdownload(args):
"""
%prog mdownload links.txt
Multiple download a list of files. Use formats.html.links() to extract the
links file.
"""
from jcvi.apps.grid import Jobs
p = OptionParser(mdownload.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
linksfile, = args
links = [(x.strip(),) for x in open(linksfile)]
j = Jobs(download, links)
j.run()
def expand(args):
"""
%prog expand */*
Move files in subfolders into the current folder. Use --symlink to create a
link instead.
"""
p = OptionParser(expand.__doc__)
p.add_option("--symlink", default=False, action="store_true",
help="Create symbolic link [default: %default]")
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
seen = set()
for a in args:
oa = a.replace("/", "_")
if oa in seen:
logging.debug("Name collision `{0}`, ignored.".format(oa))
continue
cmd = "cp -s" if opts.symlink else "mv"
cmd += " {0} {1}".format(a, oa)
sh(cmd)
seen.add(oa)
def fname():
return sys._getframe().f_back.f_code.co_name
def get_times(filename):
st = os.stat(filename)
atime = st.st_atime
mtime = st.st_mtime
return (atime, mtime)
def timestamp(args):
"""
%prog timestamp path > timestamp.info
Record the timestamps for all files in the current folder.
filename atime mtime
This file can be used later to recover previous timestamps through touch().
"""
p = OptionParser(timestamp.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
path, = args
for root, dirs, files in os.walk(path):
for f in files:
filename = op.join(root, f)
atime, mtime = get_times(filename)
print filename, atime, mtime
def touch(args):
"""
%prog touch timestamp.info
Recover timestamps for files in the current folder.
CAUTION: you must execute this in the same directory as timestamp().
"""
from time import ctime
p = OptionParser(touch.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
info, = args
fp = open(info)
for row in fp:
path, atime, mtime = row.split()
atime = float(atime)
mtime = float(mtime)
current_atime, current_mtime = get_times(path)
# Check if the time has changed, with resolution up to 1 sec
if int(atime) == int(current_atime) and \
int(mtime) == int(current_mtime):
continue
times = [ctime(x) for x in (current_atime, current_mtime, atime, mtime)]
msg = "{0} : ".format(path)
msg += "({0}, {1}) => ({2}, {3})".format(*times)
print >> sys.stderr, msg
os.utime(path, (atime, mtime))
def snapshot(fp, p, fsize, counts=None):
pos = int(p * fsize)
print "==>> File `{0}`: {1} ({2}%)".format(fp.name, pos, int(p * 100))
fp.seek(pos)
fp.next()
for i, row in enumerate(fp):
if counts and i > counts:
break
try:
sys.stdout.write(row)
except IOError:
break
def less(args):
"""
%prog less filename position | less
Enhance the unix `less` command by seeking to a file location first. This is
useful to browse big files. Position is relative 0.00 - 1.00, or bytenumber.
$ %prog less myfile 0.1 # Go to 10% of the current file and streaming
$ %prog less myfile 0.1,0.2 # Stream at several positions
$ %prog less myfile 100 # Go to certain byte number and streaming
$ %prog less myfile 100,200 # Stream at several positions
$ %prog less myfile all # Generate a snapshot every 10% (10%, 20%, ..)
"""
from jcvi.formats.base import must_open
p = OptionParser(less.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
filename, pos = args
fsize = getfilesize(filename)
if pos == "all":
pos = [x / 10. for x in range(0, 10)]
else:
pos = [float(x) for x in pos.split(",")]
if pos[0] > 1:
pos = [x / fsize for x in pos]
if len(pos) > 1:
counts = 20
else:
counts = None
fp = must_open(filename)
for p in pos:
snapshot(fp, p, fsize, counts=counts)
# notification specific variables
valid_notif_methods = ["email"]
available_push_api = {"push" : ["pushover", "nma", "pushbullet"]}
def pushover(message, token, user, title="JCVI: Job Monitor", \
priority=0, timestamp=None):
"""
pushover.net python API
<https://pushover.net/faq#library-python>
"""
assert -1 <= priority <= 2, \
"Priority should be an int() between -1 and 2"
    if timestamp is None:
from time import time
timestamp = int(time())
retry, expire = (300, 3600) if priority == 2 \
else (None, None)
conn = HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
urlencode({
"token": token,
"user": user,
"message": message,
"title": title,
"priority": priority,
"timestamp": timestamp,
"retry": retry,
"expire": expire,
}), { "Content-type": "application/x-www-form-urlencoded" })
conn.getresponse()
def nma(description, apikey, event="JCVI: Job Monitor", priority=0):
"""
notifymyandroid.com API
<http://www.notifymyandroid.com/api.jsp>
"""
assert -2 <= priority <= 2, \
"Priority should be an int() between -2 and 2"
conn = HTTPSConnection("www.notifymyandroid.com")
conn.request("POST", "/publicapi/notify",
urlencode({
"apikey": apikey,
"application": "python notify",
"event": event,
"description": description,
"priority": priority,
}), { "Content-type": "application/x-www-form-urlencoded" })
conn.getresponse()
def pushbullet(body, apikey, device, title="JCVI: Job Monitor", type="note"):
"""
pushbullet.com API
<https://www.pushbullet.com/api>
"""
import base64
headers = {}
auth = base64.encodestring("{0}:".format(apikey)).strip()
headers['Authorization'] = "Basic {0}".format(auth)
headers['Content-type'] = "application/x-www-form-urlencoded"
conn = HTTPSConnection("api.pushbullet.com".format(apikey))
conn.request("POST", "/api/pushes",
urlencode({
"iden": device,
"type": "note",
"title": title,
"body": body,
}), headers)
conn.getresponse()
def pushnotify(subject, message, api="pushover", priority=0, timestamp=None):
"""
Send push notifications using pre-existing APIs
Requires a config `pushnotify.ini` file in the user home area containing
the necessary api tokens and user keys.
Default API: "pushover"
Config file format:
-------------------
[pushover]
token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
user: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
[nma]
apikey: zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
[pushbullet]
apikey: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
iden: dddddddddddddddddddddddddddddddddddd
"""
import types
assert type(priority) is types.IntType and -1 <= priority <= 2, \
"Priority should be and int() between -1 and 2"
import ConfigParser
cfgfile = op.join(op.expanduser("~"), "pushnotify.ini")
Config = ConfigParser.ConfigParser()
if op.exists(cfgfile):
Config.read(cfgfile)
else:
sys.exit("Push notification config file `{0}`".format(cfgfile) + \
" does not exist!")
if api == "pushover":
cfg = ConfigSectionMap(Config, api)
token, key = cfg["token"], cfg["user"]
pushover(message, token, key, title=subject, \
priority=priority, timestamp=timestamp)
elif api == "nma":
cfg = ConfigSectionMap(Config, api)
apikey = cfg["apikey"]
nma(message, apikey, event=subject, \
priority=priority)
elif api == "pushbullet":
cfg = ConfigSectionMap(Config, api)
apikey, iden = cfg["apikey"], cfg['iden']
pushbullet(message, apikey, iden, title=subject, \
type="note")
def send_email(fromaddr, toaddr, subject, message):
"""
Send an email message
"""
from smtplib import SMTP
SERVER = "localhost"
message = "Subject: {0}\n{1}".format(subject, message)
server = SMTP(SERVER)
server.sendmail(fromaddr, toaddr, message)
server.quit()
def get_email_address(whoami="user"):
""" Auto-generate the FROM and TO email address """
if whoami == "user":
username = getusername()
domain = getdomainname()
myemail = "{0}@{1}".format(username, domain)
return myemail
else:
fromaddr = "notifier-donotreply@{0}".format(getdomainname())
return fromaddr
def is_valid_email(email):
"""
RFC822 Email Address Regex
--------------------------
Originally written by Cal Henderson
c.f. http://iamcal.com/publish/articles/php/parsing_email/
Translated to Python by Tim Fletcher, with changes suggested by Dan Kubb.
Licensed under a Creative Commons Attribution-ShareAlike 2.5 License
http://creativecommons.org/licenses/by-sa/2.5/
"""
import re
qtext = '[^\\x0d\\x22\\x5c\\x80-\\xff]'
dtext = '[^\\x0d\\x5b-\\x5d\\x80-\\xff]'
atom = '[^\\x00-\\x20\\x22\\x28\\x29\\x2c\\x2e\\x3a-\\x3c\\x3e\\x40\\x5b-\\x5d\\x7f-\\xff]+'
quoted_pair = '\\x5c[\\x00-\\x7f]'
domain_literal = "\\x5b(?:%s|%s)*\\x5d" % (dtext, quoted_pair)
quoted_string = "\\x22(?:%s|%s)*\\x22" % (qtext, quoted_pair)
domain_ref = atom
sub_domain = "(?:%s|%s)" % (domain_ref, domain_literal)
word = "(?:%s|%s)" % (atom, quoted_string)
domain = "%s(?:\\x2e%s)*" % (sub_domain, sub_domain)
local_part = "%s(?:\\x2e%s)*" % (word, word)
addr_spec = "%s\\x40%s" % (local_part, domain)
email_address = re.compile('\A%s\Z' % addr_spec)
if email_address.match(email):
return True
return False
def notify(args):
"""
%prog notify "Message to be sent"
Send a message via email/push notification.
Email notify: Recipient email address is constructed by joining the login `username`
and `dnsdomainname` of the server
Push notify: Uses available API
"""
from jcvi.utils.iter import flatten
valid_notif_methods.extend(available_push_api.keys())
fromaddr = get_email_address(whoami="notifier")
p = OptionParser(notify.__doc__)
p.add_option("--method", default="email", choices=valid_notif_methods,
help="Specify the mode of notification [default: %default]")
p.add_option("--subject", default="JCVI: job monitor",
help="Specify the subject of the notification message")
p.set_email()
g1 = OptionGroup(p, "Optional `push` parameters")
g1.add_option("--api", default="pushover", \
choices=list(flatten(available_push_api.values())),
help="Specify API used to send the push notification")
g1.add_option("--priority", default=0, type="int",
help="Message priority (-1 <= p <= 2) [default: %default]")
g1.add_option("--timestamp", default=None, type="int", \
dest="timestamp", \
help="Message timestamp in unix format [default: %default]")
p.add_option_group(g1)
opts, args = p.parse_args(args)
if len(args) == 0:
logging.error("Please provide a brief message to be sent")
sys.exit(not p.print_help())
subject = opts.subject
message = " ".join(args).strip()
if opts.method == "email":
if not is_valid_email(opts.email):
logging.debug("Email address `{0}` is not valid!".format(opts.email))
sys.exit()
toaddr = [opts.email] # TO address should be in a list
send_email(fromaddr, toaddr, subject, message)
else:
pushnotify(subject, message, api=opts.api, priority=opts.priority, \
timestamp=opts.timestamp)
def is_running(pid):
"""Check whether pid exists in the current process table."""
if pid < 0:
return False
import errno
try:
os.kill(pid, 0)
except OSError, e:
return e.errno == errno.EPERM
else:
return True
def waitpid(args):
"""
%prog waitpid PID ::: "./command_to_run param1 param2 ...."
Given a PID, this script will wait for the PID to finish running and
then perform a desired action (notify user and/or execute a new command)
Specify "--notify=METHOD` to send the user a notification after waiting for PID
Specify `--grid` option to send the new process to the grid after waiting for PID
"""
import shlex
from time import sleep
from jcvi.utils.iter import flatten
valid_notif_methods.extend(list(flatten(available_push_api.values())))
p = OptionParser(waitpid.__doc__)
p.add_option("--notify", default=None, choices=valid_notif_methods,
help="Specify type of notification to be sent after waiting")
p.add_option("--interval", default=120, type="int",
help="Specify PID polling interval in seconds")
p.add_option("--message",
help="Specify notification message [default: %default]")
p.set_email()
p.set_grid()
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
if not opts.message:
"""
If notification message not specified by user, just get
the name of the running command and use it as the message
"""
from subprocess import check_output
sep = ":::"
cmd = None
if sep in args:
sepidx = args.index(sep)
cmd = " ".join(args[sepidx + 1:]).strip()
args = args[:sepidx]
pid = int(" ".join(args).strip())
status = is_running(pid)
if status:
if opts.message:
msg = opts.message
else:
get_origcmd = "ps -p {0} -o cmd h".format(pid)
msg = check_output(shlex.split(get_origcmd)).strip()
while is_running(pid):
sleep(opts.interval)
else:
logging.debug("Process with PID {0} does not exist".format(pid))
sys.exit()
if opts.notify:
notifycmd = ["[completed] {0}: `{1}`".format(gethostname(), msg)]
if opts.notify != "email":
notifycmd.append("--method={0}".format("push"))
notifycmd.append("--api={0}".format(opts.notify))
else:
notifycmd.append('--email={0}'.format(opts.email))
notify(notifycmd)
if cmd is not None:
bg = False if opts.grid else True
sh(cmd, grid=opts.grid, background=bg)
def getpath(cmd, name=None, url=None, cfg="~/.jcvirc", warn="exit"):
"""
Get install locations of common binaries
First, check ~/.jcvirc file to get the full path
If not present, ask on the console and store
"""
import ConfigParser
p = which(cmd) # if in PATH, just returns it
if p:
return p
PATH = "Path"
config = ConfigParser.RawConfigParser()
cfg = op.expanduser(cfg)
changed = False
if op.exists(cfg):
config.read(cfg)
assert name is not None, "Need a program name"
try:
fullpath = config.get(PATH, name)
except ConfigParser.NoSectionError:
config.add_section(PATH)
changed = True
except:
pass
try:
fullpath = config.get(PATH, name)
except ConfigParser.NoOptionError:
msg = "=== Configure path for {0} ===\n".format(name, cfg)
if url:
msg += "URL: {0}\n".format(url)
msg += "[Directory that contains `{0}`]: ".format(cmd)
fullpath = raw_input(msg).strip()
config.set(PATH, name, fullpath)
changed = True
path = op.join(op.expanduser(fullpath), cmd)
try:
assert is_exe(path), \
"***ERROR: Cannot execute binary `{0}`. ".format(path)
except AssertionError, e:
if warn == "exit":
sys.exit("{0!s}Please verify and rerun.".format(e))
elif warn == "warn":
logging.warning("{0!s}Some functions may not work.***".format(e))
if changed:
configfile = open(cfg, "w")
config.write(configfile)
logging.debug("Configuration written to `{0}`.".format(cfg))
return path
if __name__ == '__main__':
main()
|
sgordon007/jcvi_062915
|
apps/base.py
|
Python
|
bsd-2-clause
| 53,020
|
[
"BWA",
"Bowtie"
] |
c4a68f410e696153233b0bd9801443ef3f27b83f4ea8f1121b0fdc10d77e7ef2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Extract fragments for molecules in given SDF files and save then into JSON.
Path and circular fragments use different indexing method, so they may collide.
Usage:
python extract_fragments.py
-i {input file or directory with input files}
-o {path to output}
-f {optional, comma separated list of fragment types to extract}
-t {type of input files, 'sdf', 'smi'. Default is 'sdf'}
--kekule {generated kekule form of SMILES for fragments}
--isomeric {put stereochemistry information into fragments SMILES}
Fragments type:
- tt.{SIZE}
- ecfp.{SIZE}
where {SIZE} should be replaced by required fragment size. Usage example:
tt.3,ecfp.2
default value:
tt.3
The Kekule SMILES form has no aromatic bonds, so using the --kekule option
may reduce the number of generated unique fragments.
This file can also be imported as a Python module. In that case please
use the extract_fragments method.
"""
import os
import argparse
import logging
import json
import rdkit
import rdkit.Chem
from rdkit.Chem import AllChem
import rdkit.Chem.AtomPairs.Utils
__author__ = 'Petr Škoda'
__license__ = 'X11'
__email__ = 'skoda@ksi.mff.cuni.cz'
# region Path fragments
atom_code = {
'bits': {
'type': 4,
'pi': 2,
'branch': 4,
'total': 10
}
}
def get_atom_code(atom, branch_subtract):
# Constants;
num_type_bits = atom_code['bits']['type']
num_pi_bits = atom_code['bits']['pi']
num_branch_bits = atom_code['bits']['branch']
# code = typeIdx | numPiElectrons | numBranches
max_num_branches = (1 << num_branch_bits) - 1
max_num_pi = (1 << num_pi_bits) - 1
    # The original publication uses:
    # [5, 6, 7, 8, 9, 14, 15, 16, 17, 33, 34, 35, 53]
    # RDKit uses the list below; a trailing zero is appended because the
    # array needs 16 elements when atom_code.bits.type equals 4.
    atom_number_types = [5, 6, 7, 8, 9, 14, 15, 16, 17, 33, 34, 35, 51, 52, 43,
                         0]
    # Number of non-hydrogen neighbours.
if atom.GetDegree() > branch_subtract:
num_branches = atom.GetDegree() - branch_subtract
else:
num_branches = 0
code = num_branches % max_num_branches
# Number of bonding pi-electrons.
n_pi = rdkit.Chem.AtomPairs.Utils.NumPiElectrons(atom) % max_num_pi
code |= n_pi << num_branch_bits
# If atom.getAtomicNum() is in atomNumberTypes then return
# exact match. Otherwise return smallest bigger value.
type_idx = 0
n_types = 1 << num_type_bits;
while type_idx < n_types:
if atom_number_types[type_idx] == atom.GetAtomicNum():
break
elif atom_number_types[type_idx] > atom.GetAtomicNum():
type_idx = n_types
break
else:
type_idx += 1
# Make sure we do not point outside the array.
if type_idx == n_types:
type_idx -= 1
# Atom type.
    code |= type_idx << (num_branch_bits + num_pi_bits)
return code
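# Worked example: a carbon atom (type index 1 in atom_number_types) with
# 2 branches and 1 bonding pi electron packs as
#   (1 << 6) | (1 << 4) | 2 == 82
# since the branch field occupies bits 0-3 and the pi field bits 4-5.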
def score_path(molecule, path, size):
codes = [None] * size
for i in range(size):
if i == 0 or i == (size - 1):
sub = 1
else:
sub = 2
        # We always use this branch as we do not use custom atomCodes.
codes[i] = get_atom_code(molecule.GetAtomWithIdx(path[i]), sub)
    # We scan the vector from both sides; we want to make sure that the
    # beginning is less than or equal to the end, i.e. we "canonize" the
    # code vector:
beg = 0
end = len(codes) - 1
while beg < end:
if codes[beg] > codes[end]:
codes.reverse()
break
elif codes[beg] == codes[end]:
beg += 1
end -= 1
else:
break
# Just add all together.
accum = 0
for i in range(size):
accum |= (codes[i]) << (atom_code['bits']['total'] * i)
return accum
def extract_path_fragments(molecule, size, options):
output = []
pattern = rdkit.Chem.MolFromSmarts('*' + ('~*' * (size - 1)))
for atoms in molecule.GetSubstructMatches(pattern):
smiles = rdkit.Chem.MolFragmentToSmiles(
molecule, atomsToUse=list(atoms),
kekuleSmiles=options['kekule'],
isomericSmiles=options['isomeric'])
output.append({
'smiles': smiles,
'index': score_path(molecule, atoms, size),
'type': 'TT',
'size': size
})
return output
# endregion
# region Circular fragments
def extract_neighbourhood_fragments(molecule, size, options):
"""Extract and return circular fragments.
:param molecule:
:param size:
:param options:
:return:
"""
output = []
info = {}
AllChem.GetMorganFingerprint(molecule, radius=size, bitInfo=info)
for element in info:
for item in info[element]:
# item = [rooted atom, radius]
if item[1] < size:
continue
# assemble fragments into atom
env = rdkit.Chem.FindAtomEnvironmentOfRadiusN(
molecule, item[1], item[0])
atoms = set()
for bidx in env:
atoms.add(molecule.GetBondWithIdx(bidx).GetBeginAtomIdx())
atoms.add(molecule.GetBondWithIdx(bidx).GetEndAtomIdx())
# check if we have some atoms
if len(atoms) > 0:
try:
                    # kekuleSmiles - we may lose some information about
                    # aromatic atoms, but if we do not kekulize we can get
                    # invalid SMILES
smiles = rdkit.Chem.MolFragmentToSmiles(
molecule, atomsToUse=list(atoms), bondsToUse=env,
rootedAtAtom=item[0], kekuleSmiles=options['kekule'],
isomericSmiles=options['isomeric'])
                except Exception:
                    logging.exception('Invalid fragment detected.')
                    logging.info('Molecule: %s', molecule.GetProp('_Name'))
                    logging.info('Atoms: %s', ','.join([str(x) for x in atoms]))
                    # Skip this fragment; otherwise a stale or undefined
                    # `smiles` value would be appended below.
                    continue
output.append({
'smiles': smiles,
'index': element,
'type': 'ECFP',
'size': size
})
return output
# endregion
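# Usage sketch (hedged; the SMILES string and name are illustrative):
def _example_ecfp_fragments():
    molecule = rdkit.Chem.MolFromSmiles('c1ccccc1O')  # phenol
    molecule.SetProp('_Name', 'phenol')
    options = {'kekule': False, 'isomeric': False}
    return extract_neighbourhood_fragments(molecule, 2, options)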
def extract_fragments_from_molecule(molecule, types, options):
"""Return fragments for given molecule.
:param molecule:
:param types: Types of fragments to extract.
:param options
:return:
"""
output = []
for item in types:
if item['name'] == 'tt':
output.extend(extract_path_fragments(
molecule, item['size'], options))
elif item['name'] == 'ecfp':
output.extend(extract_neighbourhood_fragments(
molecule, item['size'], options))
return output
def _read_configuration():
"""Get and return application settings.
:return:
"""
parser = argparse.ArgumentParser(
description='Extract molecular fragments. '
'See file header for more details.')
parser.add_argument('-i', type=str, dest='input', required=True)
parser.add_argument('-o', type=str, dest='output', required=True)
parser.add_argument('-f', type=str, dest='fragments', required=False)
parser.add_argument('-t', type=str, dest='input_type', default='sdf')
parser.add_argument('--recursive', dest='recursive', action='store_true',
required=False)
parser.add_argument('--kekule', dest='kekule',
action='store_true', required=False)
parser.add_argument('--isomeric', dest='isomeric',
action='store_true', required=False)
    configuration = vars(parser.parse_args())
if 'fragments' not in configuration or configuration['fragments'] is None:
configuration['fragments'] = 'tt.3'
# Parse fragment types.
parsed_types = []
for item in configuration['fragments'].split(','):
item_split = item.split('.')
        if len(item_split) != 2:
logging.error('Invalid fragment type: %s', item)
logging.info(' Expected format {TYPE}.{SIZE}')
exit(1)
parsed_types.append({
'name': item_split[0],
'size': int(item_split[1])
})
configuration['fragments'] = parsed_types
configuration['input_type'] = configuration['input_type'].lower()
return configuration
def load_sdf(path):
"""Generate molecules from SDF file.
:param path:
:param types:
"""
logging.info('Loading (SDF): %s' % path)
for molecule in rdkit.Chem.SDMolSupplier(path):
if molecule is None:
logging.error('Invalid molecule detected.')
continue
yield molecule
def load_smi(path):
"""Generate molecules from SMI file.
:param path:
:return:
"""
logging.info('Loading (SMI): %s' % path)
with open(path, 'r') as stream:
for line in stream:
line = line.strip()
molecule = rdkit.Chem.MolFromSmiles(line)
if molecule is None:
logging.error('Invalid molecule detected.')
continue
            # Molecules created from SMILES do not have a name,
            # so we use the SMILES string as the name.
molecule.SetProp('_Name', line)
yield molecule
def recursive_scan_for_input(path, recursive, extension):
"""Perform recursive scan for input files.
:param path:
:param recursive
:param extension
:return:
"""
result = []
for file_name in os.listdir(path):
file_path = path + '/' + file_name
if os.path.isdir(file_path):
if recursive:
result.extend(recursive_scan_for_input(
file_path, recursive, extension))
elif os.path.isfile(file_path) \
and file_name.lower().endswith(extension):
result.append(file_path)
return result
def append_object_to_json(output_stream, item, holder):
"""Write given molecule as a JSON into stream.
Optionally put separator before the record based on 'holder'.
:param output_stream:
:param item: Item to append to JSON file.
:param holder: Object shared by all calls of this method on the same stream.
:return:
"""
if holder['first']:
holder['first'] = False
else:
output_stream.write(',')
json.dump(item, output_stream)
def create_parent_directory(path):
"""Create directory if it does not exists.
:param path:
:return:
"""
dir_name = os.path.dirname(path)
    if dir_name and not os.path.exists(dir_name):
os.makedirs(dir_name)
_load_functions = {
'sdf': load_sdf,
'smi': load_smi
}
def extract_fragments(input_files, input_type, output_file, extraction_options):
"""Extract fragments from molecules and write them to output JSON file.
The extraction_options['fragments'] must be a list with objects describing
fragments to extract, see _read_configuration for more details.
:param input_files: List of files with molecules.
:param input_type: Type of input see _load_functions property.
:param output_file: Path to output JSON file.
:param extraction_options: See usage in _main for more information.
:return: Object with summary about computation.
"""
    # append_object_to_json needs some state shared across calls.
holder = {'first': True}
# Count some statistics.
total_fragments = 0
#
create_parent_directory(output_file)
with open(output_file, 'w') as output_stream:
output_stream.write('[')
for path in input_files:
for molecule in _load_functions[input_type](path):
item = {
'name': molecule.GetProp('_Name'),
'smiles': rdkit.Chem.MolToSmiles(molecule),
'fragments': extract_fragments_from_molecule(
molecule, extraction_options['fragments'],
extraction_options)
}
total_fragments += len(item['fragments'])
# Append to output.
append_object_to_json(output_stream, item, holder)
output_stream.write(']')
    # Log and return summary.
logging.info('Report')
logging.info('\tfragments total: %d', total_fragments)
return {
'total_fragments': total_fragments
}
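# Usage sketch (hedged; the paths are hypothetical):
def _example_extract():
    options = {'kekule': False, 'isomeric': False,
               'fragments': [{'name': 'ecfp', 'size': 2}]}
    return extract_fragments(['molecules.sdf'], 'sdf',
                             'fragments.json', options)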
def _main():
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s [%(levelname)s] %(module)s - %(message)s',
datefmt='%H:%M:%S')
configuration = _read_configuration()
# Read files to load.
if os.path.isdir(configuration['input']):
input_files = recursive_scan_for_input(configuration['input'],
configuration['recursive'],
configuration['input_type'])
else:
input_files = [configuration['input']]
# Prepare configuration for the extraction.
extraction_options = {
'kekule': configuration['kekule'],
'isomeric': configuration['isomeric'],
'fragments': configuration['fragments']
}
#
extract_fragments(input_files, configuration['input_type'],
configuration['output'], extraction_options)
if __name__ == '__main__':
_main()
|
davidhoksza/bayescreen
|
biochem_tools/extract_fragments.py
|
Python
|
mit
| 13,552
|
[
"RDKit"
] |
7102294c5025b93d1a096627167a8273b07311cee7bb6dd41763025656c9143b
|
# Modified by FrancoisMalan 2011-12-06 so that it can handle an input with larger
# extent than its source. Changes constitute the padder module and pad_source method
import gen_utils
from module_base import ModuleBase
from module_mixins import NoConfigModuleMixin
import module_utils
import vtk
class probeFilter(NoConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
# initialise our base class
ModuleBase.__init__(self, module_manager)
        # vtkProbeFilter insists on a dummy input: if we pass None
        # instead, it bitterly complains when we do a GetOutput() (it
        # needs the input to know the type of the output) - and
        # GetPolyDataOutput() also doesn't work.
# NB: this does mean that our probeFilter NEEDS a PolyData as
# probe geometry!
ss = vtk.vtkSphereSource()
ss.SetRadius(0)
self._dummyInput = ss.GetOutput()
        # We also (sometimes, see below) need the "padder" to grow the
        # image extent enough to satisfy the probe filter. There is no
        # apparent logical reason, but it throws an exception if we don't.
self._padder = vtk.vtkImageConstantPad()
self._source = None
self._input = None
self._probeFilter = vtk.vtkProbeFilter()
self._probeFilter.SetInput(self._dummyInput)
NoConfigModuleMixin.__init__(
self,
{'Module (self)' : self,
'vtkProbeFilter' : self._probeFilter})
module_utils.setup_vtk_object_progress(self, self._probeFilter,
'Mapping source on input')
self.sync_module_logic_with_config()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# this will take care of all display thingies
NoConfigModuleMixin.close(self)
# get rid of our reference
del self._probeFilter
del self._dummyInput
del self._padder
del self._source
del self._input
def get_input_descriptions(self):
return ('Input', 'Source')
def set_input(self, idx, inputStream):
if idx == 0:
self._input = inputStream
else:
self._source = inputStream
def get_output_descriptions(self):
return ('Input with mapped source values',)
def get_output(self, idx):
return self._probeFilter.GetOutput()
def logic_to_config(self):
pass
def config_to_logic(self):
pass
def view_to_config(self):
pass
def config_to_view(self):
pass
def pad_source(self):
input_extent = self._input.GetExtent()
source_extent = self._source.GetExtent()
if (input_extent[0] < source_extent[0]) or (input_extent[2] < source_extent[2]) or (input_extent[4] < source_extent[4]):
            raise Exception('Input extent starts at a lower index than the source extent; both are assumed to start at zero.')
elif (input_extent[1] > source_extent[1]) or (input_extent[3] > source_extent[3]) or (input_extent[5] > source_extent[5]):
extX = max(input_extent[1], source_extent[1])
extY = max(input_extent[3], source_extent[3])
extZ = max(input_extent[5], source_extent[5])
padX = extX - source_extent[1]
padY = extY - source_extent[3]
padZ = extZ - source_extent[5]
            print "Zero-padding source by (%d, %d, %d) voxels so its extent matches/exceeds the input's extent." % (padX, padY, padZ)
self._padder.SetInput(self._source)
self._padder.SetConstant(0.0)
self._padder.SetOutputWholeExtent(source_extent[0],extX,source_extent[2],extY,source_extent[4],extZ)
self._padder.Update()
self._source.DeepCopy(self._padder.GetOutput())
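    # A worked example of the padding above: an input extent of
    # (0,99, 0,99, 0,49) probed against a source extent of
    # (0,79, 0,79, 0,49) pads the source by (20, 20, 0) voxels, so the
    # source's whole extent becomes (0,99, 0,99, 0,49).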
def execute_module(self):
if self._source.IsA('vtkImageData') and self._input.IsA('vtkImageData'):
self.pad_source()
self._probeFilter.SetInput(self._input)
self._probeFilter.SetSource(self._source)
self._probeFilter.Update()
|
nagyistoce/devide
|
modules/filters/probeFilter.py
|
Python
|
bsd-3-clause
| 4,578
|
[
"VTK"
] |
92b35d2357c0875073216146deb3799cd342907b1f66ee35c016b8acb32ddbd3
|
from __future__ import print_function
from __future__ import absolute_import
import sys
from metatlas import metatlas_objects as metob
from metatlas.io import h5_query as h5q
import qgrid
from metatlas.helpers import metatlas_get_data_helper_fun as mgd
from matplotlib import pyplot as plt
from matplotlib import colors as matcolors
from matplotlib.widgets import RadioButtons, CheckButtons
import pandas as pd
import os
import tables
import pickle
import h5py
import dill
import numpy as np
from requests import Session
import os.path
import glob as glob
import errno
import json
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
# from rdkit.Chem.rdMolDescriptors import ExactMolWt
from rdkit.Chem import Descriptors
from rdkit.Chem import rdMolDescriptors
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem.Draw import IPythonConsole
from IPython.display import SVG,display
from PIL import Image
import six
from six.moves import range
from six.moves import zip
def import_network(network_file='network_v1p0.cyjs'):
with open(network_file) as data_file:
data = json.load(data_file)
print(list(data['elements']['nodes'][0]['data'].keys()))
network = {}
network['data'] = data
network['x'] = []
network['y'] = []
network['node_id'] = []
network['node_name'] = []
network['node_inchi_key'] = []
for n in data['elements']['nodes']:
network['x'].append(n['position']['x'])
network['y'].append(n['position']['y'])
network['node_id'].append(float(n['data']['SUID']))
network['node_name'].append(n['data']['compound_name'])
network['node_inchi_key'].append(n['data']['inchi_key'])
network['x'] = np.asarray(network['x'])
network['y'] = np.asarray(network['y'])
network['node_id'] = np.asarray(network['node_id'])
return network
def merge_sheets(my_sheets,score_cutoff=0.1,mz_cutoff=0.1):
dfs = []
for sheet in my_sheets:
df = pd.read_csv(sheet,index_col=False)
df['source_file'] = os.path.basename(sheet)
df = filter_hits(df)
df = df.sort_values(by='score').drop_duplicates(subset=['inchi_key'], keep='last')
dfs.append(df)
df_all_files = pd.concat(dfs)
#print 'making key'
#df_all_files['inchi_key'] = df_all_files.inchi.apply(lambda x: Chem.InchiToInchiKey(str(x)))
print(list(df_all_files.keys()))
#df_all_files.set_index(['inchi','inchi_key','metatlas name'],inplace=True)
df_all_files.set_index(['inchi_key','mass'],inplace=True)
return df_all_files
def read_pacolus_results(pactolus_file,min_score=0.0):
"""
This is a new 20161213 version of the pactolus file readers.
The hope is to circumvent all the copies from the original hdf5 file.
Input:
pactolus_file: the full path to a conforming pactolus search result
Output:
scan_df:
tree_df:
"""
with h5py.File(pactolus_file,'r') as fid:
#read score_matrix, convert all by all matrix to lists of scores
idx = list(range(fid['score_matrix'].shape[0]))
d = {'retention time':fid['scan_metadata']['peak_rt'][idx],
'precursor intensity':fid['scan_metadata']['peak_intensity'][idx],
'precursor mz':fid['scan_metadata']['peak_mz'][idx],
'polarity': fid['scan_metadata']['polarity'][idx],
'index': idx}
scan_df = pd.DataFrame(d)
scan_df['filename'] = pactolus_file
m = fid['score_matrix'][:]
hits = []
for mm in m:
idx = np.where(mm>min_score)[0]
hits.append(sorted([(mm[i],i) for i in idx])[::-1])
df = pd.DataFrame({'scores':hits})
b_flat = pd.DataFrame([[i, x[0], x[1]]
for i, y in six.iteritems(df.scores.apply(list))
for x in y], columns=['index','score','compound']).set_index('index')
scan_df = scan_df.merge(b_flat, how = 'outer',left_index=True, right_index=True)
#get a list of True/False if any hits for a compound:
f = np.any(m.T>min_score,axis=1)
#only do this for ones that get a hit
idx = np.where(f)[0]#range(fid['score_matrix'].shape[1])
lookup = fid['tree_file_lookup_table'][:]
d = {'filename': fid['tree_file_lookup_table']['filename'][idx],
'ms1_mass': fid['tree_file_lookup_table']['ms1_mass'][idx],
'inchi': fid['tree_file_lookup_table']['inchi'][idx],
'permanent_charge': fid['tree_file_lookup_table']['permanent_charge'][idx],
'index': idx}
# get inchikey like this:
d['inchi_key'] = [os.path.basename(a).split('.')[0].split('_')[-1] for a in fid['tree_file_lookup_table']['filename'][idx]]
tree_df = pd.DataFrame(d)
# tree_df.set_index('index',drop=True,inplace=True)
return scan_df,tree_df
def filter_hits(df,score_cutoff=0.1,mz_cutoff=0.1):
df = df.drop( df[df['score'] < score_cutoff].index )
mass = df['mass']
mz = df['precursor mz']
adduct = 1.007276
if len(mz)>0:
df = df.drop( df[(abs( mass + adduct - mz ) > mz_cutoff) & (df['polarity'] == 1)].index )
df = df.drop( df[(abs( mass - adduct - mz ) > mz_cutoff) & (df['polarity'] == 0)].index )
return df
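# A worked numeric example of the adduct logic above (proton mass
# 1.007276 Da): a neutral mass of 180.063388 (glucose) observed as [M+H]+
# gives a precursor mz of 181.070664, so abs(mass + 1.007276 - mz) = 0 and
# the positive-mode hit survives an mz_cutoff of 0.1.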
def join_pactolus_tables(my_sheets,score_cutoff=0.001,mz_cutoff=0.02):#,use_field='precursor intensity'):
output_df = pd.DataFrame()
#rewrite to have a list in each cell: do the max command last
#pull group info from metob
for sheet in my_sheets:
df = pd.read_excel(sheet)
print(df.shape)
df = df.drop( df[df['score'] < score_cutoff].index )
mass = df['mass'][df['polarity'] == 1]
mz = df['precursor mz'][df['polarity'] == 1]
adduct = 1.007276
if len(mz)>0:
df = df.drop( df[abs( mass + adduct - mz ) > mz_cutoff].index )
mass = df['mass'][df['polarity'] == 0]
mz = df['precursor mz'][df['polarity'] == 0]
adduct = 1.007276
if len(mz)>0:
df = df.drop( df[abs( mass - adduct - mz ) > mz_cutoff].index )
print(df.shape)
        for index, row in df.iterrows():
if type(row['name']) != float:
try:
output_df.index.tolist().index(row['name']) #see if the compound has been added before
if pd.notnull(output_df.loc[row['name'],os.path.basename(sheet)]):
if row['score'] > output_df.loc[row['name'],os.path.basename(sheet)]:
output_df.loc[row['name'],os.path.basename(sheet)] = row['score']
except:
output_df.loc[row['name'],os.path.basename(sheet)] = row['score']
return output_df
def is_pactolus_result_file(output_file):
my_keys = list(output_file.keys())
counter = 0
print('checking file',output_file)
for k in my_keys:
if 'match_matrix' not in k:
counter = counter +1
try:
score_matrix = output_file['score_matrix'][:]
num = score_matrix.shape[0]
except:
num = 0
    if counter > 3:
return True
else:
return False
def broadcast_hits_to_expand_dataframe(df):
df.loc[:,'score'] = df.score.apply(np.atleast_1d)
df.loc[:,'inchi_key'] = df.inchi_key.apply(np.atleast_1d)
df.loc[:,'mass'] = df.mass.apply(np.atleast_1d)
all_scores = np.hstack(df.score)
all_inchi_key = np.hstack(df.inchi_key)
all_mass = np.hstack(df.mass)
all_polarity = np.hstack([[n]*len(l) for n, l in df[['polarity', 'score']].values])
all_precursor_intensity = np.hstack([[n]*len(l) for n, l in df[['precursor intensity', 'score']].values])
all_precursor_mz = np.hstack([[n]*len(l) for n, l in df[['precursor mz', 'score']].values])
all_retention_time = np.hstack([[n]*len(l) for n, l in df[['retention time', 'score']].values])
df2 = pd.DataFrame({'polarity':all_polarity,'precursor intensity':all_precursor_intensity,'precursor mz':all_precursor_mz,'retention time':all_retention_time,'score':all_scores,'inchi_key':all_inchi_key, 'mass':all_mass})
return df2
def make_output_tables(target_dir, score_cutoff=0.0, to_csv=True, overwrite=True):
"""
"""
# df_lookup = pd.DataFrame({'inchi':neutral_inchi,'metatlas name':metatlas_name,'mass':neutral_mass})
files = glob.glob(os.path.join(target_dir,'*.h5'))
files = [f for f in files if os.path.basename(f).startswith('pactolus_results')]
scan_dfs = []
compound_dfs = []
all_dfs = []
# my_file = '/project/projectdirs/openmsi/projects/ben_run_pactolus/rccoates/pactolus_results_20151215_RCC_C18_ACN_Phz_POS_MSMS_WCS417_PCARhizo_S2.h5'
for my_file in files:
        do_process = False  # only operate on results of valid files without having to check twice
outfile = os.path.join(target_dir,'%s.csv'%os.path.basename(my_file).split('.')[0])
if (not os.path.isfile(outfile)) or (overwrite):
            with h5py.File(my_file, 'r') as output_file:
if is_pactolus_result_file(output_file):
score_matrix = output_file['score_matrix'][:]
#if 'inchi' in output_file['compound_metadata'].keys():
# inchi = output_file['compound_metadata']['inchi'][:]
#else:
# print score_matrix.shape
inchi_key = np.asarray([os.path.basename(a[0]).split('.')[0].split('_')[-1] for a in output_file['tree_file_lookup_table']]) #np.asarray(range(score_matrix.shape[1]))
mass = np.asarray([a[1] for a in output_file['tree_file_lookup_table']]) #np.asarray(range(score_matrix.shape[1]))
# idx = np.argwhere(score_matrix > score_cutoff)
# d = {'retention time': [output_file['scan_metadata']['peak_rt'][i] for i in idx[:,0]],
# 'precursor intensity': [output_file['scan_metadata']['peak_intensity'][i] for i in idx[:,0]],
# 'precursor mz': [output_file['scan_metadata']['peak_mz'][i] for i in idx[:,0]],
# 'polarity': [output_file['scan_metadata']['polarity'][i] for i in idx[:,0]]}
d = {'retention time': output_file['scan_metadata']['peak_rt'][:],
'precursor intensity': output_file['scan_metadata']['peak_intensity'][:],
'precursor mz': output_file['scan_metadata']['peak_mz'][:],
'polarity': output_file['scan_metadata']['polarity'][:]}
d['score'] = []
d['inchi_key'] = []
d['mass'] = []
for i in range(score_matrix.shape[0]):
idx = np.argwhere(score_matrix[i,:] > score_cutoff).flatten()
d['score'].append(score_matrix[i,idx])
d['inchi_key'].append(inchi_key[idx])
d['mass'].append(mass[idx])
df = pd.DataFrame(d)
df = broadcast_hits_to_expand_dataframe(df)
#df = pd.merge(df,df_lookup,on='inchi_key',how='outer')#ignore_index=True,axis=0)
do_process = True
if do_process:
print(os.path.basename(outfile))
if df.shape[0]>0:
all_dfs.append(df)
if to_csv:
df.to_csv(outfile,index=False)
if all_dfs:
return all_dfs
def get_neutral_inchi_and_name(use_pickle=True):
if use_pickle:
with open('metatlas_name.pickle', 'rb') as handle:
metatlas_name = pickle.load(handle)
with open('neutral_inchi.pickle', 'rb') as handle:
neutral_inchi = pickle.load(handle)
with open('neutral_mass.pickle', 'rb') as handle:
neutral_mass = pickle.load(handle)
else:
c = metob.retrieve('Compound',inchi='InChI=%',username='*')
neutral_inchi = []
metatlas_name = []
neutral_mass = []
for cc in c:
myMol = Chem.MolFromInchi(cc.inchi.encode('utf-8'))
myMol, neutralised = NeutraliseCharges(myMol)
neutral_mass.append(Chem.Descriptors.ExactMolWt(myMol))
inchi = Chem.MolToInchi(myMol)
neutral_inchi.append( inchi )
metatlas_name.append(cc.name)
with open('metatlas_name.pickle', 'wb') as handle:
pickle.dump(metatlas_name,handle)
with open('neutral_inchi.pickle', 'wb') as handle:
pickle.dump(neutral_inchi,handle)
        with open('neutral_inchi_key.pickle', 'wb') as handle:
            pickle.dump([Chem.InchiToInchiKey(x) for x in neutral_inchi], handle)
with open('neutral_mass.pickle', 'wb') as handle:
pickle.dump(neutral_mass,handle)
return metatlas_name,neutral_inchi, neutral_mass
""" contribution from Hans de Winter """
def _InitialiseNeutralisationReactions():
patts= (
# Imidazoles
('[n+;H]','n'),
# Amines
('[N+;!H0]','N'),
# Carboxylic acids and alcohols
('[$([O-]);!$([O-][#7])]','O'),
# Thiols
('[S-;X1]','S'),
# Sulfonamides
('[$([N-;X2]S(=O)=O)]','N'),
# Enamines
('[$([N-;X2][C,N]=C)]','N'),
# Tetrazoles
('[n-]','[nH]'),
# Sulfoxides
('[$([S-]=O)]','S'),
# Amides
('[$([N-]C=O)]','N'),
)
return [(Chem.MolFromSmarts(x),Chem.MolFromSmiles(y,False)) for x,y in patts]
_reactions=None
def NeutraliseCharges(mol, reactions=None):
global _reactions
if reactions is None:
if _reactions is None:
_reactions=_InitialiseNeutralisationReactions()
reactions=_reactions
# mol = Chem.MolFromSmiles(smiles)
replaced = False
for i,(reactant, product) in enumerate(reactions):
while mol.HasSubstructMatch(reactant):
replaced = True
rms = AllChem.ReplaceSubstructs(mol, reactant, product)
rms_smiles = Chem.MolToSmiles(rms[0])
mol = Chem.MolFromSmiles(rms_smiles)
if replaced:
return (mol, True) #Chem.MolToSmiles(mol,True)
else:
return (mol, False)
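# A minimal usage sketch of the neutralisation helpers above; acetate is a
# hypothetical example input, and the expected result is the neutral acid.
def _example_neutralise_charges():
    mol = Chem.MolFromSmiles('CC(=O)[O-]')  # acetate anion
    neutral_mol, changed = NeutraliseCharges(mol)
    # changed is True and Chem.MolToSmiles(neutral_mol) gives 'CC(=O)O'
    return Chem.MolToSmiles(neutral_mol), changed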
def check_for_failed_jobs(target_dir):
err_files = [os.path.splitext(os.path.basename(f))[0] for f in glob.glob(os.path.join(target_dir,'*.err'))]
sbatch_files_full_path = [f for f in glob.glob(os.path.join(target_dir,'*.sbatch'))]
sbatch_files = [os.path.splitext(os.path.basename(f))[0].split('.')[0] for f in glob.glob(os.path.join(target_dir,'*.sbatch'))]
hdf5_files = [os.path.splitext(os.path.basename(f))[0].replace('pactolus_results_','') for f in glob.glob(os.path.join(target_dir,'*.h5'))]
# print hdf5_files
# print len(err_files),len(sbatch_files),len(hdf5_files)
failed_jobs = list(set(sbatch_files) - set(hdf5_files))
if not failed_jobs:
print("no failed jobs exist")
else:
print("failed jobs:")
for f in failed_jobs:
print(f)
for j in failed_jobs:
print("sbatch",sbatch_files_full_path[index_containing_substring(sbatch_files_full_path,j)])
def index_containing_substring(the_list, substring):
for i, s in enumerate(the_list):
if substring in s:
return i
def check_job_status(do_print=True,computer = 'edison'):
my_session = Session()
import getpass
usr = getpass.getuser()
pwd = getpass.getpass("enter password for user %s: " % usr)
r = my_session.post("https://newt.nersc.gov/newt/auth", {"username": usr, "password": pwd})
r = my_session.get("https://newt.nersc.gov/newt/queue/%s/?user=%s"%(computer,usr))
my_jobs = r.json()
# print my_jobs
if do_print:
print("You have",len(my_jobs),"jobs running or in the queue to run")
for i,j in enumerate(my_jobs):
print(i,'\t',j['status'], j['name'],j['memory'],j['nodes'], j['procs'], j['timeuse'])
return my_jobs
def create_pactolus_msms_data_container(myfiles,target_directory,min_intensity,min_rt = 1,max_rt = 22,make_container=True):
# peak_arrayindex: This is a 2D array with the shape (num_spectra, 3).
# The dataset contains an index that tells us:
# i) the x location of each spectrum [:,0],
# ii) the y location of each spectrum [:,1], and
# iii) and the index where the spectrum starts in the peak_mz and peak_value array.
# In item 1/2 I first fill the array with [0,i,0] values to define unique x/y locations
# for each spectrum and in the second line I then create the last column with start index
# of the spectra which is just the cumulative-sum of the length of the spectra.
# when you create the start stop locations you will need to:
# prepend [0] to the cummulative sums (the first spectrum starts at 0 not its length).
# remove the last entry to make sure the array has the correct length
# That is why I did the following:
# np.cumsum([0] + [ ri['m/z array'].shape[0] for ri in good_list ])[:-1]
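    # For example, three spectra of lengths [3, 5, 2] give
    # np.cumsum([0] + [3, 5, 2])[:-1] -> [0, 3, 8], i.e. spectrum i starts
    # at offset [0, 3, 8][i] within peak_mz / peak_value.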
if not os.path.exists(target_directory):
try:
os.makedirs(target_directory)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
for myfile in myfiles:
finfo = h5q.get_info(myfile)
with tables.open_file(myfile) as fid:
num_pos_data = finfo['ms1_pos']['nrows'] + finfo['ms2_pos']['nrows']
num_neg_data = finfo['ms1_neg']['nrows'] + finfo['ms2_neg']['nrows']
do_polarity = []
if num_pos_data > 0:
do_polarity.append(1)
if num_neg_data > 0:
do_polarity.append(0)
scan_polarity = []
for my_polarity in do_polarity:
container_file = os.path.join(target_directory,'container_file_polarity_%d.h5'%(my_polarity))
if not os.path.isfile(container_file):
make_container=True
if make_container:
data = h5q.get_data(fid,ms_level=2,polarity=my_polarity,min_rt = min_rt,max_rt=max_rt,min_precursor_intensity=min_intensity)#TODO: filter by intensity,)
prt,pmz,pintensity = mgd.get_unique_scan_data(data)
for i in range(len(pintensity)):
scan_polarity.append(my_polarity)
msms_data = mgd.organize_msms_scan_data(data,prt,pmz,pintensity)
fpl = {}
# peak_mz : This is a 1D arrays with m/z values for all the spectra stored as spectrum_1, spectrum_2 etc.
fpl['peak_mz'] = np.concatenate(tuple( s[:,0] for s in msms_data['spectra']), axis = -1)
# peak_value: This is a 1D arrays with the intensity values corresponding to the m/z values stored in peak_mz
fpl['peak_value'] = np.concatenate(tuple( s[:,1] for s in msms_data['spectra']), axis = -1)
fpl['precursor_mz'] = np.asarray(msms_data['precursor_mz'])
fpl['peak_arrayindex'] = np.asarray([[0, i, 0] for i,rt in enumerate(prt)])
fpl['peak_arrayindex'][:,2] = np.cumsum([0] + [ s[:,0].shape[0] for s in msms_data['spectra'] ])[:-1]
with h5py.File(container_file,'a') as output_file:
group_name = os.path.basename(myfile)
if group_name in list(output_file.keys()):
output_file.__delitem__(group_name)
# if group_name not in output_file.keys():
output_group = output_file.create_group(group_name)
# else:
# output_group = output_file[group_name]
for key, value in six.iteritems(fpl):
output_group[key] = value
experiment_group = output_group.create_group('experiment_metadata')
experiment_group['filename'] = group_name
scan_group = output_group.create_group('scan_metadata')
scan_group['peak_mz'] = np.asarray(msms_data['precursor_mz'])
scan_group['peak_rt'] = np.asarray(msms_data['precursor_rt'])
scan_group['peak_intensity'] = np.asarray(msms_data['precursor_intensity'])
scan_group['polarity'] = np.asarray(scan_polarity) # 1 for pos and 0 for neg
write_pactolus_job_file(myfile,container_file,my_polarity)
return container_file
def write_pactolus_job_file(myfile,
container_file,
my_polarity,
new_tree_file = '/project/projectdirs/metatlas/projects/clean_pactolus_trees/tree_lookup.npy',
base_script_name = '/project/projectdirs/openmsi/projects/ben_run_pactolus/do_not_modify_template_pactolus_script.sh'):
#regexp the fpl_data path to create lots of jobs:
# /project/projectdirs/openmsi/projects/ben_run_pactolus/Pactolus_NERSC_BASTet_C18_POS_Archetypes.h5:/20150510_C18_POS_MSMS_HA13-1.h5
# regexp the outfile test_pactolus_72_2_realtime.h5
# regexp the log files
#SBATCH --output=job_pactolus_realtime1_out.txt
#SBATCH --error=job_pactolus_realtime1_err.txt
read_pat = '/project/projectdirs/openmsi/projects/ben_run_pactolus/Pactolus_NERSC_BASTet_C18_POS_Archetypes.h5:/20150510_C18_POS_MSMS_HA13-1.h5'
save_pat = 'test_pactolus_72_2_realtime.h5'
out_pat = 'job_pactolus_realtime1_out.txt'
err_pat = 'job_pactolus_realtime1_err.txt'
tmp_pat = 'placeholder_for_temp_path'
old_tree_file = '/project/projectdirs/openmsi/projects/ben_trees/metacyc_max_depth_5.npy'
pos_neutralizations = '[-1.00727646677,-2.0151015067699998,0.00054857990946]'
neg_neutralizations = '[1.00727646677,2.0151015067699998,-0.00054857990946]'
job_pat = 'job_pactolus_'
with open(base_script_name,'r') as fid:
base_script_text = fid.read()
# print base_script_text
group_name = os.path.basename(myfile)
no_extension = group_name.split('.')[0]
##### CHANGE THIS LINE ######
new_read_pat = '"%s:%s"'%(container_file,group_name)
#############################
new_save_pat = '"%s"'%os.path.join(os.path.dirname(container_file),'pactolus_results_' + group_name)
new_out_pat = '"%s"'%os.path.join(os.path.dirname(container_file),no_extension + '.out')
new_err_pat = '"%s"'%os.path.join(os.path.dirname(container_file),no_extension + '.err')
new_tmp_pat = '"%s"'%os.path.join(os.path.dirname(container_file),'tmp')
new_job_pat = '"%s"'%no_extension
replace_text = [(read_pat,new_read_pat),
(save_pat,new_save_pat),
(out_pat,new_out_pat),
(err_pat,new_err_pat),
(job_pat,new_job_pat),
(old_tree_file,new_tree_file),
(tmp_pat,new_tmp_pat)]
temp_text = base_script_text
for rt in replace_text:
temp_text = temp_text.replace(rt[0],rt[1])
# temp_text = temp_text.replace('#SBATCH --time=00:15:00','#SBATCH --time=00:45:00')
##### CHANGE THIS LINE ######
if my_polarity == 0:
temp_text = temp_text.replace(pos_neutralizations,neg_neutralizations)
#############################
    #store the job name in a separate script file so it can be submitted to the queue
#each job will be called <no_extension>.sbatch
#jobfile will be a list of squeue <no_extension.sbatch\n
##### CHANGE THIS LINE ######
new_job_name = '%s/%s_polarity_%d.sbatch'%(os.path.dirname(container_file),os.path.basename(myfile),my_polarity)
#############################
with open(new_job_name,'w') as fid:
fid.write('%s'%temp_text)
def submit_all_jobs(target_directory,computer='edison',usr=None,pwd=None):
import glob
from requests import Session
import getpass
if not usr:
usr = getpass.getuser()
if not pwd:
pwd = getpass.getpass("enter password for user %s: " % usr)
s = Session()
r = s.post("https://newt.nersc.gov/newt/auth", {"username": usr, "password": pwd})
all_files = glob.glob(os.path.join(target_directory,'*.sbatch'))
for a in all_files:
r = s.post("https://newt.nersc.gov/newt/queue/%s/"%computer, {"jobfile": a})
return s
#############################
#############################
# Pactolus Plotter Code
#############################
class FragmentManager:
"""
A Fragment Manager contains methods that make finding and drawing fragments easier.
"""
def __init__(self, data_masses, tree, mass_tol, border_colors):
self.data_masses = data_masses
self.tree = tree
self.mass_tol = mass_tol
self.border_colors = border_colors
# hard coded neut vals
self.neut_vals = [2.0151015067699998,1.00727646677,-0.00054857990946,
-2.0151015067699998,-1.00727646677,0.00054857990946]
def find_matching_neutralized_frags(self):
"""
Returns a list of fragments found for all possible neutralizations.
"""
# map function to subtract items in array by b
shift = np.vectorize(lambda a, b: a - b)
list_frags = []
# 6 different neutralizations
for i in range(len(self.neut_vals)):
peaks = shift(self.data_masses, self.neut_vals[i])
frags = self.find_matching_fragments(peaks, self.tree, self.mass_tol)
list_frags.append(frags[0]) # only care about the first element aka sets of matching fragments
return list_frags
# For now, lifting code from pactolus
def find_matching_fragments(self, data_masses, tree, mass_tol):
"""
Find node sets in a tree whose mass is within mass_tol of a data_mz value
:param data_masses: numpy 1D array, float, *neutralized* m/z values of data from an MS2 or MSn scan
:param tree: numpy structured array as output by FragDag
:param mass_tol: precursor m/z mass tolerance
:return: matching_frag_sets, list of lists; len is same as data_mzs; sublists are idxs to rows of tree that match
:return: unique_matching_frags, numpy 1d array, a flattened numpy version of unique idxs in matching_frag_sets
"""
# start_idx is element for which inserting data_mz directly ahead of it maintains sort order
start_idxs = np.searchsorted(tree['mass_vec'], data_masses-mass_tol)
        # end_idx is element for which inserting data_mz directly after it maintains sort order
        # found by searching the negated reversed list, since np.searchsorted requires increasing order
length = len(tree)
end_idxs = length - np.searchsorted(-tree['mass_vec'][::-1], -(data_masses+mass_tol))
# if the start and end idx is the same, the peak is too far away in mass from the data and will be empty
        matching_frag_sets = [list(range(start, end)) for start, end in zip(start_idxs, end_idxs)]
# flattening the list
unique_matching_frags = np.unique(np.concatenate(matching_frag_sets))
# Returning both the flat index array and the sets of arrays is good:
# matching_frag_sets makes maximizing the MIDAS score easy
# unique_matching_frags makes calculating the plausibility score easy
return matching_frag_sets, unique_matching_frags
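    # A worked example of the window search above: for tree masses
    # [10.0, 20.0, 30.0], a neutralized peak at 20.3 with mass_tol = 0.5
    # gives start_idx = searchsorted([10, 20, 30], 19.8) = 1 and
    # end_idx = 3 - searchsorted([-30, -20, -10], -20.8) = 3 - 1 = 2,
    # so matching_frag_sets contains [1] (only the 20.0 Da fragment).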
def borderize(self, imgs, neut_i):
"""
Given a list of PILs, add a border to all of them.
The border color indicates what neutralization was applied to obtain that data.
Returns the list of new PILs. Does not modify in place.
"""
delta = []
for i in imgs:
if i:
old_im = i
old_size = old_im.size
new_size = (old_size[0] + 6, old_size[1] + 6)
new_im = Image.new("RGB", new_size, color=self.border_colors[neut_i])
                post = ((new_size[0] - old_size[0]) // 2, (new_size[1] - old_size[1]) // 2)
new_im.paste(old_im, post)
delta.append(new_im)
else:
delta.append(False)
return delta
def draw_structure_fragment(self, fragment_idx, myMol_w_Hs):
"""
Modified code from Ben.
Draws a structure fragment and returns an annotated fragment with its depth.
"""
from copy import deepcopy
        depth_of_hit = np.sum(self.tree[fragment_idx]['bond_bool_arr'])
        mol2 = deepcopy(myMol_w_Hs)
        # Set the atoms that are NOT part of the fragment to dummy atoms
        # (atomic number 0) so they can be removed below.
        fragment_atoms = np.where(self.tree[fragment_idx]['atom_bool_arr']==False)[0]
for f in fragment_atoms:
mol2.GetAtomWithIdx(f).SetAtomicNum(0)
# Now remove dummy atoms using a query
mol3 = Chem.DeleteSubstructs(mol2, Chem.MolFromSmarts('[#0]'))
mol3 = Chem.RemoveHs(mol3)
# You get what you are looking for
return self.mol_to_img(mol3, depth_of_hit),depth_of_hit
def mol_to_img(self, mol, depth_of_hit, molSize=(200,120),kekulize=True):
"""
Helper function to draw_structure_fragment.
Returns an image of the mol as a PIL with an annotated depth.
"""
mc = Chem.Mol(mol.ToBinary())
if kekulize:
try:
Chem.Kekulize(mc)
except:
mc = Chem.Mol(mol.ToBinary())
if not mc.GetNumConformers():
rdDepictor.Compute2DCoords(mc)
return Chem.Draw.MolToImage(mc, molSize, kekulize, legend='depth : %d' % depth_of_hit)
class PactolusPlotter():
"""
Links buttons, graphs, and other interactive functions together.
"""
def __init__(self, df, data_loc, index = 0, quantile=True, quantile_param=.85, nlarge = 10):
# internal variables
self.border_colors = [(130, 224, 170), ( 248, 196, 113 ), ( 195, 155, 211 ),
(29, 131, 72), (154, 125, 10), (99, 57, 116)]
self.colors = np.array([[0, 0, 0, 1], [0, 0, 1, 1], [1.,0.,0.,1.]])
self.tree_file = df['filename_y'][index]
# DOES NOT INCLUDE THE DIRECTORY!! Must be supplied, unfortunately.
self.data_file = df['filename_x'][0].replace('pactolus_results_', '')
self.data_loc = data_loc
self.tree = self.get_tree_data()
self.data = self.get_dataset()
self.depth_limit = 3
self.fig = plt.figure(figsize=(12,12))
self.ax = self.fig.add_subplot(1,1,1)
# TO BE FIXED: Generate / user selected info
# This should be the row that we are checking in the pactolus results db
# Should be modular information, along with the tree and data_file
# For now, we'll keep this fixed and have someone else update these values.
self.index = index
self.tol = df['ppm'][self.index]
self.rt = df['retention_time'][self.index]
# get modules
# Spectrum graph with MS2.
self.pact_spectrum = PactolusSpectrum(self.rt, self.tree, self.data, self.colors, self.border_colors,
self.fig, self.ax, self.depth_limit, self.tol)
# The plot takes in if we are using quantile, the quantile threshold
# and the number for nlargest, whichever is applicable.
self.pact_spectrum.plot(quantile, quantile_param, nlarge)
# Text to tell the user about their row data.
data_string = ("Polarity: %d \n"
"Precursor intensity: %.2e \n"
"Precursor m/z: %.5f \n"
"Retention time: %.5f \n"
"Pactolus score: %.5f \n"
"Molecule name: %s") % (df["polarity"][index],
df["precursor intensity"][index],
df["precursor_mz"][index],
self.rt,
df["score"][index],
df["name"][index])
plt.figtext(0.225, 0.85, data_string, bbox=dict(facecolor='white', pad=10.0))
# hard code annotation for text
plt.figtext(0.32, 0.955, "Summary Information", size='large')
# Button to control depth of pactolus hits
self.depth_spot = plt.axes([0.075, 0.75, 0.10, 0.10])
self.depth_spot.set_title('Depth Limit')
self.depth_button = RadioButtons(self.depth_spot, ('3', '4', '5'))
self.depth_button.on_clicked(lambda x: self.radio_update())
self.normalized_colors = self.normalize()
# Buttons to control what neutralizations get shown
init_buttons = (True, False, False, False, False, False)
self.neut_spot = plt.axes([0.65, 0.75, 0.25, 0.20]) # hard coding atm
self.neut_spot.set_title('Neutralizations')
self.neut_buttons = CheckButtons(self.neut_spot,
('Proton w/ H: +2.008', 'Proton: +1.007', 'Electron: -0.0005',
'Proton w/ H: -2.008', 'Proton: -1.007', 'Electron: +0.0005',),
init_buttons)
self.neut_buttons.on_clicked(lambda x: self.check_update())
for line_tup in zip(list(range(len(self.neut_buttons.lines))),self.neut_buttons.lines):
for line in line_tup[1]:
line.set_color(self.normalized_colors[line_tup[0]])
# TO-DO: Make it so it does not plot right away with all the widgets
# There will be other plots in the future so we don't want to just plot
# everything
plt.show()
def radio_update(self):
"""
Updates internal depth value.
"""
a = int(self.depth_button.value_selected)
self.depth_limit = a
self.pact_spectrum.set_depth_limit(a)
def normalize(self):
"""
A helper function for colors if the color values are not normalized to [0, 1].
Matplotlib prefers a range from [0, 1] instead of the usual 256 range which
is why we need this.
"""
norm = []
normalizer = matcolors.Normalize(vmin=0, vmax=255)
for color in self.border_colors:
norm.append(tuple(normalizer(color)))
return norm
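    # For example, the first border color (130, 224, 170) maps to roughly
    # (0.51, 0.88, 0.67) after normalization with vmax=255.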
def check_update(self):
"""
Updates internal neutralization values.
"""
# Hacky way of obtaining the status of the buttons
self.neutralizations = []
for i in self.neut_buttons.lines:
self.neutralizations.append(i[0].get_visible())
self.pact_spectrum.set_neut(self.neutralizations)
def get_dataset(self, in_place=False, want_data=True):
"""
Gets the dataset from the raw data file.
Saves to its own dataset automatically if in_place is true.
Returns the dataset if want_data is true.
"""
extension = '.h5'
filename = os.path.join(self.data_loc,self.data_file+extension)
if not os.path.isfile(filename):
raise ValueError('Invalid file!')
data = mgd.df_container_from_metatlas_file(filename)
if in_place:
self.data = data
if want_data:
return data
def get_tree_data(self, in_place=False, want_data=True):
"""
Gets the tree file data from the tree file.
Saves to its own tree automatically if in_place is true.
Return the tree if want_data is true.
"""
with h5py.File(self.tree_file,'r') as tr:
first = list(tr.keys())[0]
k = list(tr[first].keys())[0]
tree = tr[first][k][:]
if in_place:
self.tree = tree
if want_data:
return tree
class PactolusSpectrum():
"""
A PactolusSpectrum contains information on what to plot, the graph itself, and
images of the various compounds.
Some values are not initialized until plot is called.
"""
def __init__(self, rt, tree, ds, colors, border_colors, fig, ax,
depth_limit = 3, tol = 1, neutralizations = [True, False, False, False, False, False]):
# Passed in params
# retention time
self.rt = rt
# peak colors
self.colors = colors
# color for borders
self.border_colors = border_colors
# depth limit
self.depth_limit = depth_limit
# ppm tolerance
self.tol = tol
# neutralizations in place
self.neutralizations = neutralizations
# Generated internal vars
self.dataset = ds
self.tree = tree
self.fig = fig
self.ax = ax
#self.ax.set_ylim(0, 2e6) # make this modular later
# Generated by plot
        self.plotted_peaks = None
self.mz_peaks = None
# mz_peaks converted to a list, used for a helper
self.peaks_list = None
# list of lists of images and depths linked together.
# they are not ordered but they are lined up so iterating by index works
# index refers to a particular neutralization's info on the graph
# the lists inside the list refer to peaks: an image and a depth if applicable.
# images are False and depth = -1 if a fragment was not found
self.img = []
self.depth = []
# Post-processed images ready for display
self.preload_images = []
# A numpy array that indicates the colors a peak should display.
# 0: Unselected (Black)
# 1: Selected (Blue)
# 2: No fragment found (Red)
# Also is used to display images.
self.selected = None
# used for on_pick
self.selected_peaks = []
self.xoffset = 200 + 10 # hard coded for now
self.yoffset = 120 + 10 # hard coded for now
# frag text is constantly updated to show what is the mz peak
self.frag_text = plt.figtext(0.225, 0.8, "No fragment selected.", bbox=dict(facecolor='white', pad=10.0))
# setter function for neutralizations
def set_neut(self, neutralizations):
self.neutralizations = neutralizations
# re-draw plot
self.recolor()
# setter function for depth limit
def set_depth_limit(self, dl):
self.depth_limit = dl
# re-draw plot
self.recolor()
def reset(self):
"""
Set generated variables to their default blanks.
"""
        self.plotted_peaks = None
        self.mz_peaks = None
        self.peaks_list = None
self.img = []
self.depth = []
self.selected = None
self.preload_images = []
def recolor(self):
"""
Takes in a depth and neutralization and colors peaks without any matches red
and adjust values accordingly. Should reset the matcher.
"""
# Remove all selected peaks and prepared images
        for tup in list(self.selected_peaks):
if tup:
self.selected_peaks.remove(tup)
for img in tup[1]:
img.remove()
self.frag_text.set_text("Recoloring the spectrum.")
# Figure out the neutralizations used
if not any(self.neutralizations):
self.frag_text.set_text("No neutralizations selected!")
# Start
tmp_selected = np.ones(len(self.depth[0]))
for i in range(len(self.depth)):
if self.neutralizations[i]:
s = (np.asarray(self.depth[i]) <= 0) # don't include peaks without frags or that include parent as frag
b = (np.asarray(self.depth[i]) > self.depth_limit) # don't include peaks above a depth
invalids = np.logical_or(s, b)
tmp_selected = np.multiply(tmp_selected, invalids)
# make unmatched peaks red
tmp_selected = (tmp_selected > 0).astype(int) * 2
        self.plotted_peaks.set_color(self.colors[tmp_selected])
self.selected = tmp_selected
def plot(self, quantile=True, quantile_param=.85, nlarge = 10):
"""
Plot the data I was given.
If quantile, grabs peak by quantile_param.
Otherwise, grab n largest values by nlarge.
"""
self.reset()
dataset = self.dataset
# Grab MS2 data
ms2_df = dataset['ms2_pos']
mz = ms2_df[ms2_df.rt==self.rt]['mz']
intensity = ms2_df[ms2_df.rt==self.rt]['i']
# Gather peaks
self.mz_peaks = pd.concat([mz, intensity], axis=1)
# do it on quantile or by a fixed number?
if quantile:
self.mz_peaks = self.mz_peaks[self.mz_peaks.i >= self.mz_peaks.i.quantile(quantile_param)]
else:
self.mz_peaks = self.mz_peaks.nlargest(nlarge, 'i')
# plot the peaks
        self.plotted_peaks = plt.vlines(self.mz_peaks['mz'], 0, self.mz_peaks['i'], picker=5, linewidths=2)
self.mz_peaks = self.mz_peaks['mz']
# convert to a list
self.peaks_list = self.mz_peaks.tolist()
# convert frags
fragger = FragmentManager(self.mz_peaks, self.tree, self.tol, self.border_colors)
match_frag_sets = fragger.find_matching_neutralized_frags()
frag = []
        # NOTE: 'df' is picked up from the enclosing (global) scope here;
        # it should be made modular and passed in explicitly.
        mol_inchi = df['inchi'][0]
mol = Chem.MolFromInchi(mol_inchi, sanitize=False)
mol_h = Chem.rdmolops.AddHs(mol)
# calculate by set since grouped by peak
for frag_list in match_frag_sets:
ilist = []
dlist = []
for frag_set in frag_list:
if frag_set:
tup = fragger.draw_structure_fragment(frag_set[0], mol_h)
ilist.append(tup[0])
dlist.append(tup[1])
else:
ilist.append(False)
dlist.append(-1)
self.img.append(ilist)
self.depth.append(dlist)
# a list of lists which contain annotated images
tmp_index = -1
for img_set in range(len(self.img)):
tmp_index += 1
self.preload_images.append(fragger.borderize(self.img[img_set], tmp_index))
# grab only applicable peaks
# Reposition the graph so it'll look a bit better
pos1 = self.ax.get_position()
pos2 = [pos1.x0 - 0.05, 0.32, pos1.width + .05, pos1.height / 2.0]
self.ax.set_position(pos2)
self.ax.set_title('Pactolus Results')
self.ax.set_xlabel('m/z')
self.ax.set_ylabel('intensity')
plt.ticklabel_format(style='sci', axis='y',scilimits=(0,0))
self.fig.canvas.draw_idle()
self.fig.canvas.callbacks.connect('pick_event', lambda event: self.on_pick(event))
self.recolor()
def on_pick(self, event):
"""
Event to connect to the figure.
Displays fragments found in a peak and retains them after clicking others.
Supports deselecting.
Can filter by neutralization and depth.
"""
try:
thisline = event.artist
ind = event.ind[0]
mz = self.peaks_list[ind]
# don't redraw if there wasn't a fragment
if self.selected[ind] == 2:
self.frag_text.set_text("Fragments were not detected at mz = %.5f." % mz)
self.fig.canvas.draw_idle()
return
x = 0
y = 140
tup = [peak for peak in self.selected_peaks if peak[0] == mz]
# check if user is unselecting a peak
if tup:
tup = tup[0]
self.selected_peaks.remove(tup)
for img in tup[1]:
img.remove()
self.selected[ind] = 0
else:
imgs = []
yoff = 0
xoff = 0
c = 0
for i, j, k in zip(self.preload_images, self.depth, self.neutralizations):
if k and i[ind] and (j[ind] >= 1 and j[ind] <= self.depth_limit):
c += 1
imgs.append(self.fig.figimage(i[ind], xo=x + 25 + (self.xoffset * xoff),
yo=y + (self.yoffset * yoff), zorder=20))
xoff += 1
if xoff == 3:
xoff = 0
yoff = -1
if c == 1:
self.frag_text.set_text("Obtained a fragment at mz = %.5f." % mz)
else:
self.frag_text.set_text("Obtained fragments at mz = %.5f." % mz)
tup = (mz, imgs)
self.selected_peaks.append(tup)
self.selected[ind] = 1
thisline.set_color(self.colors[self.selected])
self.fig.canvas.draw_idle()
except Exception as e:
self.ax.set_title(e)
|
biorack/metatlas
|
metatlas/interfaces/pactolus_tools.py
|
Python
|
bsd-3-clause
| 45,482
|
[
"RDKit"
] |
9b12acf39fdcb01cf8830d877fbdeb5a5822a7d7b3229eaa27ba9f8f18c04ec8
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The fsl module provides classes for interfacing with the `FSL
<http://www.fmrib.ox.ac.uk/fsl/index.html>`_ command line tools. This
was written to work with FSL version 4.1.4.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os
from glob import glob
import warnings
from shutil import rmtree
import numpy as np
from nipype.interfaces.fsl.base import (FSLCommand, FSLCommandInputSpec)
from nipype.interfaces.base import (load_template, File, traits, isdefined,
TraitedSpec, BaseInterface, Directory,
InputMultiPath, OutputMultiPath,
BaseInterfaceInputSpec)
from nipype.utils.filemanip import (list_to_filename, filename_to_list)
from nibabel import load
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
class Level1DesignInputSpec(BaseInterfaceInputSpec):
interscan_interval = traits.Float(mandatory=True,
desc='Interscan interval (in secs)')
session_info = traits.Any(mandatory=True,
desc='Session specific information generated by ``modelgen.SpecifyModel``')
bases = traits.Either(traits.Dict(traits.Enum('dgamma'), traits.Dict(traits.Enum('derivs'), traits.Bool)),
traits.Dict(traits.Enum('gamma'), traits.Dict(traits.Enum('derivs'), traits.Bool)),
traits.Dict(traits.Enum('none'), traits.Enum(None)),
mandatory=True,
desc="name of basis function and options e.g., {'dgamma': {'derivs': True}}")
model_serial_correlations = traits.Bool(
desc="Option to model serial correlations using an \
autoregressive estimator (order 1). Setting this option is only \
useful in the context of the fsf file. If you set this to False, you need to repeat \
this option for FILMGLS by setting autocorr_noestimate to True", mandatory=True)
contrasts = traits.List(
traits.Either(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('F'),
traits.List(traits.Either(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float),
traits.List(traits.Float)))))),
desc="List of contrasts with each contrast being a list of the form - \
[('name', 'stat', [condition list], [weight list], [session list])]. if \
session list is None or not provided, all sessions are used. For F \
contrasts, the condition list should contain previously defined \
T-contrasts.")
class Level1DesignOutputSpec(TraitedSpec):
fsf_files = OutputMultiPath(File(exists=True),
desc='FSL feat specification files')
ev_files = OutputMultiPath(traits.List(File(exists=True)),
desc='condition information files')
class Level1Design(BaseInterface):
"""Generate FEAT specific files
Examples
--------
>>> level1design = Level1Design()
>>> level1design.inputs.interscan_interval = 2.5
>>> level1design.inputs.bases = {'dgamma':{'derivs': False}}
>>> level1design.inputs.session_info = 'session_info.npz'
>>> level1design.run() # doctest: +SKIP
"""
input_spec = Level1DesignInputSpec
output_spec = Level1DesignOutputSpec
def _create_ev_file(self, evfname, evinfo):
f = open(evfname, 'wt')
for i in evinfo:
if len(i) == 3:
f.write('%f %f %f\n' % (i[0], i[1], i[2]))
else:
f.write('%f\n' % i[0])
f.close()
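    # For reference, a three-column EV file written above contains one line
    # per event in FSL's custom timing format (onset, duration, amplitude):
    #   10.000000 2.500000 1.000000
    #   32.000000 2.500000 1.000000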
def _create_ev_files(self, cwd, runinfo, runidx, usetd, contrasts, no_bases,
do_tempfilter):
"""Creates EV files from condition and regressor information.
        Parameters
        ----------
        cwd : str
            Directory in which the EV files are written
        runinfo : dict
            Generated by `SpecifyModel` and contains information
            about events and other regressors.
        runidx : int
            Index to run number
        usetd : int
            Whether or not to use temporal derivatives for
            conditions
        contrasts : list of lists
            Information on contrasts to be evaluated
        no_bases : bool
            Whether the 'none' basis set was selected
        do_tempfilter : int
            Whether temporal filtering is applied to this run
        """
conds = {}
evname = []
ev_hrf = load_template('feat_ev_hrf.tcl')
ev_none = load_template('feat_ev_none.tcl')
ev_ortho = load_template('feat_ev_ortho.tcl')
ev_txt = ''
# generate sections for conditions and other nuisance
# regressors
num_evs = [0, 0]
for field in ['cond', 'regress']:
for i, cond in enumerate(runinfo[field]):
name = cond['name']
evname.append(name)
evfname = os.path.join(cwd, 'ev_%s_%d_%d.txt' % (name, runidx,
len(evname)))
evinfo = []
num_evs[0] += 1
num_evs[1] += 1
if field == 'cond':
for j, onset in enumerate(cond['onset']):
try:
amplitudes = cond['amplitudes']
if len(amplitudes) > 1:
amp = amplitudes[j]
else:
amp = amplitudes[0]
except KeyError:
amp = 1
if len(cond['duration']) > 1:
evinfo.insert(j, [onset, cond['duration'][j], amp])
else:
evinfo.insert(j, [onset, cond['duration'][0], amp])
if no_bases:
ev_txt += ev_none.substitute(ev_num=num_evs[0],
ev_name=name,
tempfilt_yn=do_tempfilter,
cond_file=evfname)
else:
ev_txt += ev_hrf.substitute(ev_num=num_evs[0],
ev_name=name,
tempfilt_yn=do_tempfilter,
temporalderiv=usetd,
cond_file=evfname)
if usetd:
evname.append(name + 'TD')
num_evs[1] += 1
elif field == 'regress':
evinfo = [[j] for j in cond['val']]
ev_txt += ev_none.substitute(ev_num=num_evs[0],
ev_name=name,
tempfilt_yn=do_tempfilter,
cond_file=evfname)
ev_txt += "\n"
conds[name] = evfname
self._create_ev_file(evfname, evinfo)
# add ev orthogonalization
for i in range(1, num_evs[0] + 1):
for j in range(0, num_evs[0] + 1):
ev_txt += ev_ortho.substitute(c0=i, c1=j)
ev_txt += "\n"
# add contrast info to fsf file
if isdefined(contrasts):
contrast_header = load_template('feat_contrast_header.tcl')
contrast_prolog = load_template('feat_contrast_prolog.tcl')
contrast_element = load_template('feat_contrast_element.tcl')
contrast_ftest_element = load_template('feat_contrast_ftest_element.tcl')
contrastmask_header = load_template('feat_contrastmask_header.tcl')
contrastmask_footer = load_template('feat_contrastmask_footer.tcl')
contrastmask_element = load_template('feat_contrastmask_element.tcl')
# add t/f contrast info
ev_txt += contrast_header.substitute()
con_names = []
for j, con in enumerate(contrasts):
con_names.append(con[0])
con_map = {}
ftest_idx = []
ttest_idx = []
for j, con in enumerate(contrasts):
if con[1] == 'F':
ftest_idx.append(j)
for c in con[2]:
if c[0] not in con_map.keys():
con_map[c[0]] = []
con_map[c[0]].append(j)
else:
ttest_idx.append(j)
for ctype in ['real', 'orig']:
for j, con in enumerate(contrasts):
if con[1] == 'F':
continue
tidx = ttest_idx.index(j) + 1
ev_txt += contrast_prolog.substitute(cnum=tidx,
ctype=ctype,
cname=con[0])
count = 0
for c in range(1, len(evname) + 1):
if evname[c - 1].endswith('TD') and ctype == 'orig':
continue
count = count + 1
if evname[c - 1] in con[2]:
val = con[3][con[2].index(evname[c - 1])]
else:
val = 0.0
ev_txt += contrast_element.substitute(cnum=tidx,
element=count,
ctype=ctype, val=val)
ev_txt += "\n"
if con[0] in con_map.keys():
for fconidx in con_map[con[0]]:
ev_txt += contrast_ftest_element.substitute(cnum=ftest_idx.index(fconidx) + 1,
element=tidx,
ctype=ctype,
val=1)
ev_txt += "\n"
# add contrast mask info
ev_txt += contrastmask_header.substitute()
for j, _ in enumerate(contrasts):
for k, _ in enumerate(contrasts):
if j != k:
ev_txt += contrastmask_element.substitute(c1=j + 1,
c2=k + 1)
ev_txt += contrastmask_footer.substitute()
return num_evs, ev_txt
def _format_session_info(self, session_info):
if isinstance(session_info, dict):
session_info = [session_info]
return session_info
def _get_func_files(self, session_info):
"""Returns functional files in the order of runs
"""
func_files = []
for i, info in enumerate(session_info):
func_files.insert(i, info['scans'])
return func_files
def _run_interface(self, runtime):
cwd = os.getcwd()
fsf_header = load_template('feat_header_l1.tcl')
fsf_postscript = load_template('feat_nongui.tcl')
prewhiten = 0
if isdefined(self.inputs.model_serial_correlations):
prewhiten = int(self.inputs.model_serial_correlations)
usetd = 0
no_bases = False
        basis_key = list(self.inputs.bases.keys())[0]
if basis_key in ['dgamma', 'gamma']:
usetd = int(self.inputs.bases[basis_key]['derivs'])
if basis_key == 'none':
no_bases = True
session_info = self._format_session_info(self.inputs.session_info)
func_files = self._get_func_files(session_info)
n_tcon = 0
n_fcon = 0
if isdefined(self.inputs.contrasts):
for i, c in enumerate(self.inputs.contrasts):
if c[1] == 'T':
n_tcon += 1
elif c[1] == 'F':
n_fcon += 1
for i, info in enumerate(session_info):
do_tempfilter = 1
if info['hpf'] == np.inf:
do_tempfilter = 0
num_evs, cond_txt = self._create_ev_files(cwd, info, i, usetd,
self.inputs.contrasts,
no_bases, do_tempfilter)
nim = load(func_files[i])
(_, _, _, timepoints) = nim.get_shape()
fsf_txt = fsf_header.substitute(run_num=i,
interscan_interval=self.inputs.interscan_interval,
num_vols=timepoints,
prewhiten=prewhiten,
num_evs=num_evs[0],
num_evs_real=num_evs[1],
num_tcon=n_tcon,
num_fcon=n_fcon,
high_pass_filter_cutoff=info['hpf'],
temphp_yn=do_tempfilter,
func_file=func_files[i])
fsf_txt += cond_txt
fsf_txt += fsf_postscript.substitute(overwrite=1)
f = open(os.path.join(cwd, 'run%d.fsf' % i), 'w')
f.write(fsf_txt)
f.close()
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
cwd = os.getcwd()
outputs['fsf_files'] = []
outputs['ev_files'] = []
usetd = 0
        basis_key = list(self.inputs.bases.keys())[0]
if basis_key in ['dgamma', 'gamma']:
usetd = int(self.inputs.bases[basis_key]['derivs'])
for runno, runinfo in enumerate(self._format_session_info(self.inputs.session_info)):
outputs['fsf_files'].append(os.path.join(cwd, 'run%d.fsf' % runno))
outputs['ev_files'].insert(runno, [])
evname = []
for field in ['cond', 'regress']:
for i, cond in enumerate(runinfo[field]):
name = cond['name']
evname.append(name)
evfname = os.path.join(cwd, 'ev_%s_%d_%d.txt' % (name, runno,
len(evname)))
if field == 'cond':
if usetd:
evname.append(name + 'TD')
outputs['ev_files'][runno].append(os.path.join(cwd, evfname))
return outputs
class FEATInputSpec(FSLCommandInputSpec):
    fsf_file = File(exists=True, mandatory=True, argstr="%s", position=0,
desc="File specifying the feat design spec file")
class FEATOutputSpec(TraitedSpec):
feat_dir = Directory(exists=True)
class FEAT(FSLCommand):
"""Uses FSL feat to calculate first level stats
"""
_cmd = 'feat'
input_spec = FEATInputSpec
output_spec = FEATOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
outputs['feat_dir'] = glob(os.path.join(os.getcwd(), '*feat'))[0]
return outputs
class FEATModelInputSpec(FSLCommandInputSpec):
    fsf_file = File(exists=True, mandatory=True, argstr="%s", position=0,
desc="File specifying the feat design spec file",
copyfile=False)
ev_files = traits.List(File(exists=True),
mandatory=True, argstr="%s",
desc="Event spec files generated by level1design",
position=1, copyfile=False)
class FEATModelOutputSpec(TraitedSpec):
design_file = File(exists=True, desc='Mat file containing ascii matrix for design')
design_image = File(exists=True, desc='Graphical representation of design matrix')
design_cov = File(exists=True, desc='Graphical representation of design covariance')
con_file = File(exists=True, desc='Contrast file containing contrast vectors')
fcon_file = File(desc='Contrast file containing contrast vectors')
class FEATModel(FSLCommand):
"""Uses FSL feat_model to generate design.mat files
"""
_cmd = 'feat_model'
input_spec = FEATModelInputSpec
    output_spec = FEATModelOutputSpec
def _format_arg(self, name, trait_spec, value):
if name == 'fsf_file':
return super(FEATModel, self)._format_arg(name, trait_spec, self._get_design_root(value))
elif name == 'ev_files':
return ''
else:
return super(FEATModel, self)._format_arg(name, trait_spec, value)
def _get_design_root(self, infile):
_, fname = os.path.split(infile)
return fname.split('.')[0]
def _list_outputs(self):
#TODO: figure out file names and get rid off the globs
outputs = self._outputs().get()
root = self._get_design_root(list_to_filename(self.inputs.fsf_file))
design_file = glob(os.path.join(os.getcwd(), '%s*.mat' % root))
assert len(design_file) == 1, 'No mat file generated by FEAT Model'
outputs['design_file'] = design_file[0]
design_image = glob(os.path.join(os.getcwd(), '%s.png' % root))
assert len(design_image) == 1, 'No design image generated by FEAT Model'
outputs['design_image'] = design_image[0]
design_cov = glob(os.path.join(os.getcwd(), '%s_cov.png' % root))
assert len(design_cov) == 1, 'No covariance image generated by FEAT Model'
outputs['design_cov'] = design_cov[0]
con_file = glob(os.path.join(os.getcwd(), '%s*.con' % root))
assert len(con_file) == 1, 'No con file generated by FEAT Model'
outputs['con_file'] = con_file[0]
fcon_file = glob(os.path.join(os.getcwd(), '%s*.fts' % root))
if fcon_file:
assert len(fcon_file) == 1, 'No fts file generated by FEAT Model'
outputs['fcon_file'] = fcon_file[0]
return outputs
# interface to fsl command line model fit routines
# ohinds: 2009-12-28
class FILMGLSInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, mandatory=True, position=-3,
argstr='%s',
desc='input data file')
design_file = File(exists=True, position=-2,
argstr='%s',
desc='design matrix file')
threshold = traits.Float(1000, min=0, argstr='%f',
position=-1,
desc='threshold')
smooth_autocorr = traits.Bool(argstr='-sa',
desc='Smooth auto corr estimates')
mask_size = traits.Int(argstr='-ms %d',
desc="susan mask size")
brightness_threshold = traits.Int(min=0, argstr='-epith %d',
desc='susan brightness threshold, otherwise it is estimated')
full_data = traits.Bool(argstr='-v', desc='output full data')
_estimate_xor = ['autocorr_estimate_only', 'fit_armodel', 'tukey_window',
'multitaper_product', 'use_pava', 'autocorr_noestimate']
autocorr_estimate_only = traits.Bool(argstr='-ac',
xor=_estimate_xor,
desc='perform autocorrelation estimatation only')
fit_armodel = traits.Bool(argstr='-ar', xor=_estimate_xor,
desc='fits autoregressive model - default is to use tukey with M=sqrt(numvols)')
tukey_window = traits.Int(argstr='-tukey %d', xor=_estimate_xor,
desc='tukey window size to estimate autocorr')
multitaper_product = traits.Int(argstr='-mt %d', xor=_estimate_xor,
desc='multitapering with slepian tapers and num is the time-bandwidth product')
use_pava = traits.Bool(argstr='-pava', desc='estimates autocorr using PAVA')
autocorr_noestimate = traits.Bool(argstr='-noest', xor=_estimate_xor,
desc='do not estimate autocorrs')
output_pwdata = traits.Bool(argstr='-output_pwdata',
desc='output prewhitened data and average design matrix')
results_dir = Directory('results', argstr='-rn %s', usedefault=True,
desc='directory to store results in')
class FILMGLSOutputSpec(TraitedSpec):
param_estimates = OutputMultiPath(File(exists=True),
desc='Parameter estimates for each column of the design matrix')
residual4d = File(exists=True,
desc='Model fit residual mean-squared error for each time point')
dof_file = File(exists=True, desc='degrees of freedom')
sigmasquareds = File(exists=True, desc='summary of residuals, See Woolrich, et. al., 2001')
results_dir = Directory(exists=True,
desc='directory storing model estimation output')
corrections = File(exists=True,
desc='statistical corrections used within FILM modelling')
logfile = File(exists=True,
desc='FILM run logfile')
class FILMGLS(FSLCommand):
"""Use FSL film_gls command to fit a design matrix to voxel timeseries
Examples
--------
Initialize with no options, assigning them when calling run:
>>> from nipype.interfaces import fsl
>>> fgls = fsl.FILMGLS()
>>> res = fgls.run('in_file', 'design_file', 'thresh', rn='stats') #doctest: +SKIP
Assign options through the ``inputs`` attribute:
>>> fgls = fsl.FILMGLS()
>>> fgls.inputs.in_file = 'functional.nii'
>>> fgls.inputs.design_file = 'design.mat'
>>> fgls.inputs.threshold = 10
>>> fgls.inputs.results_dir = 'stats'
>>> res = fgls.run() #doctest: +SKIP
Specify options when creating an instance:
>>> fgls = fsl.FILMGLS(in_file='functional.nii', \
design_file='design.mat', \
threshold=10, results_dir='stats')
>>> res = fgls.run() #doctest: +SKIP
"""
_cmd = 'film_gls'
input_spec = FILMGLSInputSpec
output_spec = FILMGLSOutputSpec
def _get_pe_files(self, cwd):
files = None
if isdefined(self.inputs.design_file):
fp = open(self.inputs.design_file, 'rt')
for line in fp.readlines():
if line.startswith('/NumWaves'):
numpes = int(line.split()[-1])
files = []
for i in range(numpes):
files.append(self._gen_fname('pe%d.nii' % (i + 1),
cwd=cwd))
break
fp.close()
return files
def _list_outputs(self):
outputs = self._outputs().get()
cwd = os.getcwd()
results_dir = os.path.join(cwd, self.inputs.results_dir)
outputs['results_dir'] = results_dir
pe_files = self._get_pe_files(results_dir)
if pe_files:
outputs['param_estimates'] = pe_files
outputs['residual4d'] = self._gen_fname('res4d.nii', cwd=results_dir)
outputs['dof_file'] = os.path.join(results_dir, 'dof')
outputs['sigmasquareds'] = self._gen_fname('sigmasquareds.nii',
cwd=results_dir)
outputs['corrections'] = self._gen_fname('corrections.nii',
cwd=results_dir)
outputs['logfile'] = self._gen_fname('logfile',
change_ext=False,
cwd=results_dir)
return outputs
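# Editor's sketch (not part of the original interface): _get_pe_files() above keys
# off the '/NumWaves' header of an FSL design.mat file to predict how many
# pe<N>.nii volumes film_gls will write. The same parse, reduced to a standalone
# helper (the design-file path it receives is hypothetical):
def _count_design_waves_sketch(design_file):
    # Return the value of the '/NumWaves' header, or 0 if it is absent.
    with open(design_file, 'rt') as fp:
        for line in fp:
            if line.startswith('/NumWaves'):
                return int(line.split()[-1])
    return 0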
class FEATRegisterInputSpec(BaseInterfaceInputSpec):
feat_dirs = InputMultiPath(Directory(exists=True), desc="Lower level feat dirs",
mandatory=True)
reg_image = File(exists=True, desc="image to register to (will be treated as standard)",
mandatory=True)
reg_dof = traits.Int(12, desc="registration degrees of freedom", usedefault=True)
class FEATRegisterOutputSpec(TraitedSpec):
fsf_file = File(exists=True,
desc="FSL feat specification file")
class FEATRegister(BaseInterface):
"""Register feat directories to a specific standard
"""
input_spec = FEATRegisterInputSpec
output_spec = FEATRegisterOutputSpec
def _run_interface(self, runtime):
fsf_header = load_template('featreg_header.tcl')
fsf_footer = load_template('feat_nongui.tcl')
fsf_dirs = load_template('feat_fe_featdirs.tcl')
num_runs = len(self.inputs.feat_dirs)
fsf_txt = fsf_header.substitute(num_runs=num_runs,
regimage=self.inputs.reg_image,
regdof=self.inputs.reg_dof)
for i, rundir in enumerate(filename_to_list(self.inputs.feat_dirs)):
fsf_txt += fsf_dirs.substitute(runno=i + 1,
rundir=os.path.abspath(rundir))
fsf_txt += fsf_footer.substitute()
f = open(os.path.join(os.getcwd(), 'register.fsf'), 'wt')
f.write(fsf_txt)
f.close()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['fsf_file'] = os.path.abspath(os.path.join(os.getcwd(), 'register.fsf'))
return outputs
class FLAMEOInputSpec(FSLCommandInputSpec):
cope_file = File(exists=True, argstr='--copefile=%s', mandatory=True,
desc='cope regressor data file')
var_cope_file = File(exists=True, argstr='--varcopefile=%s',
desc='varcope weightings data file')
dof_var_cope_file = File(exists=True, argstr='--dofvarcopefile=%s',
desc='dof data file for varcope data')
mask_file = File(exists=True, argstr='--maskfile=%s', mandatory=True,
desc='mask file')
design_file = File(exists=True, argstr='--designfile=%s', mandatory=True,
desc='design matrix file')
t_con_file = File(exists=True, argstr='--tcontrastsfile=%s', mandatory=True,
desc='ascii matrix specifying t-contrasts')
f_con_file = File(exists=True, argstr='--fcontrastsfile=%s',
desc='ascii matrix specifying f-contrasts')
cov_split_file = File(exists=True, argstr='--covsplitfile=%s', mandatory=True,
desc='ascii matrix specifying the groups the covariance is split into')
run_mode = traits.Enum('fe', 'ols', 'flame1', 'flame12', argstr='--runmode=%s',
mandatory=True, desc='inference to perform')
n_jumps = traits.Int(argstr='--njumps=%d', desc='number of jumps made by mcmc')
burnin = traits.Int(argstr='--burnin=%d',
desc='number of jumps at start of mcmc to be discarded')
sample_every = traits.Int(argstr='--sampleevery=%d',
desc='number of jumps for each sample')
fix_mean = traits.Bool(argstr='--fixmean', desc='fix mean for tfit')
infer_outliers = traits.Bool(argstr='--inferoutliers',
desc='infer outliers - not for fe')
no_pe_outputs = traits.Bool(argstr='--nopeoutput',
desc='do not output pe files')
sigma_dofs = traits.Int(argstr='--sigma_dofs=%d',
desc='sigma (in mm) to use for Gaussian smoothing the DOFs in FLAME 2. Default is 1mm, -1 indicates no smoothing')
outlier_iter = traits.Int(argstr='--ioni=%d',
desc='Number of max iterations to use when inferring outliers. Default is 12.')
log_dir = Directory("stats", argstr='--ld=%s', usedefault=True) # ohinds
# no support for ven, vef
class FLAMEOOutputSpec(TraitedSpec):
pes = OutputMultiPath(File(exists=True),
desc="Parameter estimates for each column of the design matrix for each voxel")
res4d = OutputMultiPath(File(exists=True), desc="Model fit residual mean-squared error for each time point")
copes = OutputMultiPath(File(exists=True), desc="Contrast estimates for each contrast")
var_copes = OutputMultiPath(File(exists=True), desc="Variance estimates for each contrast")
zstats = OutputMultiPath(File(exists=True), desc="z-stat file for each contrast")
tstats = OutputMultiPath(File(exists=True), desc="t-stat file for each contrast")
mrefvars = OutputMultiPath(File(exists=True), desc="mean random effect variances for each contrast")
tdof = OutputMultiPath(File(exists=True), desc="temporal dof file for each contrast")
weights = OutputMultiPath(File(exists=True), desc="weights file for each contrast")
stats_dir = Directory(exists=True, desc="directory storing model estimation output")
# interface to fsl command line higher level model fit
# satra: 2010-01-09
class FLAMEO(FSLCommand):
"""Use FSL flameo command to perform higher level model fits
Examples
--------
Initialize FLAMEO with no options, assigning them when calling run:
>>> from nipype.interfaces import fsl
>>> import os
>>> flameo = fsl.FLAMEO(cope_file='cope.nii.gz', \
var_cope_file='varcope.nii.gz', \
cov_split_file='cov_split.mat', \
design_file='design.mat', \
t_con_file='design.con', \
mask_file='mask.nii', \
run_mode='fe')
>>> flameo.cmdline
'flameo --copefile=cope.nii.gz --covsplitfile=cov_split.mat --designfile=design.mat --ld=stats --maskfile=mask.nii --runmode=fe --tcontrastsfile=design.con --varcopefile=varcope.nii.gz'
"""
_cmd = 'flameo'
input_spec = FLAMEOInputSpec
output_spec = FLAMEOOutputSpec
# ohinds: 2010-04-06
def _run_interface(self, runtime):
log_dir = self.inputs.log_dir
cwd = os.getcwd()
if os.access(os.path.join(cwd, log_dir), os.F_OK):
rmtree(os.path.join(cwd, log_dir))
return super(FLAMEO, self)._run_interface(runtime)
# ohinds: 2010-04-06
# made these compatible with flameo
def _list_outputs(self):
outputs = self._outputs().get()
pth = os.path.join(os.getcwd(), self.inputs.log_dir)
pes = glob(os.path.join(pth, 'pe[0-9]*.*'))
assert len(pes) >= 1, 'No pe volumes generated by FLAMEO'
outputs['pes'] = pes
res4d = glob(os.path.join(pth, 'res4d.*'))
assert len(res4d) == 1, 'No residual volume generated by FLAMEO'
outputs['res4d'] = res4d[0]
copes = glob(os.path.join(pth, 'cope[0-9]*.*'))
assert len(copes) >= 1, 'No cope volumes generated by FLAMEO'
outputs['copes'] = copes
var_copes = glob(os.path.join(pth, 'varcope[0-9]*.*'))
assert len(var_copes) >= 1, 'No varcope volumes generated by FLAMEO'
outputs['var_copes'] = var_copes
zstats = glob(os.path.join(pth, 'zstat[0-9]*.*'))
assert len(zstats) >= 1, 'No zstat volumes generated by FLAMEO'
outputs['zstats'] = zstats
tstats = glob(os.path.join(pth, 'tstat[0-9]*.*'))
assert len(tstats) >= 1, 'No tstat volumes generated by FLAMEO'
outputs['tstats'] = tstats
outputs['tstats'] = tstats
mrefs = glob(os.path.join(pth, 'mean_random_effects_var[0-9]*.*'))
assert len(mrefs) >= 1, 'No mean random effects volumes generated by FLAMEO'
outputs['mrefvars'] = mrefs
tdof = glob(os.path.join(pth, 'tdof_t[0-9]*.*'))
assert len(tdof) >= 1, 'No T dof volumes generated by FLAMEO'
outputs['tdof'] = tdof
weights = glob(os.path.join(pth, 'weights[0-9]*.*'))
assert len(weights) >= 1, 'No weight volumes generated by FLAMEO'
outputs['weights'] = weights
outputs['stats_dir'] = pth
return outputs
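# Editor's sketch: the output collection above boils down to glob patterns over
# the FLAMEO log directory. Assuming a populated 'stats' directory, the core of
# it reduces to:
#
#     from glob import glob
#     copes = sorted(glob(os.path.join('stats', 'cope[0-9]*.*')))
#     zstats = sorted(glob(os.path.join('stats', 'zstat[0-9]*.*')))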
class ContrastMgrInputSpec(FSLCommandInputSpec):
tcon_file = File(exists=True, mandatory=True,
argstr='%s', position=-1,
desc='contrast file containing T-contrasts')
fcon_file = File(exists=True, argstr='-f %s',
desc='contrast file containing F-contrasts')
param_estimates = InputMultiPath(File(exists=True),
argstr='', copyfile=False,
mandatory=True,
desc='Parameter estimates for each column of the design matrix')
corrections = File(exists=True, copyfile=False, mandatory=True,
desc='statistical corrections used within FILM modelling')
dof_file = File(exists=True, argstr='', copyfile=False, mandatory=True,
desc='degrees of freedom')
sigmasquareds = File(exists=True, argstr='', position=-2,
copyfile=False, mandatory=True,
desc='summary of residuals, See Woolrich, et. al., 2001')
contrast_num = traits.Int(min=1, argstr='-cope',
desc='contrast number to start labeling copes from')
suffix = traits.Str(argstr='-suffix %s',
desc='suffix to put on the end of the cope filename before the contrast number, default is nothing')
class ContrastMgrOutputSpec(TraitedSpec):
copes = OutputMultiPath(File(exists=True),
desc='Contrast estimates for each contrast')
varcopes = OutputMultiPath(File(exists=True),
desc='Variance estimates for each contrast')
zstats = OutputMultiPath(File(exists=True),
desc='z-stat file for each contrast')
tstats = OutputMultiPath(File(exists=True),
desc='t-stat file for each contrast')
fstats = OutputMultiPath(File(exists=True),
desc='f-stat file for each contrast')
zfstats = OutputMultiPath(File(exists=True),
desc='z-stat file for each F contrast')
neffs = OutputMultiPath(File(exists=True),
desc='neff file ?? for each contrast')
class ContrastMgr(FSLCommand):
"""Use FSL contrast_mgr command to evaluate contrasts
In interface mode this interface assumes that all the required inputs are in
the same location.
"""
_cmd = 'contrast_mgr'
input_spec = ContrastMgrInputSpec
output_spec = ContrastMgrOutputSpec
def _run_interface(self, runtime):
# The returncode is meaningless in ContrastMgr. So check the output
# in stderr and if it's set, then update the returncode
# accordingly.
runtime = super(ContrastMgr, self)._run_interface(runtime)
if runtime.stderr:
self.raise_exception(runtime)
return runtime
def _format_arg(self, name, trait_spec, value):
if name in ['param_estimates', 'corrections', 'dof_file']:
return ''
elif name in ['sigmasquareds']:
path, _ = os.path.split(value)
return path
else:
return super(ContrastMgr, self)._format_arg(name, trait_spec, value)
def _get_design_root(self, infile):
_, fname = os.path.split(infile)
return fname.split('.')[0]
def _get_numcons(self):
numtcons = 0
numfcons = 0
if isdefined(self.inputs.tcon_file):
fp = open(self.inputs.tcon_file, 'rt')
for line in fp.readlines():
if line.startswith('/NumContrasts'):
numtcons = int(line.split()[-1])
break
fp.close()
if isdefined(self.inputs.fcon_file):
fp = open(self.inputs.fcon_file, 'rt')
for line in fp.readlines():
if line.startswith('/NumContrasts'):
numfcons = int(line.split()[-1])
break
fp.close()
return numtcons, numfcons
def _list_outputs(self):
outputs = self._outputs().get()
pth, _ = os.path.split(self.inputs.sigmasquareds)
numtcons, numfcons = self._get_numcons()
base_contrast = 1
if isdefined(self.inputs.contrast_num):
base_contrast = self.inputs.contrast_num
copes = []
varcopes = []
zstats = []
tstats = []
neffs = []
for i in range(numtcons):
copes.append(self._gen_fname('cope%d.nii' % (base_contrast + i),
cwd=pth))
varcopes.append(self._gen_fname('varcope%d.nii' % (base_contrast + i),
cwd=pth))
zstats.append(self._gen_fname('zstat%d.nii' % (base_contrast + i),
cwd=pth))
tstats.append(self._gen_fname('tstat%d.nii' % (base_contrast + i),
cwd=pth))
neffs.append(self._gen_fname('neff%d.nii' % (base_contrast + i),
cwd=pth))
if copes:
outputs['copes'] = copes
outputs['varcopes'] = varcopes
outputs['zstats'] = zstats
outputs['tstats'] = tstats
outputs['neffs'] = neffs
fstats = []
zfstats = []
for i in range(numfcons):
fstats.append(self._gen_fname('fstat%d.nii' % (base_contrast + i),
cwd=pth))
zfstats.append(self._gen_fname('zfstat%d.nii' % (base_contrast + i),
cwd=pth))
if fstats:
outputs['fstats'] = fstats
outputs['zfstats'] = zfstats
return outputs
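# Editor's sketch: _get_numcons() reads the '/NumContrasts' header of the .con
# and .fts files to decide how many cope/zstat (T) and fstat/zfstat (F) volumes
# contrast_mgr will emit. For example, a design.con beginning
#
#     /ContrastName1 group mean
#     /NumWaves 1
#     /NumContrasts 1
#
# yields numtcons == 1 and, with no fcon_file set, numfcons == 0.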
class L2ModelInputSpec(BaseInterfaceInputSpec):
num_copes = traits.Int(min=1, mandatory=True,
desc='number of copes to be combined')
class L2ModelOutputSpec(TraitedSpec):
design_mat = File(exists=True, desc='design matrix file')
design_con = File(exists=True, desc='design contrast file')
design_grp = File(exists=True, desc='design group file')
class L2Model(BaseInterface):
"""Generate subject specific second level model
Examples
--------
>>> from nipype.interfaces.fsl import L2Model
>>> model = L2Model(num_copes=3) # 3 sessions
"""
input_spec = L2ModelInputSpec
output_spec = L2ModelOutputSpec
def _run_interface(self, runtime):
cwd = os.getcwd()
mat_txt = ['/NumWaves 1',
'/NumPoints %d' % self.inputs.num_copes,
'/PPheights %e' % 1,
'',
'/Matrix']
for i in range(self.inputs.num_copes):
mat_txt += ['%e' % 1]
mat_txt = '\n'.join(mat_txt)
con_txt = ['/ContrastName1 group mean',
'/NumWaves 1',
'/NumContrasts 1',
'/PPheights %e' % 1,
'/RequiredEffect 100.0',  # XXX: where does this number come from?
'',
'/Matrix',
'%e' % 1]
con_txt = '\n'.join(con_txt)
grp_txt = ['/NumWaves 1',
'/NumPoints %d' % self.inputs.num_copes,
'',
'/Matrix']
for i in range(self.inputs.num_copes):
grp_txt += ['1']
grp_txt = '\n'.join(grp_txt)
txt = {'design.mat': mat_txt,
'design.con': con_txt,
'design.grp': grp_txt}
# write design files
for i, name in enumerate(['design.mat', 'design.con', 'design.grp']):
f = open(os.path.join(cwd, name), 'wt')
f.write(txt[name])
f.close()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
for field in outputs.keys():
outputs[field] = os.path.join(os.getcwd(),
field.replace('_', '.'))
return outputs
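# Editor's sketch: for L2Model(num_copes=3) the three generated design files
# are plain FSL text matrices; design.mat, for instance, comes out as
#
#     /NumWaves 1
#     /NumPoints 3
#     /PPheights 1.000000e+00
#
#     /Matrix
#     1.000000e+00
#     1.000000e+00
#     1.000000e+00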
class MultipleRegressDesignInputSpec(BaseInterfaceInputSpec):
contrasts = traits.List(
traits.Either(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('F'),
traits.List(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
))),
mandatory=True,
desc="List of contrasts with each contrast being a list of the form - \
[('name', 'stat', [condition list], [weight list])]. if \
session list is None or not provided, all sessions are used. For F \
contrasts, the condition list should contain previously defined \
T-contrasts without any weight list.")
regressors = traits.Dict(traits.Str, traits.List(traits.Float),
mandatory=True,
desc='dictionary containing named lists of regressors')
groups = traits.List(traits.Int,
desc='list of group identifiers (defaults to single group)')
class MultipleRegressDesignOutputSpec(TraitedSpec):
design_mat = File(exists=True, desc='design matrix file')
design_con = File(exists=True, desc='design t-contrast file')
design_fts = File(exists=True, desc='design f-contrast file')
design_grp = File(exists=True, desc='design group file')
class MultipleRegressDesign(BaseInterface):
"""Generate multiple regression design
.. note::
FSL does not demean columns for higher level analysis.
Please see `FSL documentation <http://www.fmrib.ox.ac.uk/fsl/feat5/detail.html#higher>`_
for more details on model specification for higher level analysis.
Examples
--------
>>> from nipype.interfaces.fsl import MultipleRegressDesign
>>> model = MultipleRegressDesign()
>>> model.inputs.contrasts = [['group mean', 'T',['reg1'],[1]]]
>>> model.inputs.regressors = dict(reg1=[1, 1, 1], reg2=[2.,-4, 3])
>>> model.run() # doctest: +SKIP
"""
input_spec = MultipleRegressDesignInputSpec
output_spec = MultipleRegressDesignOutputSpec
def _run_interface(self, runtime):
cwd = os.getcwd()
regs = sorted(self.inputs.regressors.keys())
nwaves = len(regs)
npoints = len(self.inputs.regressors[regs[0]])
ntcons = sum([1 for con in self.inputs.contrasts if con[1] == 'T'])
nfcons = sum([1 for con in self.inputs.contrasts if con[1] == 'F'])
# write mat file
mat_txt = ['/NumWaves %d' % nwaves,
'/NumPoints %d' % npoints]
ppheights = []
for reg in regs:
maxreg = np.max(self.inputs.regressors[reg])
minreg = np.min(self.inputs.regressors[reg])
if np.sign(maxreg) == np.sign(minreg):
regheight = max([abs(minreg), abs(maxreg)])
else:
regheight = abs(maxreg - minreg)
ppheights.append('%e' % regheight)
mat_txt += ['/PPheights ' + ' '.join(ppheights)]
mat_txt += ['',
'/Matrix']
for cidx in range(npoints):
mat_txt.append(' '.join(['%e' % self.inputs.regressors[key][cidx] for key in regs]))
mat_txt = '\n'.join(mat_txt)
# write t-con file
con_txt = []
counter = 0
tconmap = {}
for conidx, con in enumerate(self.inputs.contrasts):
if con[1] == 'T':
tconmap[conidx] = counter
counter += 1
con_txt += ['/ContrastName%d %s' % (counter, con[0])]
con_txt += ['/NumWaves %d' % nwaves,
'/NumContrasts %d' % ntcons,
'/PPheights %s' % ' '.join(['%e' % 1 for i in range(counter)]),
'/RequiredEffect %s' % ' '.join(['%.3f' % 100 for i in range(counter)]),
'',
'/Matrix']
for idx in sorted(tconmap.keys()):
convals = np.zeros((nwaves, 1))
for regidx, reg in enumerate(self.inputs.contrasts[idx][2]):
convals[regs.index(reg)] = self.inputs.contrasts[idx][3][regidx]
con_txt.append(' '.join(['%e' % val for val in convals]))
con_txt = '\n'.join(con_txt)
# write f-con file
fcon_txt = ''
if nfcons:
fcon_txt = ['/NumWaves %d' % ntcons,
'/NumContrasts %d' % nfcons,
'',
'/Matrix']
for conidx, con in enumerate(self.inputs.contrasts):
if con[1] == 'F':
convals = np.zeros((ntcons, 1))
for tcon in con[2]:
convals[tconmap[self.inputs.contrasts.index(tcon)]] = 1
fcon_txt.append(' '.join(['%d' % val for val in convals]))
fcon_txt = '\n'.join(fcon_txt)
# write group file
grp_txt = ['/NumWaves 1',
'/NumPoints %d' % npoints,
'',
'/Matrix']
for i in range(npoints):
if isdefined(self.inputs.groups):
grp_txt += ['%d' % self.inputs.groups[i]]
else:
grp_txt += ['1']
grp_txt = '\n'.join(grp_txt)
txt = {'design.mat': mat_txt,
'design.con': con_txt,
'design.fts': fcon_txt,
'design.grp': grp_txt}
# write design files
for key, val in txt.items():
if ('fts' in key) and (nfcons == 0):
continue
filename = key.replace('_', '.')
f = open(os.path.join(cwd, filename), 'wt')
f.write(val)
f.close()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
nfcons = sum([1 for con in self.inputs.contrasts if con[1] == 'F'])
for field in outputs.keys():
if ('fts' in field) and (nfcons == 0):
continue
outputs[field] = os.path.join(os.getcwd(),
field.replace('_', '.'))
return outputs
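# Editor's sketch: with the docstring's inputs (regressors reg1/reg2 and a
# single T contrast on reg1), the regressor names sort to ['reg1', 'reg2'] and
# the generated design.con carries one contrast row, weight 1 on reg1 and 0 on
# reg2:
#
#     /ContrastName1 group mean
#     /NumWaves 2
#     /NumContrasts 1
#     ...
#     /Matrix
#     1.000000e+00 0.000000e+00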
class SMMInputSpec(FSLCommandInputSpec):
spatial_data_file = File(exists=True, position=0, argstr='--sdf="%s"', mandatory=True,
desc="statistics spatial map", copyfile=False)
mask = File(exists=True, position=1, argstr='--mask="%s"', mandatory=True,
desc="mask file", copyfile=False)
no_deactivation_class = traits.Bool(position=2, argstr="--zfstatmode",
desc="enforces no deactivation class")
class SMMOutputSpec(TraitedSpec):
null_p_map = File(exists=True)
activation_p_map = File(exists=True)
deactivation_p_map = File(exists=True)
class SMM(FSLCommand):
'''
Spatial Mixture Modelling. For more detail on the spatial mixture modelling see
Mixture Models with Adaptive Spatial Regularisation for Segmentation with an Application to FMRI Data;
Woolrich, M., Behrens, T., Beckmann, C., and Smith, S.; IEEE Trans. Medical Imaging, 24(1):1-11, 2005.
'''
_cmd = 'mm --ld=logdir'
input_spec = SMMInputSpec
output_spec = SMMOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
#TODO get the true logdir from the stdout
outputs['null_p_map'] = self._gen_fname(basename="w1_mean", cwd="logdir")
outputs['activation_p_map'] = self._gen_fname(basename="w2_mean", cwd="logdir")
if not isdefined(self.inputs.no_deactivation_class) or not self.inputs.no_deactivation_class:
outputs['deactivation_p_map'] = self._gen_fname(basename="w3_mean", cwd="logdir")
return outputs
class MELODICInputSpec(FSLCommandInputSpec):
in_files = InputMultiPath(File(exists=True), argstr="-i %s", mandatory=True, position=0,
desc="input file names (either single file name or a list)")
out_dir = Directory(argstr="-o %s", desc="output directory name", genfile=True)
mask = File(exists=True, argstr="-m %s", desc="file name of mask for thresholding")
no_mask = traits.Bool(argstr="--nomask", desc="switch off masking")
update_mask = traits.Bool(argstr="--update_mask", desc="switch off mask updating")
no_bet = traits.Bool(argstr="--nobet", desc="switch off BET")
bg_threshold = traits.Float(argstr="--bgthreshold=%f", desc="brain/non-brain threshold used to mask non-brain voxels, as a percentage (only if --nobet selected)")
dim = traits.Int(argstr="-d %d", desc="dimensionality reduction into #num dimensions"\
"(default: automatic estimation)")
dim_est = traits.Str(argstr="--dimest=%s", desc="use specific dim. estimation technique:"\
" lap, bic, mdl, aic, mean (default: lap)")
sep_whiten = traits.Bool(argstr="--sep_whiten", desc="switch on separate whitening")
sep_vn = traits.Bool(argstr="--sep_vn", desc="switch off joined variance normalization")
num_ICs = traits.Int(argstr="-n %d", desc="number of IC's to extract (for deflation approach)")
approach = traits.Str(argstr="-a %s", desc="approach for decomposition, 2D: defl, symm (default), "\
" 3D: tica (default), concat")
non_linearity = traits.Str(argstr="--nl=%s", desc="nonlinearity: gauss, tanh, pow3, pow4")
var_norm = traits.Bool(argstr="--vn", desc="switch off variance normalization")
pbsc = traits.Bool(argstr="--pbsc", desc="switch off conversion to percent BOLD signal change")
cov_weight = traits.Float(argstr="--covarweight=%f", desc="voxel-wise weights for the covariance "\
"matrix (e.g. segmentation information)")
epsilon = traits.Float(argstr="--eps=%f", desc="minimum error change")
epsilonS = traits.Float(argstr="--epsS=%f", desc="minimum error change for rank-1 approximation in TICA")
maxit = traits.Int(argstr="--maxit=%d", desc="maximum number of iterations before restart")
max_restart = traits.Int(argstr="--maxrestart=%d", desc="maximum number of restarts")
mm_thresh = traits.Float(argstr="--mmthresh=%f", desc="threshold for Mixture Model based inference")
no_mm = traits.Bool(argstr="--no_mm", desc="switch off mixture modelling on IC maps")
ICs = File(exists=True, argstr="--ICs=%s", desc="filename of the IC components file for mixture modelling")
mix = File(exists=True, argstr="--mix=%s", desc="mixing matrix for mixture modelling / filtering")
smode = File(exists=True, argstr="--smode=%s", desc="matrix of session modes for report generation")
rem_cmp = traits.List(traits.Int, argstr="-f %d", desc="component numbers to remove")
report = traits.Bool(argstr="--report", desc="generate Melodic web report")
bg_image = File(exists=True, argstr="--bgimage=%s", desc="specify background image for report"\
" (default: mean image)")
tr_sec = traits.Float(argstr="--tr=%f", desc="TR in seconds")
log_power = traits.Bool(argstr="--logPower", desc="calculate log of power for frequency spectrum")
t_des = File(exists=True, argstr="--Tdes=%s", desc="design matrix across time-domain")
t_con = File(exists=True, argstr="--Tcon=%s", desc="t-contrast matrix across time-domain")
s_des = File(exists=True, argstr="--Sdes=%s", desc="design matrix across subject-domain")
s_con = File(exists=True, argstr="--Scon=%s", desc="t-contrast matrix across subject-domain")
out_all = traits.Bool(argstr="--Oall", desc="output everything")
out_unmix = traits.Bool(argstr="--Ounmix", desc="output unmixing matrix")
out_stats = traits.Bool(argstr="--Ostats", desc="output thresholded maps and probability maps")
out_pca = traits.Bool(argstr="--Opca", desc="output PCA results")
out_white = traits.Bool(argstr="--Owhite", desc="output whitening/dewhitening matrices")
out_orig = traits.Bool(argstr="--Oorig", desc="output the original ICs")
out_mean = traits.Bool(argstr="--Omean", desc="output mean volume")
report_maps = traits.Str(argstr="--report_maps=%s", desc="control string for spatial map images (see slicer)")
remove_deriv = traits.Bool(argstr="--remove_deriv", desc="removes every second entry in paradigm"\
" file (EV derivatives)")
class MELODICOutputSpec(TraitedSpec):
out_dir = Directory(exists=True)
report_dir = Directory(exists=True)
class MELODIC(FSLCommand):
"""Multivariate Exploratory Linear Optimised Decomposition into Independent Components
Examples
--------
>>> melodic_setup = MELODIC()
>>> melodic_setup.inputs.approach = 'tica'
>>> melodic_setup.inputs.in_files = ['functional.nii', 'functional2.nii', 'functional3.nii']
>>> melodic_setup.inputs.no_bet = True
>>> melodic_setup.inputs.bg_threshold = 10
>>> melodic_setup.inputs.tr_sec = 1.5
>>> melodic_setup.inputs.mm_thresh = 0.5
>>> melodic_setup.inputs.out_stats = True
>>> melodic_setup.inputs.t_des = 'timeDesign.mat'
>>> melodic_setup.inputs.t_con = 'timeDesign.con'
>>> melodic_setup.inputs.s_des = 'subjectDesign.mat'
>>> melodic_setup.inputs.s_con = 'subjectDesign.con'
>>> melodic_setup.inputs.out_dir = 'groupICA.out'
>>> melodic_setup.run() # doctest: +SKIP
"""
input_spec = MELODICInputSpec
output_spec = MELODICOutputSpec
_cmd = 'melodic'
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_dir'] = self.inputs.out_dir
if not isdefined(outputs['out_dir']):
outputs['out_dir'] = self._gen_filename("out_dir")
if isdefined(self.inputs.report) and self.inputs.report:
outputs['report_dir'] = os.path.join(self._gen_filename("out_dir"), "report")
return outputs
def _gen_filename(self, name):
if name == "out_dir":
return os.getcwd()
class SmoothEstimateInputSpec(FSLCommandInputSpec):
dof = traits.Int(argstr='--dof=%d', mandatory=True,
xor=['zstat_file'],
desc='number of degrees of freedom')
mask_file = File(argstr='--mask=%s',
exists=True, mandatory=True,
desc='brain mask volume')
residual_fit_file = File(argstr='--res=%s',
exists=True, requires=['dof'],
desc='residual-fit image file')
zstat_file = File(argstr='--zstat=%s',
exists=True, xor=['dof'],
desc='zstat image file')
class SmoothEstimateOutputSpec(TraitedSpec):
dlh = traits.Float(desc='smoothness estimate sqrt(det(Lambda))')
volume = traits.Int(desc='number of voxels in mask')
resels = traits.Float(desc='number of resels')
class SmoothEstimate(FSLCommand):
""" Estimates the smoothness of an image
Examples
--------
>>> est = SmoothEstimate()
>>> est.inputs.zstat_file = 'zstat1.nii.gz'
>>> est.inputs.mask_file = 'mask.nii'
>>> est.cmdline
'smoothest --mask=mask.nii --zstat=zstat1.nii.gz'
"""
input_spec = SmoothEstimateInputSpec
output_spec = SmoothEstimateOutputSpec
_cmd = 'smoothest'
def aggregate_outputs(self, runtime=None, needed_outputs=None):
outputs = self._outputs()
stdout = runtime.stdout.split('\n')
outputs.dlh = float(stdout[0].split()[1])
outputs.volume = int(stdout[1].split()[1])
outputs.resels = float(stdout[2].split()[1])
return outputs
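# Editor's sketch: aggregate_outputs() above assumes smoothest prints three
# whitespace-separated "NAME value" lines (DLH, VOLUME, RESELS). A standalone
# parse of such stdout, with hypothetical example text:
def _parse_smoothest_stdout_sketch(stdout):
    # e.g. stdout == 'DLH 0.123\nVOLUME 20000\nRESELS 45.6'
    lines = stdout.split('\n')
    return (float(lines[0].split()[1]),   # dlh
            int(lines[1].split()[1]),     # volume
            float(lines[2].split()[1]))   # resels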
class ClusterInputSpec(FSLCommandInputSpec):
in_file = File(argstr='--in=%s', mandatory=True,
exists=True, desc='input volume')
threshold = traits.Float(argstr='--thresh=%.10f',
mandatory=True,
desc='threshold for input volume')
out_index_file = traits.Either(traits.Bool, File,
argstr='--oindex=%s',
desc='output of cluster index (in size order)', hash_files=False)
out_threshold_file = traits.Either(traits.Bool, File,
argstr='--othresh=%s',
desc='thresholded image', hash_files=False)
out_localmax_txt_file = traits.Either(traits.Bool, File,
argstr='--olmax=%s',
desc='local maxima text file', hash_files=False)
out_localmax_vol_file = traits.Either(traits.Bool, File,
argstr='--olmaxim=%s',
desc='output of local maxima volume', hash_files=False)
out_size_file = traits.Either(traits.Bool, File,
argstr='--osize=%s',
desc='filename for output of size image', hash_files=False)
out_max_file = traits.Either(traits.Bool, File,
argstr='--omax=%s',
desc='filename for output of max image', hash_files=False)
out_mean_file = traits.Either(traits.Bool, File,
argstr='--omean=%s',
desc='filename for output of mean image', hash_files=False)
out_pval_file = traits.Either(traits.Bool, File,
argstr='--opvals=%s',
desc='filename for image output of log pvals', hash_files=False)
pthreshold = traits.Float(argstr='--pthresh=%.10f',
requires=['dlh', 'volume'],
desc='p-threshold for clusters')
peak_distance = traits.Float(argstr='--peakdist=%.10f',
desc='minimum distance between local maxima/minima, in mm (default 0)')
cope_file = traits.File(argstr='--cope=%s',
desc='cope volume')
volume = traits.Int(argstr='--volume=%d',
desc='number of voxels in the mask')
dlh = traits.Float(argstr='--dlh=%.10f',
desc='smoothness estimate = sqrt(det(Lambda))')
fractional = traits.Bool(argstr='--fractional',
desc='interprets the threshold as a fraction of the robust range')
connectivity = traits.Int(argstr='--connectivity=%d',
desc='the connectivity of voxels (default 26)')
use_mm = traits.Bool(argstr='--mm', desc='use mm, not voxel, coordinates')
find_min = traits.Bool(argstr='--min', desc='find minima instead of maxima')
no_table = traits.Bool(argstr='--no_table', desc='suppresses printing of the table info')
minclustersize = traits.Bool(argstr='--minclustersize',
desc='prints out minimum significant cluster size')
xfm_file = File(argstr='--xfm=%s',
desc='filename for Linear: input->standard-space transform. Non-linear: input->highres transform')
std_space_file = File(argstr='--stdvol=%s',
desc='filename for standard-space volume')
num_maxima = traits.Int(argstr='--num=%d',
desc='no of local maxima to report')
warpfield_file = File(argstr='--warpvol=%s',
desc='file containing warpfield')
class ClusterOutputSpec(TraitedSpec):
index_file = File(desc='output of cluster index (in size order)')
threshold_file = File(desc='thresholded image')
localmax_txt_file = File(desc='local maxima text file')
localmax_vol_file = File(desc='output of local maxima volume')
size_file = File(desc='filename for output of size image')
max_file = File(desc='filename for output of max image')
mean_file = File(desc='filename for output of mean image')
pval_file = File(desc='filename for image output of log pvals')
class Cluster(FSLCommand):
""" Uses FSL cluster to perform clustering on statistical output
Examples
--------
>>> cl = Cluster()
>>> cl.inputs.threshold = 2.3
>>> cl.inputs.in_file = 'zstat1.nii.gz'
>>> cl.inputs.out_localmax_txt_file = 'stats.txt'
>>> cl.cmdline
'cluster --in=zstat1.nii.gz --olmax=stats.txt --thresh=2.3000000000'
"""
input_spec = ClusterInputSpec
output_spec = ClusterOutputSpec
_cmd = 'cluster'
filemap = {'out_index_file': 'index', 'out_threshold_file':'threshold',
'out_localmax_txt_file': 'localmax.txt',
'out_localmax_vol_file': 'localmax',
'out_size_file': 'size', 'out_max_file': 'max',
'out_mean_file': 'mean', 'out_pval_file': 'pval'}
def _list_outputs(self):
outputs = self.output_spec().get()
for key, suffix in self.filemap.items():
outkey = key[4:]
inval = getattr(self.inputs, key)
if isdefined(inval):
if isinstance(inval, bool):
if inval:
change_ext = True
if suffix.endswith('.txt'):
change_ext=False
outputs[outkey] = self._gen_fname(self.inputs.in_file,
suffix='_' + suffix,
change_ext=change_ext)
else:
outputs[outkey] = os.path.abspath(inval)
return outputs
def _format_arg(self, name, spec, value):
if name in self.filemap.keys():
if isinstance(value, bool):
fname = self._list_outputs()[name[4:]]
else:
fname = value
return spec.argstr % fname
return super(Cluster, self)._format_arg(name, spec, value)
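# Editor's sketch: each out_*_file input is Either(Bool, File); passing True
# asks the interface to derive a name from in_file via _gen_fname, while a
# string is used verbatim. For instance (names hypothetical, extension follows
# FSLOUTPUTTYPE):
#
#     cl = Cluster(in_file='zstat1.nii.gz', threshold=2.3, out_index_file=True)
#     # -> '--oindex=zstat1_index.nii.gz' on the command line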
class RandomiseInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, desc='4D input file', argstr='-i %s', position=0, mandatory=True)
base_name = traits.Str('tbss_', desc='the rootname that all generated files will have',
argstr='-o %s', position=1, usedefault=True)
design_mat = File(exists=True, desc='design matrix file', argstr='-d %s', position=2, mandatory=True)
tcon = File(exists=True, desc='t contrasts file', argstr='-t %s', position=3, mandatory=True)
fcon = File(exists=True, desc='f contrasts file', argstr='-f %s')
mask = File(exists=True, desc='mask image', argstr='-m %s')
x_block_labels = File(exists=True, desc='exchangeability block labels file', argstr='-e %s')
demean = traits.Bool(desc='demean data temporally before model fitting', argstr='-D')
one_sample_group_mean = traits.Bool(desc='perform 1-sample group-mean test instead of generic permutation test',
argstr='-l')
show_total_perms = traits.Bool(desc='print out how many unique permutations would be generated and exit',
argstr='-q')
show_info_parallel_mode = traits.Bool(desc='print out information required for parallel mode and exit',
argstr='-Q')
vox_p_values = traits.Bool(desc='output voxelwise (corrected and uncorrected) p-value images',
argstr='-x')
tfce = traits.Bool(desc='carry out Threshold-Free Cluster Enhancement', argstr='-T')
tfce2D = traits.Bool(desc='carry out Threshold-Free Cluster Enhancement with 2D optimisation',
argstr='--T2')
f_only = traits.Bool(desc='calculate f-statistics only', argstr='--f_only')
raw_stats_imgs = traits.Bool(desc='output raw ( unpermuted ) statistic images', argstr='-R')
p_vec_n_dist_files = traits.Bool(desc='output permutation vector and null distribution text files',
argstr='-P')
num_perm = traits.Int(argstr='-n %d', desc='number of permutations (default 5000, set to 0 for exhaustive)')
seed = traits.Int(argstr='--seed %d', desc='specific integer seed for random number generator')
var_smooth = traits.Int(argstr='-v %d', desc='use variance smoothing (std is in mm)')
c_thresh = traits.Float(argstr='-c %.2f', desc='carry out cluster-based thresholding')
cm_thresh = traits.Float(argstr='-C %.2f', desc='carry out cluster-mass-based thresholding')
f_c_thresh = traits.Float(argstr='-F %.2f', desc='carry out f cluster thresholding')
f_cm_thresh = traits.Float(argstr='-S %.2f', desc='carry out f cluster-mass thresholding')
tfce_H = traits.Float(argstr='--tfce_H %.2f', desc='TFCE height parameter (default=2)')
tfce_E = traits.Float(argstr='--tfce_E %.2f', desc='TFCE extent parameter (default=0.5)')
tfce_C = traits.Float(argstr='--tfce_C %.2f', desc='TFCE connectivity (6 or 26; default=6)')
vxl = traits.List(traits.Int, argstr='--vxl %d', desc='list of numbers indicating voxelwise EVs ' +
'position in the design matrix (list order corresponds to files in vxf option)')
vxf = traits.List(traits.Int, argstr='--vxf %d', desc='list of 4D images containing voxelwise EVs ' +
'(list order corresponds to numbers in vxl option)')
class RandomiseOutputSpec(TraitedSpec):
tstat_files = traits.List(
File(exists=True),
desc='t contrast raw statistic')
fstat_files = traits.List(
File(exists=True),
desc='f contrast raw statistic')
t_p_files = traits.List(
File(exists=True),
desc='f contrast uncorrected p values files')
f_p_files = traits.List(
File(exists=True),
desc='f contrast uncorrected p values files')
t_corrected_p_files = traits.List(
File(exists=True),
desc='t contrast FWE (Family-wise error) corrected p values files')
f_corrected_p_files = traits.List(
File(exists=True),
desc='f contrast FWE (Family-wise error) corrected p values files')
class Randomise(FSLCommand):
"""XXX UNSTABLE DO NOT USE
FSL Randomise: feeds the 4D projected FA data into GLM
modelling and thresholding
in order to find voxels which correlate with your model
Example
-------
>>> import nipype.interfaces.fsl as fsl
>>> rand = fsl.Randomise(in_file='allFA.nii', \
mask = 'mask.nii', \
tcon='design.con', \
design_mat='design.mat')
>>> rand.cmdline
'randomise -i allFA.nii -o tbss_ -d design.mat -t design.con -m mask.nii'
"""
_cmd = 'randomise'
input_spec = RandomiseInputSpec
output_spec = RandomiseOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['tstat_files'] = glob(self._gen_fname(\
'%s_tstat*.nii' % self.inputs.base_name))
outputs['fstat_files'] = glob(self._gen_fname(\
'%s_fstat*.nii' % self.inputs.base_name))
prefix = False
if self.inputs.tfce or self.inputs.tfce2D:
prefix = 'tfce'
elif self.inputs.vox_p_values:
prefix = 'vox'
elif self.inputs.c_thresh or self.inputs.f_c_thresh:
prefix = 'clustere'
elif self.inputs.cm_thresh or self.inputs.f_cm_thresh:
prefix = 'clusterm'
if prefix:
outputs['t_p_files'] = glob(self._gen_fname(\
'%s_%s_p_tstat*' % (self.inputs.base_name, prefix)))
outputs['t_corrected_p_files'] = glob(self._gen_fname(\
'%s_%s_corrp_tstat*.nii' % (self.inputs.base_name, prefix)))
outputs['f_p_files'] = glob(self._gen_fname(\
'%s_%s_p_fstat*.nii' % (self.inputs.base_name, prefix)))
outputs['f_corrected_p_files'] = glob(self._gen_fname(\
'%s_%s_corrp_fstat*.nii' % (self.inputs.base_name, prefix)))
return outputs
christianbrodbeck/nipype | nipype/interfaces/fsl/model.py | Python | bsd-3-clause | 69,529 | ["Gaussian"] | 2e33f326178064e910303e7cb5ebcf585988dff72f0ecb21498b4c2c02e77397
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.ansible_tower.explorer import TowerCreateServiceDialogFromTemplateView
from cfme.infrastructure.config_management import AnsibleTower
from cfme.utils.testgen import config_managers
from cfme.utils.testgen import generate
from cfme.utils.update import update
pytest_generate_tests = generate(gen_func=config_managers)
TEMPLATE_TYPE = {
"job": "Job Template (Ansible Tower)",
"workflow": "Workflow Template (Ansible Tower)",
}
@pytest.fixture
def config_manager(config_manager_obj, appliance):
""" Fixture that provides a random config manager and sets it up"""
config_manager_obj.appliance = appliance
config_manager_obj.create()
yield config_manager_obj
config_manager_obj.delete()
@pytest.fixture
def config_system(config_manager):
return fauxfactory.gen_choice(config_manager.systems)
@pytest.mark.tier(3)
def test_config_manager_detail_config_btn(request, config_manager):
"""
Polarion:
assignee: nachandr
caseimportance: medium
initialEstimate: 1/2h
casecomponent: Ansible
"""
config_manager.refresh_relationships()
@pytest.mark.tier(2)
def test_config_manager_add(request, config_manager_obj):
"""
Polarion:
assignee: nachandr
casecomponent: Ansible
initialEstimate: 1/4h
"""
request.addfinalizer(config_manager_obj.delete)
config_manager_obj.create()
@pytest.mark.tier(3)
def test_config_manager_add_invalid_url(request, config_manager_obj):
"""
Polarion:
assignee: nachandr
caseimportance: medium
initialEstimate: 1/15h
casecomponent: Ansible
"""
request.addfinalizer(config_manager_obj.delete)
config_manager_obj.url = 'https://invalid_url'
error_message = 'getaddrinfo: Name or service not known'
with pytest.raises(Exception, match=error_message):
config_manager_obj.create()
@pytest.mark.tier(3)
def test_config_manager_add_invalid_creds(request, config_manager_obj):
"""
Polarion:
assignee: nachandr
caseimportance: medium
initialEstimate: 1/4h
casecomponent: Ansible
"""
request.addfinalizer(config_manager_obj.delete)
config_manager_obj.credentials.principal = 'invalid_user'
if config_manager_obj.type == "Ansible Tower":
msg = ('validation was not successful: {"detail":"Authentication credentials '
'were not provided. To establish a login session, visit /api/login/."}')
else:
msg = 'Credential validation was not successful: 401 Unauthorized'
with pytest.raises(Exception, match=msg):
config_manager_obj.create()
@pytest.mark.tier(3)
def test_config_manager_edit(request, config_manager):
"""
Polarion:
assignee: nachandr
caseimportance: medium
initialEstimate: 1/15h
casecomponent: Ansible
"""
new_name = fauxfactory.gen_alpha(8)
old_name = config_manager.name
with update(config_manager):
config_manager.name = new_name
request.addfinalizer(lambda: config_manager.update(updates={'name': old_name}))
assert (config_manager.name == new_name and config_manager.exists),\
"Failed to update configuration manager's name"
@pytest.mark.tier(3)
def test_config_manager_remove(config_manager):
"""
Polarion:
assignee: nachandr
caseimportance: medium
initialEstimate: 1/15h
casecomponent: Ansible
"""
config_manager.delete()
# Disable this test for Tower; no Configuration profiles can be retrieved from the Tower side yet.
# This is all rather hackish because the config manager isn't a proper provider.
@pytest.mark.tier(3)
@test_requirements.tag
@pytest.mark.uncollectif(lambda config_manager_obj:
isinstance(config_manager_obj, AnsibleTower),
reason='Ansible tower not valid for this test')
def test_config_system_tag(config_system, tag, appliance, config_manager, config_manager_obj):
"""
Polarion:
assignee: anikifor
initialEstimate: 1/4h
casecomponent: Ansible
"""
config_system.add_tag(tag=tag, details=False)
assert tag in config_system.get_tags(), "Added tag not found on configuration system"
@pytest.mark.tier(3)
@test_requirements.tag
@pytest.mark.uncollectif(lambda config_manager_obj:
not isinstance(config_manager_obj, AnsibleTower),
reason='Only Ansible tower is valid for this test')
def test_ansible_tower_job_templates_tag(request, config_manager, tag, config_manager_obj):
"""
Polarion:
assignee: anikifor
initialEstimate: 1/4h
casecomponent: Ansible
caseimportance: high
Bugzilla:
1673104
"""
try:
job_template = config_manager.appliance.collections.ansible_tower_job_templates.all()[0]
except IndexError:
pytest.skip("No job template was found")
job_template.add_tag(tag=tag, details=False)
request.addfinalizer(lambda: job_template.remove_tag(tag=tag))
assert tag in job_template.get_tags(), "Added tag not found on configuration system"
# def test_config_system_reprovision(config_system):
# # TODO specify machine per stream in yamls or use mutex (by tagging/renaming)
# pass
@pytest.mark.tier(3)
@pytest.mark.uncollectif(lambda config_manager_obj:
not isinstance(config_manager_obj, AnsibleTower),
reason='Only Ansible tower is valid for this test')
@pytest.mark.parametrize('template_type', TEMPLATE_TYPE.values(), ids=list(TEMPLATE_TYPE.keys()))
def test_ansible_tower_service_dialog_creation_from_template(config_manager, appliance,
template_type, config_manager_obj):
"""
Polarion:
assignee: nachandr
initialEstimate: 1/4h
casecomponent: Ansible
caseimportance: high
"""
try:
job_template = config_manager.appliance.collections.ansible_tower_job_templates.filter(
{"job_type": template_type}).all()[0]
except IndexError:
pytest.skip("No job template was found")
dialog_label = fauxfactory.gen_alpha(8)
dialog = job_template.create_service_dailog(dialog_label)
view = job_template.browser.create_view(TowerCreateServiceDialogFromTemplateView)
view.flash.assert_success_message('Service Dialog "{}" was successfully created'.format(
dialog_label))
assert dialog.exists
dialog.delete_if_exists()
@pytest.mark.manual
@test_requirements.tower
@pytest.mark.tier(1)
def test_config_manager_add_multiple_times_ansible_tower_243():
"""
Try to add same Tower manager twice (use the same IP/hostname). It
should fail and flash message should be displayed.
Polarion:
assignee: nachandr
caseimportance: medium
caseposneg: negative
casecomponent: Ansible
initialEstimate: 1/4h
startsin: 5.7
"""
pass
@pytest.mark.manual
@test_requirements.tower
def test_config_manager_job_template_refresh():
"""
After first Tower refresh, go to Tower UI and change name of 1 job
template. Go back to CFME UI, perform refresh and check if job
template name was changed.
Polarion:
assignee: nachandr
casecomponent: Ansible
initialEstimate: 1/2h
"""
pass
@pytest.mark.manual
@test_requirements.tower
def test_config_manager_accordion_tree():
"""
Make sure there is accordion tree, once Tower is added to the UI.
Bugzilla:
1560552
Polarion:
assignee: nachandr
casecomponent: WebUI
caseimportance: low
initialEstimate: 1/4h
startsin: 5.8
"""
pass
@pytest.mark.manual
@test_requirements.tower
@pytest.mark.tier(1)
def test_config_manager_remove_objects_ansible_tower_310():
"""
1) Add Configuration manager
2) Perform refresh and wait until it is successfully refreshed
3) Remove provider
4) Click through accordion and double check that no objects (e.g.
tower job templates) were left in the UI
Polarion:
assignee: nachandr
caseimportance: medium
casecomponent: Ansible
initialEstimate: 1/4h
startsin: 5.7
"""
pass
@pytest.mark.manual
@test_requirements.tower
@pytest.mark.tier(1)
def test_config_manager_change_zone():
"""
Add Ansible Tower in multi appliance, add it to appliance with UI. Try
to change to zone where worker is enabled.
Bugzilla:
1353015
Polarion:
assignee: nachandr
casecomponent: Provisioning
caseimportance: medium
initialEstimate: 1h
startsin: 5.8
"""
pass
izapolsk/integration_tests | cfme/tests/infrastructure/test_config_management.py | Python | gpl-2.0 | 8,785 | ["VisIt"] | 40a293222457f202505adca01b1ebad95714c792747959c227769468603e003d
# -*- coding: utf-8 -*-
"""
Lexer for the NesC language.
http://nescc.sourceforge.net/
:copyright: 2008 by Peter Vizi
:license: GPLv3, see LICENSE for more details.
"""
import re
try:
set
except NameError:
from sets import Set as set
from pygments.scanner import Scanner
from pygments.lexer import RegexLexer, include, bygroups, using, \
this
from pygments.util import get_bool_opt, get_list_opt
from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Number, Punctuation, \
Error
# backwards compatibility
from pygments.lexers.functional import OcamlLexer
__all__ = ['NesCLexer']
class NesCLexer(RegexLexer):
"""
For NesC source code with preprocessor directives.
"""
name = 'NesC'
aliases = ['nesc']
filenames = ['*.nc']
mimetypes = ['text/x-chdr', 'text/x-csrc', 'application/x-netcdf']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'whitespace': [
(r'^\s*#if\s+0', Comment.Preproc, 'if0'),
(r'^\s*#', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment),
],
'statements': [
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
(r'(auto|break|case|const|continue|default|do|else|enum|extern|'
r'for|goto|if|register|restricted|return|sizeof|static|struct|'
r'new|as|call|command|components|configuration|event|implementation|interface|module|post|provides|signal|task|uses|includes|atomic|'
r'switch|typedef|union|volatile|virtual|while)\b', Keyword),
(r'(int|long|float|short|double|char|unsigned|signed|void|'
r'uint8_t|uint16_t|uint32_t|message_t|bool|error_t|am_id_t|am_addr_t|nx_am_id_t|am_group_t|nx_am_group_t|nx_am_addr_t|'
r'_Complex|_Imaginary|_Bool)\b', Keyword.Type),
(r'(_{0,2}inline|naked|restrict|thread|typename)\b', Keyword.Reserved),
(r'__(asm|int8|based|except|int16|stdcall|cdecl|fastcall|int32|'
r'declspec|finally|int64|try|leave)\b', Keyword.Reserved),
(r'(true|false|NULL|TRUE|FALSE|SUCCESS|FAIL|ESIZE|ECANCEL|EOFF|EBUSY|EINVAL|ERETRY|ERESERVE|EALREADY|ENOMEM|ENOACK|ELAST)\b', Name.Builtin),
('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_.]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')({)',
bygroups(using(this), Name.Function, using(this), Text, Punctuation),
'function'),
# function declarations
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_.]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(;)',
bygroups(using(this), Name.Function, using(this), Text, Punctuation)),
('', Text, 'statement'),
],
'statement' : [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
('{', Punctuation, '#push'),
('}', Punctuation, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment),
(r'//.*?\n', Comment, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment, '#pop'),
(r'.*?\n', Comment),
]
}
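# Editor's usage sketch (assumption: pygments is installed; the NesC snippet is
# made up for illustration):
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = 'module BlinkC { uses interface Boot; }'
    print(highlight(sample, NesCLexer(), TerminalFormatter()))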
petervizi/pygments-nesc | lexer/__init__.py | Python | gpl-3.0 | 5,072 | ["NetCDF"] | dda6c73e67c586c82e08cb0eace34550e9427c32b23437cd9f9505cd83c1105b
import os
from pathlib import Path  # needed for Path(data).stem below

import pysam
from ...fileop import PosixFileSystem
from ....util import Utility
def run_sam_to_bam(*args, **kwargs):
paramindex = 0
if 'data' in kwargs.keys():
data = kwargs['data']
else:
if len(args) == paramindex:
raise ValueError("Argument not given.")
data = args[paramindex]
paramindex += 1
data = Utility.get_normalized_path(data)
    if 'output' in kwargs.keys():
        output = kwargs['output']
    else:
        output = None
        if len(args) > paramindex:
            output = args[paramindex]
    if output:
        output = Utility.get_normalized_path(output)
    else:
        output = Path(data).stem + ".bam"
        output = os.path.join(os.path.dirname(data), os.path.basename(output))
        output = Utility.get_normalized_path(output)
if os.path.exists(output):
os.remove(output)
infile = pysam.AlignmentFile(data, "r")
outfile = pysam.AlignmentFile(output, "wb", template=infile)
for s in infile:
outfile.write(s)
fs = PosixFileSystem(Utility.get_rootdir(2))
if not os.path.exists(output):
raise ValueError("pysam could not generate the file " + fs.strip_root(output))
return fs.strip_root(output)
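# Editor's sketch: the core of the conversion above is pysam's AlignmentFile
# round-trip; stripped of the path plumbing (file names hypothetical):
#
#     with pysam.AlignmentFile('reads.sam', 'r') as infile, \
#          pysam.AlignmentFile('reads.bam', 'wb', template=infile) as outfile:
#         for read in infile:
#             outfile.write(read)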
mainulhossain/phenoproc | app/biowl/libraries/pysam/adapter.py | Python | mit | 1,304 | ["pysam"] | a151cb5b7a73e9e625612db5177c7e59906d4d2fe26f4fe2f14423e135dee0c1
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Created on Mar 18, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 18, 2012"
import unittest
import os
import warnings
from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class BorgQueenTest(unittest.TestCase):
def test_get_data(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
drone = VaspToComputedEntryDrone()
self.queen = BorgQueen(drone, test_dir, 1)
data = self.queen.get_data()
self.assertEqual(len(data), 11)
def test_load_data(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
drone = VaspToComputedEntryDrone()
queen = BorgQueen(drone)
queen.load_data(os.path.join(test_dir, "assimilated.json"))
self.assertEqual(len(queen.get_data()), 1)
if __name__ == "__main__":
unittest.main()
dongsenfo/pymatgen | pymatgen/apps/borg/tests/test_queen.py | Python | mit | 1,323 | ["pymatgen"] | fa8dc8d955f408d1a2f2e844e6a3229d0f9b3dacaf0947d925f665d1c0f76717
from setuptools import setup
setup(
name = 'plumbing',
version = '2.0.3',
description = 'Helps with plumbing-type programing in python',
long_description = open('README.md').read(),
license = 'MIT',
url = 'http://github.com/xapple/plumbing/',
author = 'Lucas Sinclair',
author_email = 'lucas.sinclair@me.com',
classifiers = ['Topic :: Scientific/Engineering :: Bio-Informatics'],
packages = ['plumbing'],
install_requires = ['sh', 'biopython'],
# Install extra dependencies:
# $ pip install -e .[dev]
extras_require={
'dev': [
'setuptools',
'sphinx',
'sphinx_rtd_theme',
],
},
)
DC23/plumbing | setup.py | Python | mit | 851 | ["Biopython"] | f393a7bbf02b852a47cceced94dcae754baf871d4114cdc0c14b8b15a1cbd4f5
#!/usr/bin/env python
"""
This module calculates thermal properties using different equations of state.
"""
from __future__ import division
import warnings
import sys
import subprocess
import unittest
import pymatgen
from pymatgen.agl_thermal.agl_polynomial import polfit
from pymatgen.agl_thermal.agl_polynomial import polin0
from pymatgen.agl_thermal.agl_polynomial import polin1
from pymatgen.agl_thermal.agl_polynomial import polin2
from pymatgen.agl_thermal.agl_polynomial import polin3
from pymatgen.agl_thermal.agl_polynomial import polin4
from pymatgen.agl_thermal.agl_thermal import gauleg
import numpy as np
import os
from numpy import matrix
from numpy import linalg
import math
__author__ = "Cormac Toher"
__copyright__ = "Copyright 2014, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Cormac Toher"
__email__ = "cormac.toher@duke.edu"
__date__ = "April 8, 2014"
# **************************************************************************************
# These functions calculate the thermal properties using different equations of state
# **************************************************************************************
#
#.....numer - numerical EOS calculation.
#
#.....Numer computes the derivatives of the Helmholtz function and the
# static energy needed to obtain Debye's temperature, the static
# pressure, and succesive derivatives of the bulk modulus.
#
# Adapted from original Fortran version written by M. A. Blanco et al.
# See Computer Physics Communications 158, 57-72 (2004) and Journal of Molecular Structure (Theochem) 368, 245-255 (1996) for details
#
def numer (volref, nepol, epol, nfpol, fpol, statcalc, agl_data):
#
#.....Compute Pfit(P), B(P), B'(P), and B''(P)
#
if (agl_data.ieos >= 0):
agl_data.outstr = agl_data.outstr + '\n'
agl_data.outstr = agl_data.outstr + 'NUMERICAL EOS PRESSURE DERIVATIVES \n'
agl_data.outstr = agl_data.outstr + '================================== \n'
agl_data.outstr = agl_data.outstr + " P(GPa) \t V(bohr^3) \t V/V0 \t Pfit(GPa) \t B(GPa) \t B' \t B''(GPa-1) \n"
agl_data.outstr = agl_data.outstr + ' ------------------------------------------------------------------------------------------------------ \n'
for k in xrange(agl_data.npressure):
xeqmin = (agl_data.voleqmin[k]/volref)**agl_data.third
f1 = polin1 (xeqmin, nfpol, fpol)
f2 = polin2 (xeqmin, nfpol, fpol)
f3 = polin3 (xeqmin, nfpol, fpol)
f4 = polin4 (xeqmin, nfpol, fpol)
pt = -xeqmin * f1 / (3.0*agl_data.voleqmin[k]) * agl_data.au2gpa
tmp = 2.0 * f1 - xeqmin * f2
agl_data.bulkmod[k] = -xeqmin / (9.0*agl_data.voleqmin[k]) * tmp * agl_data.au2gpa
tmp2 = (f2 - xeqmin * f3) / tmp
b1 = agl_data.third * (2.0 - xeqmin * tmp2)
b2 = -agl_data.voleqmin[k] * (tmp2*(1.0-xeqmin*tmp2) - xeqmin*xeqmin*f4/tmp) / (agl_data.au2gpa*tmp)
if (k == 0):
agl_data.bu0 = agl_data.bulkmod[k]
agl_data.bu1 = b1
agl_data.bu2 = b2
if (agl_data.ieos >= 0):
agl_data.outstr = agl_data.outstr + ' ' + str(agl_data.pressure[k]).rjust(6) + '\t' + str(agl_data.voleqmin[k]).rjust(10)[:10] + '\t' + str(agl_data.voleqmin[k]/agl_data.voleqmin[0]).rjust(8)[:8] + '\t' + str(pt).rjust(10)[:10] + '\t' + str(agl_data.bulkmod[k]).rjust(10)[:10] + '\t' + str(b1).rjust(6)[:6] + '\t ' + str(b2).rjust(7)[:7] + '\n'
#
#.....Static calculation: get second derivative of static energy
#
if (statcalc):
if (agl_data.ieos >= 0):
agl_data.outstr = agl_data.outstr +'\n'
agl_data.outstr = agl_data.outstr + 'INPUT AND FITTED VALUES OF THE LATTICE ENERGY \n'
agl_data.outstr = agl_data.outstr + '============================================= \n'
agl_data.outstr = agl_data.outstr + '\n'
agl_data.outstr = agl_data.outstr + ' V(bohr^3) E_inp(hartree) E_fit(hartree) \n'
agl_data.outstr = agl_data.outstr + ' --------------------------------------------------\n'
for i in xrange(agl_data.ndata):
f0 = polin0 (agl_data.xconfigvector[i], nepol, epol)
f1 = polin1 (agl_data.xconfigvector[i], nepol, epol)
f2 = polin2 (agl_data.xconfigvector[i], nepol, epol)
tmp = agl_data.xconfigvector[i] * f2 - 2.0 * f1
v3 = 3.0 * agl_data.vol_inp[i]
agl_data.uder.append(tmp * agl_data.xconfigvector[i] / (v3*v3))
if (agl_data.ieos >= 0):
agl_data.outstr = agl_data.outstr + ' ' + str(agl_data.vol_inp[i]).rjust(10)[:10] + '\t ' + str(agl_data.energ_inp[i]).rjust(14)[:14] + '\t ' + str(f0).rjust(14)[:14] + '\n'
#
#.....Dynamic calculation: get static pressure and second derivative of the energy
#
else:
for k in xrange(agl_data.npressure):
xeqmin = (agl_data.voleqmin[k]/volref)**agl_data.third
f1 = polin1 (xeqmin, nepol, epol)
f2 = polin2 (xeqmin, nepol, epol)
f3 = polin3 (xeqmin, nepol, epol)
f4 = polin4 (xeqmin, nepol, epol)
v3 = 3.0 * agl_data.voleqmin[k]
agl_data.pstatic[k] = -f1*agl_data.au2gpa * xeqmin / v3
tmp = xeqmin * f2 - 2.0 * f1
agl_data.udyn[k] = tmp * xeqmin / (v3*v3)
tmp2 = f2 - xeqmin * f3
agl_data.gamma_G[k] = (1.0 + xeqmin * tmp2 / tmp)/6.0
return
#...................................................................
#.....vinet - computes Vinet EOS from (P,V) data.
#
#.....VINET computes the EOS from the (P,V) data. The EOS has the
#     following expression:
#        log H = A + B*(1-x)
#     where H = P*x**2 / (3*(1-x))
#           A = ln(B0)
#           B = (3/2)*(B0' - 1)
#           x = (V/V0)**(1/3)
#
# Adapted from original Fortran version written by M. A. Blanco et al.
# See Computer Physics Communications 158, 57-72 (2004) and Journal of Molecular Structure (Theochem) 368, 245-255 (1996) for details
#...................................................................
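# Editor's sketch: the fit below is ordinary least squares of ln(H) against
# (1-x). With numpy (already imported as np above) the same estimate reads,
# where p and v are hypothetical arrays of pressures and equilibrium volumes
# with v[0] the zero-pressure volume:
#
#     x = (v / v[0]) ** (1.0 / 3.0)
#     h = p[1:] * x[1:] ** 2 / (3.0 * (1.0 - x[1:]))
#     A, lnb0 = np.polyfit(1.0 - x[1:], np.log(h), 1)
#     b0 = np.exp(lnb0)                 # B0
#     b0p = 2.0 * A / 3.0 + 1.0         # B0'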
def vinet (vol0pres, gfe0pres, statcalc, agl_data):
#
#.....fit Log H vs. (1-x)
#
logh = [0.0 for k in range(agl_data.npressure)]
x = [0.0 for k in range(agl_data.npressure)]
db = []
d2b = []
sumz=0.0
sumy=0.0
sumzy=0.0
sumz2=0.0
sumy2=0.0
n=0
x[0] = 1.0
i = 1
while (i < agl_data.npressure):
x[i] = (agl_data.voleqmin[i]/vol0pres)**agl_data.third
h = agl_data.pressure[i]*x[i]*x[i]/(3.0*(1-x[i]))
logh[i] = math.log(h)
z = 1-x[i]
n=n+1
sumz = sumz + z
sumy = sumy + logh[i]
sumzy = sumzy + z*logh[i]
sumz2 = sumz2 + z*z
sumy2 = sumy2 + logh[i]*logh[i]
i = i + 1
lnb0=(sumy*sumz2 - sumzy*sumz)/(n*sumz2 - sumz*sumz)
A=(n*sumzy - sumz*sumy)/(n*sumz2 - sumz*sumz)
raiz=math.sqrt((sumz2 - sumz*sumz/n)*(sumy2 - sumy*sumy/n))
rfit=(sumzy-sumz*sumy/n)/raiz
logh[0] = lnb0
#
#.....obtain B0, B0', B0''
#
agl_data.bu0=math.exp(lnb0)
agl_data.bu1=2.0*A*agl_data.third+1.0
agl_data.bu2 = -(2.0+A*(A+6.0))/(9.0*agl_data.bu0)
#
#.....save static values
#
if (statcalc):
agl_data.g00k = gfe0pres
agl_data.v00k = vol0pres
agl_data.b00k = agl_data.bu0/agl_data.au2gpa
agl_data.A00k = A
#
#.....Compute Pfit(P), B(P), B'(P), and B''(P)
#
agl_data.bulkmod[0]=agl_data.bu0
db.append(agl_data.bu1)
d2b.append(agl_data.bu2)
agl_data.pfit[0]=0.0
i = 1
while (i < agl_data.npressure):
a1x = A * (1.0 - x[i])
ax1 = A * x[i] + 1.0
f0x = x[i] * (1.0-a1x) - 2.0
f1x = ax1 - a1x
f2x = 2.0 * A
f1f0 = f1x / f0x
f2f0 = f2x / f0x
fnw = 1.0 - x[i] * f1f0
x2inv = 1.0 / (x[i]*x[i])
b0exp = agl_data.bu0 * math.exp(a1x)
agl_data.bulkmod[i] = -b0exp * f0x * x2inv
db.append(agl_data.third * (ax1+fnw))
d2b.append(x[i]/(9.0*agl_data.bulkmod[i]) * (x[i]*f2f0 - A + f1f0*fnw))
agl_data.pfit[i] = 3.0 * (1.0-x[i]) * x2inv * b0exp
i = i + 1
#
#.....output
#
agl_data.outstr = agl_data.outstr + "\n"
agl_data.outstr = agl_data.outstr + "VINET EOS PRESSURE DERIVATIVES \n"
agl_data.outstr = agl_data.outstr + "============================== \n"
agl_data.outstr = agl_data.outstr + "\n"
agl_data.outstr = agl_data.outstr + " 1-V/V0 \t Vinet-Func \t P(GPa) \t Pfit(GPa) \t B(GPa) \t B' \t B''(GPa-1) \n"
agl_data.outstr = agl_data.outstr + " ------------------------------------------------------------------------------------------------------------ \n"
for i in xrange(agl_data.npressure):
agl_data.outstr = agl_data.outstr + ' ' + str(1.0-x[i]).rjust(6)[:6] + "\t " + str(logh[i]).rjust(10)[:10] + "\t " + str(agl_data.pressure[i]).rjust(6)[:6] + "\t " + str(agl_data.pfit[i]).rjust(10)[:10] + "\t" + str(agl_data.bulkmod[i]).rjust(10)[:10] + "\t" + str(db[i]).rjust(10)[:10] + "\t " + str(d2b[i]).rjust(10)[:10] + "\n"
agl_data.outstr = agl_data.outstr + "\n"
agl_data.outstr = agl_data.outstr + "B0 = " + str(agl_data.bu0) + ", B0' = " + str(agl_data.bu1) + ", B0'' = " + str(agl_data.bu2) + " reg.coef = " + str(rfit) + "\n"
agl_data.outstr = agl_data.outstr + "\n"
#
#.....Static calculation: get static energy and its second derivative
#
if (statcalc):
for i in xrange(agl_data.ndata):
x00k = (agl_data.vol_inp[i]/agl_data.v00k)**agl_data.third
a1x = agl_data.A00k * (1.0 - x00k)
f0x = x00k * (1.0-a1x) - 2.0
b0exp = agl_data.b00k * math.exp(a1x)
agl_data.ust.append(agl_data.g00k + 9.0*agl_data.v00k/(agl_data.A00k*agl_data.A00k) * (b0exp*(a1x-1.0)+agl_data.b00k))
agl_data.uder.append(-f0x / (x00k*x00k*agl_data.vol_inp[i]) * b0exp)
#
#.......Print input and fitted values of the lattice energy.
#
agl_data.outstr = agl_data.outstr + "\n"
agl_data.outstr = agl_data.outstr + "INPUT AND FITTED VALUES OF THE LATTICE ENERGY \n"
agl_data.outstr = agl_data.outstr + "============================================= \n"
agl_data.outstr = agl_data.outstr + "\n"
agl_data.outstr = agl_data.outstr + " V(bohr^3) E_inp(hartree) E_fit(hartree) \n"
agl_data.outstr = agl_data.outstr + " -------------------------------------------------- \n"
for i in xrange(agl_data.ndata):
agl_data.outstr = agl_data.outstr + ' ' + str(agl_data.vol_inp[i]).rjust(10)[:10] + "\t " + str(agl_data.energ_inp[i]).rjust(14)[:14] + "\t " + str(agl_data.ust[i]).rjust(14)[:14] + "\n"
#
#.....Dynamic calculation: get static pressure and second derivative
# of the energy
#
else:
for i in xrange(agl_data.npressure):
x00k = (agl_data.voleqmin[i]/agl_data.v00k)**agl_data.third
a1x = agl_data.A00k * (1.0 - x00k)
ax1 = agl_data.A00k * x00k + 1.0
f0x = x00k * (1.0-a1x) - 2.0
f1x = ax1 - a1x
f2x = 2.0 * agl_data.A00k
f1f0 = f1x / f0x
fnw = 1.0 - x00k * f1f0
x2inv = 1.0 / (x00k*x00k)
b0exp = agl_data.b00k * math.exp(a1x)
agl_data.pstatic[i] = 3.0 * (1.0-x00k) * x2inv * b0exp * agl_data.au2gpa
agl_data.udyn[i] = -f0x * x2inv * x2inv / (x00k*agl_data.v00k) * b0exp
agl_data.gamma_G[i] = (ax1 + fnw - 1.0)/6.0
return
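#
#.....Illustrative sketch (added for clarity, not part of the original module):
#     one point of the Vinet linearisation fitted in vinet() above.  For a (P, V)
#     pair, H = P*x**2/(3*(1-x)) with x = (V/V0)**(1/3); log(H) plotted against
#     (1-x) is a straight line whose intercept is ln(B0) and whose slope is
#     (3/2)*(B0'-1).  The helper name is hypothetical.
#
def _vinet_logh_sketch(p_gpa, vol, vol0):
    import math
    x = (vol / vol0) ** (1.0 / 3.0)
    h = p_gpa * x * x / (3.0 * (1.0 - x))
    return 1.0 - x, math.log(h)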
#-----------------------------------------------------------------------------
#.....birch - computes the Birch-Murnaghan EOS of order iG from the
# (P,V) data.
#
# The EOS has the following expression:
#
# F = Sum (i=0,iG) a(i)*f^i
#
# being : F = P/[3f(1+2f)^(5/2)]
# f = [x^(-2)-1]/2
# x = [V(i)/V(1)]^(1/3)
#
#-----INPUT
# vol0pres : Molecular volume (bohr^3/mol) at P=0.
# gfe0pres : Gibbs energy (or 0k static energy) at v0 (hartree).
# iG : order of the fitting.
# press() : Pressure values (GPa). common /eos/.
# vinp() : Initial values of the volume (bohr^3/mol). common /input/.
# statcalc: Logical variable that determines if the calculation is
# static or dynamic. In the first case the second derivative
# of the static energy (uder) is computed for all the input
# values of the volume. In the second case the second
# derivative of the static energy (udyn) is computed for
# the equilibrium volumes at the different pressures.
#
#-----OUTPUT
# pstatic() : Static pressures in GPa (only on dynamic calculations).
# uder() : Second derivative of ust(k) for each vinp(). Hy/bohr^6
# udyn() : Second derivative of ust(k) for each V(). Hy/bohr^6
# rms : Root mean square deviation.
# bu0,bu1,bu2 : Bulk modulus and their derivatives at P=0.
#
#.....The output is stored in common /eos/
#
# Adapted from original Fortran version written by M. A. Blanco et al.
# See Computer Physics Communications 158, 57-72 (2004) and Journal of Molecular Structure (Theochem) 368, 245-255 (1996) for details
#-----------------------------------------------------------------------
def birch (vol0pres, gfe0pres, iG, statcalc, agl_data):
tol = 1e-12
npresm2 = agl_data.npressure - 2
izero = 0
if (iG > agl_data.maiG):
agl_data.logstr = agl_data.logstr + "MP AGL birch : Too high fitting order \n"
agl_data.brerr = 1
return
if (math.fabs(agl_data.pressure[0]) > tol):
agl_data.logstr = agl_data.logstr + "MP AGL birch : P(0) must be 0.0 \n"
agl_data.brerr = 1
return
acoef = [0.0 for i in range(agl_data.maiG+1)]
fstr = []
ybir = []
weight = []
db = []
d2b = []
#
#.....Compute the Birch function F and strain variable f.
#
weight.append(0.0)
i = 1
while (i < agl_data.npressure):
rr0 = (agl_data.voleqmin[i]/vol0pres)**agl_data.third
fstr.append((rr0**(-2)-1)/2.0)
        ybir.append(agl_data.pressure[i]/agl_data.au2gpa/(3.0*fstr[i-1]*((1.0+2.0*fstr[i-1])**2.5)))  # F = P/[3f(1+2f)^(5/2)], P in atomic units
weight.append(1.0)
i = i + 1
#
#.....Fitting to a polynomial of order iG.
#
rms, acoef = polfit (izero, npresm2, fstr, ybir, weight, iG)
#
#.....Compute B0,B0',B0''.
#
agl_data.bu0=acoef[0]*agl_data.au2gpa
if (iG == 0):
agl_data.bu1=4.0
agl_data.bu2=-35.0/(9.0*agl_data.bu0)
elif (iG == 1):
agl_data.bu1=4.0+2.0*acoef[1]*agl_data.au2gpa/(3.0*agl_data.bu0)
agl_data.bu2=(-agl_data.bu1*(agl_data.bu1-7.0)-143.0/9.0)/agl_data.bu0
elif (iG >= 2):
agl_data.bu1=4.0+2.0*acoef[1]*agl_data.au2gpa/(3.0*agl_data.bu0);
agl_data.bu2=(2.0*acoef[2]/(agl_data.bu0*3.0)-agl_data.bu1*(agl_data.bu1-7.0)-143.0/9.0)/agl_data.bu0
#
#.....Compute B(P), B'(P), and B''(P). (b(), db(), and d2b().
#
for i in xrange(agl_data.npressure):
if (i == 0):
agl_data.pfit[i]=0.0
agl_data.bulkmod[i]=agl_data.bu0;
db.append(agl_data.bu1)
d2b.append(agl_data.bu2)
else:
st=fstr[i-1]
stsq=math.sqrt(1.0+2.0*st)
s2=stsq*stsq
st32=stsq*s2
st52=st32*s2
pol0 = polin0(st, iG, acoef)
pol1 = polin1(st, iG, acoef)
pol2 = polin2(st, iG, acoef)
pol3=0.0
if (iG > 2):
pol3 = polin3(st, iG, acoef)
#
#.........Fitted pressure and B(P).
#
agl_data.pfit[i]=3.0*st*st52*pol0*agl_data.au2gpa
sum1=st32*(st*s2*pol1+(1.0+7.0*st)*pol0)
agl_data.bulkmod[i]=s2*sum1
sum2=st52*(s2*pol1+2*st*pol1+st*s2*pol2+7*pol0+(1+7*st)*pol1)
den=3*st*st52*pol1+(3.0*st52+15.0*st*st32)*pol0
#
#.........B'(P).
#
db.append((5*sum1+sum2)/den)
d2bdf2=25*stsq*(st*s2*pol1+(1.0+7.0*st)*pol0)
d2bdf2=d2bdf2+10.0*st32*((2.0+11.0*st)*pol1+7*pol0+st*s2*pol2)
d2bdf2=d2bdf2+st52*((3.0+15.0*st)*pol2+18*pol1+st*s2*pol3)
d2pdf2=3*st52*pol1+15*st*st32*pol1+3*st*st52*pol2
d2pdf2=d2pdf2+(30*st32+45*st*stsq)*pol0
d2pdf2=d2pdf2+(3*st52+15*st*st32)*pol1
#
#.........B''(P).
#
d2b.append((den*d2bdf2-(5*sum1+sum2)*d2pdf2)/(den**3))
agl_data.bulkmod[i]=agl_data.bulkmod[i]*agl_data.au2gpa
d2b[i]=d2b[i]/agl_data.au2gpa
#
#.....Output.
#
agl_data.outstr = agl_data.outstr + "\n"
agl_data.outstr = agl_data.outstr + "BIRCH-MURNAGHAN EOS PRESSURE DERIVATIVES \n"
agl_data.outstr = agl_data.outstr + "======================================== \n"
agl_data.outstr = agl_data.outstr + "\n"
agl_data.outstr = agl_data.outstr + " Strain \t Birch-Func \t P(GPa) \t Pfit(GPa) \t B(GPa) \t B' \t B''(GPa-1) \n"
agl_data.outstr = agl_data.outstr + " ----------------------------------------------------------------------------------------------------------- \n"
for i in xrange(agl_data.npressure):
if (i == 0):
agl_data.outstr = agl_data.outstr + ' ' + str(0.0).rjust(6)[:6] + "\t " + str(agl_data.bu0/agl_data.au2gpa).rjust(10)[:10] + "\t " + str(agl_data.pressure[i]).rjust(6)[:6] + "\t " + str(agl_data.pfit[i]).rjust(14)[:14] + "\t" + str(agl_data.bulkmod[i]).rjust(10)[:10] + "\t" + str(db[i]).rjust(10)[:10] + "\t " + str(d2b[i]).rjust(10)[:10] + "\n"
else:
agl_data.outstr = agl_data.outstr + ' ' + str(fstr[i-1]).rjust(6)[:6] + "\t " + str(ybir[i-1]).rjust(10)[:10] + "\t " + str(agl_data.pressure[i]).rjust(6)[:6] + "\t " + str(agl_data.pfit[i]).rjust(14)[:14] + "\t" + str(agl_data.bulkmod[i]).rjust(10)[:10] + "\t" + str(db[i]).rjust(10)[:10] + "\t " + str(d2b[i]).rjust(10)[:10] + "\n"
agl_data.outstr = agl_data.outstr + "\n"
agl_data.outstr = agl_data.outstr + "B0 = " + str(agl_data.bu0) + ", B0' = " + str(agl_data.bu1) + ", B0'' = " + str(agl_data.bu2) + ", reg.coef = " + str(rms) + "\n"
agl_data.outstr = agl_data.outstr + "\n"
if (statcalc):
#
#.......Compute the static potential energy U(V) and its second
# derivative U''(V) with respect to V for all the input
# values of the volume.
#
agl_data.v00k=vol0pres
agl_data.g00k=gfe0pres
for k in xrange(iG + 1):
agl_data.astatic.append(acoef[k])
for k in xrange(agl_data.ndata):
agl_data.ust.append(agl_data.g00k)
agl_data.uder.append(0.0)
st=(agl_data.vol_inp[k]/agl_data.v00k)**agl_data.third
st=((st**(-2))-1)/2.0
s2=(1.0+2.0*st)
pol0 = polin0(st, iG, agl_data.astatic)
pol1 = polin1(st, iG, agl_data.astatic)
v9=9.0*agl_data.v00k
for j in xrange(iG + 1):
agl_data.ust[k]=agl_data.ust[k]+v9*agl_data.astatic[j]/(j+2)*(st**(j+2))
agl_data.uder[k]=s2*s2*s2*s2/agl_data.v00k*(st*s2*pol1+(1.0+7.0*st)*pol0)
#
#.......Print input and fitted values of the lattice energy.
#
agl_data.outstr = agl_data.outstr + "\n"
agl_data.outstr = agl_data.outstr + "INPUT AND FITTED VALUES OF THE LATTICE ENERGY \n"
agl_data.outstr = agl_data.outstr + "============================================= \n"
agl_data.outstr = agl_data.outstr + "\n"
agl_data.outstr = agl_data.outstr + " V(bohr^3) E_inp(hartree) E_fit(hartree) \n"
agl_data.outstr = agl_data.outstr + " -------------------------------------------------- \n"
for i in xrange(agl_data.ndata):
agl_data.outstr = agl_data.outstr + ' ' + str(agl_data.vol_inp[i]).rjust(10)[:10] + "\t " + str(agl_data.energ_inp[i]).rjust(14)[:14] + "\t " + str(agl_data.ust[i]).rjust(14)[:14] + "\n"
return
else:
#
#.......Compute the second derivative U''(V) with respect to V
# for all the equilibrium values of the volume at the
# different pressures.
#
for k in xrange(agl_data.npressure):
st=(agl_data.voleqmin[k]/agl_data.v00k)**agl_data.third
st=((st)**(-2)-1)/2.0
s2=(1.0+2.0*st)
s22=s2*s2
pol0 = polin0(st, iG, agl_data.astatic)
pol1 = polin1(st, iG, agl_data.astatic)
pol2 = polin2(st, iG, agl_data.astatic)
pol3=0.0
if (iG > 2):
pol3 = polin3(st, iG, agl_data.astatic)
agl_data.pstatic[k]=agl_data.au2gpa*3.0*st*(s2**2.5)*pol0
tmp = (1.0+7.0*st)*pol0 + st*s2*pol1
agl_data.udyn[k] = s22*s22 / agl_data.v00k * tmp
tmp = 1.0 / tmp
tmp2 = s2*tmp * (7.0*pol0 + (2.0+11.0*st)*pol1 + st*s2*pol2)
v3 = agl_data.voleqmin[k] / (3.0*agl_data.v00k)
agl_data.gamma_G[k] = -2.0*agl_data.third + 0.5*s2*math.sqrt(s2)*v3*(8.0+tmp2)
#
#.....end
#
return
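#
#.....Illustrative sketch (added for clarity, not part of the original module):
#     the Eulerian strain f and normalised pressure F used by birch() above.
#     With x = (V/V0)**(1/3), f = (x**(-2) - 1)/2 and F = P/[3*f*(1+2*f)**(5/2)],
#     so that F(f) is a polynomial of order iG.  The pressure is expected in the
#     same atomic units used internally (P/au2gpa); the helper name is hypothetical.
#
def _birch_strain_and_f_func_sketch(p_au, vol, vol0):
    f = ((vol / vol0) ** (-2.0 / 3.0) - 1.0) / 2.0
    birch_func = p_au / (3.0 * f * (1.0 + 2.0 * f) ** 2.5)
    return f, birch_func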
#...................................................................
#
#.....bcnt - compute the Spinodal (BCNT) EOS from (B,p) data.
#
#      The EOS has the following expression:
#
#          B(p) = ( p - Psp )**g / K
#
#      where
#          g = 0.85 (if opt_g = .true. ===> g is optimized)
#          (-Psp) and K are the parameters to optimize.
#
#      These parameters bear the following relation to Bo and Bo':
#
#          Bo  = (-Psp)**g / K
#
#          Bo' = g * Bo / (-Psp)
#
#-----Input parameters:
#     vol0pres  : Zero pressure volume, either static or dynamic.
#     gfe0pres  : Zero pressure Gibbs function.
#     b0        : Bulk modulus used to compute the initial value of
#                 -Psp (GPa).
#     opt_g     : if True ==> g is optimized (read from agl_data.opt_g).
#     statcalc  : if True ==> static calculation.
#
# Adapted from original Fortran version written by M. A. Blanco et al.
# See Computer Physics Communications 158, 57-72 (2004) and Journal of Molecular Structure (Theochem) 368, 245-255 (1996) for details
# ...................................................................
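#
#.....Illustrative sketch (added for clarity, not part of the original module):
#     the spinodal form fitted by bcnt() below.  Writing x_psp for (-Psp), K for
#     K* (xkopt) and g for gbao, B(p) = (p + x_psp)**g / K, so B0 = x_psp**g / K
#     and B0' = g*B0/x_psp, which is how bu0 and bu1 are obtained.  The helper
#     name and the default g = 0.85 follow the description above.
#
def _bcnt_bulk_modulus_sketch(p_gpa, x_psp, xkopt, gbao=0.85):
    return (p_gpa + x_psp) ** gbao / xkopt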
def bcnt (vol0pres, gfe0pres, b0, statcalc, agl_data):
eps=1e-10
maxnl = 100
tol = 1e-12
agl_data.volp0 = vol0pres
db = []
d2b = []
xg = [0.0 for i in range(maxnl)]
wg = [0.0 for i in range(maxnl)]
#
#.....Initial values of properties to optimize.
#
agl_data.gbao = 0.85
if (not statcalc):
if (agl_data.iopt_g == 2):
agl_data.gbao = agl_data.gbao0
x_Psp = agl_data.gbao*b0/4.0
#
#.....Optimize g and x_Psp.
#
ax = x_Psp*0.5
bx = x_Psp
cx = x_Psp*2.0
ax, bx, cx, fa, fb, fc = mnbrak (ax, bx, cx, agl_data)
x_Psp, desv = brent (ax, bx, cx, tol, x_Psp, agl_data)
#
#.....Final properties.
#
xx = math.exp((agl_data.gbao-1)*math.log(x_Psp))
agl_data.xmopt = agl_data.xkopt/xx/(1.0-agl_data.gbao)
agl_data.bu0 = xx * x_Psp / agl_data.xkopt
agl_data.bu1 = agl_data.gbao * agl_data.bu0 / x_Psp
agl_data.bu2 = agl_data.gbao * (agl_data.gbao - 1.0) * agl_data.bu0 / x_Psp / x_Psp
vsp = vol0pres * math.exp(agl_data.gbao/(1-agl_data.gbao)/agl_data.bu1)
agl_data.pspin = x_Psp
agl_data.xsupa = agl_data.xkopt
agl_data.vspin = vsp
agl_data.beta = agl_data.gbao
#
#.....save static values
#
if (statcalc):
agl_data.g00k = gfe0pres
agl_data.b00k = agl_data.bu0/agl_data.au2gpa
agl_data.v00k = vol0pres
agl_data.vsp0k = vsp
agl_data.xkopt0 = agl_data.xkopt
agl_data.xmopt0 = agl_data.xmopt
agl_data.x_Psp0 = x_Psp
agl_data.gbao0 = agl_data.gbao
#
#.....Compute Pfit(P), B(P), B'(P), and B''(P)
#
for i in xrange(agl_data.npressure):
xxx = (agl_data.xmopt + math.log (vol0pres/agl_data.voleqmin[i]))/agl_data.xmopt
ug = 1.0/(1-agl_data.gbao)
        agl_data.pfit[i] = x_Psp * (math.exp(ug*math.log(xxx)) - 1.0)
        xdu = math.exp((agl_data.gbao-1)*math.log(agl_data.pressure[i]+x_Psp))
        agl_data.bulkmod[i] = xdu * (agl_data.pressure[i]+x_Psp) / agl_data.xkopt
        db.append(agl_data.gbao * xdu / agl_data.xkopt)
        d2b.append(agl_data.gbao * (agl_data.gbao - 1) * xdu / agl_data.xkopt / (agl_data.pressure[i]+x_Psp))
#
#.....output
#
agl_data.outstr = agl_data.outstr + "\n"
agl_data.outstr = agl_data.outstr + "SPINODAL EOS PRESSURE DERIVATIVES \n"
agl_data.outstr = agl_data.outstr + "================================= \n"
agl_data.outstr = agl_data.outstr + "\n"
agl_data.outstr = agl_data.outstr + " P(GPa) \t Pfit(GPa) \t B(GPa) \t B' \t B''(GPa-1) \n"
agl_data.outstr = agl_data.outstr + " --------------------------------------------------------------------------- \n"
for i in xrange(agl_data.npressure):
agl_data.outstr = agl_data.outstr + ' ' + str(agl_data.pressure[i]).rjust(6)[:6] + "\t" + str(agl_data.pfit[i]).rjust(10)[:10] + "\t" + str(agl_data.bulkmod[i]).rjust(10)[:10] + "\t" + str(db[i]).rjust(8)[:8] + "\t " + str(d2b[i]).rjust(10)[:10] + "\n"
agl_data.outstr = agl_data.outstr + "\n"
if (agl_data.opt_g):
agl_data.outstr = agl_data.outstr + "B0 = " + str(agl_data.bu0) + ", B0' = " + str(db[0]) + ", B0'' = " + str(d2b[0]) + ", reg.coef = " + str(desv) + ", Vsp = " + str(vsp) + ", -Psp = " + str(x_Psp) + ", K* = " + str(agl_data.xkopt) + ", gamma = " + str(agl_data.gbao) + "\n"
else:
agl_data.outstr = agl_data.outstr + "B0 = " + str(agl_data.bulkmod[0]) + ", B0' = " + str(db[0]) + ", B0'' = " + str(d2b[0]) + ", reg.coef = " + str(desv) + ", Vsp = " + str(vsp) + ", -Psp = " + str(x_Psp) + ", K* = " + str(agl_data.xkopt) + ", gamma = " + str(agl_data.gbao) + "\n"
#
#.....Static calculation: get static energy and its second derivative.
#
if (statcalc):
for i in xrange(agl_data.ndata):
x = (agl_data.xmopt0+math.log(agl_data.v00k/agl_data.vol_inp[i]))/agl_data.xmopt0
auxg = 1.0/(1.0-agl_data.gbao0)
auxg2 = agl_data.gbao0/(1.0-agl_data.gbao0)
#
#.........Compute numerically the integrated Helmholtz function by means
# of a loop with increasing number of Legendre points.
#
            xinf = 1.0
            xsup = x
            factor = 1.0
if (xsup < xinf):
aux = xinf
xinf = xsup
xsup = aux
factor = -1.0
#
#.........Iterative loop.
#
sum0=1e30
nl = 5
xabs=1.0
while ((nl <= maxnl) and (xabs >= eps)):
gauleg (xinf, xsup, xg, wg, nl, agl_data)
sum=0.0
for ii in xrange(nl):
term = math.exp(agl_data.xmopt0*(1.0-xg[ii])) * (math.exp(auxg*math.log(xg[ii])) - 1.0)
sum = sum + wg[ii] * factor * term * agl_data.xmopt0 * agl_data.v00k * agl_data.x_Psp0
xabs = math.fabs(sum-sum0)
sum0 = sum
nl = nl + 5
agl_data.ust.append(agl_data.g00k + sum / agl_data.au2gpa)
agl_data.uder.append(agl_data.x_Psp0 / (agl_data.xmopt0 * agl_data.vol_inp[i] * (1.0 - agl_data.gbao0)) * math.exp(auxg2*math.log(x)) / agl_data.au2gpa)
#
#.......Print input and fitted values of the lattice energy.
#
agl_data.outstr = agl_data.outstr + "\n"
agl_data.outstr = agl_data.outstr + "INPUT AND FITTED VALUES OF THE LATTICE ENERGY \n"
agl_data.outstr = agl_data.outstr + "============================================= \n"
agl_data.outstr = agl_data.outstr + "\n"
agl_data.outstr = agl_data.outstr + " V(bohr^3) E_inp(hartree) E_fit(hartree) \n"
agl_data.outstr = agl_data.outstr + " -------------------------------------------------- \n"
for i in xrange(agl_data.ndata):
agl_data.outstr = agl_data.outstr + ' ' + str(agl_data.vol_inp[i]).rjust(10)[:10] + "\t " + str(agl_data.energ_inp[i]).rjust(14)[:14] + "\t " + str(agl_data.ust[i]).rjust(14)[:14] + "\n"
#
#.....Dynamic calculation: get static pressure and second derivative
# of the energy
#
else:
for i in xrange(agl_data.npressure):
xxx = (agl_data.xmopt0 + math.log (agl_data.v00k/agl_data.voleqmin[i]))/agl_data.xmopt0
            ug = 1.0/(1.0-agl_data.gbao0)
            auxg2 = agl_data.gbao0/(1.0-agl_data.gbao0)
agl_data.pstatic[i] = agl_data.x_Psp0 * (math.exp(ug*math.log(xxx)) - 1.0)
agl_data.udyn[i] = agl_data.x_Psp0 / (agl_data.xmopt0 * agl_data.v00k * (1.0 - agl_data.gbao0)) * math.exp(-agl_data.xmopt0*(1.0-xxx)) * math.exp(auxg2*math.log(xxx)) / agl_data.au2gpa
agl_data.gamma_G[i] = -1.0/6.0 + agl_data.gbao0 * ug / 2.0/ agl_data.xmopt0 / xxx
#
#.....end
#
return
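#
#.....Illustrative sketch (added for clarity, not part of the original module):
#     the "increase the number of Legendre points until the integral stops
#     changing" pattern used inside bcnt() for the Helmholtz integral, written
#     here with numpy's leggauss instead of the module's gauleg() helper.  The
#     integrand, interval and tolerance are hypothetical; func must accept
#     numpy arrays.
#
def _adaptive_gauss_legendre_sketch(func, a, b, eps=1e-10, maxnl=100):
    import numpy
    previous = None
    nl = 5
    while nl <= maxnl:
        xg, wg = numpy.polynomial.legendre.leggauss(nl)
        # map the nodes from [-1, 1] onto [a, b]
        xx = 0.5 * (b - a) * xg + 0.5 * (b + a)
        total = 0.5 * (b - a) * numpy.sum(wg * func(xx))
        if previous is not None and abs(total - previous) < eps:
            return total
        previous = total
        nl = nl + 5
    return previous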
# **************************************************************************************
# This set of functions implement routines required for the BCNT EOS
# **************************************************************************************
#
#.....mnbrak - brackets a minimum of the function f.
#
# Given a function, and two distinct initial points ax and bx,
# this routine searches in the downhill direction (defined by the
# function as evaluated at the initial points) and returns new
# points ax, bx, and cx which bracket a minimum of the function.
# Also returned are the function values at the three points: fa, fb,
# and fc.
#
# Adapted from original Fortran version written by M. A. Blanco et al.
# See Computer Physics Communications 158, 57-72 (2004) and Journal of Molecular Structure (Theochem) 368, 245-255 (1996) for details
#
def mnbrak (ax, bx, cx, agl_data):
gold = 1.618034
glimit = 100.0
tiny = 1e-20
fa = optm(ax, agl_data)
fb = optm(bx, agl_data)
if (fb > fa):
dum = ax
ax = bx
bx = dum
dum = fb
fb = fa
fa = dum
cx = bx+gold*(bx-ax)
fc = optm(cx, agl_data)
while ( fb >= fc ):
endpart = True
r = (bx-ax)*(fb-fc)
q = (bx-cx)*(fb-fa)
u = bx-((bx-cx)*q-(bx-ax)*r)/(2.0*math.copysign(max(math.fabs(q-r),tiny),q-r))
ulim = bx+glimit*(cx-bx)
if ((bx-u)*(u-cx) > 0.0):
fu = optm(u, agl_data)
if (fu < fc):
ax = bx
fa = fb
bx = u
fb = fu
endpart = False
elif (fu > fb):
cx = u
fc = fu
endpart = False
else:
u = cx+gold*(cx-bx)
fu = optm(u, agl_data)
elif ((cx-u)*(u-ulim) > 0.0):
fu = optm(u, agl_data)
if (fu < fc):
bx = cx
cx = u
u = cx+gold*(cx-bx)
fb = fc
fc = fu
fu = optm(u, agl_data)
elif ((u-ulim)*(ulim-cx) >= 0.0):
u = ulim
fu = optm(u, agl_data)
else:
u = cx+gold*(cx-bx)
fu = optm(u, agl_data)
if (endpart):
ax = bx
bx = cx
cx = u
fa = fb
fb = fc
fc = fu
return ax, bx, cx, fa, fb, fc
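#
#.....Illustrative sketch (added for clarity, not part of the original module):
#     the basic idea behind mnbrak() above, without the parabolic-extrapolation
#     refinements.  Starting from two points it keeps stepping downhill by the
#     golden ratio until the middle point lies below both ends, i.e. until a
#     minimum is bracketed.  Unlike mnbrak(), which always evaluates
#     optm(x, agl_data), this takes a plain callable, e.g.
#     _bracket_minimum_sketch(lambda t: (t - 2.0)**2, 0.0, 1.0).
#
def _bracket_minimum_sketch(func, ax, bx, gold=1.618034, maxit=100):
    fa, fb = func(ax), func(bx)
    if fb > fa:
        ax, bx, fa, fb = bx, ax, fb, fa
    cx = bx + gold * (bx - ax)
    fc = func(cx)
    it = 0
    while fb >= fc and it < maxit:
        ax, bx, fa, fb = bx, cx, fb, fc
        cx = bx + gold * (bx - ax)
        fc = func(cx)
        it = it + 1
    return ax, bx, cx, fa, fb, fc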
#
#.....brent - unidimensional minimization of f in the range [ax,cx].
#
#     Given a function and a bracketing triplet of abscissas, this
#     routine isolates the minimum to a fractional precision of tol
#     using Brent's method. The bracketing triplet must be such that bx
#     is between ax and cx, and that f(bx) is less than both f(ax) and
#     f(cx). The abscissa of the minimum is returned as xmin, and the
#     minimum function value as brentx.
#
# Adapted from original Fortran version written by M. A. Blanco et al.
# See Computer Physics Communications 158, 57-72 (2004) and Journal of Molecular Structure (Theochem) 368, 245-255 (1996) for details
#
def brent (ax, bx, cx, tol, xmin, agl_data):
itmax = 100
    cgold = 0.3819660
    zeps = 1.0e-10
    tol3 = 1e-12
notskip1 = True
a = min(ax,cx)
b = max(ax,cx)
v = bx
w = v
x = v
e = 0.0
d = 0.0
fx = optm(x, agl_data)
fv = fx
fw = fx
iter = 1
while (iter <= itmax):
xm = 0.5*(a+b)
tol1 = tol*math.fabs(x)+zeps
tol2 = 2.0*tol1
if (math.fabs(x-xm) <= (tol2-0.5*(b-a))):
xmin = x
brentx = fx
return xmin, brentx
else:
if (math.fabs(e) > tol1):
r = (x-w)*(fx-fv)
q = (x-v)*(fx-fw)
p = (x-v)*q-(x-w)*r
q = 2.0*(q-r)
if (q > 0.0):
p = -p
q = math.fabs(q)
etemp = e
e = d
if (math.fabs(p) >= math.fabs(0.5*q*etemp) or p <= q*(a-x) or p >= q*(b-x)):
notskip1 = True
else:
d = p/q
u = x+d
if (u-a < tol2 or b-u < tol2):
d = math.copysign(tol1,xm-x)
if (math.fabs(d) >= tol1):
u = x+d
else:
u = x + math.copysign(tol1, d)
notskip1 = False
if (notskip1):
if (x >= xm):
e = a-x
else:
e = b-x
d = cgold*e;
if (math.fabs(d) >= tol1):
u = x+d
else:
u = x + math.copysign(tol1, d)
fu = optm(u, agl_data)
if (fu <= fx):
if (u >= x):
a = x
else:
b = x
v = w
fv = fw
w = x
fw = fx
x = u
fx = fu
else:
if (u < x):
a = u
else:
b = u
if (fu <= fw or math.fabs(w - x) < tol3 ):
v = w
fv = fw
w = u
fw = fu
elif (fu <= fv or math.fabs(v - x) < tol3 or math.fabs(v - w) < tol3 ):
v = u;
fv = fu;
iter = iter + 1
agl_data.logstr = agl_data.logstr + "MP AGL brent: exceeded maximum iterations. \n"
xmin = x
brentx = fx
return xmin, brentx
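#
#.....Illustrative sketch (added for clarity, not part of the original module):
#     the parabolic-interpolation trial point used inside brent() above.  Fitting
#     a parabola through (v, fv), (w, fw), (x, fx) and taking its vertex gives the
#     same point as the r/q/p block in brent().  The helper name is hypothetical.
#
def _parabolic_trial_point_sketch(v, fv, w, fw, x, fx):
    num = (x - w) ** 2 * (fx - fv) - (x - v) ** 2 * (fx - fw)
    den = (x - w) * (fx - fv) - (x - v) * (fx - fw)
    if den == 0.0:
        raise ZeroDivisionError("the three points are collinear")
    return x - 0.5 * num / den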
#
#.....optm - optimization of exponent parameter "g" (aka "beta") required for BCNT EOS.
#
# Adapted from original Fortran version written by M. A. Blanco et al.
# See Computer Physics Communications 158, 57-72 (2004) and Journal of Molecular Structure (Theochem) 368, 245-255 (1996) for details
#
def optm (x_Psp, agl_data):
xfunc = [0.0 for k in range(agl_data.npressure)]
yfunc = [0.0 for k in range(agl_data.npressure)]
if (x_Psp < 0.0):
agl_data.logstr = agl_data.logstr + "MP AGL optm: Warning: Spinodal pressure is negative \n"
desv = 1e30
return desv
if (agl_data.opt_g):
a11 = 0.0
a12 = 0.0
a21 = 0.0
a22 = 0.0
z1 = 0.0
z2 = 0.0
for i in xrange(agl_data.npressure):
xfunc[i] = math.log(agl_data.pressure[i]+x_Psp)
yfunc[i] = math.log(agl_data.bulkmod[i])
a11 = a11 + 1.0
a12 = a12 + xfunc[i]
a21 = a12
a22 = a22 + xfunc[i]*xfunc[i]
z1 = z1 + yfunc[i]
z2 = z2 + xfunc[i]*yfunc[i]
det = a11 * a22 - a12 * a21
x_in = (z1 * a22 - z2 * a12)/det
x_de = (a11 * z2 - z1 * a21)/det
desv = 0.0
for i in xrange(agl_data.npressure):
desv = desv + (yfunc[i] - x_in - x_de * xfunc[i])**2
agl_data.xkopt = math.exp(-x_in)
agl_data.gbao = x_de;
return desv
else:
a12 = 0.0
z1 = 0.0
for i in xrange(agl_data.npressure):
xfunc[i] = math.log (agl_data.pressure[i]+x_Psp);
yfunc[i] = math.log(agl_data.bulkmod[i]);
a12 = a12 + xfunc[i]
z1 = z1 + yfunc[i]
x_in = (z1-agl_data.gbao*a12)/agl_data.npressure
agl_data.xkopt = math.exp(-x_in)
desv = 0.0
for i in xrange(agl_data.npressure):
desv = desv + (yfunc[i] - x_in - agl_data.gbao * xfunc[i])**2
return desv
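#
#.....Illustrative sketch (added for clarity, not part of the original module):
#     why the least-squares fit in optm() recovers K* and g.  Since
#     B(p) = (p + Psp)**g / K, taking logs gives ln B = g*ln(p + Psp) - ln K, so a
#     straight-line fit of ln B against ln(p + Psp) has slope g and intercept
#     -ln K, which is how gbao and xkopt are set above.  numpy.polyfit is used
#     purely for illustration; the data passed in are hypothetical.
#
def _bcnt_loglog_fit_sketch(pressures_gpa, bulkmods_gpa, x_psp):
    import numpy
    xs = numpy.log(numpy.asarray(pressures_gpa) + x_psp)
    ys = numpy.log(numpy.asarray(bulkmods_gpa))
    slope, intercept = numpy.polyfit(xs, ys, 1)
    return numpy.exp(-intercept), slope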
|
ctoher/pymatgen
|
pymatgen/agl_thermal/agl_eqn_state.py
|
Python
|
mit
| 37,058
|
[
"pymatgen"
] |
34a4b161c3a1c381982ff96fddd673d4b1522afd90c174234a912578a40d208e
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Pipeline
reader = vtk.vtkPNGReader()
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/fullhead15.png")
iso = vtk.vtkFlyingEdges2D()
iso.SetInputConnection(reader.GetOutputPort())
iso.GenerateValues(12,500,1150)
isoMapper = vtk.vtkPolyDataMapper()
isoMapper.SetInputConnection(iso.GetOutputPort())
isoMapper.ScalarVisibilityOff()
isoActor = vtk.vtkActor()
isoActor.SetMapper(isoMapper)
isoActor.GetProperty().SetColor(1,1,1)
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(reader.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineProp = outlineActor.GetProperty()
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(outlineActor)
ren1.AddActor(isoActor)
ren1.SetBackground(0,0,0)
renWin.SetSize(400,400)
ren1.ResetCamera()
iren.Initialize()
renWin.Render()
# --- end of script --
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Filters/Core/Testing/Python/TestFlyingEdges2D.py
|
Python
|
bsd-3-clause
| 1,383
|
[
"VTK"
] |
3dbb395c2b585f2a3586a4eac72fe683621f15f8ec2b7cc846591ff7bf408f42
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import types
import mock
import pytest
from google.api_core import page_iterator
def test__do_nothing_page_start():
assert page_iterator._do_nothing_page_start(None, None, None) is None
class TestPage(object):
def test_constructor(self):
parent = mock.sentinel.parent
item_to_value = mock.sentinel.item_to_value
page = page_iterator.Page(parent, (1, 2, 3), item_to_value)
assert page.num_items == 3
assert page.remaining == 3
assert page._parent is parent
assert page._item_to_value is item_to_value
assert page.raw_page is None
def test___iter__(self):
page = page_iterator.Page(None, (), None, None)
assert iter(page) is page
def test_iterator_calls_parent_item_to_value(self):
parent = mock.sentinel.parent
item_to_value = mock.Mock(
side_effect=lambda iterator, value: value, spec=["__call__"]
)
page = page_iterator.Page(parent, (10, 11, 12), item_to_value)
page._remaining = 100
assert item_to_value.call_count == 0
assert page.remaining == 100
assert next(page) == 10
assert item_to_value.call_count == 1
item_to_value.assert_called_with(parent, 10)
assert page.remaining == 99
assert next(page) == 11
assert item_to_value.call_count == 2
item_to_value.assert_called_with(parent, 11)
assert page.remaining == 98
assert next(page) == 12
assert item_to_value.call_count == 3
item_to_value.assert_called_with(parent, 12)
assert page.remaining == 97
def test_raw_page(self):
parent = mock.sentinel.parent
item_to_value = mock.sentinel.item_to_value
raw_page = mock.sentinel.raw_page
page = page_iterator.Page(parent, (1, 2, 3), item_to_value, raw_page=raw_page)
assert page.raw_page is raw_page
with pytest.raises(AttributeError):
page.raw_page = None
class PageIteratorImpl(page_iterator.Iterator):
def _next_page(self):
return mock.create_autospec(page_iterator.Page, instance=True)
class TestIterator(object):
def test_constructor(self):
client = mock.sentinel.client
item_to_value = mock.sentinel.item_to_value
token = "ab13nceor03"
max_results = 1337
iterator = PageIteratorImpl(
client, item_to_value, page_token=token, max_results=max_results
)
assert not iterator._started
assert iterator.client is client
assert iterator.item_to_value == item_to_value
assert iterator.max_results == max_results
# Changing attributes.
assert iterator.page_number == 0
assert iterator.next_page_token == token
assert iterator.num_results == 0
def test_next(self):
iterator = PageIteratorImpl(None, None)
page_1 = page_iterator.Page(
iterator, ("item 1.1", "item 1.2"), page_iterator._item_to_value_identity
)
page_2 = page_iterator.Page(
iterator, ("item 2.1",), page_iterator._item_to_value_identity
)
iterator._next_page = mock.Mock(side_effect=[page_1, page_2, None])
result = next(iterator)
assert result == "item 1.1"
result = next(iterator)
assert result == "item 1.2"
result = next(iterator)
assert result == "item 2.1"
with pytest.raises(StopIteration):
next(iterator)
def test_pages_property_starts(self):
iterator = PageIteratorImpl(None, None)
assert not iterator._started
assert isinstance(iterator.pages, types.GeneratorType)
assert iterator._started
def test_pages_property_restart(self):
iterator = PageIteratorImpl(None, None)
assert iterator.pages
# Make sure we cannot restart.
with pytest.raises(ValueError):
assert iterator.pages
def test__page_iter_increment(self):
iterator = PageIteratorImpl(None, None)
page = page_iterator.Page(
iterator, ("item",), page_iterator._item_to_value_identity
)
iterator._next_page = mock.Mock(side_effect=[page, None])
assert iterator.num_results == 0
page_iter = iterator._page_iter(increment=True)
next(page_iter)
assert iterator.num_results == 1
def test__page_iter_no_increment(self):
iterator = PageIteratorImpl(None, None)
assert iterator.num_results == 0
page_iter = iterator._page_iter(increment=False)
next(page_iter)
# results should still be 0 after fetching a page.
assert iterator.num_results == 0
def test__items_iter(self):
# Items to be returned.
item1 = 17
item2 = 100
item3 = 211
# Make pages from mock responses
parent = mock.sentinel.parent
page1 = page_iterator.Page(
parent, (item1, item2), page_iterator._item_to_value_identity
)
page2 = page_iterator.Page(
parent, (item3,), page_iterator._item_to_value_identity
)
iterator = PageIteratorImpl(None, None)
iterator._next_page = mock.Mock(side_effect=[page1, page2, None])
items_iter = iterator._items_iter()
assert isinstance(items_iter, types.GeneratorType)
# Consume items and check the state of the iterator.
assert iterator.num_results == 0
assert next(items_iter) == item1
assert iterator.num_results == 1
assert next(items_iter) == item2
assert iterator.num_results == 2
assert next(items_iter) == item3
assert iterator.num_results == 3
with pytest.raises(StopIteration):
next(items_iter)
def test___iter__(self):
iterator = PageIteratorImpl(None, None)
iterator._next_page = mock.Mock(side_effect=[(1, 2), (3,), None])
assert not iterator._started
result = list(iterator)
assert result == [1, 2, 3]
assert iterator._started
def test___iter__restart(self):
iterator = PageIteratorImpl(None, None)
iter(iterator)
# Make sure we cannot restart.
with pytest.raises(ValueError):
iter(iterator)
def test___iter___restart_after_page(self):
iterator = PageIteratorImpl(None, None)
assert iterator.pages
# Make sure we cannot restart after starting the page iterator
with pytest.raises(ValueError):
iter(iterator)
class TestHTTPIterator(object):
def test_constructor(self):
client = mock.sentinel.client
path = "/foo"
iterator = page_iterator.HTTPIterator(
client, mock.sentinel.api_request, path, mock.sentinel.item_to_value
)
assert not iterator._started
assert iterator.client is client
assert iterator.path == path
assert iterator.item_to_value is mock.sentinel.item_to_value
assert iterator._items_key == "items"
assert iterator.max_results is None
assert iterator.extra_params == {}
assert iterator._page_start == page_iterator._do_nothing_page_start
# Changing attributes.
assert iterator.page_number == 0
assert iterator.next_page_token is None
assert iterator.num_results == 0
assert iterator._page_size is None
def test_constructor_w_extra_param_collision(self):
extra_params = {"pageToken": "val"}
with pytest.raises(ValueError):
page_iterator.HTTPIterator(
mock.sentinel.client,
mock.sentinel.api_request,
mock.sentinel.path,
mock.sentinel.item_to_value,
extra_params=extra_params,
)
def test_iterate(self):
path = "/foo"
item1 = {"name": "1"}
item2 = {"name": "2"}
api_request = mock.Mock(return_value={"items": [item1, item2]})
iterator = page_iterator.HTTPIterator(
mock.sentinel.client,
api_request,
path=path,
item_to_value=page_iterator._item_to_value_identity,
)
assert iterator.num_results == 0
items_iter = iter(iterator)
val1 = next(items_iter)
assert val1 == item1
assert iterator.num_results == 1
val2 = next(items_iter)
assert val2 == item2
assert iterator.num_results == 2
with pytest.raises(StopIteration):
next(items_iter)
api_request.assert_called_once_with(method="GET", path=path, query_params={})
def test__has_next_page_new(self):
iterator = page_iterator.HTTPIterator(
mock.sentinel.client,
mock.sentinel.api_request,
mock.sentinel.path,
mock.sentinel.item_to_value,
)
# The iterator should *always* indicate that it has a next page
# when created so that it can fetch the initial page.
assert iterator._has_next_page()
def test__has_next_page_without_token(self):
iterator = page_iterator.HTTPIterator(
mock.sentinel.client,
mock.sentinel.api_request,
mock.sentinel.path,
mock.sentinel.item_to_value,
)
iterator.page_number = 1
# The iterator should not indicate that it has a new page if the
# initial page has been requested and there's no page token.
assert not iterator._has_next_page()
def test__has_next_page_w_number_w_token(self):
iterator = page_iterator.HTTPIterator(
mock.sentinel.client,
mock.sentinel.api_request,
mock.sentinel.path,
mock.sentinel.item_to_value,
)
iterator.page_number = 1
iterator.next_page_token = mock.sentinel.token
# The iterator should indicate that it has a new page if the
        # initial page has been requested and there is a page token.
assert iterator._has_next_page()
def test__has_next_page_w_max_results_not_done(self):
iterator = page_iterator.HTTPIterator(
mock.sentinel.client,
mock.sentinel.api_request,
mock.sentinel.path,
mock.sentinel.item_to_value,
max_results=3,
page_token=mock.sentinel.token,
)
iterator.page_number = 1
# The iterator should indicate that it has a new page if there
# is a page token and it has not consumed more than max_results.
assert iterator.num_results < iterator.max_results
assert iterator._has_next_page()
def test__has_next_page_w_max_results_done(self):
iterator = page_iterator.HTTPIterator(
mock.sentinel.client,
mock.sentinel.api_request,
mock.sentinel.path,
mock.sentinel.item_to_value,
max_results=3,
page_token=mock.sentinel.token,
)
iterator.page_number = 1
iterator.num_results = 3
        # The iterator should not indicate that it has a new page
# if it has consumed more than max_results.
assert iterator.num_results == iterator.max_results
assert not iterator._has_next_page()
def test__get_query_params_no_token(self):
iterator = page_iterator.HTTPIterator(
mock.sentinel.client,
mock.sentinel.api_request,
mock.sentinel.path,
mock.sentinel.item_to_value,
)
assert iterator._get_query_params() == {}
def test__get_query_params_w_token(self):
iterator = page_iterator.HTTPIterator(
mock.sentinel.client,
mock.sentinel.api_request,
mock.sentinel.path,
mock.sentinel.item_to_value,
)
iterator.next_page_token = "token"
assert iterator._get_query_params() == {"pageToken": iterator.next_page_token}
def test__get_query_params_w_max_results(self):
max_results = 3
iterator = page_iterator.HTTPIterator(
mock.sentinel.client,
mock.sentinel.api_request,
mock.sentinel.path,
mock.sentinel.item_to_value,
max_results=max_results,
)
iterator.num_results = 1
local_max = max_results - iterator.num_results
assert iterator._get_query_params() == {"maxResults": local_max}
def test__get_query_params_extra_params(self):
extra_params = {"key": "val"}
iterator = page_iterator.HTTPIterator(
mock.sentinel.client,
mock.sentinel.api_request,
mock.sentinel.path,
mock.sentinel.item_to_value,
extra_params=extra_params,
)
assert iterator._get_query_params() == extra_params
def test__get_next_page_response_with_post(self):
path = "/foo"
page_response = {"items": ["one", "two"]}
api_request = mock.Mock(return_value=page_response)
iterator = page_iterator.HTTPIterator(
mock.sentinel.client,
api_request,
path=path,
item_to_value=page_iterator._item_to_value_identity,
)
iterator._HTTP_METHOD = "POST"
response = iterator._get_next_page_response()
assert response == page_response
api_request.assert_called_once_with(method="POST", path=path, data={})
def test__get_next_page_bad_http_method(self):
iterator = page_iterator.HTTPIterator(
mock.sentinel.client,
mock.sentinel.api_request,
mock.sentinel.path,
mock.sentinel.item_to_value,
)
iterator._HTTP_METHOD = "NOT-A-VERB"
with pytest.raises(ValueError):
iterator._get_next_page_response()
@pytest.mark.parametrize(
"page_size,max_results,pages",
[(3, None, False), (3, 8, False), (3, None, True), (3, 8, True)],
)
def test_page_size_items(self, page_size, max_results, pages):
path = "/foo"
NITEMS = 10
n = [0] # blast you python 2!
def api_request(*args, **kw):
assert not args
query_params = dict(
maxResults=(
page_size
if max_results is None
else min(page_size, max_results - n[0])
)
)
if n[0]:
query_params.update(pageToken="test")
assert kw == {"method": "GET", "path": "/foo", "query_params": query_params}
n_items = min(kw["query_params"]["maxResults"], NITEMS - n[0])
items = [dict(name=str(i + n[0])) for i in range(n_items)]
n[0] += n_items
result = dict(items=items)
if n[0] < NITEMS:
result.update(nextPageToken="test")
return result
iterator = page_iterator.HTTPIterator(
mock.sentinel.client,
api_request,
path=path,
item_to_value=page_iterator._item_to_value_identity,
page_size=page_size,
max_results=max_results,
)
assert iterator.num_results == 0
n_results = max_results if max_results is not None else NITEMS
if pages:
items_iter = iter(iterator.pages)
npages = int(math.ceil(float(n_results) / page_size))
for ipage in range(npages):
assert list(next(items_iter)) == [
dict(name=str(i))
for i in range(
ipage * page_size, min((ipage + 1) * page_size, n_results),
)
]
else:
items_iter = iter(iterator)
for i in range(n_results):
assert next(items_iter) == dict(name=str(i))
assert iterator.num_results == i + 1
with pytest.raises(StopIteration):
next(items_iter)
class TestGRPCIterator(object):
def test_constructor(self):
client = mock.sentinel.client
items_field = "items"
iterator = page_iterator.GRPCIterator(
client, mock.sentinel.method, mock.sentinel.request, items_field
)
assert not iterator._started
assert iterator.client is client
assert iterator.max_results is None
assert iterator.item_to_value is page_iterator._item_to_value_identity
assert iterator._method == mock.sentinel.method
assert iterator._request == mock.sentinel.request
assert iterator._items_field == items_field
assert (
iterator._request_token_field
== page_iterator.GRPCIterator._DEFAULT_REQUEST_TOKEN_FIELD
)
assert (
iterator._response_token_field
== page_iterator.GRPCIterator._DEFAULT_RESPONSE_TOKEN_FIELD
)
# Changing attributes.
assert iterator.page_number == 0
assert iterator.next_page_token is None
assert iterator.num_results == 0
def test_constructor_options(self):
client = mock.sentinel.client
items_field = "items"
request_field = "request"
response_field = "response"
iterator = page_iterator.GRPCIterator(
client,
mock.sentinel.method,
mock.sentinel.request,
items_field,
item_to_value=mock.sentinel.item_to_value,
request_token_field=request_field,
response_token_field=response_field,
max_results=42,
)
assert iterator.client is client
assert iterator.max_results == 42
assert iterator.item_to_value is mock.sentinel.item_to_value
assert iterator._method == mock.sentinel.method
assert iterator._request == mock.sentinel.request
assert iterator._items_field == items_field
assert iterator._request_token_field == request_field
assert iterator._response_token_field == response_field
def test_iterate(self):
request = mock.Mock(spec=["page_token"], page_token=None)
response1 = mock.Mock(items=["a", "b"], next_page_token="1")
response2 = mock.Mock(items=["c"], next_page_token="2")
response3 = mock.Mock(items=["d"], next_page_token="")
method = mock.Mock(side_effect=[response1, response2, response3])
iterator = page_iterator.GRPCIterator(
mock.sentinel.client, method, request, "items"
)
assert iterator.num_results == 0
items = list(iterator)
assert items == ["a", "b", "c", "d"]
method.assert_called_with(request)
assert method.call_count == 3
assert request.page_token == "2"
def test_iterate_with_max_results(self):
request = mock.Mock(spec=["page_token"], page_token=None)
response1 = mock.Mock(items=["a", "b"], next_page_token="1")
response2 = mock.Mock(items=["c"], next_page_token="2")
response3 = mock.Mock(items=["d"], next_page_token="")
method = mock.Mock(side_effect=[response1, response2, response3])
iterator = page_iterator.GRPCIterator(
mock.sentinel.client, method, request, "items", max_results=3
)
assert iterator.num_results == 0
items = list(iterator)
assert items == ["a", "b", "c"]
assert iterator.num_results == 3
method.assert_called_with(request)
assert method.call_count == 2
assert request.page_token == "1"
class GAXPageIterator(object):
"""Fake object that matches gax.PageIterator"""
def __init__(self, pages, page_token=None):
self._pages = iter(pages)
self.page_token = page_token
def next(self):
return next(self._pages)
__next__ = next
class TestGAXIterator(object):
def test_constructor(self):
client = mock.sentinel.client
token = "zzzyy78kl"
page_iter = GAXPageIterator((), page_token=token)
item_to_value = page_iterator._item_to_value_identity
max_results = 1337
iterator = page_iterator._GAXIterator(
client, page_iter, item_to_value, max_results=max_results
)
assert not iterator._started
assert iterator.client is client
assert iterator.item_to_value is item_to_value
assert iterator.max_results == max_results
assert iterator._gax_page_iter is page_iter
# Changing attributes.
assert iterator.page_number == 0
assert iterator.next_page_token == token
assert iterator.num_results == 0
def test__next_page(self):
page_items = (29, 31)
page_token = "2sde98ds2s0hh"
page_iter = GAXPageIterator([page_items], page_token=page_token)
iterator = page_iterator._GAXIterator(
mock.sentinel.client, page_iter, page_iterator._item_to_value_identity
)
page = iterator._next_page()
assert iterator.next_page_token == page_token
assert isinstance(page, page_iterator.Page)
assert list(page) == list(page_items)
next_page = iterator._next_page()
assert next_page is None
|
googleapis/python-api-core
|
tests/unit/test_page_iterator.py
|
Python
|
apache-2.0
| 21,887
|
[
"BLAST"
] |
09f40ab9e57f32e892c719a334ddc22532c05b1437d664a57b4c3e0a4f975df5
|
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2015 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier (bgoli@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
"""
This module contains simplified methods derived from the Pysces model class
Brett G. Olivier June 2010
"""
import os, copy, time
import numpy
from pysces import PyscesStoich
from pysces import PyscesParse
mach_spec = numpy.MachAr()
pscParser = PyscesParse.PySCeSParser(debug=0)
class PyscesInputFileParser(object):
"""
This class contains the PySCeS model loading and Stoichiometric Analysis methods
"""
ModelDir = None
ModelFile = None
ModelOutput = None
__settings__ = None
N = None
def __init__(self, model_file, directory, output_dir=None):
self.ModelDir = directory
self.ModelFile = model_file
if output_dir == None:
self.ModelOutput = os.getcwd()
else:
assert os.path.exists(output_dir), "\n%s is not a valid path" % output_dir
self.__settings__ = {}
# Initialize stoichiometric precision
self.__settings__['stoichiometric_analysis_fp_zero'] = mach_spec.eps*2.0e4
self.__settings__['stoichiometric_analysis_lu_precision'] = self.__settings__['stoichiometric_analysis_fp_zero']
self.__settings__['stoichiometric_analysis_gj_precision'] = self.__settings__['stoichiometric_analysis_lu_precision']*10.0
self.__settings__['enable_deprecated_attr'] = False
self.InitialiseInputFile()
self.N = self.buildN()
def InitialiseInputFile(self):
"""
InitialiseInputFile()
Parse the input file associated with the PySCeS model instance and assign the basic model attributes
Arguments:
None
"""
self.__parseOK = 1 # check that model has parsed ok?
try:
if os.path.exists(os.path.join(self.ModelDir,self.ModelFile)):
pass
else:
print '\nInvalid self.ModelFile: ' + os.path.join(self.ModelDir,self.ModelFile)
except:
print 'WARNING: Problem verifying: ' + os.path.join(self.ModelDir,self.ModelFile)
if self.ModelFile[-4:] == '.psc':
pass
else:
print 'Assuming extension is .psc'
self.ModelFile += '.psc'
print '\nParsing file: %s' % os.path.join(self.ModelDir, self.ModelFile)
pscParser.ParsePSC(self.ModelFile,self.ModelDir,self.ModelOutput)
print ' '
badlist = pscParser.KeywordCheck(pscParser.ReactionIDs)
badlist = pscParser.KeywordCheck(pscParser.Inits,badlist)
if len(badlist) != 0:
            print '\n******************************\nPSC input file contains PySCeS keywords, please rename them and reload:'
for item in badlist:
print ' --> ' + item
print '******************************\n'
self.__parseOK = 0
#assert len(badlist) != 0, 'Keyword error, please check input file'
if self.__parseOK:
# brett 2008
self.__nDict__ = pscParser.nDict.copy()
self.__sDict__ = pscParser.sDict.copy()
self.__pDict__ = pscParser.pDict.copy()
self.__uDict__ = pscParser.uDict.copy()
# model attributes are now initialised here brett2008
self.__InitDict__ = {}
# set parameters and add to __InitDict__
for p in self.__pDict__.keys():
setattr(self, self.__pDict__[p]['name'], self.__pDict__[p]['initial'])
self.__InitDict__.update({self.__pDict__[p]['name'] : self.__pDict__[p]['initial']})
# set species and add to __InitDict__ and set mod.Xi_init
for s in self.__sDict__.keys():
setattr(self, self.__sDict__[s]['name'], self.__sDict__[s]['initial'])
if not self.__sDict__[s]['fixed']:
setattr(self, self.__sDict__[s]['name']+'_init', self.__sDict__[s]['initial'])
self.__InitDict__.update({self.__sDict__[s]['name'] : self.__sDict__[s]['initial']})
# setup keywords
self.__KeyWords__ = pscParser.KeyWords.copy()
if self.__KeyWords__['Modelname'] == None:
self.__KeyWords__['Modelname'] = self.ModelFile.replace('.psc','')
if self.__KeyWords__['Description'] == None:
self.__KeyWords__['Description'] = self.ModelFile.replace('.psc','')
            # if Species_In_Conc is undefined assume True
if self.__KeyWords__['Species_In_Conc'] == None:
self.__KeyWords__['Species_In_Conc'] = True
# if OutputType is undefined assume it is the same as SpeciesType
if self.__KeyWords__['Output_In_Conc'] == None:
if self.__KeyWords__['Species_In_Conc']:
self.__KeyWords__['Output_In_Conc'] = True
else:
self.__KeyWords__['Output_In_Conc'] = False
# set the species type in sDict according to 'Species_In_Conc'
for s in self.__sDict__.keys():
if not self.__KeyWords__['Species_In_Conc']:
self.__sDict__[s]['isamount'] = True
else:
self.__sDict__[s]['isamount'] = False
# setup compartments
self.__compartments__ = pscParser.compartments.copy()
if len(self.__compartments__.keys()) > 0:
self.__HAS_COMPARTMENTS__ = True
else:
self.__HAS_COMPARTMENTS__ = False
# no (self.)
self.__fixed_species__ = copy.copy(pscParser.fixed_species)
self.__species__ = copy.copy(pscParser.species)
self.__parameters__ = copy.copy(pscParser.parameters)
self.__reactions__ = copy.copy(pscParser.reactions)
self.__modifiers__ = copy.copy(pscParser.modifiers)
# Initialize exposed stuff
self.fixed_species = tuple(pscParser.fixed_species)
self.species = tuple(pscParser.species)
self.parameters = tuple(pscParser.parameters)
self.reactions = tuple(pscParser.reactions)
self.modifiers = tuple(pscParser.modifiers)
            # Add input file defined functions - brett 200500621
# TODO deprecated
# self._Function_time = copy.copy(pscParser.TimeFunc)
self._Function_user = copy.copy(pscParser.UserFunc)
self._Function_init = pscParser.InitFunc
self.__functions__ = pscParser.Functions.copy()
self.__rules__ = pscParser.AssignmentRules.copy()
self.__InitFuncs__ = pscParser.ModelInit.copy()
self.__userfuncs__ = pscParser.UserFuncs.copy()
self.__eDict__ = pscParser.Events.copy()
## if pscParser.ModelUsesNumpyFuncs:
## print 'Numpy functions detected in kinetic laws.\n'
else:
print '\nERROR: model parsing error, please check input file.\n'
# added in a check for model correctness and human error reporting (1=ok, 0=error)
if len(pscParser.SymbolErrors) != 0:
print '\nUndefined symbols:\n%s' % self.SymbolErrors
if not pscParser.ParseOK:
print '\n\n*****\nModel parsing errors detected in input file '+ self.ModelFile +'\n*****'
print '\nInput file errors'
for error in pscParser.LexErrors:
print error[0] + 'in line:\t' + str(error[1]) + ' ('+ error[2][:20] +' ...)'
print '\nParser errors'
for error in pscParser.ParseErrors:
try:
print error[0] + '- ' + error[2][:20]
except:
print error
assert pscParser.ParseOK == 1, 'Input File Error'
def buildN(self):
"""
buildN()
Generate the stoichiometric matrix N from the parsed model description.
Returns a stoichiometric matrix (N)
Arguments:
None
"""
VarReagents = ['self.'+s for s in self.__species__]
StoicMatrix = numpy.zeros((len(VarReagents),len(self.__reactions__)),'d')
for reag in VarReagents:
for id in self.__reactions__:
if reag in self.__nDict__[id]['Reagents'].keys():
StoicMatrix[VarReagents.index(reag)][self.__reactions__.index(id)] = self.__nDict__[id]['Reagents'][reag]
return StoicMatrix
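    #
    # Illustrative sketch (added for clarity, not part of the original PySCeS source):
    # a miniature version of buildN() above for a hypothetical two-step chain
    # A -> B -> C in which A and C are fixed, so B is the only variable species.
    # Reagent coefficients use the same sign convention as __nDict__ (negative =
    # consumed, positive = produced), so the returned matrix is [[ 1., -1.]].
    #
    def _buildN_toy_sketch(self):
        reactions = ['R1', 'R2']
        reagents = {'R1': {'self.B': 1.0}, 'R2': {'self.B': -1.0}}
        N = numpy.zeros((1, len(reactions)), 'd')
        for j in range(len(reactions)):
            N[0][j] = reagents[reactions[j]].get('self.B', 0.0)
        return N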
def Stoichiometry_Init(self,nmatrix):
"""
Stoichiometry_Init(nmatrix,load=0)
Initialize the model stoichiometry. Given a stoichiometric matrix N, this method will return an instantiated PyscesStoich instance and status flag.
and test it's correctness. The status flag indicates 0 = reanalyse stoichiometry or
1 = complete structural analysis preloaded.
Arguments:
nmatrix: The input stoichiometric matrix, N
load [default=0]: try to load a saved stoichiometry (1)
"""
#print 'Instantiating new stoichiometry ...'
stc = PyscesStoich.Stoich(nmatrix)
status = 0
return stc,status
def Stoichiometry_Analyse(self):
override = 0
load = 0
"""
Stoichiometry_Analyse(override=0,load=0)
        Perform a structural analysis. The default behaviour is to construct and analyse the model
        from the parsed model information. Overriding this behaviour analyses the stoichiometry
        based on the current stoichiometric matrix. If load is specified PySCeS tries to load a
        saved stoichiometry, otherwise the stoichiometric analysis is run. The results of
        the analysis are checked for floating point error and nullspace rank consistency.
        Arguments:
        override [default=0]: override stoichiometric analysis initialisation from parsed data
load [default=0]: load a presaved stoichiometry
"""
if not override:
self.nmatrix = self.buildN() #Creates the model N
#print '\nintializing N\n'
else:
print '\nStoichiometric override active\n'
assert len(self.nmatrix) > 0, '\nUnable to generate Stoichiometric Matrix! model has:\n%s reactions\n%s species\nwhat did you have in mind?\n' % (len(self.__reactions__), len(self.__species__))
## self.__nmatrix__ = copy.copy(self.nmatrix)
self.__nmatrix__ = self.nmatrix # done with caution brett2008
self.__Nshape__ = self.nmatrix.shape #Get the shape of N
## self.__Vtemp__ = numpy.zeros((self.__Nshape__[1])) # going going ....
# get stoich instance and whether it was analysed or loaded - brett 20050830
self.__structural__, stc_load = self.Stoichiometry_Init(self.nmatrix)
# if not loaded analyze - brett 20050830
if not stc_load:
# technically this means we can define this on the fly - brett #20051013
self.__structural__.stoichiometric_analysis_fp_zero = self.__settings__['stoichiometric_analysis_fp_zero']
self.__structural__.stoichiometric_analysis_lu_precision = self.__settings__['stoichiometric_analysis_lu_precision']
self.__structural__.stoichiometric_analysis_gj_precision = self.__settings__['stoichiometric_analysis_gj_precision']
self.__structural__.AnalyseL() #Get all L related stuff
self.__structural__.AnalyseK() #Get all K related stuff
#test matrix values against __settings__['stoichiometric_analysis_lu_precision']
lsmall,lbig = self.__structural__.MatrixValueCompare(self.__structural__.lzeromatrix)
ksmall,kbig = self.__structural__.MatrixValueCompare(self.__structural__.kzeromatrix)
SmallValueError = 0
if abs(lsmall) < self.__structural__.stoichiometric_analysis_lu_precision*10.0:
print '\nWARNING: values in L0matrix are close to stoichiometric precision!'
print 'Stoichiometric LU precision:', self.__structural__.stoichiometric_analysis_lu_precision
print 'L0 smallest abs(value)', abs(lsmall)
print 'Machine precision:', mach_spec.eps
SmallValueError = 1
if abs(ksmall) < self.__structural__.stoichiometric_analysis_lu_precision*10.0:
print '\nWARNING: values in K0matrix are close to stoichiometric precision!'
print 'Stoichiometric precision:', self.__structural__.stoichiometric_analysis_lu_precision
print 'K0 smallest abs(value)', abs(ksmall)
print 'Machine precision:', mach_spec.eps
SmallValueError = 1
if SmallValueError:
raw_input('\nStructural Analysis results may not be reliable!!!.\n\nTry change <mod>.__settings__["stoichiometric_analysis_lu_precision"] (see reference manual for details)\n\n\t press any key to continue: ')
        # cross check that the rank is consistent between K0 and L0
if self.__structural__.kzeromatrix.shape[0] != self.__structural__.lzeromatrix.shape[1]:
            print '\nWARNING: the ranks calculated by the K and L analysis methods are not the same!'
print '\tK analysis calculates the rank as: ' + `self.__structural__.kzeromatrix.shape[0]`
print '\tL analysis calculates the rank as: ' + `self.__structural__.lzeromatrix.shape[1]`
print 'This is not good! Structural Analysis results are not reliable!!!\n'
assert self.__structural__.kzeromatrix.shape[0] == self.__structural__.lzeromatrix.shape[1], '\nStructuralAnalysis Error: rank mismatch'
self.__HAS_FLUX_CONSERVATION__ = self.__structural__.info_flux_conserve
self.__HAS_MOIETY_CONSERVATION__ = self.__structural__.info_moiety_conserve
if self.__settings__['enable_deprecated_attr']:
self.nmatrix_row = self.__structural__.nmatrix_row
self.nmatrix_col = self.__structural__.nmatrix_col
self.kmatrix = self.__structural__.kmatrix
self.kmatrix_row = self.__structural__.kmatrix_row
self.kmatrix_col = self.__structural__.kmatrix_col
self.kzeromatrix = self.__structural__.kzeromatrix
self.kzeromatrix_row = self.__structural__.kzeromatrix_row
self.kzeromatrix_col = self.__structural__.kzeromatrix_col
self.lmatrix = self.__structural__.lmatrix
self.lmatrix_row = self.__structural__.lmatrix_row
self.lmatrix_col = self.__structural__.lmatrix_col
self.lzeromatrix = self.__structural__.lzeromatrix
self.lzeromatrix_row = self.__structural__.lzeromatrix_row
self.lzeromatrix_col = self.__structural__.lzeromatrix_col
self.conservation_matrix = self.__structural__.conservation_matrix
self.conservation_matrix_row = self.__structural__.conservation_matrix_row
self.conservation_matrix_col = self.__structural__.conservation_matrix_col
self.nrmatrix = self.__structural__.nrmatrix
self.nrmatrix_row = self.__structural__.nrmatrix_row
self.nrmatrix_col = self.__structural__.nrmatrix_col
self.__kmatrix__ = copy.copy(self.kmatrix)
self.__kzeromatrix__ = copy.copy(self.kzeromatrix)
self.__lmatrix__ = copy.copy(self.lmatrix)
self.__lzeromatrix__ = copy.copy(self.lzeromatrix)
self.__nrmatrix__ = copy.copy(self.nrmatrix)
# switch that is set if the stoichiometric analysis is up to date
self.__structural__.species = self.species
self.__structural__.reactions = self.reactions
self.Nmatrix = PyscesStoich.StructMatrix(self.__structural__.nmatrix, self.__structural__.nmatrix_row, self.__structural__.nmatrix_col)
self.Nmatrix.setRow(self.species)
self.Nmatrix.setCol(self.reactions)
self.Nrmatrix = PyscesStoich.StructMatrix(self.__structural__.nrmatrix, self.__structural__.nrmatrix_row, self.__structural__.nrmatrix_col)
self.Nrmatrix.setRow(self.species)
self.Nrmatrix.setCol(self.reactions)
self.Kmatrix = PyscesStoich.StructMatrix(self.__structural__.kmatrix, self.__structural__.kmatrix_row, self.__structural__.kmatrix_col)
self.Kmatrix.setRow(self.reactions)
self.Kmatrix.setCol(self.reactions)
self.K0matrix = PyscesStoich.StructMatrix(self.__structural__.kzeromatrix, self.__structural__.kzeromatrix_row, self.__structural__.kzeromatrix_col)
self.K0matrix.setRow(self.reactions)
self.K0matrix.setCol(self.reactions)
self.Lmatrix = PyscesStoich.StructMatrix(self.__structural__.lmatrix, self.__structural__.lmatrix_row, self.__structural__.lmatrix_col)
self.Lmatrix.setRow(self.species)
self.Lmatrix.setCol(self.species)
self.L0matrix = PyscesStoich.StructMatrix(self.__structural__.lzeromatrix, self.__structural__.lzeromatrix_row, self.__structural__.lzeromatrix_col)
self.L0matrix.setRow(self.species)
self.L0matrix.setCol(self.species)
if self.__structural__.info_moiety_conserve:
self.Consmatrix = PyscesStoich.StructMatrix(self.__structural__.conservation_matrix, self.__structural__.conservation_matrix_row, self.__structural__.conservation_matrix_col)
self.Consmatrix.setRow(self.species)
self.Consmatrix.setCol(self.species)
else:
self.Consmatrix = None
self.__StoichOK = 1
print ' '
if __name__ == '__main__':
ModelFile = 'pysces_test_linear1.psc'
ModelDir = '/home/bgoli/Pysces/psc'
mod = PyscesInputFileParser(ModelFile, ModelDir)
#~ mod.Stoichiometry_Analyse()
|
asttra/pysces
|
pysces/PyscesMiniModel.py
|
Python
|
bsd-3-clause
| 18,529
|
[
"PySCeS"
] |
045edf580199b1792c5417c1246d5a120b7748321dbc57009337e11cbedb7822
|
#!/usr/bin/env python
##############################################################
# B a r a K u d a
#
# Generate netcdf files of cross-sections
#
# L. Brodeau, 2016
##############################################################
import sys
import numpy as nmp
from netCDF4 import Dataset
import barakuda_orca as bo
import barakuda_tool as bt
import barakuda_ncio as bnc
venv_needed = {'ORCA','EXP','DIAG_D','i_do_sect','TS_SECTION_FILE','MM_FILE','NN_T','NN_S'}
vdic = bt.check_env_var(sys.argv[0], venv_needed)
i_do_sect = int(vdic['i_do_sect'])
if i_do_sect != 1: print 'ERROR: '+sys.argv[0]+' => why are we here when i_do_sect != 1 ???'; sys.exit(0)
f_sections = vdic['TS_SECTION_FILE']
CONFEXP = vdic['ORCA']+'-'+vdic['EXP']
cnexec = sys.argv[0]
na = len(sys.argv)
if na != 3:
print 'Usage : '+cnexec+' <EXP_grid_T.nc> <year>'
sys.exit(0)
cf_in = sys.argv[1]
cyear = sys.argv[2] ; jyear = int(cyear); cyear = '%4.4i'%jyear
cv_t = vdic['NN_T']
cv_s = vdic['NN_S']
print 'Current year is '+cyear+' !\n'
bt.chck4f(vdic['MM_FILE'])
id_mm = Dataset(vdic['MM_FILE'])
rmsk = id_mm.variables['tmask'][0,:,:,:]
xlon = id_mm.variables['glamt'][0,:,:]
xlat = id_mm.variables['gphit'][0,:,:]
id_mm.close()
[ nk, nj, ni ] = rmsk.shape
bt.chck4f(cf_in)
id_in = Dataset(cf_in)
vdepth = id_in.variables['deptht'][:]
XT = id_in.variables[cv_t][:,:,:,:]
XS = id_in.variables[cv_s][:,:,:,:]
id_in.close()
[ Nt, nk0, nj0, ni0 ] = XT.shape
if [ nk0, nj0, ni0 ] != [ nk, nj, ni ]: print 'ERROR: cross_sections.py => mask and field disagree in shape!'; sys.exit(0)
print 'Nt, nk, nj, ni =', Nt, nk, nj, ni
# Masking:
for jt in range(Nt):
XT[jt,:,:,:] = rmsk[:,:,:]*XT[jt,:,:,:] + (1. - rmsk[:,:,:])*-9999.
XS[jt,:,:,:] = rmsk[:,:,:]*XS[jt,:,:,:] + (1. - rmsk[:,:,:])*-9999.
vtime = nmp.zeros(Nt)
for jt in range(Nt): vtime[jt] = float(jyear) + (float(jt) + 0.5)/float(Nt)
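# e.g. for jyear=1990 and Nt=12 (monthly means), vtime[0] = 1990 + 0.5/12 ~ 1990.042, i.e. mid-January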
# Getting sections:
vboxes, vlon1, vlat1, vlon2, vlat2 = bt.read_coor(f_sections, ctype='float', lTS_bounds=False)
js = -1
for csname in vboxes:
js = js + 1
    print '\n *** '+sys.argv[0]+': treating section '+csname
( i1, i2, j1, j2 ) = bo.transect_zon_or_med(vlon1[js], vlon2[js], vlat1[js], vlat2[js], xlon, xlat)
print csname+' :'
print '(lon1, lon2, lat1, lat2) =', vlon1[js], vlon2[js], vlat1[js], vlat2[js]
print ' => i1, i2, j1, j2 =', i1, i2, j1, j2
print ''
if i1 > i2: print 'ERROR: cross_sections.py => i1 > i2 !'; sys.exit(0)
if j1 > j2: print 'ERROR: cross_sections.py => j1 > j2 !'; sys.exit(0)
if i1 == i2:
print 'Meridional section!'
caxis = 'y' ; cxn = 'lat'
vaxis = xlat[j1:j2,i1]
imsk = rmsk[:,j1:j2,i1]
ZT = XT[:,:,j1:j2,i1]
ZS = XS[:,:,j1:j2,i1]
if j1 == j2:
print 'Zonal section!'
caxis = 'x'; cxn = 'lon'
vx = xlon[j1,i1:i2] ; vaxis = nmp.zeros(len(vx)) ; vaxis[:] = vx[:]
ivf = nmp.where(vx>180); vaxis[ivf] = vx[ivf] - 360.
imsk = rmsk[:,j1,i1:i2]
ZT = XT[:,:,j1,i1:i2]
ZS = XS[:,:,j1,i1:i2]
cf_out = vdic['DIAG_D']+'/TS_section_'+csname+'.nc'
bnc.wrt_appnd_2dt_series(vaxis, -vdepth, vtime, ZT, cf_out, cv_t,
missing_val=-9999.,
cxdnm=cxn, cydnm='depth', cxvnm=cxn, cyvnm='depth',
cu_t='year', cu_d='deg.C', cln_d='Potential temperature',
xd2=ZS, cvar2=cv_s, cln_d2='Salinity', cun2='PSU')
|
brodeau/barakuda
|
python/exec/cross_sections.py
|
Python
|
gpl-2.0
| 3,545
|
[
"NetCDF",
"ORCA"
] |
5492994ffe5f0da9e6e225d8e2a8f0a5dc31ae84ae99a371821241e7e422ce95
|
# Copyright 2013 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of seq_crumbs.
# seq_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
from copy import deepcopy
from collections import namedtuple
from crumbs.utils.optional_modules import SeqRecord
from crumbs.utils.tags import (SEQITEM, SEQRECORD, ILLUMINA_QUALITY,
SANGER_QUALITY, SANGER_FASTQ_FORMATS,
ILLUMINA_FASTQ_FORMATS)
# pylint: disable=C0111
SeqWrapper = namedtuple('SeqWrapper', ['kind', 'object', 'file_format'])
_SeqItem = namedtuple('SeqItem', ['name', 'lines', 'annotations'])
class SeqItem(_SeqItem):
def __new__(cls, name, lines, annotations=None):
# This subclass is required to have a default value in a namedtuple
if annotations is None:
annotations = {}
# add default values
return super(SeqItem, cls).__new__(cls, name, lines, annotations)
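# e.g. SeqItem('read1', ['>read1\n', 'ACGT\n']) gets annotations == {} via the default above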
def get_title(seq):
'Given a seq it returns the title'
seq_class = seq.kind
seq = seq.object
if seq_class == SEQITEM:
title = seq.lines[0][1:].rstrip()
elif seq_class == SEQRECORD:
title = seq.id + ' ' + seq.description
else:
        msg = 'Do not know how to guess title from this seq class'
raise NotImplementedError(msg)
return title
def get_description(seq):
seq_class = seq.kind
seq = seq.object
if seq_class == SEQITEM:
title_items = seq.lines[0].split(' ', 1)
desc = title_items[1] if len(title_items) == 2 else None
elif seq_class == SEQRECORD:
desc = seq.description
if desc == '<unknown description>': # BioPython default
return None
return desc
def get_name(seq):
if 'SeqRecord' in seq.__class__.__name__:
seq_class = SEQRECORD
else:
seq_class = seq.kind
seq = seq.object
if seq_class == SEQITEM:
name = seq.name
elif seq_class == SEQRECORD:
name = seq.id
return name
def get_file_format(seq):
seq_class = seq.kind
if seq_class == SEQITEM:
fmt = seq.file_format
elif seq_class == SEQRECORD:
fmt = None
return fmt
def _break():
raise StopIteration
def _is_fastq_plus_line(line, seq_name):
    if line == '+\n' or (line.startswith('+') and seq_name in line):
return True
else:
return False
def _get_seqitem_quals(seq):
fmt = seq.file_format
sitem = seq.object
if 'fastq' in fmt:
quals = sitem.lines[3].rstrip()
else:
quals = None
return quals
def get_str_seq(seq):
seq_class = seq.kind
if seq_class == SEQITEM:
seq = seq.object.lines[1].strip()
elif seq_class == SEQRECORD:
seq = str(seq.object.seq)
return seq.strip()
def get_length(seq):
return len(get_str_seq(seq))
SANGER_QUALS = {chr(i): i - 33 for i in range(33, 127)}
ILLUMINA_QUALS = {chr(i): i - 64 for i in range(64, 127)}
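# Both are offset ASCII encodings: 'I' (ASCII 73) is Q40 in Sanger, while 'h' (ASCII 104) is Q40 in Illumina-1.3+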
def _get_seqitem_qualities(seqwrap):
fmt = seqwrap.file_format.lower()
if 'fasta' in fmt:
raise AttributeError('A fasta file has no qualities')
elif 'fastq' in fmt:
if 'illumina' in fmt:
quals_map = ILLUMINA_QUALS
else:
quals_map = SANGER_QUALS
encoded_quals = seqwrap.object.lines[3].rstrip()
quals = [quals_map[qual] for qual in encoded_quals]
else:
raise RuntimeError('Qualities requested for an unknown SeqItem format')
return quals
def get_int_qualities(seq):
seq_class = seq.kind
if seq_class == SEQITEM:
return _get_seqitem_qualities(seq)
elif seq_class == SEQRECORD:
try:
quals = seq.object.letter_annotations['phred_quality']
except KeyError:
msg = 'The given SeqRecord has no phred_quality'
raise AttributeError(msg)
return quals
SANGER_STRS = {i - 33: chr(i) for i in range(33, 127)}
ILLUMINA_STRS = {i - 64: chr(i) for i in range(64, 127)}
def _int_quals_to_str_quals(int_quals, out_format):
if out_format == SANGER_QUALITY:
quals_map = SANGER_STRS
elif out_format == ILLUMINA_QUALITY:
quals_map = ILLUMINA_STRS
else:
msg = 'Unknown or not supported quality format'
raise ValueError(msg)
return ''.join([quals_map[int_quality] for int_quality in int_quals])
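# e.g. int qualities [0, 40] encode as '!I' in Sanger and as '@h' in Illumina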
def get_str_qualities(seq, out_format=None):
if out_format is None:
out_format = seq.file_format
if out_format in SANGER_FASTQ_FORMATS:
out_format = SANGER_QUALITY
elif out_format in ILLUMINA_FASTQ_FORMATS:
out_format = ILLUMINA_QUALITY
seq_class = seq.kind
if seq_class == SEQITEM:
in_format = seq.file_format
if 'fasta' in in_format:
raise ValueError('A fasta file has no qualities')
if in_format in SANGER_FASTQ_FORMATS:
in_format = SANGER_QUALITY
elif in_format in ILLUMINA_FASTQ_FORMATS:
in_format = ILLUMINA_QUALITY
else:
msg = 'Unknown or not supported quality format: '
msg += in_format
raise ValueError(msg)
if in_format == out_format:
quals = seq.object.lines[3].rstrip()
else:
int_quals = get_int_qualities(seq)
quals = _int_quals_to_str_quals(int_quals, out_format)
elif seq_class == SEQRECORD:
int_quals = get_int_qualities(seq)
quals = _int_quals_to_str_quals(int_quals, out_format)
return quals
def get_annotations(seq):
return seq.object.annotations
def _copy_seqrecord(seqrec, seq=None, name=None, id_=None):
    'Given a seqrecord it returns a new seqrecord with seq, name or id changed.'
if seq is None:
seq = seqrec.seq
if id_ is None:
id_ = seqrec.id
if name is None:
name = seqrec.name
# the letter annotations
let_annot = {annot: v for annot, v in seqrec.letter_annotations.items()}
# the rest of parameters
description = seqrec.description
dbxrefs = seqrec.dbxrefs[:]
features = seqrec.features[:] # the features are not copied
annotations = deepcopy(seqrec.annotations)
# the new sequence
new_seq = SeqRecord(seq=seq, id=id_, name=name, description=description,
dbxrefs=dbxrefs, features=features,
annotations=annotations, letter_annotations=let_annot)
return new_seq
def _copy_seqitem(seqwrapper, seq=None, name=None):
seq_item = seqwrapper.object
lines = seq_item.lines
fmt = seqwrapper.file_format
if seq is None:
lines = lines[:]
else:
if 'fasta' in fmt:
lines = [lines[0], seq + '\n']
elif 'fastq' in fmt:
lines = [lines[0], seq + '\n', lines[2], lines[3]]
if len(lines[1]) != len(lines[3]):
msg = 'Sequence and quality line length do not match'
raise ValueError(msg)
else:
raise RuntimeError('Unknown format for a SequenceItem')
if name:
        # rename the title line
        lines[0] = lines[0][0] + name + '\n'
        # reset the '+' line in case it contains the old name
if 'fastq' in fmt:
lines[2] = '+\n'
name = seq_item.name if name is None else name
annotations = seq_item.annotations
if annotations is not None:
annotations = annotations.copy()
seq = SeqWrapper(kind=seqwrapper.kind,
object=SeqItem(name, lines, annotations),
file_format=fmt)
return seq
def copy_seq(seqwrapper, seq=None, name=None):
seq_class = seqwrapper.kind
seq_obj = seqwrapper.object
if seq_class == SEQITEM:
seq = _copy_seqitem(seqwrapper, seq=seq, name=name)
elif seq_class == SEQRECORD:
seq_obj = _copy_seqrecord(seq_obj, seq=seq, name=name, id_=name)
seq = SeqWrapper(kind=seqwrapper.kind, object=seq_obj,
file_format=seqwrapper.file_format)
return seq
def _slice_seqitem(seqwrap, start, stop):
fmt = seqwrap.file_format
seq_obj = seqwrap.object
lines = seq_obj.lines
seq_str = get_str_seq(seqwrap)
seq_str = seq_str[start: stop] + '\n'
if 'fasta' in fmt:
lines = [lines[0], seq_str]
elif 'fastq' in fmt:
qual_str = get_str_qualities(seqwrap)
qual_str = qual_str[start: stop]
qual_str += '\n'
lines = [lines[0], seq_str, '+\n', qual_str]
else:
raise ValueError('Unknown SeqItem type')
seq_obj = SeqItem(name=seq_obj.name, lines=lines,
annotations=seq_obj.annotations)
return seq_obj
def slice_seq(seq, start=None, stop=None):
seq_class = seq.kind
if seq_class == SEQITEM:
seq_obj = _slice_seqitem(seq, start, stop)
elif seq_class == SEQRECORD:
seq_obj = seq.object[start:stop]
return SeqWrapper(seq.kind, object=seq_obj, file_format=seq.file_format)
def assing_kind_to_seqs(kind, seqs, file_format):
    'It wraps each seq in a SeqWrapper namedtuple.'
return (SeqWrapper(kind, seq, file_format) for seq in seqs)
|
JoseBlanca/seq_crumbs
|
crumbs/seq/seq.py
|
Python
|
gpl-3.0
| 9,686
|
[
"Biopython"
] |
c67736aee94f997388f084ce61a9b61ac9d6328822d3c0e75c1d73dbcdb53d98
|
# This script exercises some of the idiosyncrasies
# of the descriptor class and PBLAS on a realistic
# case. See the BLACS descriptor documentation
# in trunk/gpaw/blacs.py for some discussions of
# these idiosyncrasies.
import numpy as np
from gpaw.blacs import BlacsGrid, parallelprint
from gpaw.mpi import world, rank, size
from gpaw.utilities.scalapack import pblas_simple_gemm
gen = np.random.RandomState(42)
# simulate state-parallelization=2 and
# domain-decomposition.prod=32
B = 2
D = 32
mb = 32
grid = BlacsGrid(world, B, D)
nbands = 500
nG = 80**3
nGdesc = grid.new_descriptor(nbands, nG, nbands/B, nG/D)
nndesc = grid.new_descriptor(nbands, nbands, mb, mb)
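# With the values above each rank holds a 250 x 16000 local block of psit_nG
# (500/2 bands by 80**3/32 grid points) and 32 x 32 blocks of A_nn.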
psit_nG = gen.rand(*nGdesc.shape)
A_nn = gen.rand(*nndesc.shape)
assert nGdesc.check(psit_nG)
assert nndesc.check(A_nn)
parallelprint(world, (A_nn.shape, nndesc.shape, nndesc.lld))
pblas_simple_gemm(nGdesc, nGdesc, nndesc, psit_nG, psit_nG, A_nn,
transa='N', transb='T')
|
qsnake/gpaw
|
gpaw/test/big/miscellaneous/pblacs_oblong.py
|
Python
|
gpl-3.0
| 972
|
[
"GPAW"
] |
28a5d47158147e63e8cb3786726f976b84f845b577a5c6a755fc59ea2ea8ad19
|
"""
=============================================
Integration and ODEs (:mod:`scipy.integrate`)
=============================================
.. currentmodule:: scipy.integrate
Integrating functions, given function object
============================================
.. autosummary::
:toctree: generated/
quad -- General purpose integration
quad_vec -- General purpose integration of vector-valued functions
dblquad -- General purpose double integration
tplquad -- General purpose triple integration
nquad -- General purpose N-D integration
fixed_quad -- Integrate func(x) using Gaussian quadrature of order n
quadrature -- Integrate with given tolerance using Gaussian quadrature
romberg -- Integrate func using Romberg integration
quad_explain -- Print information for use of quad
newton_cotes -- Weights and error coefficient for Newton-Cotes integration
IntegrationWarning -- Warning on issues during integration
Integrating functions, given fixed samples
==========================================
.. autosummary::
:toctree: generated/
trapz -- Use trapezoidal rule to compute integral.
cumtrapz -- Use trapezoidal rule to cumulatively compute integral.
simps -- Use Simpson's rule to compute integral from samples.
romb -- Use Romberg Integration to compute integral from
-- (2**k + 1) evenly-spaced samples.
.. seealso::
:mod:`scipy.special` for orthogonal polynomials (special) for Gaussian
quadrature roots and weights for other weighting factors and regions.
Solving initial value problems for ODE systems
==============================================
The solvers are implemented as individual classes, which can be used directly
(low-level usage) or through a convenience function.
.. autosummary::
:toctree: generated/
solve_ivp -- Convenient function for ODE integration.
RK23 -- Explicit Runge-Kutta solver of order 3(2).
RK45 -- Explicit Runge-Kutta solver of order 5(4).
DOP853 -- Explicit Runge-Kutta solver of order 8.
Radau -- Implicit Runge-Kutta solver of order 5.
BDF -- Implicit multi-step variable order (1 to 5) solver.
LSODA -- LSODA solver from ODEPACK Fortran package.
OdeSolver -- Base class for ODE solvers.
DenseOutput -- Local interpolant for computing a dense output.
OdeSolution -- Class which represents a continuous ODE solution.
Old API
-------
These are the routines developed earlier for SciPy. They wrap older solvers
implemented in Fortran (mostly ODEPACK). While the interface to them is not
particularly convenient and certain features are missing compared to the new
API, the solvers themselves are of good quality and work fast as compiled
Fortran code. In some cases, it might be worth using this old API.
.. autosummary::
:toctree: generated/
odeint -- General integration of ordinary differential equations.
ode -- Integrate ODE using VODE and ZVODE routines.
complex_ode -- Convert a complex-valued ODE to real-valued and integrate.
Solving boundary value problems for ODE systems
===============================================
.. autosummary::
:toctree: generated/
solve_bvp -- Solve a boundary value problem for a system of ODEs.
"""
from .quadrature import *
from .odepack import *
from .quadpack import *
from ._ode import *
from ._bvp import solve_bvp
from ._ivp import (solve_ivp, OdeSolution, DenseOutput,
OdeSolver, RK23, RK45, DOP853, Radau, BDF, LSODA)
from ._quad_vec import quad_vec
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
aeklant/scipy
|
scipy/integrate/__init__.py
|
Python
|
bsd-3-clause
| 3,826
|
[
"Gaussian"
] |
6d7b544523f9fee7eabb865648cf9562e0aa9f90e2c274a08452c6214571f590
|
from __future__ import print_function
from six import iteritems
import vtk
from vtk import vtkQuad
from numpy import array, arange, cross
from pyNastran.converters.LaWGS.wgs_reader import LaWGS
from pyNastran.gui.gui_objects.gui_result import GuiResult
class LaWGS_IO(object):
def __init__(self):
pass
def get_lawgs_wildcard_geometry_results_functions(self):
data = ('LaWGS',
'LaWGS (*.inp; *.wgs)', self.load_lawgs_geometry,
None, None)
return data
def load_lawgs_geometry(self, lawgs_filename, dirname, name='main', plot=True):
#key = self.case_keys[self.icase]
#case = self.result_cases[key]
skip_reading = self._remove_old_geometry(lawgs_filename)
if skip_reading:
return
model = LaWGS(lawgs_filename)
self.model_type = model.model_type
model.read_lawgs()
nodes, elements, regions = model.get_points_elements_regions()
self.nNodes = len(nodes)
self.nElements = len(elements)
nodes = array(nodes, dtype='float32')
elements = array(elements, dtype='int32')
#print("nNodes = ",self.nNodes)
#print("nElements = ", self.nElements)
self.grid.Allocate(self.nElements, 1000)
#self.gridResult.SetNumberOfComponents(self.nElements)
points = vtk.vtkPoints()
points.SetNumberOfPoints(self.nNodes)
#self.gridResult.Allocate(self.nNodes, 1000)
        #vectorResult.SetNumberOfComponents(3)
self.nid_map = {}
#elem.SetNumberOfPoints(nNodes)
if 0:
fraction = 1. / self.nNodes # so you can color the nodes by ID
for nid, node in sorted(iteritems(nodes)):
points.InsertPoint(nid - 1, *node)
self.gridResult.InsertNextValue(nid * fraction)
#print(str(element))
#elem = vtk.vtkVertex()
#elem.GetPointIds().SetId(0, i)
#self.aQuadGrid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
#vectorResult.InsertTuple3(0, 0.0, 0.0, 1.0)
assert len(nodes) > 0, len(nodes)
assert len(elements) > 0, len(elements)
for nid, node in enumerate(nodes):
points.InsertPoint(nid, *node)
elem = vtkQuad()
etype = elem.GetCellType()
for eid, element in enumerate(elements):
(p1, p2, p3, p4) = element
elem = vtkQuad()
pts = elem.GetPointIds()
pts.SetId(0, p1)
pts.SetId(1, p2)
pts.SetId(2, p3)
pts.SetId(3, p4)
self.grid.InsertNextCell(etype, elem.GetPointIds())
self.grid.SetPoints(points)
#self.grid.GetPointData().SetScalars(self.gridResult)
#print(dir(self.grid) #.SetNumberOfComponents(0))
#self.grid.GetCellData().SetNumberOfTuples(1);
#self.grid.GetCellData().SetScalars(self.gridResult)
self.grid.Modified()
if hasattr(self.grid, 'Update'):
self.grid.Update()
# loadCart3dResults - regions/loads
#self. turn_text_on()
#self.scalarBar.VisibilityOn()
#self.scalarBar.Modified()
self.iSubcaseNameMap = {1: ['LaWGS', '']}
cases = {}
ID = 1
#print("nElements = %s" % nElements)
form, cases = self._fill_lawgs_case(cases, ID, nodes, elements, regions)
self._finish_results_io2(form, cases)
def _fill_lawgs_case(self, cases, ID, nodes, elements, regions):
eids = arange(1, len(elements) + 1, dtype='int32')
nids = arange(1, len(nodes) + 1, dtype='int32')
regions = array(regions, dtype='int32')
icase = 0
geometry_form = [
('Region', icase, []),
('ElementID', icase + 1, []),
('NodeID', icase + 2, []),
('X', icase + 3, []),
('Y', icase + 4, []),
('Z', icase + 5, []),
('NormalX', icase + 6, []),
('NormalY', icase + 7, []),
('NormalZ', icase + 8, []),
]
region_res = GuiResult(ID, header='Region', title='Region',
location='centroid', scalar=regions)
eid_res = GuiResult(ID, header='ElementID', title='ElementID',
location='centroid', scalar=eids)
nid_res = GuiResult(ID, header='NodeID', title='NodeID',
location='node', scalar=nids)
cases[icase] = (region_res, (ID, 'Region'))
cases[icase + 1] = (eid_res, (ID, 'ElementID'))
cases[icase + 2] = (nid_res, (ID, 'NodeID'))
#nnids = len(nids)
neids = len(elements)
a = nodes[elements[:, 2], :] - nodes[elements[:, 0], :]
b = nodes[elements[:, 3], :] - nodes[elements[:, 1], :]
normals = cross(a, b, axis=1)
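        # quad normals from the cross product of the two diagonals: (p3 - p1) x (p4 - p2)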
assert normals.shape[0] == neids, normals.shape
assert normals.shape[1] == 3, normals.shape
x_res = GuiResult(ID, header='X', title='X',
location='node', scalar=nodes[:, 0])
        y_res = GuiResult(ID, header='Y', title='Y',
                          location='node', scalar=nodes[:, 1])
        z_res = GuiResult(ID, header='Z', title='Z',
                          location='node', scalar=nodes[:, 2])
        nx_res = GuiResult(ID, header='NormalX', title='NormalX',
                           location='centroid', scalar=normals[:, 0])
        ny_res = GuiResult(ID, header='NormalY', title='NormalY',
                           location='centroid', scalar=normals[:, 1])
        nz_res = GuiResult(ID, header='NormalZ', title='NormalZ',
                           location='centroid', scalar=normals[:, 2])
cases[icase + 3] = (x_res, (ID, 'X'))
cases[icase + 4] = (y_res, (ID, 'Y'))
cases[icase + 5] = (z_res, (ID, 'Z'))
cases[icase + 6] = (nx_res, (ID, 'NormalX'))
cases[icase + 7] = (ny_res, (ID, 'NormalY'))
cases[icase + 8] = (nz_res, (ID, 'NormalZ'))
return geometry_form, cases
|
saullocastro/pyNastran
|
pyNastran/converters/LaWGS/wgs_io.py
|
Python
|
lgpl-3.0
| 6,233
|
[
"VTK"
] |
85d454ef4e6432031cabeae107f5868391f2c95ff0cace87743ab006ce13f039
|
#!/opt/miniconda/bin/python3
import sys
import subprocess
from textwrap import fill
import pandas as pd
from Bio import SeqIO
readfile = sys.argv[1]
allrefs = dict([(s.id.split('_')[0], str(s.seq))
for s in SeqIO.parse('/analyses/Diagnostics/Repositories/SmaltAlign/References/flugenomes_nonmixed.fasta', 'fasta')])
# index flugenomes.fasta
cml = 'bwa index /analyses/Diagnostics/Repositories/SmaltAlign/References/flugenomes_nonmixed.fasta'
subprocess.call(cml, shell=True)
# align against all genomes
cml = 'bwa mem -t 24 /analyses/Diagnostics/Repositories/SmaltAlign/References/flugenomes_nonmixed.fasta %s | samtools view -F 4 > aln.sam' % readfile
subprocess.call(cml, shell=True)
# extract accession number, segment, serotype
cml = 'cut -f 3 aln.sam | cut -d "_" -f 1-3 | tr -d ">" | tr "_" "\t" > ref.tsv'
subprocess.call(cml, shell=True)
# manipulate with pandas to find, for each segment, the sequence with most hits
df = pd.read_table('ref.tsv', names=['accn', 'segment', 'serotype'])
count_ref = df.groupby(['segment', 'accn', 'serotype']).size()
c = count_ref.reset_index(name='counts').sort_values(['segment', 'counts'], ascending=[True, False])
c.to_csv('counts.tsv', index=False, sep='\t')
print(c.groupby('segment').head(3))
for segment in range(1, 9):
counts = c[c['segment'] == segment]
if counts.counts.sum() < 200 or counts.empty:
print(segment, 'not enough')
continue
best_acc = counts.accn.tolist()[0]
print(segment, best_acc)
best_seq = allrefs[best_acc]
with open('segment-%d.fasta' % segment, 'w') as h:
h.write('>segment-%d-%s\n' % (segment, best_acc))
h.write(fill(best_seq, width=80))
|
medvir/SmaltAlign
|
select_ref.py
|
Python
|
mit
| 1,691
|
[
"BWA"
] |
c67e65456a25c9e9555486897c05125489336f6494073bad5ef52d3c07d101d2
|
#!/usr/bin/env python
import io
import netCDF4
import numpy
import m6plot
import m6toolbox
import matplotlib.pyplot as plt
import os
try: import argparse
except: raise Exception('This version of python is not new enough. python 2.7 or newer is required.')
def run():
parser = argparse.ArgumentParser(description='''Script for plotting depth vs. time plots of temperature and salinity drift''')
parser.add_argument('infile', type=str, help='''Directory containing annual time series thetao and so xyave files''')
parser.add_argument('-l','--label', type=str, default='', help='''Label to add to the plot.''')
parser.add_argument('-s','--suptitle', type=str, default='', help='''Super-title for experiment. Default is to read from netCDF file.''')
parser.add_argument('-o','--outdir', type=str, default='.', help='''Directory in which to place plots.''')
parser.add_argument('-t','--trange', type=str, default=None, help='''Tuple containing start and end years to plot''')
cmdLineArgs = parser.parse_args()
main(cmdLineArgs)
def main(cmdLineArgs,stream=False):
if not isinstance(cmdLineArgs.infile,list):
cmdLineArgs.infile = [cmdLineArgs.infile]
rootGroupT = [x+'.thetao_xyave.nc' for x in cmdLineArgs.infile]
rootGroupS = [x+'.so_xyave.nc' for x in cmdLineArgs.infile]
rootGroupT = netCDF4.MFDataset( rootGroupT )
rootGroupS = netCDF4.MFDataset( rootGroupS )
if 'thetao_xyave' not in rootGroupT.variables: raise Exception('Could not find "thetao_xyave" files "%s"'%(cmdLineArgs.infile))
if 'so_xyave' not in rootGroupS.variables: raise Exception('Could not find "so_xyave" files "%s"'%(cmdLineArgs.infile))
if 'zt' in rootGroupT.variables.keys():
zt = rootGroupT.variables['zt'][:] * -1
elif 'z_l' in rootGroupT.variables.keys():
zt = rootGroupT.variables['z_l'][:] * -1
timeT = rootGroupT.variables['time']
timeS = rootGroupS.variables['time']
timeT = numpy.array([int(x.year) for x in netCDF4.num2date(timeT[:],timeT.units,calendar=timeT.calendar)])
timeS = numpy.array([int(x.year) for x in netCDF4.num2date(timeS[:],timeS.units,calendar=timeS.calendar)])
variable = rootGroupT.variables['thetao_xyave']
T = variable[:]
T = T-T[0]
variable = rootGroupS.variables['so_xyave']
S = variable[:]
S = S-S[0]
if cmdLineArgs.suptitle != '': suptitle = cmdLineArgs.suptitle + ' ' + cmdLineArgs.label
else: suptitle = rootGroupT.title + ' ' + cmdLineArgs.label
imgbufs = []
if stream is True: objOut = io.BytesIO()
else: objOut = cmdLineArgs.outdir+'/T_drift.png'
m6plot.ztplot( T, timeT, zt, splitscale=[0., -2000., -6500.],
suptitle=suptitle, title='Potential Temperature [C]',
extend='both', colormap='dunnePM', autocenter=True,
save=objOut)
if stream is True: imgbufs.append(objOut)
if stream is True: objOut = io.BytesIO()
else: objOut = cmdLineArgs.outdir+'/S_drift.png'
m6plot.ztplot( S, timeS, zt, splitscale=[0., -2000., -6500.],
suptitle=suptitle, title='Salinity [psu]',
extend='both', colormap='dunnePM', autocenter=True,
save=objOut)
if stream is True: imgbufs.append(objOut)
if stream is True:
return imgbufs
if __name__ == '__main__':
run()
|
nicjhan/MOM6-examples
|
tools/analysis/TS_drift.py
|
Python
|
gpl-3.0
| 3,201
|
[
"NetCDF"
] |
e627840032caf7ec0f2458786999d5fd10ef8b166d3c3a531863a1b699f88521
|
######################################################################
# Simple script to test VTK export of periodic cell
######################################################################
# enable periodic cell
O.periodic=True
# insert some bodies
sp = randomPeriPack(radius=1,initSize=(10,20,30),memoizeDb='/tmp/vtkPeriodicCell.sqlite')
sp.toSimulation()
# transform the cell a bit
O.cell.hSize *= Matrix3(1,.1,.1, .1,1,0, .1,0,1) # skew the cell in xy and xz plane
O.cell.hSize *= Matrix3(1,0,0, 0,.8,.6, 0,-.6,.8) # rotate it along x axis
O.step()
# test of export.VTKExporter
from yade import export
vtk1 = export.VTKExporter('/tmp/vtkPeriodicCell-VTKExporter')
vtk1.exportSpheres()
vtk1.exportPeriodicCell()
# test of VTKRecorder
vtk2 = VTKRecorder(fileName='/tmp/vtkPeriodicCell-VTKRecorder-',recorders=['spheres','pericell'])
vtk2() # do the export
|
bcharlas/mytrunk
|
examples/test/vtkPeriodicCell.py
|
Python
|
gpl-2.0
| 865
|
[
"VTK"
] |
3b93c5c29d599b48dc7894fd57c023f516eb614a67bbf9c7b8f46f4ad20abe66
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# rmvgridowner - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
cgitb.enable()
from shared.functionality.rmvgridowner import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
|
heromod/migrid
|
mig/cgi-bin/rmvgridowner.py
|
Python
|
gpl-2.0
| 1,112
|
[
"Brian"
] |
62d3c6bcef12c5953a8a7ba3d909bbd7d04141a5cda21763b76eee8850f747b8
|
"""Handle surfaces."""
from __future__ import absolute_import, division, print_function
from nibabel.freesurfer.io import read_geometry
import numpy as np
from scipy.io.matlab.mio import loadmat
def vertex_values_to_colors(vertex_values):
"""Convert vertex values to RGB representation.
Parameters
----------
vertex_values : array_like
List of scalar vertex values
Returns
-------
colors : array_like
Array of size number of vertices times 3 with values between 0 and 1.
"""
values_max = max(vertex_values)
values_min = min(vertex_values)
if values_max == values_min:
return np.ones((vertex_values.shape[0], 3))
colors = np.tile(
(np.asarray(vertex_values)[:, np.newaxis] - values_min) /
(values_max - values_min),
(1, 3))
return colors
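# Illustrative example: values [0., 5., 10.] map linearly onto grey levels,
# i.e. rows [0, 0, 0], [0.5, 0.5, 0.5] and [1, 1, 1].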
class Surface(object):
"""Representation of a surface with faces and vertices."""
def __init__(self, vertices=None, faces=None, vertex_values=None):
"""Setup vertices, faces and optionally vertex values."""
self._vertices = vertices
self._faces = faces
self._vertex_values = vertex_values
def __repr__(self):
"""Return string representation."""
return "Surface(n_vertices={}, n_faces={})".format(
len(self._vertices), len(self._faces))
def __str__(self):
"""Return string representation."""
return self.__repr__()
@property
def vertices(self):
"""Return vertices."""
return self._vertices
    @vertices.setter
    def vertices(self, values):
        """Set vertices."""
        self._vertices = values
@property
def faces(self):
"""Return faces."""
return self._faces
@property
def vertex_values(self):
"""Return vertex values."""
return self._vertex_values
@vertex_values.setter
def vertex_values(self, values):
"""Check and set vertex values."""
if values is None:
self._vertex_values = None
elif len(values) == self._vertices.shape[0]:
self._vertex_values = np.asarray(values)
else:
raise ValueError('values should be None or length of vertices')
def find_closest_vertex(self, coordinate):
"""Return the index of the vertex closest to a given point.
The distance is computed as the Euclidean distance.
Parameters
----------
coordinate : tuple of int or float
Returns
-------
index : int
Index of the vertex that is closest to the coordinate
Examples
--------
>>> vertices = [[0, 1, 0], [1, 0, 0], [0, 0, -1], [-1, 0, 0],
... [0, 1, 0], [0, -1, 0]]
>>> faces = [[0, 2, 1], [0, 3, 2], [0, 4, 3], [0, 1, 4],
... [5, 1, 2], [5, 2, 3], [5, 3, 4], [5, 4, 1]]
>>> surface = Surface(vertices, faces)
>>> surface.find_closest_vertex((2, 0, 0))
1
"""
if self._vertices is None:
return None
distances = np.sum((np.asarray(self.vertices) - coordinate) ** 2,
axis=1)
index = np.argmin(distances)
return index
class TriSurface(Surface):
"""Representation of a triangularized surface with faces and vertices.
Attributes
----------
vertices : numpy.array
N x 3 array with vertex coordinates
faces : numpy.array
        M x 3 array with indices into vertices.
Examples
--------
>>> vertices = [[0, 1, 0], [1, 0, 0], [0, 0, -1], [-1, 0, 0],
... [0, 1, 0], [0, -1, 0]]
>>> faces = [[0, 2, 1], [0, 3, 2], [0, 4, 3], [0, 1, 4],
... [5, 1, 2], [5, 2, 3], [5, 3, 4], [5, 4, 1]]
>>> surface = TriSurface(vertices, faces)
"""
def __init__(self, vertices=None, faces=None, vertex_values=None):
"""Setup vertices, faces and optionally vertex values."""
self._vertices = np.array(vertices)
self._faces = np.array(faces)
self.vertex_values = vertex_values
def __repr__(self):
"""Return string representation."""
return "TriSurface(n_vertices={}, n_faces={})".format(
self._vertices.shape[0], self._faces.shape[0])
@classmethod
def read_freesurfer(cls, filename):
"""Read a triangular format Freesurfer surface mesh.
Parameters
----------
filename : str
Filename for the file with the triangular data
Returns
-------
surface : TriSurface
"""
vertices, faces = read_geometry(filename)
return cls(vertices=vertices, faces=faces)
@classmethod
def read_mat(cls, filename, vertices_name='vert', faces_name='face',
scale=1.0):
"""Read matlab mat file.
Only faces and vertices are read from the Matlab file.
Parameters
----------
filename : str
Filename for mat file. It is expected that the vertices is
an a variable called 'vert' and the faces in a variable
called 'face'.
scale : float
Scale vertices
Returns
-------
surface : Surface
Surface object with the read surface.
"""
data = loadmat(filename)
vertices = data[vertices_name] * scale
faces = data[faces_name] - 1
return cls(vertices, faces)
@classmethod
def read_obj(cls, filename):
"""Read Wavefront obj file.
Only faces and vertices are read from the Wavefront file.
Parameters
----------
filename : str
Filename for Wavefront file.
Returns
-------
surface : Surface
Surface object with the read surface.
"""
vertices = []
faces = []
with open(filename) as fid:
for line in fid:
elements = line.split()
if not elements:
# Empty line
continue
if elements[0] == 'v':
vertices.append([float(element)
for element in elements[1:4]])
elif elements[0] == 'vn':
# TODO
pass
elif elements[0] == 'f':
faces.append([int(element.split('//')[0])
for element in elements[1:]])
else:
# TODO
pass
return cls(np.array(vertices), np.array(faces) - 1)
def plot(self, *args, **kwargs):
"""Plot surface.
        Presently the surface is plotted with Mayavi.
Parameters
----------
title : str
String to use as title in the plot
"""
return self._plot_mayavi(*args, **kwargs)
def _plot_mayavi(self, *args, **kwargs):
"""Plot surface with Mayavi.
The x-axis is switched to account for the Mayavi's right-handed
coordinate system and Talairach's left-handed coordinate system.
Parameters
----------
title : str
String to use as title in the plot
"""
# Delayed import of Mayavi
from mayavi.mlab import title as mlab_title, triangular_mesh
title = kwargs.pop('title', None)
        if self._vertex_values is None:
            # no vertex values: let Mayavi choose a uniform surface color
            handle = triangular_mesh(
                self._vertices[:, 0],
                self._vertices[:, 1],
                self._vertices[:, 2],
                self._faces,
                *args, **kwargs)
else:
handle = triangular_mesh(
self._vertices[:, 0],
self._vertices[:, 1],
self._vertices[:, 2],
self._faces,
scalars=self._vertex_values,
*args, **kwargs)
if title is not None:
mlab_title(title)
return handle
def colorbar(self, *args, **kwargs):
"""Show colorbar for rendered surface."""
return self._colorbar_mayavi(*args, **kwargs)
def _colorbar_mayavi(self, *args, **kwargs):
"""Show colorbar in Mayavi."""
# Delayed import of Mayavi
from mayavi.mlab import colorbar
colorbar()
def show(self):
"""Show the plotted surface."""
self._show_mayavi()
def _show_mayavi(self):
from mayavi.mlab import show
show()
def write_obj(self, filename, scale=1.0, mirror_triangles=False):
"""Write obj file.
Parameters
----------
filename : str
Filename of obj to be written.
mirror_triangles : bool, optional
Determines whether triangles should be mirrored, i.e.,
counter-clockwise triangles should be converted to clockwise
triangles.
scale : float
            Scale for vertices: multiply the vertices by the scale value.
"""
step = 1
if mirror_triangles:
step = -1
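        # indexing faces with ::step reverses the vertex order, which flips the winding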
if self.vertex_values is not None:
vertex_colors = vertex_values_to_colors(self.vertex_values)
with open(filename, 'w') as f:
            for n in range(self.vertices.shape[0]):
                f.write('v {} {} {}'.format(*(
                    self.vertices[n, :] * scale)))
                if self.vertex_values is not None:
                    f.write(' {} {} {}'.format(*vertex_colors[n, :]))
                f.write('\n')
for n in range(self.faces.shape[0]):
f.write('f {} {} {}\n'.format(*(self.faces[n, ::step] + 1)))
read_mat = TriSurface.read_mat
read_obj = TriSurface.read_obj
|
fnielsen/brede
|
brede/surface/core.py
|
Python
|
gpl-3.0
| 9,816
|
[
"Mayavi"
] |
3f78104daf57f322461e37dcc0f68d1ce253df54b5582a51fdd099a1227c25bc
|
# -*- coding: utf-8 -*-
"""Factories for the OSF models, including an abstract ModularOdmFactory.
Example usage: ::
>>> from tests.factories import UserFactory
>>> user1 = UserFactory()
>>> user1.username
fred0@example.com
    >>> user2 = UserFactory()
    >>> user2.username
    fred1@example.com
Factory boy docs: http://factoryboy.readthedocs.org/
"""
import datetime
import functools
from factory import base, Sequence, SubFactory, post_generation, LazyAttribute
from mock import patch, Mock
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from framework.mongo import StoredObject
from framework.auth import User, Auth
from framework.auth.utils import impute_names_model
from framework.sessions.model import Session
from website.addons import base as addons_base
from website.oauth.models import (
ApiOAuth2Application,
ApiOAuth2PersonalToken,
ExternalAccount,
ExternalProvider
)
from website.project.model import (
Comment, DraftRegistration, Embargo, MetaSchema, Node, NodeLog, Pointer,
PrivateLink, RegistrationApproval, Retraction, Sanction, Tag, WatchConfig,
ensure_schemas
)
from website.notifications.model import NotificationSubscription, NotificationDigest
from website.archiver.model import ArchiveTarget, ArchiveJob
from website.archiver import ARCHIVER_SUCCESS
from website.project.licenses import NodeLicense, NodeLicenseRecord, ensure_licenses
ensure_licenses = functools.partial(ensure_licenses, warn=False)
from website.addons.wiki.model import NodeWikiPage
from tests.base import fake
from tests.base import DEFAULT_METASCHEMA
# TODO: This is a hack. Check whether FactoryBoy can do this better
def save_kwargs(**kwargs):
for value in kwargs.itervalues():
if isinstance(value, StoredObject) and not value._is_loaded:
value.save()
def FakerAttribute(provider, **kwargs):
"""Attribute that lazily generates a value using the Faker library.
Example: ::
class UserFactory(ModularOdmFactory):
name = FakerAttribute('name')
"""
fake_gen = getattr(fake, provider)
if not fake_gen:
raise ValueError('{0!r} is not a valid faker provider.'.format(provider))
return LazyAttribute(lambda x: fake_gen(**kwargs))
class ModularOdmFactory(base.Factory):
"""Base factory for modular-odm objects.
"""
ABSTRACT_FACTORY = True
@classmethod
def _build(cls, target_class, *args, **kwargs):
"""Build an object without saving it."""
save_kwargs(**kwargs)
return target_class(*args, **kwargs)
@classmethod
def _create(cls, target_class, *args, **kwargs):
save_kwargs(**kwargs)
instance = target_class(*args, **kwargs)
instance.save()
return instance
class UserFactory(ModularOdmFactory):
FACTORY_FOR = User
username = Sequence(lambda n: "fred{0}@example.com".format(n))
# Don't use post generation call to set_password because
# It slows down the tests dramatically
password = "password"
fullname = Sequence(lambda n: "Freddie Mercury{0}".format(n))
is_registered = True
is_claimed = True
date_confirmed = datetime.datetime(2014, 2, 21)
merged_by = None
email_verifications = {}
verification_key = None
@post_generation
def set_names(self, create, extracted):
parsed = impute_names_model(self.fullname)
for key, value in parsed.items():
setattr(self, key, value)
if create:
self.save()
@post_generation
def set_emails(self, create, extracted):
if self.username not in self.emails:
self.emails.append(self.username)
self.save()
class AuthUserFactory(UserFactory):
"""A user that automatically has an api key, for quick authentication.
Example: ::
user = AuthUserFactory()
res = self.app.get(url, auth=user.auth) # user is "logged in"
"""
@post_generation
def add_auth(self, create, extracted):
self.set_password('password')
self.save()
self.auth = (self.username, 'password')
class TagFactory(ModularOdmFactory):
FACTORY_FOR = Tag
_id = Sequence(lambda n: "scientastic-{}".format(n))
class ApiOAuth2ApplicationFactory(ModularOdmFactory):
FACTORY_FOR = ApiOAuth2Application
owner = SubFactory(UserFactory)
name = Sequence(lambda n: 'Example OAuth2 Application #{}'.format(n))
home_url = 'ftp://ftp.ncbi.nlm.nimh.gov/'
callback_url = 'http://example.uk'
class ApiOAuth2PersonalTokenFactory(ModularOdmFactory):
FACTORY_FOR = ApiOAuth2PersonalToken
owner = SubFactory(UserFactory)
scopes = 'osf.full_write osf.full_read'
name = Sequence(lambda n: 'Example OAuth2 Personal Token #{}'.format(n))
class PrivateLinkFactory(ModularOdmFactory):
FACTORY_FOR = PrivateLink
name = "link"
key = "foobarblaz"
anonymous = False
creator = SubFactory(AuthUserFactory)
class AbstractNodeFactory(ModularOdmFactory):
FACTORY_FOR = Node
title = 'The meaning of life'
description = 'The meaning of life is 42.'
creator = SubFactory(AuthUserFactory)
class ProjectFactory(AbstractNodeFactory):
category = 'project'
class FolderFactory(ProjectFactory):
is_folder = True
class DashboardFactory(FolderFactory):
is_dashboard = True
class NodeFactory(AbstractNodeFactory):
category = 'hypothesis'
parent = SubFactory(ProjectFactory)
class RegistrationFactory(AbstractNodeFactory):
# Default project is created if not provided
category = 'project'
@classmethod
def _build(cls, target_class, *args, **kwargs):
raise Exception("Cannot build registration without saving.")
@classmethod
def _create(cls, target_class, project=None, schema=None, user=None,
data=None, archive=False, embargo=None, registration_approval=None, retraction=None, is_public=False,
*args, **kwargs):
save_kwargs(**kwargs)
# Original project to be registered
project = project or target_class(*args, **kwargs)
project.save()
# Default registration parameters
schema = schema or DEFAULT_METASCHEMA
user = user or project.creator
data = data or {'some': 'data'}
auth = Auth(user=user)
register = lambda: project.register_node(
schema=schema,
auth=auth,
data=data
)
def add_approval_step(reg):
if embargo:
reg.embargo = embargo
elif registration_approval:
reg.registration_approval = registration_approval
elif retraction:
reg.retraction = retraction
else:
reg.require_approval(reg.creator)
reg.save()
reg.sanction.add_authorizer(reg.creator)
reg.sanction.save()
if archive:
reg = register()
add_approval_step(reg)
else:
with patch('framework.tasks.handlers.enqueue_task'):
reg = register()
add_approval_step(reg)
with patch.object(reg.archive_job, 'archive_tree_finished', Mock(return_value=True)):
reg.archive_job.status = ARCHIVER_SUCCESS
reg.archive_job.save()
reg.sanction.state = Sanction.APPROVED
reg.sanction.save()
ArchiveJob(
src_node=project,
dst_node=reg,
initiator=user,
)
if is_public:
reg.is_public = True
reg.save()
return reg
class PointerFactory(ModularOdmFactory):
FACTORY_FOR = Pointer
node = SubFactory(NodeFactory)
class NodeLogFactory(ModularOdmFactory):
FACTORY_FOR = NodeLog
action = 'file_added'
user = SubFactory(UserFactory)
class WatchConfigFactory(ModularOdmFactory):
FACTORY_FOR = WatchConfig
node = SubFactory(NodeFactory)
class SanctionFactory(ModularOdmFactory):
ABSTRACT_FACTORY = True
@classmethod
def _create(cls, target_class, approve=False, *args, **kwargs):
user = kwargs.get('user') or UserFactory()
sanction = ModularOdmFactory._create(target_class, initiated_by=user, *args, **kwargs)
reg_kwargs = {
'creator': user,
'user': user,
sanction.SHORT_NAME: sanction
}
RegistrationFactory(**reg_kwargs)
if not approve:
sanction.state = Sanction.UNAPPROVED
sanction.save()
return sanction
class RetractionFactory(SanctionFactory):
FACTORY_FOR = Retraction
user = SubFactory(UserFactory)
class EmbargoFactory(SanctionFactory):
FACTORY_FOR = Embargo
user = SubFactory(UserFactory)
class RegistrationApprovalFactory(SanctionFactory):
FACTORY_FOR = RegistrationApproval
user = SubFactory(UserFactory)
class NodeWikiFactory(ModularOdmFactory):
FACTORY_FOR = NodeWikiPage
page_name = 'home'
content = 'Some content'
version = 1
user = SubFactory(UserFactory)
node = SubFactory(NodeFactory)
@post_generation
def set_node_keys(self, create, extracted):
self.node.wiki_pages_current[self.page_name] = self._id
self.node.wiki_pages_versions[self.page_name] = [self._id]
self.node.save()
class UnregUserFactory(ModularOdmFactory):
"""Factory for an unregistered user. Uses User.create_unregistered()
to create an instance.
"""
FACTORY_FOR = User
email = Sequence(lambda n: "brian{0}@queen.com".format(n))
fullname = Sequence(lambda n: "Brian May{0}".format(n))
@classmethod
def _build(cls, target_class, *args, **kwargs):
'''Build an object without saving it.'''
return target_class.create_unregistered(*args, **kwargs)
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = target_class.create_unregistered(*args, **kwargs)
instance.save()
return instance
class UnconfirmedUserFactory(ModularOdmFactory):
"""Factory for a user that has not yet confirmed their primary email
address (username).
"""
FACTORY_FOR = User
username = Sequence(lambda n: 'roger{0}@queen.com'.format(n))
fullname = Sequence(lambda n: 'Roger Taylor{0}'.format(n))
password = 'killerqueen'
@classmethod
def _build(cls, target_class, username, password, fullname):
'''Build an object without saving it.'''
return target_class.create_unconfirmed(
username=username, password=password, fullname=fullname
)
@classmethod
def _create(cls, target_class, username, password, fullname):
instance = target_class.create_unconfirmed(
username=username, password=password, fullname=fullname
)
instance.save()
return instance
class AuthFactory(base.Factory):
FACTORY_FOR = Auth
user = SubFactory(UserFactory)
class ProjectWithAddonFactory(ProjectFactory):
"""Factory for a project that has an addon. The addon will be added to
both the Node and the creator records. ::
p = ProjectWithAddonFactory(addon='github')
p.get_addon('github') # => github node settings object
p.creator.get_addon('github') # => github user settings object
"""
# TODO: Should use mock addon objects
@classmethod
def _build(cls, target_class, addon='s3', *args, **kwargs):
'''Build an object without saving it.'''
instance = ProjectFactory._build(target_class, *args, **kwargs)
auth = Auth(user=instance.creator)
instance.add_addon(addon, auth)
instance.creator.add_addon(addon)
return instance
@classmethod
def _create(cls, target_class, addon='s3', *args, **kwargs):
instance = ProjectFactory._create(target_class, *args, **kwargs)
auth = Auth(user=instance.creator)
instance.add_addon(addon, auth)
instance.creator.add_addon(addon)
instance.save()
return instance
# Deprecated unregistered user factory, used mainly for testing migration
class DeprecatedUnregUser(object):
'''A dummy "model" for an unregistered user.'''
def __init__(self, nr_name, nr_email):
self.nr_name = nr_name
self.nr_email = nr_email
def to_dict(self):
return {"nr_name": self.nr_name, "nr_email": self.nr_email}
class DeprecatedUnregUserFactory(base.Factory):
"""Generates a dictonary represenation of an unregistered user, in the
format expected by the OSF.
::
    >>> from tests.factories import DeprecatedUnregUserFactory
    >>> DeprecatedUnregUserFactory()
    {'nr_name': 'Tom Jones0', 'nr_email': 'tom0@example.com'}
    >>> DeprecatedUnregUserFactory()
    {'nr_name': 'Tom Jones1', 'nr_email': 'tom1@example.com'}
"""
FACTORY_FOR = DeprecatedUnregUser
nr_name = Sequence(lambda n: "Tom Jones{0}".format(n))
nr_email = Sequence(lambda n: "tom{0}@example.com".format(n))
@classmethod
def _create(cls, target_class, *args, **kwargs):
return target_class(*args, **kwargs).to_dict()
_build = _create
class CommentFactory(ModularOdmFactory):
FACTORY_FOR = Comment
content = Sequence(lambda n: 'Comment {0}'.format(n))
is_public = True
@classmethod
def _build(cls, target_class, *args, **kwargs):
node = kwargs.pop('node', None) or NodeFactory()
user = kwargs.pop('user', None) or node.creator
target = kwargs.pop('target', None) or node
instance = target_class(
node=node,
user=user,
target=target,
*args, **kwargs
)
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
node = kwargs.pop('node', None) or NodeFactory()
user = kwargs.pop('user', None) or node.creator
target = kwargs.pop('target', None) or node
instance = target_class(
node=node,
user=user,
target=target,
*args, **kwargs
)
instance.save()
return instance
class NotificationSubscriptionFactory(ModularOdmFactory):
FACTORY_FOR = NotificationSubscription
class NotificationDigestFactory(ModularOdmFactory):
FACTORY_FOR = NotificationDigest
class ExternalAccountFactory(ModularOdmFactory):
FACTORY_FOR = ExternalAccount
provider = 'mock2'
provider_id = Sequence(lambda n: 'user-{0}'.format(n))
provider_name = 'Fake Provider'
display_name = Sequence(lambda n: 'user-{0}'.format(n))
class SessionFactory(ModularOdmFactory):
FACTORY_FOR = Session
@classmethod
def _build(cls, target_class, *args, **kwargs):
user = kwargs.pop('user', None)
instance = target_class(*args, **kwargs)
if user:
instance.data['auth_user_username'] = user.username
instance.data['auth_user_id'] = user._primary_key
instance.data['auth_user_fullname'] = user.fullname
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = cls._build(target_class, *args, **kwargs)
instance.save()
return instance
class MockOAuth2Provider(ExternalProvider):
name = "Mock OAuth 2.0 Provider"
short_name = "mock2"
client_id = "mock2_client_id"
client_secret = "mock2_client_secret"
auth_url_base = "https://mock2.com/auth"
callback_url = "https://mock2.com/callback"
def handle_callback(self, response):
return {
'provider_id': 'mock_provider_id'
}
class MockAddonNodeSettings(addons_base.AddonNodeSettingsBase):
pass
class MockAddonUserSettings(addons_base.AddonUserSettingsBase):
pass
class MockAddonUserSettingsMergeable(addons_base.AddonUserSettingsBase):
def merge(self):
pass
class MockOAuthAddonUserSettings(addons_base.AddonOAuthUserSettingsBase):
oauth_provider = MockOAuth2Provider
class MockOAuthAddonNodeSettings(addons_base.AddonOAuthNodeSettingsBase):
oauth_provider = MockOAuth2Provider
class ArchiveTargetFactory(ModularOdmFactory):
FACTORY_FOR = ArchiveTarget
class ArchiveJobFactory(ModularOdmFactory):
FACTORY_FOR = ArchiveJob
class DraftRegistrationFactory(ModularOdmFactory):
FACTORY_FOR = DraftRegistration
@classmethod
def _create(cls, *args, **kwargs):
branched_from = kwargs.get('branched_from')
initiator = kwargs.get('initiator')
registration_schema = kwargs.get('registration_schema')
registration_metadata = kwargs.get('registration_metadata')
if not branched_from:
project_params = {}
if initiator:
project_params['creator'] = initiator
branched_from = ProjectFactory(**project_params)
initiator = branched_from.creator
try:
registration_schema = registration_schema or MetaSchema.find()[0]
except IndexError:
ensure_schemas()
registration_metadata = registration_metadata or {}
draft = DraftRegistration.create_from_node(
branched_from,
user=initiator,
schema=registration_schema,
data=registration_metadata,
)
return draft
class NodeLicenseRecordFactory(ModularOdmFactory):
FACTORY_FOR = NodeLicenseRecord
@classmethod
def _create(cls, *args, **kwargs):
try:
NodeLicense.find_one(
Q('name', 'eq', 'No license')
)
except NoResultsFound:
ensure_licenses()
kwargs['node_license'] = kwargs.get(
'node_license',
NodeLicense.find_one(
Q('name', 'eq', 'No license')
)
)
return super(NodeLicenseRecordFactory, cls)._create(*args, **kwargs)
|
caseyrygt/osf.io
|
tests/factories.py
|
Python
|
apache-2.0
| 17,903
|
[
"Brian"
] |
e8f3fadf0bd35f9cec5e9b1ff70da04a261c9e09b9bd296cad10a7cc54844c4f
|
#!/usr/bin/env python
# http://arxiv.org/pdf/1512.09300.pdf
import pickle, subprocess, argparse, urllib
from astropy.io import fits
import scipy.ndimage.interpolation as intp
import numpy as np
import os,re
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import chainer
from chainer import computational_graph
from chainer import cuda
from chainer import optimizers
from chainer import serializers
from chainer import Variable
from chainer.utils import type_check
from chainer import function
import chainer.functions as F
import chainer.links as L
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', default=0, type=int,
help='GPU ID')
parser.add_argument('--batchsize', default=2, type=int,
help='how many batches to train simultaneously.')
parser.add_argument('--gamma', default=1.0,type=float,
help='weight of content similarity over style similarity')
parser.add_argument('--creativity-weight', default=1.0,type=float,
help='weight of creativity over emulation')
parser.add_argument('--stride','-s', default=4,type=int,
help='stride size of the final layer')
parser.add_argument('--final-filter-size', default=8,type=int,
help='size of the final filter')
parser.add_argument('--nz', default=100,type=int,
help='the size of encoding space')
parser.add_argument('--dropout', action='store_true',
help='use dropout when training dis.')
parser.add_argument('--shake-camera', action='store_true',
help='shake camera to prevent overlearning.')
parser.add_argument('--enc-norm', default = 'dis',
help='use (dis/L2) norm to train encoder.')
parser.add_argument('--normalization', default = 'batch',
help='use (batch/channel) normalization.')
parser.add_argument('--prior-distribution', default = 'gaussian',
help='use (uniform/gaussian) distribution for z prior.')
parser.add_argument('--Phase', default = 'gen',
help='train (gen/enc/evol).')
args = parser.parse_args()
xp = cuda.cupy
cuda.get_device(args.gpu).use()
def foldername(args):
x = urllib.quote(str(args))
x = re.sub('%..','_',x)
x = re.sub('___','-',x)
x = re.sub('Namespace_','Vectorizer-',x)
return x
work_image_dir = '/mnt/work-{}'.format(args.gpu)
out_image_dir = '/mnt/public_html/out-images-{}'.format(foldername(args))
out_image_show_dir = '/mnt/public_html/out-images-{}'.format(args.gpu)
out_model_dir = './out-models-{}'.format(args.gpu)
img_w=512 # size of the image
img_h=512
nz = args.nz # # of dim for Z
n_signal = 2 # # of signal
zw = (img_w/16-args.final_filter_size) / args.stride +1 # size of in-vivo z patch
zh = zw
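# with the defaults (img_w=512, final_filter_size=8, stride=4): zw = (512/16 - 8)/4 + 1 = 7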
n_epoch=10000
n_train=10000
image_save_interval = 200
def average(x):
return F.sum(x/x.data.size)
# A scaling for human perception of SDO-AIA 193 image.
# c.f. page 11 of
# http://helio.cfa.harvard.edu/trace/SSXG/ynsu/Ji/sdo_primer_V1.1.pdf
#
# AIA orthodox color table found at
# https://darts.jaxa.jp/pub/ssw/sdo/aia/idl/pubrel/aia_lct.pro
def scale_brightness(x):
lo = 50.0
hi = 1250.0
x2 = np.minimum(hi, np.maximum(lo,x))
x3 = (np.log(x2)-np.log(lo)) / (np.log(hi) - np.log(lo))
return x3
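# e.g. scale_brightness maps 50 -> 0.0, 1250 -> 1.0 and 250 -> 0.5
# (log(250/50) / log(1250/50) = log 5 / log 25 = 0.5)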
def variable_to_image(var):
img = var.data.get()[0,0]
img = np.maximum(0.0, np.minimum(1.0, img))
rgb = np.zeros((img_h, img_w, 3), dtype=np.float32)
rgb[:, :, 0] = np.sqrt(img)
rgb[:, :, 1] = img
rgb[:, :, 2] = img ** 2
return rgb
class ELU(function.Function):
"""Exponential Linear Unit."""
# https://github.com/muupan/chainer-elu
def __init__(self, alpha=1.0):
self.alpha = np.float32(alpha)
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(
x_type.dtype == np.float32,
)
def forward_cpu(self, x):
y = x[0].copy()
neg_indices = x[0] < 0
y[neg_indices] = self.alpha * (np.exp(y[neg_indices]) - 1)
return y,
def forward_gpu(self, x):
y = cuda.elementwise(
'T x, T alpha', 'T y',
'y = x >= 0 ? x : alpha * (exp(x) - 1)', 'elu_fwd')(
x[0], self.alpha)
return y,
def backward_cpu(self, x, gy):
gx = gy[0].copy()
neg_indices = x[0] < 0
gx[neg_indices] *= self.alpha * np.exp(x[0][neg_indices])
return gx,
def backward_gpu(self, x, gy):
gx = cuda.elementwise(
'T x, T gy, T alpha', 'T gx',
'gx = x >= 0 ? gy : gy * alpha * exp(x)', 'elu_bwd')(
x[0], gy[0], self.alpha)
return gx,
def elu(x, alpha=1.0):
"""Exponential Linear Unit function."""
# https://github.com/muupan/chainer-elu
return ELU(alpha=alpha)(x)
def channel_normalize(x, test=False):
s0,s1,s2,s3 = x.data.shape
cavg = F.reshape(F.sum(x, axis=1) / s1, (s0,1,s2,s3))
xavg = F.concat(s1 * [cavg])
cvar = F.reshape(F.sum((x - xavg)**2, axis=1) / s1, (s0,1,s2,s3))
xvar = F.concat(s1 * [cvar])
return (x - xavg) / (xvar + 1e-5)**0.5
def shake_camera(img):
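"""Data augmentation: zero-pad the batch and crop at a random offset,
translating every image by up to +/-3 px per axis (one shared shift per
call); identity unless --shake-camera is given."""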
if not args.shake_camera:
return img
s0,s1,s2,s3 = img.data.shape
zerobar = Variable(xp.zeros((s0,s1,4,s3),dtype=np.float32))
img = F.concat([zerobar, img, zerobar],axis=2)
randshift=np.random.randint(1,8)
img = F.split_axis(img, [randshift,randshift+img_w],axis=2)[1]
zerobar = Variable(xp.zeros((s0,s1,s2,4,1),dtype=np.float32))
img = F.reshape(img,(s0,s1,s2,s3,1))
img = F.concat([zerobar, img, zerobar],axis=3)
randshift=np.random.randint(1,8)
img = F.split_axis(img, [randshift,randshift+img_w],axis=3)[1]
img = F.reshape(img,(s0,s1,s2,s3))
return img
def position_signal(i,w):
ww = w/2
return (i - ww)/float(ww)
z_signal =np.zeros((args.batchsize, 2, zh, zw)).astype(np.float32)
# embed the position signal in z vector
for y in range (zh):
for x in range (zw):
z_signal[:,0,y,x] = position_signal(x, zw)
z_signal[:,1,y,x] = position_signal(y, zh)
z_signal = Variable(cuda.to_gpu(z_signal))
class Generator(chainer.Chain):
def __init__(self):
super(Generator, self).__init__(
dc0z = L.Deconvolution2D(nz, 512, args.final_filter_size, stride=args.stride, wscale=0.02*math.sqrt(nz)),
dc0s = L.Deconvolution2D(n_signal, 512, args.final_filter_size, stride=args.stride, wscale=0.02*math.sqrt(n_signal)),
dc1 = L.Deconvolution2D(512, 256, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*512)),
dc2 = L.Deconvolution2D(256, 128, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*256)),
dc3 = L.Deconvolution2D(128, 64, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*128)),
dc4 = L.Deconvolution2D(64, 1, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*64)),
bn0 = L.BatchNormalization(512),
bn1 = L.BatchNormalization(256),
bn2 = L.BatchNormalization(128),
bn3 = L.BatchNormalization(64),
)
def __call__(self, z, test=False):
# h = F.relu(channel_normalize(self.dc0z(z) + self.dc0s(z_signal), test=test))
# h = F.relu(channel_normalize(self.dc1(h), test=test))
# h = F.relu(channel_normalize(self.dc2(h), test=test))
# h = F.relu(channel_normalize(self.dc3(h), test=test))
# x = (self.dc4(h))
# return x
h = F.relu(self.bn0(self.dc0z(z) + self.dc0s(z_signal), test=test))
h = F.relu(self.bn1(self.dc1(h), test=test))
h = F.relu(self.bn2(self.dc2(h), test=test))
h = F.relu(self.bn3(self.dc3(h), test=test))
x = (self.dc4(h))
return x
class Encoder(chainer.Chain):
def __init__(self):
super(Encoder, self).__init__(
c0 = L.Convolution2D(1, 64, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*3)),
c1 = L.Convolution2D(64, 128, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*64)),
c2 = L.Convolution2D(128, 256, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*128)),
c3 = L.Convolution2D(256, 512, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*256)),
cz = L.Convolution2D(512, nz , args.final_filter_size, stride=args.stride, wscale=0.02*math.sqrt(8*8*512)),
bn0 = L.BatchNormalization(64),
bn1 = L.BatchNormalization(128),
bn2 = L.BatchNormalization(256),
bn3 = L.BatchNormalization(512),
)
def __call__(self, x, test=False):
h = F.relu(self.c0(x)) # no bn here: images from the generator may have biased statistics
h = F.relu(self.bn1(self.c1(h), test=test))
h = F.relu(self.bn2(self.c2(h), test=test))
h = F.relu(self.bn3(self.c3(h), test=test))
return self.cz(h)
global coord_image
coord_image = np.zeros((args.batchsize,1,img_h, img_w), dtype=np.float32)
for iy in range(img_h):
for ix in range(img_w):
x = 2*float(ix - img_w/2)/img_w
y = 2*float(iy - img_h/2)/img_h
coord_image[:,0,iy,ix] = x**2 + y**2
x_signal=Variable(cuda.to_gpu(coord_image))
class Discriminator(chainer.Chain):
def __init__(self):
super(Discriminator, self).__init__(
c0 = L.Convolution2D(1, 64, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*1)),
c0s= L.Convolution2D(1, 64, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*1)),
c1 = L.Convolution2D(64, 128, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*32)),
c2 = L.Convolution2D(128, 256, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*128)),
c3 = L.Convolution2D(256, 512, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*256)),
cz = L.Convolution2D(512, 2, args.final_filter_size, stride=args.stride,wscale=0.02*math.sqrt(8*8*512)),
bn0 = L.BatchNormalization(64),
bn1 = L.BatchNormalization(128),
bn2 = L.BatchNormalization(256),
bn3 = L.BatchNormalization(512),
)
def __call__(self, x, test=False, compare=None):
if compare is not None:
h = elu(self.c0(x) + self.c0s(x_signal))
h = elu(channel_normalize(self.c1(h), test=test))
h = channel_normalize(self.c2(h), test=test)
h2 = elu(self.c0(compare) + self.c0s(x_signal))
h2 = elu(channel_normalize(self.c1(h2), test=test))
h2 = channel_normalize(self.c2(h2), test=test)
return average((h-h2)**2)
h = elu(self.c0(x) + self.c0s(x_signal)) # no bn here: images from the generator may have biased statistics
#h = elu(channel_normalize(self.c1(h), test=test))
#h = elu(channel_normalize(self.c2(F.dropout(h)), test=test))
#h = elu(channel_normalize(self.c3(F.dropout(h)), test=test))
h = elu(self.bn1(self.c1(h), test=test))
h = elu(self.bn2(self.c2(F.dropout(h,train = args.dropout)), test=test))
h = elu(self.bn3(self.c3(F.dropout(h,train = args.dropout)), test=test))
h=self.cz(F.dropout(h,train = args.dropout))
l = F.sum(h,axis=(2,3))/(h.data.size / 2)
return l
def load_image():
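"""Build a training batch of SDO/AIA 193 frames: draw random timestamps in
2011-2014, fetch the FITS files from S3, normalize by EXPTIME, downscale to
img_w x img_h and apply scale_brightness; retry on any failure until full."""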
ret = np.zeros((args.batchsize,1,img_h,img_w),dtype=np.float32)
i=0
while i<args.batchsize:
try:
year = 2011 + np.random.randint(4)
month = 1 + np.random.randint(12)
day = 1 + np.random.randint(32)
hour = np.random.randint(24)
minu = np.random.randint(5)*12
subprocess.call('rm {}/*'.format(work_image_dir),shell=True)
local_fn = work_image_dir + '/image.fits'
cmd = 'aws s3 cp "s3://sdo/aia193/720s/{:04}/{:02}/{:02}/{:02}{:02}.fits" {} --region us-west-2 --quiet'.format(year,month,day,hour,minu, local_fn)
subprocess.call(cmd, shell=True)
h = fits.open(local_fn); h[1].verify('fix')
exptime = h[1].header['EXPTIME']
if exptime <=0:
print "EXPTIME <=0"
continue
img = intp.zoom(h[1].data.astype(np.float32),zoom=img_w/4096.0,order=0)
img = scale_brightness(img / exptime)
ret[i, :, :, :] = np.reshape(img, (1,1,img_h,img_w))
i += 1
except:
continue
return ret
def train_vaegan_labeled(gen, enc, dis, epoch0=0):
o_gen = optimizers.Adam(alpha=0.0002, beta1=0.5)
o_enc = optimizers.Adam(alpha=0.0002, beta1=0.5)
o_dis = optimizers.Adam(alpha=0.0002, beta1=0.5)
o_gen.setup(gen)
o_enc.setup(enc)
o_dis.setup(dis)
o_gen.add_hook(chainer.optimizer.WeightDecay(0.00001))
o_enc.add_hook(chainer.optimizer.WeightDecay(0.00001))
o_dis.add_hook(chainer.optimizer.WeightDecay(0.00001))
gamma_p = 1.0
for epoch in xrange(epoch0,n_epoch):
for i in xrange(0, n_train, args.batchsize):
print (epoch,i),
# discriminator
# 0: from dataset
# 1: from noise
#print "load image start ", i
x_train_data = load_image()
x_train = Variable(cuda.to_gpu(x_train_data))
# generate prior and signal
if args.prior_distribution == 'uniform':
z_prior = np.random.uniform(-1,1,(args.batchsize, nz, zh, zw)).astype(np.float32)
else:
z_prior = np.random.standard_normal((args.batchsize, nz, zh, zw)).astype(np.float32)
z_prior = Variable(cuda.to_gpu(z_prior))
x_creative = shake_camera(gen(z_prior))
x_train = shake_camera(x_train)
yl_train = dis(x_train)
yl_prior = dis(x_creative)
# use encoder
z_enc = enc(x_creative)
train_is_genuine = F.softmax_cross_entropy(yl_train, Variable(xp.zeros(args.batchsize, dtype=np.int32)))
train_is_fake = F.softmax_cross_entropy(yl_train, Variable(xp.ones(args.batchsize, dtype=np.int32)))
prior_is_genuine= F.softmax_cross_entropy(yl_prior, Variable(xp.zeros(args.batchsize, dtype=np.int32)))
prior_is_fake = F.softmax_cross_entropy(yl_prior, Variable(xp.ones(args.batchsize, dtype=np.int32)))
if args.Phase == 'gen':
L_gen = args.creativity_weight * prior_is_genuine
L_dis = train_is_genuine + prior_is_fake
L_enc = average((z_enc - z_prior)**2)
else:
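# NB: the enc/evol phases below reference vae_* and yl_*like quantities that
# are never defined in this script, so only --Phase gen runs as written.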
L_gen = args.creativity_weight * prior_is_genuine + vae_is_genuine + args.gamma * yl_dislike
L_enc = vae_is_genuine + gamma_p * l_prior + (yl_L2like if args.enc_norm == 'L2' else yl_dislike)
L_dis = 2*train_is_genuine + vae_is_fake + prior_is_fake
for x in ['yl_train', 'yl_vae', 'yl_prior', 'yl_dislike', 'yl_L2like','l_prior','l_prior0','gamma_p','train_is_genuine', 'train_is_fake', 'vae_is_genuine', 'vae_is_fake', 'prior_is_genuine', 'prior_is_fake', 'L_gen', 'L_enc', 'L_dis']:
print x+":",
try:
vx = eval(x).data.get()
if vx.size==1:
print float(vx),
else:
print vx.flatten(),' ',
except AttributeError:
print eval(x),
except:
pass
print
o_gen.zero_grads()
L_gen.backward()
o_gen.update()
if args.Phase != 'gen':
o_enc.zero_grads()
L_enc.backward()
o_enc.update()
o_dis.zero_grads()
L_dis.backward()
o_dis.update()
L_gen.unchain_backward()
L_dis.unchain_backward()
if args.Phase != 'gen':
L_enc.unchain_backward()
#print "backward done"
if i%image_save_interval==0:
fn0 = '%s/tmp.png'%(out_image_show_dir)
fn2 = '%s/latest.png'%(out_image_show_dir)
fn1 = '%s/vis_%02d_%06d.png'%(out_image_dir, epoch,i)
plt.rcParams['figure.figsize'] = (12.0,12.0)
plt.clf()
plt.subplot(2,2,1)
plt.imshow(variable_to_image(x_train))
plt.title('train')
plt.subplot(2,2,2)
plt.imshow(variable_to_image(x_creative))
plt.title('gen(z)')
plt.subplot(2,2,3)
plt.imshow(variable_to_image(gen(enc(x_train))))
plt.title('gen(enc(train))')
plt.subplot(2,2,4)
plt.imshow(variable_to_image(gen(z_enc)))
plt.title('gen(enc(gen(z)))')
plt.suptitle(str(args)+"\n"+'epoch{}-{}'.format(epoch,i))
plt.savefig(fn0)
subprocess.call("cp {} {}".format(fn0,fn2), shell=True)
subprocess.call("cp {} {}".format(fn0,fn1), shell=True)
serializers.save_hdf5("%s/vaegan_model_dis_%d.h5"%(out_model_dir, epoch),dis)
serializers.save_hdf5("%s/vaegan_state_dis_%d.h5"%(out_model_dir, epoch),o_dis)
serializers.save_hdf5("%s/vaegan_model_gen_%d.h5"%(out_model_dir, epoch),gen)
serializers.save_hdf5("%s/vaegan_state_gen_%d.h5"%(out_model_dir, epoch),o_gen)
serializers.save_hdf5("%s/vaegan_model_enc_%d.h5"%(out_model_dir, epoch),enc)
serializers.save_hdf5("%s/vaegan_state_enc_%d.h5"%(out_model_dir, epoch),o_enc)
print('epoch end', epoch)
gen = Generator()
enc = Encoder()
dis = Discriminator()
gen.to_gpu()
enc.to_gpu()
dis.to_gpu()
try:
subprocess.call('mkdir -p ' + work_image_dir, shell=True)
subprocess.call('mkdir -p ' + out_image_dir, shell=True)
subprocess.call('mkdir -p ' + out_image_show_dir, shell=True)
subprocess.call('mkdir -p ' + out_model_dir, shell=True)
except:
pass
train_vaegan_labeled(gen, enc, dis)
|
nushio3/UFCORIN
|
script/suntomorrow-VAEGAN/main-vectorizer.py
|
Python
|
mit
| 18,295
|
[
"Gaussian"
] |
1dda3b2d0200dcfed86c55d7c4fd3e1438630b869b82376f7d6882b6941f1d7f
|
from keys import *
from simulation_params import *
import nest
import numpy.random as random
# Neuron parameters
iaf_neuronparams = {'E_L': -70., # Resting membrane potential in mV
'V_th': -50., # Spike threshold in mV
'V_reset': -67., # Reset membrane potential after a spike in mV
'C_m': 2., # Capacity of the membrane in pF
't_ref': 2., # Duration of refractory period (V_m = V_reset) in ms
'V_m': -60., # Membrane potential in mV at start
'tau_syn_ex': 1., # Time constant of postsynaptic excitatory currents in ms
'tau_syn_in': 1.33} # Time constant of postsynaptic inhibitory currents in ms
# Synapse common parameters
STDP_synapseparams = {
'alpha': random.normal(0.5, 5.0), # Asymmetry parameter (scales depressing increments as alpha*lambda)
'lambda': 0.5 # Step size
}
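# NB: random.normal above (and random.uniform below) are evaluated once at
# import time and yield single scalars, so every synapse built from a dict
# shares the same sampled value rather than drawing per synapse.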
# Glutamate synapse
STDP_synparams_Glu = dict({'delay': random.uniform(low=1.0, high=1.3), # Distribution of delay values for connections
'weight': w_Glu, # Weight (power) of synapse
'Wmax': 20.}, **STDP_synapseparams) # Maximum allowed weight
# GABA synapse
STDP_synparams_GABA = dict({'delay': random.uniform(low=1.0, high=1.3),
'weight': w_GABA,
'Wmax': -20.}, **STDP_synapseparams)
# Acetylcholine synapse
STDP_synparams_ACh = dict({'delay': random.uniform(low=1.0, high=1.3),
'weight': w_ACh,
'Wmax': 20.}, **STDP_synapseparams)
# Dopamine excitatory synapse
DOPA_synparams_ex = dict({'delay': 1.,
'weight': w_DA_ex,
'Wmax': 100.})
# Dopamine inhibitory synapse
DOPA_synparams_in = dict({'delay': 1.,
'weight': w_DA_in,
'Wmax': -100.})
# Dictionary of synapses with keys and their parameters
synapses = {GABA: (gaba_synapse, w_GABA ),
Glu: (glu_synapse, w_Glu ),
ACh: (ach_synapse, w_ACh ),
DA_ex: (dopa_synapse_ex, w_DA_ex),
DA_in: (dopa_synapse_in, w_DA_in)
}
# Parameters for generator
static_syn = {
'weight': w_Glu * 5,
'delay': pg_delay
}
# Device parameters
multimeter_param = {'to_memory': True,
'to_file': False,
'withtime': True,
'interval': 0.1,
'record_from': ['V_m'],
'withgid': True}
detector_param = {'label': 'spikes',
'withtime': True,
'withgid': True,
'to_file': False,
'to_memory': True,
'scientific': True}
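# Hedged usage sketch (NEST 2.x API; 'iaf_psc_exp' is an assumed stand-in
# for the neuron model the main scripts create with these parameters):
# neurons = nest.Create('iaf_psc_exp', 100, params=iaf_neuronparams)
# mm = nest.Create('multimeter', params=multimeter_param)
# nest.Connect(mm, neurons)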
|
research-team/NEUCOGAR
|
NEST/cube/dopamine/3d/scripts/synapses.py
|
Python
|
gpl-2.0
| 2,943
|
[
"NEURON"
] |
e6ba039c1b1cb860ccaa7ed8af2f042a7004ef81b899f0ca135a1668d4487767
|
""" Coarse Ricci matrix. """
import numpy as np
import numexpr as ne
import scipy.linalg as sl
from pyfftw import zeros_aligned
def add_AB_to_C(A, B, C):
"""
Compute C += AB in-place.
This uses gemm from whatever BLAS is available.
MKL requires Fortran ordered arrays to avoid copies.
Hence we work with transpositions of default c-style arrays.
This function throws error if computation is not in-place.
"""
gemm = sl.get_blas_funcs("gemm", (A, B, C))
assert np.isfortran(C.T) and np.isfortran(A.T) and np.isfortran(B.T)
D = gemm(1.0, B.T, A.T, beta=1, c=C.T, overwrite_c=1)
assert D.base is C or D.base is C.base
def applyRicci(sqdist, eta, T, Ricci, mode='sym'):
"""
Apply coarse Ricci to a squared distance matrix.
Can handle symmetric, max, and nonsymmetric modes.
Gaussian localizing kernel is used with T as variance parameter.
"""
if 'sym' in mode:
ne.evaluate('sqdist - (eta/2)*exp(-sqdist/T)*(Ricci+RicciT)',
global_dict={'RicciT': Ricci.T}, out=sqdist)
elif 'max' in mode:
ne.evaluate(
'sqdist - eta*exp(-sqdist/T)*where(Ricci<RicciT, RicciT, Ricci)',
global_dict={'RicciT': Ricci.T}, out=sqdist)
elif 'dumb' in mode:
ne.evaluate('sqdist*(1 - eta*exp(-sqdist/T))', out=sqdist)
else:
ne.evaluate('sqdist - eta*exp(-sqdist/T)*Ricci', out=sqdist)
def coarseRicci(L, sqdist, R, temp1=None, temp2=None):
"""
Fully optimized Ricci matrix computation.
Requires 7 matrix multiplications and many entrywise operations.
Only 2 temporary matrices are needed, and can be provided as arguments.
Uses full gemm functionality to avoid creating intermediate matrices.
R is the output array, while temp1 and temp2 are temporary matrices.
"""
D = sqdist
if temp1 is None:
temp1 = zeros_aligned(sqdist.shape, n=32)
if temp2 is None:
temp2 = zeros_aligned(sqdist.shape, n=32)
A = temp1
B = temp2
# evaluate into the preallocated temporary instead of allocating a new array
ne.evaluate("D*D/4.0", out=B)
L.dot(B, out=A)
L.dot(D, out=B)
ne.evaluate("A-D*B", out=A)
L.dot(A, out=R)
# the first two terms done
L.dot(B, out=A)
ne.evaluate("R+0.5*(D*A+B*B)", out=R)
# Now R contains everything under overline
ne.evaluate("R+dR-0.5*dA*D-dB*B",
global_dict={'dA': np.diag(A).copy()[:, None],
'dB': np.diag(B).copy()[:, None],
'dR': np.diag(R).copy()[:, None]}, out=R)
# Now R contains all but two matrix products from line 2
L.dot(L, out=A)
ne.evaluate("L*BT-0.5*A*D", global_dict={'BT': B.T}, out=A)
add_AB_to_C(A, D, R)
ne.evaluate("L*D", out=A)
add_AB_to_C(A, B, R)
# done!
np.fill_diagonal(R, 0.0)
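# Hedged usage sketch: with L a Laplacian and sqdist a squared-distance
# matrix of the same shape, preallocating lets repeated calls avoid all
# allocation:
#
# R = np.zeros_like(sqdist)
# t1 = zeros_aligned(sqdist.shape, n=32)
# t2 = zeros_aligned(sqdist.shape, n=32)
# coarseRicci(L, sqdist, R, temp1=t1, temp2=t2)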
def getScalar(Ricci, sqdist, t):
""" Compute scalar curvature. """
density = ne.evaluate("sum(exp(-sqdist/t), axis=1)")
# Scalar = np.diag(Ricci.dot(kernel))
# same as
Scalar = ne.evaluate("sum(Ricci*exp(-sqdist/t), axis=1)")
# density = kernel.sum(axis=1)
ne.evaluate("Scalar/density", out=Scalar)
return Scalar
#
# tests based on old Ricci
#
import unittest
class RicciTests (unittest.TestCase):
""" Correctness and speed tests. """
def speed(self, f, points=[100, 200]):
""" Test speed on larger data sets. """
import data
from Laplacian import Laplacian
from tools import test_speed
for p in points:
d = data.closefarsimplices(p, 0.1, 5)[0]
print "\nPoints: {}".format(2*p)
L = np.zeros_like(d)
R = np.zeros_like(d)
print "Laplacian: ",
test_speed(Laplacian, d, 0.1, L)
Laplacian(d, 0.1, L)
print "Ricci: ",
test_speed(f, L, d, R)
def test_speed_Ricci(self):
""" Speed of coarse Ricci compared to Laplacian. """
self.speed(coarseRicci)
self.speed(coarseRicci, points=[500, 1000])
if __name__ == "__main__":
# FIXME any correctness tests?
# FIXME add scalar curvature tests
suite = unittest.TestLoader().loadTestsFromTestCase(RicciTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
siudej/Ricci
|
Ricci.py
|
Python
|
bsd-3-clause
| 4,298
|
[
"Gaussian"
] |
64e70d5766f6b7eeb7e137a36a58221dd750ace47ac2e9531db5c24661e8ff59
|
import matplotlib
matplotlib.use('Agg')
print "importing stuff..."
import numpy as np
import pdb
import matplotlib.pylab as plt
from scipy import special
from .context import aep
from .context import config
# stdlib/plotting imports required by the runners below (run_mnist, run_oil)
import os
import gzip
import cPickle
from matplotlib import cm
# import sys
# import os
# sys.path.insert(0, os.path.abspath(
# os.path.join(os.path.dirname(__file__), '..')))
# import geepee.aep_models as aep
np.random.seed(42)
def run_cluster_MM(nat_param=True):
np.random.seed(42)
import GPy
# create dataset
print "creating dataset..."
N = 100
k1 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
np.random.dirichlet(np.r_[10, 10, 10, 0.1, 0.1]), ARD=True)
k2 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
np.random.dirichlet(np.r_[10, 0.1, 10, 0.1, 10]), ARD=True)
k3 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
np.random.dirichlet(np.r_[0.1, 0.1, 10, 10, 10]), ARD=True)
X = np.random.normal(0, 1, (N, 5))
A = np.random.multivariate_normal(np.zeros(N), k1.K(X), 10).T
B = np.random.multivariate_normal(np.zeros(N), k2.K(X), 10).T
C = np.random.multivariate_normal(np.zeros(N), k3.K(X), 10).T
Y = np.vstack((A, B, C))
labels = np.hstack((np.zeros(A.shape[0]), np.ones(
B.shape[0]), np.ones(C.shape[0]) * 2))
# inference
print "inference ..."
M = 30
D = 5
alpha = 0.5
lvm = aep.SGPLVM(Y, D, M, lik='Gaussian', nat_param=nat_param)
lvm.optimise(method='L-BFGS-B', alpha=alpha, maxiter=2000)
ls = np.exp(lvm.sgp_layer.ls)
print ls
inds = np.argsort(ls)
plt.figure()
mx, vx = lvm.get_posterior_x()
plt.scatter(mx[:, inds[0]], mx[:, inds[1]], c=labels)
zu = lvm.sgp_layer.zu
plt.plot(zu[:, inds[0]], zu[:, inds[1]], 'ko')
plt.show()
def run_cluster_MC():
import GPy
# create dataset
print "creating dataset..."
N = 100
k1 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
np.random.dirichlet(np.r_[10, 10, 10, 0.1, 0.1]), ARD=True)
k2 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
np.random.dirichlet(np.r_[10, 0.1, 10, 0.1, 10]), ARD=True)
k3 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
np.random.dirichlet(np.r_[0.1, 0.1, 10, 10, 10]), ARD=True)
X = np.random.normal(0, 1, (N, 5))
A = np.random.multivariate_normal(np.zeros(N), k1.K(X), 10).T
B = np.random.multivariate_normal(np.zeros(N), k2.K(X), 10).T
C = np.random.multivariate_normal(np.zeros(N), k3.K(X), 10).T
Y = np.vstack((A, B, C))
labels = np.hstack((np.zeros(A.shape[0]), np.ones(
B.shape[0]), np.ones(C.shape[0]) * 2))
# inference
print "inference ..."
M = 30
D = 5
alpha = 0.5
lvm = aep.SGPLVM(Y, D, M, lik='Gaussian')
lvm.optimise(method='adam', adam_lr=0.05, maxiter=2000,
alpha=alpha, prop_mode=config.PROP_MC)
ls = np.exp(lvm.sgp_layer.ls)
print ls
inds = np.argsort(ls)
plt.figure()
mx, vx = lvm.get_posterior_x()
plt.scatter(mx[:, inds[0]], mx[:, inds[1]], c=labels)
zu = lvm.sgp_layer.zu
# plt.plot(zu[:, inds[0]], zu[:, inds[1]], 'ko')
# plt.show()
plt.savefig('/tmp/gplvm_cluster.pdf')
def run_mnist():
np.random.seed(42)
# import dataset
f = gzip.open('./tmp/data/mnist.pkl.gz', 'rb')
(x_train, t_train), (x_valid, t_valid), (x_test, t_test) = cPickle.load(f)
f.close()
Y = x_train[:100, :]
labels = t_train[:100]
Y[Y < 0.5] = -1
Y[Y > 0.5] = 1
# inference
print "inference ..."
M = 30
D = 2
# lvm = aep.SGPLVM(Y, D, M, lik='Gaussian')
lvm = aep.SGPLVM(Y, D, M, lik='Probit')
# lvm.train(alpha=0.5, no_epochs=10, n_per_mb=100, lrate=0.1, fixed_params=['sn'])
lvm.optimise(method='L-BFGS-B', alpha=0.1)
plt.figure()
mx, vx = lvm.get_posterior_x()
zu = lvm.sgp_layer.zu
plt.scatter(mx[:, 0], mx[:, 1], c=labels)
plt.plot(zu[:, 0], zu[:, 1], 'ko')
nx = ny = 30
x_values = np.linspace(-5, 5, nx)
y_values = np.linspace(-5, 5, ny)
sx = 28
sy = 28
canvas = np.empty((sx * ny, sy * nx))
for i, yi in enumerate(x_values):
for j, xi in enumerate(y_values):
z_mu = np.array([[xi, yi]])
x_mean, x_var = lvm.predict_f(z_mu)
t = x_mean / np.sqrt(1 + x_var)
Z = 0.5 * (1 + special.erf(t / np.sqrt(2)))
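# Z is the probit predictive probability: the standard normal CDF of the
# latent mean, moderated by the predictive variance through the 1 + x_var
# factor in t.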
canvas[(nx - i - 1) * sx:(nx - i) * sx, j *
sy:(j + 1) * sy] = Z.reshape(sx, sy)
plt.figure(figsize=(8, 10))
Xi, Yi = np.meshgrid(x_values, y_values)
plt.imshow(canvas, origin="upper", cmap="gray")
plt.tight_layout()
plt.show()
def run_oil():
data_path = '/scratch/tdb40/datasets/lvm/three_phase_oil_flow/'
def oil(data_set='oil'):
"""The three phase oil data from Bishop and James (1993)."""
oil_train_file = os.path.join(data_path, data_set, 'DataTrn.txt')
oil_trainlbls_file = os.path.join(
data_path, data_set, 'DataTrnLbls.txt')
oil_test_file = os.path.join(data_path, data_set, 'DataTst.txt')
oil_testlbls_file = os.path.join(
data_path, data_set, 'DataTstLbls.txt')
oil_valid_file = os.path.join(data_path, data_set, 'DataVdn.txt')
oil_validlbls_file = os.path.join(
data_path, data_set, 'DataVdnLbls.txt')
fid = open(oil_train_file)
X = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_test_file)
Xtest = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_valid_file)
Xvalid = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_trainlbls_file)
Y = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
fid = open(oil_testlbls_file)
Ytest = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
fid = open(oil_validlbls_file)
Yvalid = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
return {'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'Xvalid': Xvalid, 'Yvalid': Yvalid}
def oil_100(data_set='oil'):
data = oil()
indices = np.random.permutation(1000)
indices = indices[0:100]
X = data['X'][indices, :]
Y = data['Y'][indices, :]
return {'X': X, 'Y': Y, 'info': "Subsample of the full oil data extracting 100 values randomly without replacement"}
# create dataset
print "loading dataset..."
# data = oil_100()
data = oil()
Y = data['X']
# Y_mean = np.mean(Y, axis=0)
# Y_std = np.std(Y, axis=0)
# Y = (Y - Y_mean) / Y_std
labels = data['Y'].argmax(axis=1)
colors = cm.rainbow(np.linspace(0, 1, len(np.unique(labels))))
# inference
print "inference ..."
M = 20
D = 5
lvm = aep.SGPLVM(Y, D, M, lik='Gaussian')
# lvm.set_fixed_params('sn')
lvm.optimise(method='L-BFGS-B', alpha=0.3, maxiter=3000)
# np.random.seed(0)
# # lvm.set_fixed_params('sn')
# lvm.optimise(method='adam', alpha=0.2, adam_lr=0.05, maxiter=200)
ls = np.exp(lvm.sgp_layer.ls)
print ls
inds = np.argsort(ls)
colors = cm.rainbow(np.linspace(0, 1, len(np.unique(labels))))
plt.figure()
mx, vx = lvm.get_posterior_x()
plt.scatter(mx[:, inds[0]], mx[:, inds[1]], c=labels)
zu = lvm.sgp_layer.zu
plt.plot(zu[:, inds[0]], zu[:, inds[1]], 'ko')
plt.show()
def run_pinwheel():
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate,
rs=np.random.RandomState(0)):
"""Based on code by Ryan P. Adams."""
rads = np.linspace(0, 2 * np.pi, num_classes, endpoint=False)
features = rs.randn(num_classes * num_per_class, 2) \
* np.array([radial_std, tangential_std])
features[:, 0] += 1
labels = np.repeat(np.arange(num_classes), num_per_class)
angles = rads[labels] + rate * np.exp(features[:, 0])
rotations = np.stack([np.cos(angles), -np.sin(angles),
np.sin(angles), np.cos(angles)])
rotations = np.reshape(rotations.T, (-1, 2, 2))
return np.einsum('ti,tij->tj', features, rotations)
# create dataset
print "creating dataset..."
Y = make_pinwheel(radial_std=0.3, tangential_std=0.05, num_classes=3,
num_per_class=50, rate=0.4)
# inference
print "inference ..."
M = 20
D = 2
lvm = aep.SGPLVM(Y, D, M, lik='Gaussian')
lvm.optimise(method='L-BFGS-B', alpha=0.2)
mx, vx = lvm.get_posterior_x()
fig = plt.figure()
ax = fig.add_subplot(121)
ax.plot(Y[:, 0], Y[:, 1], 'bx')
ax = fig.add_subplot(122)
ax.errorbar(mx[:, 0], mx[:, 1], xerr=np.sqrt(
vx[:, 0]), yerr=np.sqrt(vx[:, 1]), fmt='xk')
plt.show()
def run_semicircle():
# create dataset
print "creating dataset..."
N = 20
cos_val = [0.97, 0.95, 0.94, 0.89, 0.8,
0.88, 0.92, 0.96, 0.7, 0.65,
0.3, 0.25, 0.1, -0.25, -0.3,
-0.6, -0.67, -0.75, -0.97, -0.98]
cos_val = np.array(cos_val).reshape((N, 1))
# cos_val = 2*np.random.rand(N, 1) - 1
angles = np.arccos(cos_val)
sin_val = np.sin(angles)
Y = np.hstack((sin_val, cos_val))
Y += 0.05 * np.random.randn(Y.shape[0], Y.shape[1])
# inference
print "inference ..."
M = 10
D = 2
lvm = aep.SGPLVM(Y, D, M, lik='Gaussian')
lvm.optimise(method='L-BFGS-B', alpha=0.5, maxiter=2000)
plt.figure()
plt.plot(Y[:, 0], Y[:, 1], 'sb')
mx, vx = lvm.get_posterior_x()
for i in range(mx.shape[0]):
mxi = mx[i, :]
vxi = vx[i, :]
mxi1 = mxi + np.sqrt(vxi)
mxi2 = mxi - np.sqrt(vxi)
mxis = np.vstack([mxi.reshape((1, D)),
mxi1.reshape((1, D)),
mxi2.reshape((1, D))])
myis, vyis = lvm.predict_f(mxis)
plt.errorbar(myis[:, 0], myis[:, 1],
xerr=np.sqrt(vyis[:, 0]), yerr=np.sqrt(vyis[:, 1]), fmt='.k')
plt.show()
def run_xor():
from operator import xor
from scipy import special
# create dataset
print "generating dataset..."
n = 25
Y = np.zeros((0, 3))
for i in [0, 1]:
for j in [0, 1]:
a = i * np.ones((n, 1))
b = j * np.ones((n, 1))
c = xor(bool(i), bool(j)) * np.ones((n, 1))
Y_ij = np.hstack((a, b, c))
Y = np.vstack((Y, Y_ij))
Y = 2 * Y - 1
# inference
print "inference ..."
M = 10
D = 2
lvm = aep.SGPLVM(Y, D, M, lik='Probit')
lvm.optimise(method='L-BFGS-B', alpha=0.1, maxiter=200)
# predict given inputs
mx, vx = lvm.get_posterior_x()
lims = [-1.5, 1.5]
x = np.linspace(*lims, num=101)
y = np.linspace(*lims, num=101)
X, Y = np.meshgrid(x, y)
X_ravel = X.ravel()
Y_ravel = Y.ravel()
inputs = np.vstack((X_ravel, Y_ravel)).T
my, vy = lvm.predict_f(inputs)
t = my / np.sqrt(1 + vy)
Z = 0.5 * (1 + special.erf(t / np.sqrt(2)))
for d in range(3):
plt.figure()
plt.scatter(mx[:, 0], mx[:, 1])
zu = lvm.sgp_layer.zu
plt.plot(zu[:, 0], zu[:, 1], 'ko')
plt.contour(X, Y, np.log(Z[:, d] + 1e-16).reshape(X.shape))
plt.xlim(*lims)
plt.ylim(*lims)
# Y_test = np.array([[1, -1, 1], [-1, 1, 1], [-1, -1, -1], [1, 1, -1]])
# # impute missing data
# for k in range(3):
# Y_test_k = Y_test
# missing_mask = np.ones_like(Y_test_k)
# missing_mask[:, k] = 0
# my_pred, vy_pred = lvm.impute_missing(
# Y_test_k, missing_mask,
# alpha=0.1, no_iters=100, add_noise=False)
# print k, my_pred, vy_pred, Y_test_k
plt.show()
def run_frey():
# import dataset
import pods  # assumed available: the pods package provides the Frey faces dataset
data = pods.datasets.brendan_faces()
# Y = data['Y'][:50, :]
Y = data['Y']
Yn = Y - np.mean(Y, axis=0)
Yn /= np.std(Y, axis=0)
Y = Yn
# inference
print "inference ..."
M = 30
D = 20
lvm = aep.SGPLVM(Y, D, M, lik='Gaussian')
# lvm.train(alpha=0.5, no_epochs=10, n_per_mb=100, lrate=0.1, fixed_params=['sn'])
lvm.optimise(method='L-BFGS-B', alpha=0.1, maxiter=10)
plt.figure()
mx, vx = lvm.get_posterior_x()
zu = lvm.sgp_layer.zu
plt.scatter(mx[:, 0], mx[:, 1])
plt.plot(zu[:, 0], zu[:, 1], 'ko')
nx = ny = 30
x_values = np.linspace(-5, 5, nx)
y_values = np.linspace(-5, 5, ny)
sx = 28
sy = 20
canvas = np.empty((sx * ny, sy * nx))
for i, yi in enumerate(x_values):
for j, xi in enumerate(y_values):
z_mu = np.array([[xi, yi]])
x_mean, x_var = lvm.predict_f(z_mu)
canvas[(nx - i - 1) * sx:(nx - i) * sx, j *
sy:(j + 1) * sy] = x_mean.reshape(sx, sy)
plt.figure(figsize=(8, 10))
Xi, Yi = np.meshgrid(x_values, y_values)
plt.imshow(canvas, origin="upper", cmap="gray")
plt.tight_layout()
plt.show()
if __name__ == '__main__':
run_cluster_MM(True)
run_cluster_MM(False)
# run_cluster_MC()
# run_semicircle()
# run_pinwheel()
# run_xor()
# run_oil()
|
thangbui/geepee
|
examples/gplvm_aep_examples.py
|
Python
|
mit
| 13,286
|
[
"Gaussian"
] |
c3fbd69a1503cadd9bd462760d8936fb6b2d7b4790bb25e24205e98b0e43c50b
|
# TODO: Add properties to solver
#TODO: snapshot_format not available in this version. update later.
__author__ = 'hugh'
bl_info = {
"name": "Create Caffe solution",
"category": "Object",
}
import bpy
import random
import time
import os
tab = ' '
tab2 = tab + tab
tab3 = tab2 + tab
def getFillerString(filler, name):
fillerString = tab3 + 'type: "%s"\n' % filler.type
if filler.type == 'constant':
fillerString += tab3 + 'value: %f\n' % (filler.value)
elif filler.type == 'xavier' or filler.type == 'msra':
fillerString += tab3 + 'variance_norm: %s\n' % (filler.variance_norm)
elif filler.type == 'gaussian':
fillerString += tab3 + 'mean: %f\n' % filler.mean
fillerString += tab3 + 'std: %f\n' % filler.std
if filler.is_sparse:
fillerString += tab3 + 'sparse: %i\n' % (filler.sparse)
elif filler.type == 'uniform':
fillerString += tab3 + 'min: %f\n' % filler.min
fillerString += tab3 + 'max: %f\n' % filler.max
string = '''\
%s {
%s
}
''' % (name, fillerString)
return string
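# Roughly, a gaussian filler named 'weight_filler' with mean 0 and std 0.01
# renders as:
# weight_filler {
#     type: "gaussian"
#     mean: 0.000000
#     std: 0.010000
# }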
def conv_template(node):
if node.square_padding:
padding_string = tab2 + 'pad: %i\n' % node.pad
else:
padding_string = tab2 + 'pad_h: %i\n' % node.pad_h
padding_string += tab2 + 'pad_w: %i\n' % node.pad_w
if node.square_kernel:
kernel_string = tab2 + 'kernel_size: %i\n' % node.kernel_size
else:
kernel_string = tab2 + 'kernel_h: %i\n' % node.kernel_h
kernel_string += tab2 + 'kernel_w: %i\n' % node.kernel_w
if node.square_stride:
stride_string = tab2 + 'stride: %i\n' % node.stride
else:
stride_string = tab2 + 'stride_h: %i\n' % node.stride_h
stride_string += tab2 + 'stride_w: %i\n' % node.stride_w
weight_filler_string = getFillerString(node.weight_filler, 'weight_filler')
bias_filler_string = getFillerString(node.bias_filler, 'bias_filler')
string = '''\
convolution_param {
num_output: %i
bias_term: %i
%s
%s
%s
%s
%s
}
''' % (node.num_output, node.bias_term, padding_string, kernel_string, stride_string, weight_filler_string,
bias_filler_string)
#loadable
return string
def data_param_template(node, source, batch_size):
string = '''\
data_param {
source: "%s"
backend: %s
batch_size: %i
rand_skip: %i
}
''' % (source, node.db_type, batch_size, node.rand_skip)
return string
def image_data_param_template(node, source, batch_size):
string = '''\
image_data_param {
source: "%s"
batch_size: %i
rand_skip: %i
shuffle: %i
new_height: %i
new_width: %i
is_color: %i
}
''' % (source, batch_size, node.rand_skip, node.shuffle, node.new_height, node.new_width, node.is_color)
return string
#TODO: Finish mean_value and random crop
def transform_param_template(node):
mean_file_string = ''
if node.use_mean_file:
mean_file_string = tab2 + 'mean_file: "%s"\n' % node.mean_file
string = '''\
transform_param {
scale: %f
mirror: %i
%s
}
''' % (node.scale, node.mirror, mean_file_string)
return string
def hdf5_data_template(node, source, batch_size):
string = '''\
hdf5_data_param {
source: "%s"
batch_size: %i
shuffle: %i
}
''' % (source, batch_size, node.shuffle)
return string
def pool_template(node):
string = '''\
pooling_param {
pool: %s
kernel_size: %i
stride: %i
}
''' % (node.mode, node.kernel_size, node.stride)
#Loadable
return string
def mvntemplate(node):
string = '''\
mvn_param {
normalize_variance: %s
across_channels: %s
eps: %f
}
''' % (node.normalize_variance, node.across_channels, node.eps)
#Loadable
return string
def eltwisetemplate(node):
if node.operation == 'PROD':
coeffstring = 'coeff: %f' % node.coeff
elif node.operation == 'SUM':
coeffstring = 'stable_prod_grad: %i' % node.stable_prod_grad
else:
coeffstring = ''
string = '''\
eltwise_param {
operation: %s
%s
}
''' % (node.operation, coeffstring)
return string
def FC_template(node):
weight_filler_string = getFillerString(node.weight_filler, 'weight_filler')
bias_filler_string = getFillerString(node.bias_filler, 'bias_filler')
string = '''\
inner_product_param {
num_output: %i
bias_term: %i
%s
%s
axis: %i
}
''' % (node.num_output, node.bias_term, weight_filler_string, bias_filler_string, node.axis)
return string
def PReLU_template(node):
filler_string = getFillerString(node.filler, 'filler')
string = '''\
prelu_param {
channel_shared: %i
%s
}
''' % (node.channel_shared, filler_string)
return string
def Concattemplate(node):
string = '''\
concat_param {
axis: %i
}
''' % (node.axis)
return string
def argmaxtemplate(node):
string = '''\
argmax_param {
out_max_val: %i
top_k: %i
}
''' % (node.OutMaxVal, node.TopK)
return string
def hdf5outputtemplate(node):
string = '''\
hdf5_output_param {
file_name: "%s"
}
''' % (node.filename)
return string
def logtemplate(node):
string = '''\
log_param {
scale: %f
shift: %f
base: %f
}
''' % (node.scale, node.shift, node.base)
return string
def powertemplate(node):
string = '''\
power_param {
power: %f
scale: %f
shift: %f
}
''' % (node.power, node.scale, node.shift)
return string
def exptemplate(node):
string = '''\
exp_param {
base: %f
scale: %f
shift: %f
}
''' % (node.base, node.scale, node.shift)
return string
def reductiontemplate(node):
string = '''\
reduction_param {
operation: %s
axis: %i
coeff: %f
}
''' % (node.operation, node.axis, node.coeff)
return string
def slicetemplate(node):
slice_points_string = '\n'.join(map(lambda x: tab2 + 'slice_point: %i' % x.slice_point, node.slice_points))
string = '''\
slice_param {
axis: %i
%s
}
''' % (node.axis, slice_points_string)
return string
def solver_template(node):
net_path = node.config_path + '%s_train_test.prototxt' % node.solvername
lr_string = ''
# gamma and power are floats in Caffe's SolverParameter; %i would truncate them
if node.lr_policy == 'step':
lr_string += 'gamma: %f\n' % node.gamma
lr_string += 'stepsize: %i\n' % node.stepsize
elif node.lr_policy == 'exp':
lr_string += 'gamma: %f\n' % node.gamma
elif node.lr_policy == 'inv':
lr_string += 'gamma: %f\n' % node.gamma
lr_string += 'power: %f\n' % node.power
elif node.lr_policy == 'multistep':
pass
elif node.lr_policy == 'poly':
lr_string += 'power: %f\n' % node.power
elif node.lr_policy == 'sigmoid':
lr_string += 'gamma: %f\n' % node.gamma
lr_string += 'stepsize: %i\n' % node.stepsize
random_seed_string = ''
if node.use_random_seed:
random_seed_string = 'random_seed: %i' % node.random_seed
delta_string = ''
if node.solver_type == 'ADAGRAD':
delta_string = 'delta: %f' % node.delta
string = ''' \
net: "%s"
test_iter: %i
test_interval: %i
test_compute_loss: %i
test_initialization: %i
base_lr: %f
display: %i
average_loss: %i
max_iter: %i
iter_size: %i
lr_policy: "%s"
%s
momentum: %f
weight_decay: %f
regularization_type: "%s"
snapshot: %i
snapshot_prefix: "%s"
snapshot_diff: %i
solver_mode: %s
%s
solver_type: %s
%s
debug_info: %i
snapshot_after_train: %i
''' % (net_path, node.test_iter, node.test_interval, node.test_compute_loss, node.test_initialization, node.base_lr,
node.display, node.average_loss, node.max_iter,
node.iter_size, node.lr_policy, lr_string, node.momentum, node.weight_decay, node.regularization_type,
node.snapshot, node.snapshot_prefix, node.snapshot_diff,
node.solver_mode, random_seed_string, node.solver_type, delta_string, node.debug_info, node.snapshot_after_train)
return "\n".join(filter(lambda x: x.strip(), string.splitlines())) + "\n"
def deploytemplate(batch, channels, size, datain):
deploystring = '''\
name: "Autogen"
input: "%s"
input_dim: %i
input_dim: %i
input_dim: %i
input_dim: %i
''' % (datain, batch, channels, size, size)
return deploystring
def scripttemplate(caffepath, configpath, solvername, gpus, solver):
extrastring = ''
if solver == 'GPU' and gpus:
extrastring = '--gpu=%s' % gpus[-1]
solverstring = configpath + '%s_solver.prototxt' % solvername
caffestring = caffepath + 'caffe'
string = "#!/usr/bin/env sh \n '%s' train --solver='%s' %s" % (caffestring, solverstring, extrastring)
return string
def loss_weight_template(loss_weight):
return tab + 'loss_weight: %f' % loss_weight
def param_template(param):
string = tab + 'param {\n'
if param.name.strip():
string += tab2 + 'name: "%s"\n' % param.name
string += tab2 + 'lr_mult: %f\n' % param.lr_mult
string += tab2 + 'decay_mult: %f\n' % param.decay_mult
# string += tab2 + 'share_mode: %s\n' % param.share_mode
string += tab + '}'
return string
def get_params(node):
params = []
if node.extra_params:
params.append(param_template(node.weight_params))
params.append(param_template(node.bias_params))
return params
def get_include_in(node):
if node.include_in == "BOTH":
return ''
string = '''\
include {
phase: %s
}
''' % node.include_in
return string
def layer_template(node, tops, bottoms, special_params):
tops_string = '\n'.join(map(lambda x: tab + 'top: "%s"' % x, tops))
bottoms_string = '\n'.join(map(lambda x: tab + 'bottom: "%s"' % x, bottoms))
params_string = '\n'.join(get_params(node))
special_params_string = '\n'.join(special_params)
include_in_string = get_include_in(node)
string = '''\
layer {
name: "%s"
type: "%s"
%s
%s
%s
%s
%s
}
''' % (node.name, node.n_type, tops_string, bottoms_string, params_string, special_params_string, include_in_string)
return "\n".join(filter(lambda x: x.strip(), string.splitlines())) + "\n"
def LRNtemplate(node):
string = '''\
lrn_param {
local_size: %i
alpha: %f
beta: %f
norm_region: %s
}
''' % (node.size, node.alpha, node.beta, node.mode)
return string
def Relutemplate(node):
string = '''\
relu_param {
negative_slope: %f
}
''' % (node.negative_slope)
return string
def dropouttemplate(node):
string = '''\
dropout_param {
dropout_ratio: %f
}
''' % (node.dropout_ratio)
return string
class Vertex():
pass
def reorder(graph):
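"""Topological sort of the layer graph: repeatedly emit the vertex with the
fewest unresolved bottoms and remove its tops from the remaining vertices,
so every layer is written after the layers producing its inputs."""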
res_string = []
res_dstring = []
while len(graph) > 0:
curr = min(graph, key=lambda x: len(x.bottoms))
if len(curr.bottoms) != 0:
print('Cycle in graph?!')
res_string.append(curr.string)
res_dstring.append(curr.dstring)
for item in graph:
for top in curr.tops:
try:
item.bottoms.remove(top)
except:
pass
graph.remove(curr)
return res_string, res_dstring
def nodebefore(innode, socket=0):
return innode.inputs[socket].links[0].from_socket.node
def isinplace(node):
if node.bl_idname == 'ReluNodeType' or node.bl_idname == 'DropoutNodeType':
return 1
else:
return 0
def findsocket(socketname, node): #Given a node, find the position of a certain output socket
print (node.name)
for number, socket in enumerate(node.outputs):
if socket.name == socketname:
print(number)
return number
raise TypeError
def autotop(node, socket, orderpass=0): #Assigns an arbitrary top name to a node
print('autotop')
if isinplace(node) and not orderpass:
top = autobottom(node, 0, orderpass=0)
else:
top = node.name + str(socket)
return top
def autobottom(node, socketnum, orderpass=0): #Finds the bottom of a node socket
print ('autobottom')
if isinplace(nodebefore(node, socketnum)) and not orderpass:
socketbelow = nodebefore(node, socketnum).inputs[0].links[0].from_socket.name
socketbelowposition = findsocket(socketbelow, nodebefore(nodebefore(node, socketnum)))
bottom = nodebefore(nodebefore(node, socketnum), 0).name + str(socketbelowposition)
else:
socketbelow = node.inputs[socketnum].links[0].from_socket.name
socketbelowposition = findsocket(socketbelow, nodebefore(node, socketnum))
bottom = nodebefore(node, socketnum).name + str(socketbelowposition)
return bottom
def getbottomsandtops(node, orderpass=0):
bottoms = []
for socknum, input in enumerate(node.inputs):
if input.is_linked:
bottom = input.links[0].from_socket.output_name
print(input.links[0].from_socket.name)
if bottom != '':
bottoms.extend([bottom])
else:
bottoms.extend([autobottom(node, socknum, orderpass)])
tops = [x.output_name if x.output_name != '' else autotop(node, socket, orderpass) for socket, x in
enumerate(node.outputs)]
return bottoms, tops
class Solve(bpy.types.Operator):
"""Generate Caffe solver""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "nodes.make_solver" # unique identifier for buttons and menu items to reference.
bl_label = "Create Solution" # display name in the interface.
bl_options = {'REGISTER'} # enable undo for the operator.
def execute(self, context): # execute() is called by blender when running the operator.
graph = []
########################################### Main loop
for node in context.selected_nodes:
nname = node.name
string = ''
try:
bottoms, tops = getbottomsandtops(node)
except AttributeError:
print (node.name)
bottoms, tops = [], []  # node lacks the expected sockets; emit none for it
print(tops)
print (bottoms)
special_params = []
###########################
if node.bl_idname == 'DataNodeType':
transform_param = transform_param_template(node)
node.n_type = node.db_type
if node.db_type in ('LMDB', 'LEVELDB'):
train_params = [data_param_template(node, node.train_path, node.train_batch_size)]
test_params = [data_param_template(node, node.test_path, node.test_batch_size)]
node.n_type = 'Data'
train_params.append(transform_param)
test_params.append(transform_param)
elif node.db_type == 'ImageData':
train_params = [image_data_param_template(node, node.train_data, node.train_batch_size)]
test_params = [image_data_param_template(node, node.test_data, node.test_batch_size)]
train_params.append(transform_param)
test_params.append(transform_param)
elif node.db_type == 'HDF5Data':
train_params = [hdf5_data_template(node, node.train_data, node.train_batch_size)]
test_params = [hdf5_data_template(node, node.test_data, node.test_batch_size)]
node.include_in = "TRAIN"
train_string = layer_template(node, tops, bottoms, train_params)
node.include_in = "TEST"
test_string = layer_template(node, tops, bottoms, test_params)
string = train_string + test_string
#TODO: Finish dstring
dstring = ''
elif node.bl_idname == 'PoolNodeType':
special_params.append(pool_template(node))
elif node.bl_idname == 'EltwiseNodeType':
special_params.append(eltwisetemplate(node))
elif node.bl_idname == 'ExpNodeType':
special_params.append(exptemplate(node))
elif node.bl_idname == 'ConvNodeType':
special_params.append(conv_template(node))
elif node.bl_idname == 'DeConvNodeType':
special_params.append(conv_template(node))
elif node.bl_idname == 'FCNodeType':
special_params.append(FC_template(node))
elif node.bl_idname == 'FlattenNodeType':
dstring = string
elif node.bl_idname == 'SilenceNodeType':
dstring = string
elif node.bl_idname == 'LRNNodeType':
special_params.append(LRNtemplate(node))
elif node.bl_idname == 'AcNodeType':
node.n_type = node.mode  # layer_template reads n_type for the layer type field
elif node.bl_idname == 'ReluNodeType':
special_params.append(Relutemplate(node))
elif node.bl_idname == 'PReluNodeType':
special_params.append(PReLU_template(node))
dstring = string
elif node.bl_idname == 'DropoutNodeType':
special_params.append(dropouttemplate(node))
elif node.bl_idname == 'SMLossNodeType':
special_params.append(loss_weight_template(node.w))
dstring = ''
elif node.bl_idname == 'SCELossNodeType':
special_params.append(loss_weight_template(node.w))
dstring = ''
elif node.bl_idname == 'EULossNodeType':
special_params.append(loss_weight_template(node.w))
dstring = ''
elif node.bl_idname == 'ConcatNodeType':
special_params.append(Concattemplate(node))
elif node.bl_idname == 'AccuracyNodeType':
dstring = ''
elif node.bl_idname == 'ArgMaxNodeType':
special_params.append(argmaxtemplate(node))
dstring = string
elif node.bl_idname == 'HDF5OutputNodeType':
special_params.append(hdf5outputtemplate(node))
dstring = ''
elif node.bl_idname == 'LogNodeType':
special_params.append(logtemplate(node))
dstring = string
elif node.bl_idname == 'PowerNodeType':
special_params.append(powertemplate(node))
dstring = string
elif node.bl_idname == 'ReductionNodeType':
special_params.append(reductiontemplate(node))
dstring = string
elif node.bl_idname == 'SliceNodeType':
special_params.append(slicetemplate(node))
elif node.bl_idname == 'NodeReroute':
string = ''
dstring = ''
elif node.bl_idname == 'SolverNodeType':
solverstring = solver_template(node)
scriptstring = scripttemplate(node.caffe_exec, node.config_path, node.solvername, node.gpus,
solver=node.solver_mode)
configpath = node.config_path
solvername = node.solvername
dstring = ''  # solver nodes emit no layer text for train/test or deploy nets
elif node.bl_idname == 'MVNNodeType':
special_params.append(mvntemplate(node))
else:
print (node.bl_idname)
if node.bl_idname not in ('SolverNodeType', 'DataNodeType', 'NodeReroute'):
string = layer_template(node, tops, bottoms, special_params)
if node.bl_idname not in ('SMLossNodeType', 'SCELossNodeType', 'EULossNodeType', 'AccuracyNodeType', 'HDF5OutputNodeType'):
dstring = string  # loss/accuracy/output layers stay out of the deploy net
################################# Recalculate bottoms and tops for ordering
bottoms, tops = getbottomsandtops(node, orderpass=1)
#####################################
v = Vertex()
v.string = string
v.dstring = dstring
v.bottoms = bottoms
v.tops = tops
graph.append(v)
strings, dstrings = reorder(graph)
solution = ''.join(strings)
dsolution = ''.join(dstrings)
os.chdir(configpath)
ttfile = open('%s_train_test.prototxt' % solvername, mode='w')
ttfile.write(solution)
ttfile.close()
depfile = open('%s_deploy.prototxt' % solvername, mode='w')
depfile.write(dsolution)
depfile.close()
solvefile = open('%s_solver.prototxt' % solvername, mode='w')
solvefile.write(solverstring)
solvefile.close()
scriptfile = open('train_%s.sh' % solvername, mode='w')
scriptfile.write(scriptstring)
scriptfile.close()
print ('Finished solving tree')
return {'FINISHED'} # this lets blender know the operator finished successfully.
def register():
bpy.utils.register_class(Solve)
def unregister():
bpy.utils.unregister_class(Solve)
# This allows you to run the script directly from blenders text editor
# to test the addon without having to install it.
if __name__ == "__main__":
register()
|
codeaudit/caffe-gui-tool
|
CaffeGenerate.py
|
Python
|
unlicense
| 21,253
|
[
"Gaussian"
] |
b20ae35165f77ba8b58786b30a1e602ed4ca067316ada922c7f3916055978420
|
"""
# Notes:
- This simulation seeks to emulate the COBAHH benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation does NOT include synapses, for better
comparison to Figure 5 of (Goodman and Brette, 2008) - although it uses the
COBAHH model of (Brette et al. 2007), not CUBA.
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian_benchmark_COBAHH_nosyn_8000/pbsout/brian_benchmark_COBAHH_nosyn_8000.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319
in order to work with version 2 of the Brian simulator (aka Brian2), and also
modified to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *
# Parameters
cells = 8000
defaultclock.dt = 0.01*ms
area = 20000*umetre**2
Cm = (1*ufarad*cmetre**-2) * area
gl = (5e-5*siemens*cmetre**-2) * area
El = -60*mV
EK = -90*mV
ENa = 50*mV
g_na = (100*msiemens*cmetre**-2) * area
g_kd = (30*msiemens*cmetre**-2) * area
VT = -63*mV
# # Time constants
# taue = 5*ms
# taui = 10*ms
# # Reversal potentials
# Ee = 0*mV
# Ei = -80*mV
# we = 6*nS # excitatory synaptic weight
# wi = 67*nS # inhibitory synaptic weight
# The model
eqs = Equations('''
dv/dt = (gl*(El-v)-
g_na*(m*m*m)*h*(v-ENa)-
g_kd*(n*n*n*n)*(v-EK))/Cm : volt
dm/dt = alpha_m*(1-m)-beta_m*m : 1
dn/dt = alpha_n*(1-n)-beta_n*n : 1
dh/dt = alpha_h*(1-h)-beta_h*h : 1
alpha_m = 0.32*(mV**-1)*(13*mV-v+VT)/
(exp((13*mV-v+VT)/(4*mV))-1.)/ms : Hz
beta_m = 0.28*(mV**-1)*(v-VT-40*mV)/
(exp((v-VT-40*mV)/(5*mV))-1)/ms : Hz
alpha_h = 0.128*exp((17*mV-v+VT)/(18*mV))/ms : Hz
beta_h = 4./(1+exp((40*mV-v+VT)/(5*mV)))/ms : Hz
alpha_n = 0.032*(mV**-1)*(15*mV-v+VT)/
(exp((15*mV-v+VT)/(5*mV))-1.)/ms : Hz
beta_n = .5*exp((10*mV-v+VT)/(40*mV))/ms : Hz
''')
# dv/dt = (gl*(El-v)+ge*(Ee-v)+gi*(Ei-v)-
# dge/dt = -ge*(1./taue) : siemens
# dgi/dt = -gi*(1./taui) : siemens
P = NeuronGroup(cells, model=eqs, threshold='v>-20*mV', refractory=3*ms,
method='euler')
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]
# Ce = Synapses(Pe, P, on_pre='ge+=we')
# Ci = Synapses(Pi, P, on_pre='gi+=wi')
# Ce.connect(p=0.98)
# Ci.connect(p=0.98)
# Initialization
P.v = 'El + (randn() * 5 - 5)*mV'
# P.ge = '(randn() * 1.5 + 4) * 10.*nS'
# P.gi = '(randn() * 12 + 20) * 10.*nS'
# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, 100])
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')
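# report='text' makes Brian2 print progress and elapsed wall-clock time to
# stdout, which is the timing recorded in the .out log referenced above.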
# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
|
asoplata/dynasim-benchmark-brette-2007
|
output/Brian2/brian2_benchmark_COBAHH_nosyn_8000/brian2_benchmark_COBAHH_nosyn_8000.py
|
Python
|
gpl-3.0
| 3,350
|
[
"Brian"
] |
c1c3f066f51c9393e1c7f6d20cf69656ebbbf3bc699d8083f3e6a18d50fa9048
|
# coding: utf-8
"""
This module implements an interface to Henkelman et al.'s excellent
Fortran code for calculating a Bader charge analysis.
This module depends on a compiled bader executable available in the path.
Please download the library at http://theory.cm.utexas.edu/vasp/bader/ and
follow the instructions to compile the executable.
If you use this module, please cite the following:
G. Henkelman, A. Arnaldsson, and H. Jonsson, "A fast and robust algorithm for
Bader decomposition of charge density", Comput. Mater. Sci. 36, 354-360 (2006).
"""
from __future__ import division, unicode_literals
from six.moves import map
from six.moves import zip
__author__ = "shyuepingong"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Beta"
__date__ = "4/5/13"
import os
import subprocess
import shutil
from pymatgen.io.vasp.outputs import Chgcar
from pymatgen.io.vasp.inputs import Potcar
from monty.os.path import which
from monty.dev import requires
from monty.tempfile import ScratchDir
@requires(which("bader"),
"BaderAnalysis requires the executable bader to be in the path."
" Please download the library at http://theory.cm.utexas"
".edu/vasp/bader/ and compile the executable.")
class BaderAnalysis(object):
"""
Bader analysis for a CHGCAR.
.. attribute: data
Atomic data parsed from bader analysis. Essentially a list of dicts
of the form::
[
{
"dist": 8.769,
"min": 0.8753,
"charge": 7.4168,
"y": 1.1598,
"x": 0.0079,
"z": 0.8348
},
...
]
.. attribute: vacuum_volume
Vacuum volume of the Bader analysis.
.. attribute: vacuum_charge
Vacuum charge of the Bader analysis.
.. attribute: nelectrons
Number of electrons of the Bader analysis.
.. attribute: chgcar
Chgcar object associated with input CHGCAR file.
.. attribute: potcar
Potcar object associated with POTCAR used for calculation (used for
calculating charge transferred).
"""
def __init__(self, chgcar_filename, potcar_filename=None):
"""
Initializes the Bader caller.
Args:
chgcar_filename: The filename of the CHGCAR.
potcar_filename: Optional: the filename of the corresponding
POTCAR file. Used for calculating the charge transfer. If
None, the get_charge_transfer method will raise a ValueError.
"""
self.chgcar = Chgcar.from_file(chgcar_filename)
self.potcar = Potcar.from_file(potcar_filename) \
if potcar_filename is not None else None
self.natoms = self.chgcar.poscar.natoms
chgcarpath = os.path.abspath(chgcar_filename)
with ScratchDir(".") as temp_dir:
shutil.copy(chgcarpath, os.path.join(temp_dir, "CHGCAR"))
rs = subprocess.Popen(["bader", "CHGCAR"],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
data = []
with open("ACF.dat") as f:
raw = f.readlines()
headers = [s.lower() for s in raw.pop(0).split()]
raw.pop(0)
while True:
l = raw.pop(0).strip()
if l.startswith("-"):
break
vals = map(float, l.split()[1:])
data.append(dict(zip(headers[1:], vals)))
for l in raw:
toks = l.strip().split(":")
if toks[0] == "VACUUM CHARGE":
self.vacuum_charge = float(toks[1])
elif toks[0] == "VACUUM VOLUME":
self.vacuum_volume = float(toks[1])
elif toks[0] == "NUMBER OF ELECTRONS":
self.nelectrons = float(toks[1])
self.data = data
def get_charge(self, atom_index):
"""
Convenience method to get the charge on a particular atom.
Args:
atom_index:
Index of atom.
Returns:
Charge associated with atom from the Bader analysis.
"""
return self.data[atom_index]["charge"]
def get_charge_transfer(self, atom_index):
"""
Returns the charge transferred for a particular atom. Requires POTCAR
to be supplied.
Args:
atom_index:
Index of atom.
Returns:
Charge transfer associated with atom from the Bader analysis.
Given by final charge on atom - nelectrons in POTCAR for
associated atom.
"""
if self.potcar is None:
raise ValueError("POTCAR must be supplied in order to calculate "
"charge transfer!")
potcar_indices = []
for i, v in enumerate(self.natoms):
potcar_indices += [i] * v
nelect = self.potcar[potcar_indices[atom_index]].nelectrons
return self.data[atom_index]["charge"] - nelect
def get_oxidation_state_decorated_structure(self):
"""
Returns an oxidation state decorated structure.
Returns:
Returns an oxidation state decorated structure. Requires POTCAR
to be supplied.
"""
structure = self.chgcar.structure
charges = [self.get_charge_transfer(i) for i in range(len(structure))]
structure.add_oxidation_state_by_site(charges)
return structure
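# Hedged usage sketch ("CHGCAR"/"POTCAR" are the conventional VASP output
# names; the bader executable must be on the PATH, as @requires enforces):
#
# ba = BaderAnalysis("CHGCAR", potcar_filename="POTCAR")
# print(ba.get_charge(0), ba.get_charge_transfer(0))
# oxi_structure = ba.get_oxidation_state_decorated_structure()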
|
rousseab/pymatgen
|
pymatgen/command_line/bader_caller.py
|
Python
|
mit
| 5,733
|
[
"VASP",
"pymatgen"
] |
caee80a66cc9feadd2746698e22cda8a9ebd0626a18aa04320f6538cf303b532
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import os
import re
import shutil
import tempfile
from vistrails.core.utils import VistrailsInternalError
from vistrails.core.system.unix import executable_is_in_path,\
list2cmdline, execute_cmdline, execute_cmdline2, \
get_executable_path, execute_piped_cmdlines
__all__ = ['executable_is_in_path', 'list2cmdline', 'execute_cmdline',
'execute_cmdline2', 'get_executable_path', 'execute_piped_cmdlines',
'guess_total_memory',
'home_directory', 'remote_copy_program', 'remote_shell_program',
'graph_viz_dot_command_line', 'remove_graph_viz_temporaries',
'link_or_copy', 'XDestroyWindow',
'shell_font_face', 'shell_font_size',
'TestLinux']
################################################################################
_meminfo_fmt = re.compile(r'([^:]+):\s+([0-9]+)(?: (kB|B))?\n$')
def parse_meminfo():
"""parse_meminfo() -> dictionary
Parses /proc/meminfo and returns appropriate dictionary. Only available on
Linux."""
info = {}
with open('/proc/meminfo') as fp:
for line in fp:
m = _meminfo_fmt.match(line)
if m is None:
raise VistrailsInternalError("Invalid format found in "
"/proc/meminfo")
key, value, unit = m.groups()
if unit == 'kB':
value = int(value) * 1000
else:
value = int(value)
info[key] = value
return info
def guess_total_memory():
""" guess_total_memory() -> int
Return system memory in bytes.
"""
return parse_meminfo()['MemTotal']
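# Hedged usage sketch (not part of the original VisTrails API): since
# guess_total_memory() returns bytes parsed from /proc/meminfo, a
# human-readable figure is a plain division, e.g.:
def _example_total_memory_gib():
    """Illustrative only: format the detected memory as GiB."""
    return '%.1f GiB' % (guess_total_memory() / 1024.0 ** 3)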
def home_directory():
""" home_directory() -> str
Returns user's home directory using environment variable $HOME
"""
return os.getenv('HOME')
def remote_copy_program():
return "scp -p"
def remote_shell_program():
return "ssh -p"
def graph_viz_dot_command_line():
return 'dot -Tplain -o '
def remove_graph_viz_temporaries():
""" remove_graph_viz_temporaries() -> None
Removes temporary files generated by dot
"""
    os.unlink(os.path.join(tempfile.gettempdir(), "dot_output_vistrails.txt"))
    os.unlink(os.path.join(tempfile.gettempdir(), "dot_tmp_vistrails.txt"))
def link_or_copy(src, dst):
"""link_or_copy(src:str, dst:str) -> None
Tries to create a hard link to a file. If it is not possible, it will
copy file src to dst
"""
    # Links if possible; if we're across devices, we need to copy.
try:
os.link(src, dst)
except OSError, e:
if e.errno == 18:
# Across-device linking is not possible. Let's copy.
shutil.copyfile(src, dst)
else:
raise e
def get_libX11():
""" get_libX11() -> CDLL
Return the X11 library loaded with ctypes. Only available on
Linux. We also need a way to find the correct X11 library name on
different machines. Right now, libX11.so.6 is used.
"""
from vistrails.core.bundles import py_import
ctypes = py_import('ctypes', {
'pip': 'ctypes',
'linux-debian': 'python-ctypes',
'linux-ubuntu': 'python-ctypes',
'linux-fedora': 'python-ctypes'})
c_void_p = ctypes.c_void_p
CDLL = ctypes.CDLL
return CDLL('libX11.so.6')
def XDestroyWindow(displayId, windowId):
""" XDestroyWindow(displayId: void_p_str, windowId: void_p_str) -> None
Destroy the X window specified by two strings displayId and
windowId containing void pointer string of (Display*) and (Window)
type.
    This is specific to VTKCell, to remove the top shell window. Since
    VTK does not expose X11-related functions to Python, we have to
    use ctypes to hijack the X11 library and call XDestroyWindow to
    kill the top-shell widget after reparenting the OpenGL canvas to
    another Qt widget.
"""
from vistrails.core.bundles import py_import
ctypes = py_import('ctypes', {
'pip': 'ctypes',
'linux-debian': 'python-ctypes',
'linux-ubuntu': 'python-ctypes',
'linux-fedora': 'python-ctypes'})
c_void_p = ctypes.c_void_p
displayPtr = c_void_p(int(displayId[1:displayId.find('_void_p')], 16))
windowPtr = c_void_p(int(windowId[1:windowId.find('_void_p')], 16))
libx = get_libX11()
libx.XDestroyWindow(displayPtr, windowPtr)
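# Hedged illustration of the pointer-string format assumed above: a value
# such as '_00ab12cd_void_p' decodes via int(s[1:s.find('_void_p')], 16)
# to the address 0x00ab12cd (11211469).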
def shell_font_face():
return 'Fixed'
def shell_font_size():
return 12
################################################################################
import unittest
class TestLinux(unittest.TestCase):
""" Class to test Linux specific functions """
def test1(self):
""" Test if guess_total_memory() is returning an int >= 0"""
result = guess_total_memory()
assert isinstance(result, (int, long))
assert result >= 0
def test2(self):
""" Test if home_directory is not empty """
result = home_directory()
assert result != ""
def test3(self):
""" Test if origin of link_or_copy'ed file is deleteable. """
import tempfile
import os
(fd1, name1) = tempfile.mkstemp()
os.close(fd1)
(fd2, name2) = tempfile.mkstemp()
os.close(fd2)
os.unlink(name2)
link_or_copy(name1, name2)
try:
os.unlink(name1)
except OSError:
self.fail("Should not throw")
os.unlink(name2)
def test_executable_file_in_path(self):
# Should exist in any POSIX shell
self.assertTrue(executable_is_in_path('ls'))
if __name__ == '__main__':
unittest.main()
|
hjanime/VisTrails
|
vistrails/core/system/linux.py
|
Python
|
bsd-3-clause
| 7,566
|
[
"VTK"
] |
138dbfadfda3fcf51a3c0acf7f9370beeb26a5eb68b0e6b74712be3070ff9ad2
|
import unittest
import pype
import timeseries
from timeseries import *
import io
import sys
from contextlib import redirect_stdout
import numpy as np
class MyTest(unittest.TestCase):
def test_astlexpar(self):
data = open("./samples/example1.ppl").read()
ast = pype.parser.parser.parse(data, lexer=pype.lexer.lexer)
printer = pype.semantic_analysis.PrettyPrint()
f = io.StringIO()
with redirect_stdout(f):
ast.walk(printer)
compare = open("./samples/example1.ast").read()
self.assertEqual(f.getvalue(), compare)
def test_singleassignment(self):
data = '''(import timeseries)
{ standardize
(:= new_t (/ (- t mu) sig))
(:= mu (mean t))
(:= sig (std t))
(input (TimeSeries t))
(output new_t)
}'''
ast = pype.parser.parser.parse(data, lexer=pype.lexer.lexer)
checker = pype.semantic_analysis.CheckSingleAssignment()
try:
ast.walk(checker)
except:
self.fail("Single Assignment erroneously flagged")
data = '''(import timeseries)
{ standardize
(:= new_t (/ (- t mu) sig))
(:= mu (mean t))
(:= mu (std t))
(input (TimeSeries t))
(output new_t)
}'''
ast = pype.parser.parser.parse(data, lexer=pype.lexer.lexer)
checker = pype.semantic_analysis.CheckSingleAssignment()
with self.assertRaises(SyntaxError):
ast.walk(checker)
data = '''(import timeseries)
{ standardize
(:= new_t (/ (- t mu) sig))
(:= mu (mean t))
(:= sig (std t))
(input (TimeSeries t))
(output new_t)
}
{ standardize2
(:= new_t (/ (- t mu) sig))
(:= mu (mean t))
(:= sig (std t))
(input (TimeSeries t))
(output new_t)
}'''
ast = pype.parser.parser.parse(data, lexer=pype.lexer.lexer)
checker = pype.semantic_analysis.CheckSingleAssignment()
try:
ast.walk(checker)
except:
self.fail("Single Assignment erroneously flagged")
def test_symtablevisitor(self):
data = open("./samples/example1.ppl").read()
ast = pype.parser.parser.parse(data, lexer=pype.lexer.lexer)
tabler = pype.translate.SymbolTableVisitor()
ast.walk(tabler)
symtab = tabler.symbol_table
self.assertListEqual(sorted(list(symtab.scopes())), sorted(['global', 'standardize']))
#self.assertEqual(len(symtab['global']), 11)
self.assertEqual(len(symtab['standardize']), 4)
def test_component(self):
@pype.component
def sillyfunc(a):
print(a)
self.assertEqual(sillyfunc._attributes['_pype_component'], True)
self.assertEqual(pype.is_component(sillyfunc), True)
def sillyfunc2(b):
print(b)
self.assertEqual(pype.is_component(sillyfunc2), False)
def test_deadcodeelimination(self):
data = """
(import timeseries)
{ component2
# sum of squares
(input x y)
(:= z (+ (* x x) (* y y)))
(output z)
}
{ six
# Produces the number 6 through convoluted means
(input x y)
(:= a (+ x (* 2 y)))
(:= b (+ (/ y x) (* x x)))
(:= c 6)
(:= d (component2 x y))
(:= e (+ (* a a) (+ (* b b) d)))
(output c)
}
"""
ast = pype.parser.parser.parse(data,pype.lexer.lexer)
q = pype.translate.SymbolTableVisitor()
ast.walk(q)
IR = ast.mod_walk(pype.translate.LoweringVisitor(q.symbol_table))
flowgraph = IR['six']
flowgraph2 = IR['component2']
eliminate = pype.optimize.DeadCodeElimination()
flowgraph = eliminate.visit(flowgraph)
flowgraph2 = eliminate.visit(flowgraph2)
def test_inline(self):
data = """
(import timeseries)
{ mul (input x y) (:= z (* x y)) (output z) }
{ dist (input a b) (:= c (+ (mul a b) (mul b a))) (output c) }
"""
graph1 = 'digraph dist {\n "@N2" -> "@N4"\n "@N3" -> "@N4"\n "@N1" -> "@N3"\n "@N0" -> "@N3"\n "@N4" -> "@N5"\n "@N5" -> "@N6"\n "@N0" -> "@N2"\n "@N1" -> "@N2"\n "@N0" [ label = "a" ]\n "@N5" [ label = "c" ]\n "@N1" [ label = "b" ]\n "@N0" [ color = "green" ]\n "@N1" [ color = "green" ]\n "@N6" [ color = "red" ]\n}\n'
graph2 = 'digraph dist {\n "@N1" -> "@N8"\n "@N14" -> "@N12"\n "@N1" -> "@N16"\n "@N0" -> "@N13"\n "@N5" -> "@N6"\n "@N8" -> "@N10"\n "@N11" -> "@N10"\n "@N12" -> "@N4"\n "@N7" -> "@N4"\n "@N15" -> "@N14"\n "@N13" -> "@N15"\n "@N16" -> "@N15"\n "@N9" -> "@N7"\n "@N4" -> "@N5"\n "@N0" -> "@N11"\n "@N10" -> "@N9"\n "@N0" [ label = "a" ]\n "@N5" [ label = "c" ]\n "@N1" [ label = "b" ]\n "@N0" [ color = "green" ]\n "@N1" [ color = "green" ]\n "@N6" [ color = "red" ]\n}\n'
graph1 = sorted(graph1.split('\n'))
graph2 = sorted(graph2.split('\n'))
ast = pype.parser.parser.parse(data,pype.lexer.lexer)
q = pype.translate.SymbolTableVisitor()
ast.walk(q)
IR = ast.mod_walk(pype.translate.LoweringVisitor(q.symbol_table))
flowgraph = IR['mul']
flowgraph2 = IR['dist']
eliminate = pype.optimize.InlineComponents()
flowgraph = eliminate.visit(flowgraph)
flowgraph2 = eliminate.visit(flowgraph2)
self.assertEqual(len(flowgraph2.inputs),2)
self.assertEqual(len(flowgraph2.outputs),1)
for nid in flowgraph2.nodes.keys():
self.assertNotEqual(flowgraph2.nodes[nid].ref,'mul')
def test_compiler(self):
pl = pype.Pipeline("samples/example1.ppl")
time = np.arange(100)
vals = np.arange(100) - 50
ts = timeseries.TimeSeries(time, vals)
standardized_ts = pl['standardize'].run(ts)
self.assertTrue(abs(standardized_ts.mean()) < 1e-17)
self.assertTrue(abs(standardized_ts.std() - 1.) < 1e-17)
suite = unittest.TestLoader().loadTestsFromTestCase(MyTest)
unittest.TextTestRunner().run(suite)
|
Planet-Nine/cs207project
|
tests/test_pype.py
|
Python
|
mit
| 6,223
|
[
"VisIt"
] |
ff2950df34e5f70f8563f8d4d1032abf236adbeb5ade8d57809fc6ce4b1a8175
|
import os
import optparse
import subprocess
from multiprocessing import Pool
directory = ""
results = "results.data"
extension = ""
aligned_extension = ".tab"
datatype = ""
perlpath = "/home/galaxy-dist/tools/osiris/tree-manipulation/"
def unescape(string):
mapped_chars = {
'>': '__gt__',
'<': '__lt__',
"'": '__sq__',
'"': '__dq__',
'[': '__ob__',
']': '__cb__',
'{': '__oc__',
'}': '__cc__',
'@': '__at__',
'\n': '__cn__',
'\r': '__cr__',
'\t': '__tc__',
'#': '__pd__'
}
for key, value in mapped_chars.iteritems():
string = string.replace(value, key)
return string
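# Hedged illustration: Galaxy escapes shell-sensitive characters in tool
# parameters, so unescape('__ob__x__cb__') returns '[x]'.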
def isTabular(file):
with open(file) as f:
for line in f:
if line[0] == '>':
return False
return True
#def toData(text, name):
# name = name.replace("fasta", "") #file name has fasta when fasta file called
# text = name.replace(".fs.tre", "") + "\t" + text.replace(" " , "")
# return text
def toData(text, name):
text = text.split('\n')
result = ''
for line in text:
if '\t' in line:
line = line.replace("./data/","") + "\n"
result += line
    return result  # keep only the tab-delimited result lines
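# Hedged illustration: a result line such as './data/sample.fs.tre\t0.42'
# is emitted by toData() as 'sample.fs.tre\t0.42\n'; lines without a tab
# are dropped.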
def LB_pruner(input):
file_name = directory + os.sep + input
popen = subprocess.Popen(['perl', perlpath+'LB_prunerG.pl', file_name, indata, file_name + aligned_extension])
popen.wait()
class Sequence:
def __init__(self, string):
lis = string.split()
self.name = lis[0]
self.tree = lis[1]
self.string = string
def printFASTA(self):
return self.tree + '\n'
def saveMulti(tabFile):
with open(tabFile) as f:
for line in f:
seq = Sequence(line)
with open(directory + os.sep + seq.name + extension, "a") as p:
p.write(seq.printFASTA())
def saveSingle(fastaFile):
with open(fastaFile) as f:
for line in f:
with open(directory + os.sep + "fasta" + extension, "a") as p:
p.write(line)
def main():
usage = """%prog [options]
options (listed below) default to 'None' if omitted
"""
parser = optparse.OptionParser(usage=usage)
parser.add_option(
'-d', '--directory',
metavar="PATH",
dest='path',
default='.',
help='Path to working directory.')
parser.add_option(
'-i', '--in',
dest='input',
action='store',
type='string',
metavar="FILE",
help='Name of input data.')
parser.add_option(
'-m', '--mult',
dest='datatype',
action='store',
type='string',
help='Multiplier')
options, args = parser.parse_args()
global directory
global indata
inputFile = unescape(options.input)
directory = unescape(options.path) + os.sep + "data"
indata = unescape(options.datatype)
os.mkdir(directory)
if isTabular(inputFile):
saveMulti(inputFile)
else:
saveSingle(inputFile)
pool = Pool()
list_of_files = [file for file in os.listdir(directory) if file.lower().endswith(extension)]
pool.map(LB_pruner, list_of_files)
result = [file for file in os.listdir(directory) if file.lower().endswith(aligned_extension)]
with open(directory + os.sep + results, "a") as f:
for file in result:
with open(directory + os.sep + file, "r") as r:
f.write(toData(r.read(),file))
if __name__ == '__main__':
main()
|
xibalbanus/PIA2
|
osiris_phylogenetics/phylostatistics/phytab_LB_pruner.py
|
Python
|
mit
| 3,591
|
[
"Galaxy"
] |
4089009e78ce53f08ff11d27cfe3a9f917c3ee619b64db4962e8514832b7458e
|
import os
import logging
import synapse.glob as s_glob
import synapse.common as s_common
import synapse.lib.cell as s_cell
import synapse.lib.msgpack as s_msgpack
logger = logging.getLogger(__name__)
defport = 65521 # the default neuron port
class Neuron(s_cell.Cell):
'''
A neuron node is the "master cell" for a neuron cluster.
'''
def postCell(self):
self.cells = self.getCellDict('cells')
path = self._path('admin.auth')
if not os.path.exists(path):
auth = self.genCellAuth('admin')
s_msgpack.dumpfile(auth, path)
def handlers(self):
return {
'cell:get': self._onCellGet,
'cell:reg': self._onCellReg,
'cell:init': self._onCellInit,
'cell:list': self._onCellList,
}
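    # Hedged illustration of the wire protocol implied above: a peer sends
    # a ('cell:get', {'name': 'axon00@host'}) message and the handler
    # replies with (True, info) on the channel.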
def _genCellName(self, name):
host = self.getConfOpt('host')
return '%s@%s' % (name, host)
def _onCellGet(self, chan, mesg):
name = mesg[1].get('name')
info = self.cells.get(name)
chan.txfini((True, info))
@s_glob.inpool
def _onCellReg(self, chan, mesg):
peer = chan.getLinkProp('cell:peer')
if peer is None:
enfo = ('NoCellPeer', {})
chan.tx((False, enfo))
return
info = mesg[1]
self.cells.set(peer, info)
self.fire('cell:reg', name=peer, info=info)
logger.info('cell registered: %s %r', peer, info)
chan.txfini((True, True))
return
def _onCellList(self, chan, mesg):
cells = self.cells.items()
chan.tx((True, cells))
@s_glob.inpool
def _onCellInit(self, chan, mesg):
# for now, only let admin provision...
root = 'admin@%s' % (self.getConfOpt('host'),)
peer = chan.getLinkProp('cell:peer')
if peer != root:
logger.warning('cell:init not allowed for: %s' % (peer,))
return chan.tx((False, None))
name = mesg[1].get('name').split('@')[0]
auth = self.genCellAuth(name)
chan.tx((True, auth))
def getCellInfo(self, name):
'''
Return the info dict for a given cell by name.
'''
return self.cells.get(name)
def getCellList(self):
'''
Return a list of (name, info) tuples for the known cells.
'''
return self.cells.items()
def genCellAuth(self, name):
'''
Generate or retrieve an auth/provision blob for a cell.
Args:
name (str): The unqualified cell name (ex. "axon00")
'''
host = self.getConfOpt('host')
full = '%s@%s' % (name, host)
auth = self.vault.genUserAuth(full)
auth[1]['neuron'] = self.getCellAddr()
return auth
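    # Hedged illustration: the blob returned above is assumed to be a
    # (user, info) tuple, e.g. ('axon00@host', {..., 'neuron': addr}),
    # since genCellAuth() stores the neuron address under auth[1]['neuron'].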
def initConfDefs(self):
s_cell.Cell.initConfDefs(self)
self.addConfDefs((
('port', {'defval': defport, 'req': 1,
'doc': 'The TCP port the Neuron binds to (defaults to %d)' % defport}),
))
class NeuronClient:
def __init__(self, sess):
self.sess = sess
def genCellAuth(self, name, timeout=None):
'''
Generate a new cell auth file.
'''
mesg = ('cell:init', {'name': name})
ok, retn = self.sess.call(mesg, timeout=timeout)
return s_common.reqok(ok, retn)
|
vivisect/synapse
|
synapse/neuron.py
|
Python
|
apache-2.0
| 3,346
|
[
"NEURON"
] |
fc130c5b69eaed6864580c07aae17f08b1b10b13e1d3b4b99d3e4233f16ecf88
|
# Copyright 2013-2018 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for external dependencies.
# Custom logic for several other packages are in separate files.
import copy
import functools
import os
import re
import json
import shlex
import shutil
import textwrap
import platform
import typing as T
from enum import Enum
from pathlib import Path, PurePath
from .. import mlog
from .. import mesonlib
from ..compilers import clib_langs
from ..environment import Environment, MachineInfo
from ..cmake import CMakeExecutor, CMakeTraceParser, CMakeException, CMakeToolchain, CMakeExecScope, check_cmake_args
from ..mesonlib import MachineChoice, MesonException, OrderedSet, PerMachine
from ..mesonlib import Popen_safe, version_compare_many, version_compare, listify, stringlistify, extract_as_list, split_args
from ..mesonlib import Version, LibType, OptionKey
from ..mesondata import mesondata
from ..programs import ExternalProgram, find_external_program
if T.TYPE_CHECKING:
from ..compilers.compilers import CompilerType # noqa: F401
DependencyType = T.TypeVar('DependencyType', bound='Dependency')
# These must be defined in this file to avoid cyclical references.
packages = {}
_packages_accept_language = set()
class DependencyException(MesonException):
'''Exceptions raised while trying to find dependencies'''
class DependencyMethods(Enum):
# Auto means to use whatever dependency checking mechanisms in whatever order meson thinks is best.
AUTO = 'auto'
PKGCONFIG = 'pkg-config'
QMAKE = 'qmake'
CMAKE = 'cmake'
# Just specify the standard link arguments, assuming the operating system provides the library.
SYSTEM = 'system'
# This is only supported on OSX - search the frameworks directory by name.
EXTRAFRAMEWORK = 'extraframework'
# Detect using the sysconfig module.
SYSCONFIG = 'sysconfig'
# Specify using a "program"-config style tool
CONFIG_TOOL = 'config-tool'
# For backwards compatibility
SDLCONFIG = 'sdlconfig'
CUPSCONFIG = 'cups-config'
PCAPCONFIG = 'pcap-config'
LIBWMFCONFIG = 'libwmf-config'
# Misc
DUB = 'dub'
class Dependency:
@classmethod
def _process_include_type_kw(cls, kwargs) -> str:
if 'include_type' not in kwargs:
return 'preserve'
if not isinstance(kwargs['include_type'], str):
raise DependencyException('The include_type kwarg must be a string type')
if kwargs['include_type'] not in ['preserve', 'system', 'non-system']:
raise DependencyException("include_type may only be one of ['preserve', 'system', 'non-system']")
return kwargs['include_type']
def __init__(self, type_name, kwargs):
self.name = "null"
self.version = None # type: T.Optional[str]
self.language = None # None means C-like
self.is_found = False
self.type_name = type_name
self.compile_args = [] # type: T.List[str]
self.link_args = []
# Raw -L and -l arguments without manual library searching
# If None, self.link_args will be used
self.raw_link_args = None
self.sources = []
self.methods = process_method_kw(self.get_methods(), kwargs)
self.include_type = self._process_include_type_kw(kwargs)
self.ext_deps = [] # type: T.List[Dependency]
def __repr__(self):
s = '<{0} {1}: {2}>'
return s.format(self.__class__.__name__, self.name, self.is_found)
def is_built(self) -> bool:
return False
def summary_value(self) -> T.Union[str, mlog.AnsiDecorator, mlog.AnsiText]:
if not self.found():
return mlog.red('NO')
if not self.version:
return mlog.green('YES')
return mlog.AnsiText(mlog.green('YES'), ' ', mlog.cyan(self.version))
def get_compile_args(self) -> T.List[str]:
if self.include_type == 'system':
converted = []
for i in self.compile_args:
if i.startswith('-I') or i.startswith('/I'):
converted += ['-isystem' + i[2:]]
else:
converted += [i]
return converted
if self.include_type == 'non-system':
converted = []
for i in self.compile_args:
if i.startswith('-isystem'):
converted += ['-I' + i[8:]]
else:
converted += [i]
return converted
return self.compile_args
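    # Hedged illustration: with include_type='system', an argument such as
    # '-I/usr/include/foo' is rewritten above to '-isystem/usr/include/foo';
    # with 'non-system' the reverse substitution is applied.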
def get_link_args(self, raw: bool = False) -> T.List[str]:
if raw and self.raw_link_args is not None:
return self.raw_link_args
return self.link_args
def found(self) -> bool:
return self.is_found
def get_sources(self):
"""Source files that need to be added to the target.
As an example, gtest-all.cc when using GTest."""
return self.sources
@staticmethod
def get_methods():
return [DependencyMethods.AUTO]
def get_name(self):
return self.name
def get_version(self) -> str:
if self.version:
return self.version
else:
return 'unknown'
def get_include_type(self) -> str:
return self.include_type
def get_exe_args(self, compiler):
return []
def get_pkgconfig_variable(self, variable_name: str, kwargs: T.Dict[str, T.Any]) -> str:
raise DependencyException(f'{self.name!r} is not a pkgconfig dependency')
def get_configtool_variable(self, variable_name):
raise DependencyException(f'{self.name!r} is not a config-tool dependency')
def get_partial_dependency(self, *, compile_args: bool = False,
link_args: bool = False, links: bool = False,
includes: bool = False, sources: bool = False):
"""Create a new dependency that contains part of the parent dependency.
The following options can be inherited:
links -- all link_with arguments
includes -- all include_directory and -I/-isystem calls
sources -- any source, header, or generated sources
compile_args -- any compile args
link_args -- any link args
        Additionally the new dependency will have the version parameter of its
        parent (if any), and the requested values of any dependencies will be
        added as well.
"""
raise RuntimeError('Unreachable code in partial_dependency called')
def _add_sub_dependency(self, deplist: T.Iterable[T.Callable[[], 'Dependency']]) -> bool:
"""Add an internal depdency from a list of possible dependencies.
This method is intended to make it easier to add additional
dependencies to another dependency internally.
Returns true if the dependency was successfully added, false
otherwise.
"""
for d in deplist:
dep = d()
if dep.is_found:
self.ext_deps.append(dep)
return True
return False
def get_variable(self, *, cmake: T.Optional[str] = None, pkgconfig: T.Optional[str] = None,
configtool: T.Optional[str] = None, internal: T.Optional[str] = None,
default_value: T.Optional[str] = None,
pkgconfig_define: T.Optional[T.List[str]] = None) -> T.Union[str, T.List[str]]:
if default_value is not None:
return default_value
raise DependencyException(f'No default provided for dependency {self!r}, which is not pkg-config, cmake, or config-tool based.')
    def generate_system_dependency(self, include_type: str) -> 'Dependency':
new_dep = copy.deepcopy(self)
new_dep.include_type = self._process_include_type_kw({'include_type': include_type})
return new_dep
class InternalDependency(Dependency):
def __init__(self, version, incdirs, compile_args, link_args, libraries,
whole_libraries, sources, ext_deps, variables: T.Dict[str, T.Any]):
super().__init__('internal', {})
self.version = version
self.is_found = True
self.include_directories = incdirs
self.compile_args = compile_args
self.link_args = link_args
self.libraries = libraries
self.whole_libraries = whole_libraries
self.sources = sources
self.ext_deps = ext_deps
self.variables = variables
def __deepcopy__(self, memo: dict) -> 'InternalDependency':
result = self.__class__.__new__(self.__class__)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k in ['libraries', 'whole_libraries']:
setattr(result, k, copy.copy(v))
else:
setattr(result, k, copy.deepcopy(v, memo))
return result
def summary_value(self) -> mlog.AnsiDecorator:
# Omit the version. Most of the time it will be just the project
# version, which is uninteresting in the summary.
return mlog.green('YES')
def is_built(self) -> bool:
if self.sources or self.libraries or self.whole_libraries:
return True
return any(d.is_built() for d in self.ext_deps)
def get_pkgconfig_variable(self, variable_name: str, kwargs: T.Dict[str, T.Any]) -> str:
raise DependencyException('Method "get_pkgconfig_variable()" is '
'invalid for an internal dependency')
def get_configtool_variable(self, variable_name):
raise DependencyException('Method "get_configtool_variable()" is '
'invalid for an internal dependency')
def get_partial_dependency(self, *, compile_args: bool = False,
link_args: bool = False, links: bool = False,
includes: bool = False, sources: bool = False):
final_compile_args = self.compile_args.copy() if compile_args else []
final_link_args = self.link_args.copy() if link_args else []
final_libraries = self.libraries.copy() if links else []
final_whole_libraries = self.whole_libraries.copy() if links else []
final_sources = self.sources.copy() if sources else []
final_includes = self.include_directories.copy() if includes else []
final_deps = [d.get_partial_dependency(
compile_args=compile_args, link_args=link_args, links=links,
includes=includes, sources=sources) for d in self.ext_deps]
return InternalDependency(
self.version, final_includes, final_compile_args,
final_link_args, final_libraries, final_whole_libraries,
final_sources, final_deps, self.variables)
def get_variable(self, *, cmake: T.Optional[str] = None, pkgconfig: T.Optional[str] = None,
configtool: T.Optional[str] = None, internal: T.Optional[str] = None,
default_value: T.Optional[str] = None,
pkgconfig_define: T.Optional[T.List[str]] = None) -> T.Union[str, T.List[str]]:
val = self.variables.get(internal, default_value)
if val is not None:
return val
raise DependencyException(f'Could not get an internal variable and no default provided for {self!r}')
    def generate_link_whole_dependency(self) -> 'Dependency':
new_dep = copy.deepcopy(self)
new_dep.whole_libraries += new_dep.libraries
new_dep.libraries = []
return new_dep
class HasNativeKwarg:
def __init__(self, kwargs: T.Dict[str, T.Any]):
self.for_machine = self.get_for_machine_from_kwargs(kwargs)
def get_for_machine_from_kwargs(self, kwargs: T.Dict[str, T.Any]) -> MachineChoice:
return MachineChoice.BUILD if kwargs.get('native', False) else MachineChoice.HOST
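    # Hedged illustration: dependency('foo', native : true) in a meson.build
    # resolves here to MachineChoice.BUILD, while omitting 'native' selects
    # MachineChoice.HOST.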
class ExternalDependency(Dependency, HasNativeKwarg):
def __init__(self, type_name, environment: Environment, kwargs, language: T.Optional[str] = None):
Dependency.__init__(self, type_name, kwargs)
self.env = environment
self.name = type_name # default
self.is_found = False
self.language = language
self.version_reqs = kwargs.get('version', None)
if isinstance(self.version_reqs, str):
self.version_reqs = [self.version_reqs]
self.required = kwargs.get('required', True)
self.silent = kwargs.get('silent', False)
self.static = kwargs.get('static', False)
if not isinstance(self.static, bool):
raise DependencyException('Static keyword must be boolean')
# Is this dependency to be run on the build platform?
HasNativeKwarg.__init__(self, kwargs)
self.clib_compiler = detect_compiler(self.name, environment, self.for_machine, self.language)
def get_compiler(self):
return self.clib_compiler
def get_partial_dependency(self, *, compile_args: bool = False,
link_args: bool = False, links: bool = False,
includes: bool = False, sources: bool = False):
new = copy.copy(self)
if not compile_args:
new.compile_args = []
if not link_args:
new.link_args = []
if not sources:
new.sources = []
if not includes:
new.include_directories = []
if not sources:
new.sources = []
return new
def log_details(self):
return ''
def log_info(self):
return ''
def log_tried(self):
return ''
# Check if dependency version meets the requirements
def _check_version(self):
if not self.is_found:
return
if self.version_reqs:
# an unknown version can never satisfy any requirement
if not self.version:
found_msg = ['Dependency', mlog.bold(self.name), 'found:']
found_msg += [mlog.red('NO'), 'unknown version, but need:',
self.version_reqs]
mlog.log(*found_msg)
if self.required:
m = 'Unknown version of dependency {!r}, but need {!r}.'
raise DependencyException(m.format(self.name, self.version_reqs))
else:
(self.is_found, not_found, found) = \
version_compare_many(self.version, self.version_reqs)
if not self.is_found:
found_msg = ['Dependency', mlog.bold(self.name), 'found:']
found_msg += [mlog.red('NO'),
'found', mlog.normal_cyan(self.version), 'but need:',
mlog.bold(', '.join([f"'{e}'" for e in not_found]))]
if found:
found_msg += ['; matched:',
', '.join([f"'{e}'" for e in found])]
mlog.log(*found_msg)
if self.required:
m = 'Invalid version of dependency, need {!r} {!r} found {!r}.'
raise DependencyException(m.format(self.name, not_found, self.version))
return
class NotFoundDependency(Dependency):
def __init__(self, environment):
super().__init__('not-found', {})
self.env = environment
self.name = 'not-found'
self.is_found = False
def get_partial_dependency(self, *, compile_args: bool = False,
link_args: bool = False, links: bool = False,
includes: bool = False, sources: bool = False):
return copy.copy(self)
class ConfigToolDependency(ExternalDependency):
"""Class representing dependencies found using a config tool.
Takes the following extra keys in kwargs that it uses internally:
:tools List[str]: A list of tool names to use
    :version_arg str: The argument to pass to the tool to get its version
:returncode_value int: The value of the correct returncode
Because some tools are stupid and don't return 0
"""
tools = None
tool_name = None
version_arg = '--version'
__strip_version = re.compile(r'^[0-9][0-9.]+')
def __init__(self, name, environment, kwargs, language: T.Optional[str] = None):
super().__init__('config-tool', environment, kwargs, language=language)
self.name = name
# You may want to overwrite the class version in some cases
self.tools = listify(kwargs.get('tools', self.tools))
if not self.tool_name:
self.tool_name = self.tools[0]
if 'version_arg' in kwargs:
self.version_arg = kwargs['version_arg']
req_version = kwargs.get('version', None)
tool, version = self.find_config(req_version, kwargs.get('returncode_value', 0))
self.config = tool
self.is_found = self.report_config(version, req_version)
if not self.is_found:
self.config = None
return
self.version = version
def _sanitize_version(self, version):
"""Remove any non-numeric, non-point version suffixes."""
m = self.__strip_version.match(version)
if m:
# Ensure that there isn't a trailing '.', such as an input like
# `1.2.3.git-1234`
return m.group(0).rstrip('.')
return version
def find_config(self, versions: T.Optional[T.List[str]] = None, returncode: int = 0) \
-> T.Tuple[T.Optional[str], T.Optional[str]]:
"""Helper method that searches for config tool binaries in PATH and
returns the one that best matches the given version requirements.
"""
if not isinstance(versions, list) and versions is not None:
versions = listify(versions)
best_match = (None, None) # type: T.Tuple[T.Optional[str], T.Optional[str]]
for potential_bin in find_external_program(
self.env, self.for_machine, self.tool_name,
self.tool_name, self.tools, allow_default_for_cross=False):
if not potential_bin.found():
continue
tool = potential_bin.get_command()
try:
p, out = Popen_safe(tool + [self.version_arg])[:2]
except (FileNotFoundError, PermissionError):
continue
if p.returncode != returncode:
continue
out = self._sanitize_version(out.strip())
# Some tools, like pcap-config don't supply a version, but also
# don't fail with --version, in that case just assume that there is
# only one version and return it.
if not out:
return (tool, None)
if versions:
is_found = version_compare_many(out, versions)[0]
# This allows returning a found version without a config tool,
# which is useful to inform the user that you found version x,
# but y was required.
if not is_found:
tool = None
if best_match[1]:
if version_compare(out, '> {}'.format(best_match[1])):
best_match = (tool, out)
else:
best_match = (tool, out)
return best_match
def report_config(self, version, req_version):
"""Helper method to print messages about the tool."""
found_msg = [mlog.bold(self.tool_name), 'found:']
if self.config is None:
found_msg.append(mlog.red('NO'))
if version is not None and req_version is not None:
found_msg.append(f'found {version!r} but need {req_version!r}')
elif req_version:
found_msg.append(f'need {req_version!r}')
else:
found_msg += [mlog.green('YES'), '({})'.format(' '.join(self.config)), version]
mlog.log(*found_msg)
return self.config is not None
def get_config_value(self, args: T.List[str], stage: str) -> T.List[str]:
p, out, err = Popen_safe(self.config + args)
if p.returncode != 0:
if self.required:
raise DependencyException(
'Could not generate {} for {}.\n{}'.format(
stage, self.name, err))
return []
return split_args(out)
@staticmethod
def get_methods():
return [DependencyMethods.AUTO, DependencyMethods.CONFIG_TOOL]
def get_configtool_variable(self, variable_name):
p, out, _ = Popen_safe(self.config + [f'--{variable_name}'])
if p.returncode != 0:
if self.required:
raise DependencyException(
'Could not get variable "{}" for dependency {}'.format(
variable_name, self.name))
variable = out.strip()
mlog.debug(f'Got config-tool variable {variable_name} : {variable}')
return variable
def log_tried(self):
return self.type_name
def get_variable(self, *, cmake: T.Optional[str] = None, pkgconfig: T.Optional[str] = None,
configtool: T.Optional[str] = None, internal: T.Optional[str] = None,
default_value: T.Optional[str] = None,
pkgconfig_define: T.Optional[T.List[str]] = None) -> T.Union[str, T.List[str]]:
if configtool:
            # In the not-required case '' (empty string) will be returned if the
            # variable is not found. Since '' is a valid value to return we
            # set required to True here to force an error, and use the
            # finally clause to ensure it's restored.
restore = self.required
self.required = True
try:
return self.get_configtool_variable(configtool)
except DependencyException:
pass
finally:
self.required = restore
if default_value is not None:
return default_value
raise DependencyException(f'Could not get config-tool variable and no default provided for {self!r}')
class PkgConfigDependency(ExternalDependency):
# The class's copy of the pkg-config path. Avoids having to search for it
# multiple times in the same Meson invocation.
class_pkgbin = PerMachine(None, None)
# We cache all pkg-config subprocess invocations to avoid redundant calls
pkgbin_cache = {}
def __init__(self, name, environment: 'Environment', kwargs, language: T.Optional[str] = None):
super().__init__('pkgconfig', environment, kwargs, language=language)
self.name = name
self.is_libtool = False
# Store a copy of the pkg-config path on the object itself so it is
# stored in the pickled coredata and recovered.
self.pkgbin = None
# Only search for pkg-config for each machine the first time and store
# the result in the class definition
if PkgConfigDependency.class_pkgbin[self.for_machine] is False:
mlog.debug('Pkg-config binary for %s is cached as not found.' % self.for_machine)
elif PkgConfigDependency.class_pkgbin[self.for_machine] is not None:
mlog.debug('Pkg-config binary for %s is cached.' % self.for_machine)
else:
assert PkgConfigDependency.class_pkgbin[self.for_machine] is None
mlog.debug('Pkg-config binary for %s is not cached.' % self.for_machine)
for potential_pkgbin in find_external_program(
self.env, self.for_machine, 'pkgconfig', 'Pkg-config',
environment.default_pkgconfig, allow_default_for_cross=False):
version_if_ok = self.check_pkgconfig(potential_pkgbin)
if not version_if_ok:
continue
if not self.silent:
mlog.log('Found pkg-config:', mlog.bold(potential_pkgbin.get_path()),
'(%s)' % version_if_ok)
PkgConfigDependency.class_pkgbin[self.for_machine] = potential_pkgbin
break
else:
if not self.silent:
mlog.log('Found Pkg-config:', mlog.red('NO'))
# Set to False instead of None to signify that we've already
# searched for it and not found it
PkgConfigDependency.class_pkgbin[self.for_machine] = False
self.pkgbin = PkgConfigDependency.class_pkgbin[self.for_machine]
if self.pkgbin is False:
self.pkgbin = None
msg = 'Pkg-config binary for machine %s not found. Giving up.' % self.for_machine
if self.required:
raise DependencyException(msg)
else:
mlog.debug(msg)
return
mlog.debug('Determining dependency {!r} with pkg-config executable '
'{!r}'.format(name, self.pkgbin.get_path()))
ret, self.version, _ = self._call_pkgbin(['--modversion', name])
if ret != 0:
return
self.is_found = True
try:
# Fetch cargs to be used while using this dependency
self._set_cargs()
# Fetch the libraries and library paths needed for using this
self._set_libs()
except DependencyException as e:
mlog.debug(f"pkg-config error with '{name}': {e}")
if self.required:
raise
else:
self.compile_args = []
self.link_args = []
self.is_found = False
self.reason = e
def __repr__(self):
s = '<{0} {1}: {2} {3}>'
return s.format(self.__class__.__name__, self.name, self.is_found,
self.version_reqs)
def _call_pkgbin_real(self, args, env):
cmd = self.pkgbin.get_command() + args
p, out, err = Popen_safe(cmd, env=env)
rc, out, err = p.returncode, out.strip(), err.strip()
call = ' '.join(cmd)
mlog.debug(f"Called `{call}` -> {rc}\n{out}")
return rc, out, err
@staticmethod
def setup_env(env: T.MutableMapping[str, str], environment: 'Environment', for_machine: MachineChoice,
extra_path: T.Optional[str] = None) -> None:
extra_paths: T.List[str] = environment.coredata.options[OptionKey('pkg_config_path', machine=for_machine)].value[:]
if extra_path and extra_path not in extra_paths:
extra_paths.append(extra_path)
sysroot = environment.properties[for_machine].get_sys_root()
if sysroot:
env['PKG_CONFIG_SYSROOT_DIR'] = sysroot
new_pkg_config_path = ':'.join([p for p in extra_paths])
env['PKG_CONFIG_PATH'] = new_pkg_config_path
pkg_config_libdir_prop = environment.properties[for_machine].get_pkg_config_libdir()
if pkg_config_libdir_prop:
new_pkg_config_libdir = ':'.join([p for p in pkg_config_libdir_prop])
env['PKG_CONFIG_LIBDIR'] = new_pkg_config_libdir
# Dump all PKG_CONFIG environment variables
for key, value in env.items():
if key.startswith('PKG_'):
mlog.debug(f'env[{key}]: {value}')
def _call_pkgbin(self, args, env=None):
# Always copy the environment since we're going to modify it
# with pkg-config variables
if env is None:
env = os.environ.copy()
else:
env = env.copy()
PkgConfigDependency.setup_env(env, self.env, self.for_machine)
fenv = frozenset(env.items())
targs = tuple(args)
cache = PkgConfigDependency.pkgbin_cache
if (self.pkgbin, targs, fenv) not in cache:
cache[(self.pkgbin, targs, fenv)] = self._call_pkgbin_real(args, env)
return cache[(self.pkgbin, targs, fenv)]
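    # Hedged note: results are memoised per (pkg-config binary, argument
    # tuple, frozen environment), so repeated queries such as
    # ['--cflags', 'foo'] within one Meson invocation hit the cache above.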
def _convert_mingw_paths(self, args: T.List[str]) -> T.List[str]:
'''
Both MSVC and native Python on Windows cannot handle MinGW-esque /c/foo
paths so convert them to C:/foo. We cannot resolve other paths starting
with / like /home/foo so leave them as-is so that the user gets an
error/warning from the compiler/linker.
'''
if not self.env.machines.build.is_windows():
return args
converted = []
for arg in args:
pargs = []
# Library search path
if arg.startswith('-L/'):
pargs = PurePath(arg[2:]).parts
tmpl = '-L{}:/{}'
elif arg.startswith('-I/'):
pargs = PurePath(arg[2:]).parts
tmpl = '-I{}:/{}'
# Full path to library or .la file
elif arg.startswith('/'):
pargs = PurePath(arg).parts
tmpl = '{}:/{}'
elif arg.startswith(('-L', '-I')) or (len(arg) > 2 and arg[1] == ':'):
# clean out improper '\\ ' as comes from some Windows pkg-config files
arg = arg.replace('\\ ', ' ')
if len(pargs) > 1 and len(pargs[1]) == 1:
arg = tmpl.format(pargs[1], '/'.join(pargs[2:]))
converted.append(arg)
return converted
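    # Hedged illustration: on a Windows build machine '-L/c/foo/bar' is
    # rewritten above to '-Lc:/foo/bar', while a path without a drive
    # component such as '/home/foo' is left as-is for the compiler to report.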
def _split_args(self, cmd):
# pkg-config paths follow Unix conventions, even on Windows; split the
# output using shlex.split rather than mesonlib.split_args
return shlex.split(cmd)
def _set_cargs(self):
env = None
if self.language == 'fortran':
# gfortran doesn't appear to look in system paths for INCLUDE files,
# so don't allow pkg-config to suppress -I flags for system paths
env = os.environ.copy()
env['PKG_CONFIG_ALLOW_SYSTEM_CFLAGS'] = '1'
ret, out, err = self._call_pkgbin(['--cflags', self.name], env=env)
if ret != 0:
raise DependencyException('Could not generate cargs for %s:\n%s\n' %
(self.name, err))
self.compile_args = self._convert_mingw_paths(self._split_args(out))
def _search_libs(self, out, out_raw):
'''
@out: PKG_CONFIG_ALLOW_SYSTEM_LIBS=1 pkg-config --libs
@out_raw: pkg-config --libs
We always look for the file ourselves instead of depending on the
compiler to find it with -lfoo or foo.lib (if possible) because:
1. We want to be able to select static or shared
2. We need the full path of the library to calculate RPATH values
3. De-dup of libraries is easier when we have absolute paths
Libraries that are provided by the toolchain or are not found by
find_library() will be added with -L -l pairs.
'''
# Library paths should be safe to de-dup
#
# First, figure out what library paths to use. Originally, we were
# doing this as part of the loop, but due to differences in the order
# of -L values between pkg-config and pkgconf, we need to do that as
# a separate step. See:
# https://github.com/mesonbuild/meson/issues/3951
# https://github.com/mesonbuild/meson/issues/4023
#
# Separate system and prefix paths, and ensure that prefix paths are
# always searched first.
prefix_libpaths = OrderedSet()
# We also store this raw_link_args on the object later
raw_link_args = self._convert_mingw_paths(self._split_args(out_raw))
for arg in raw_link_args:
if arg.startswith('-L') and not arg.startswith(('-L-l', '-L-L')):
path = arg[2:]
if not os.path.isabs(path):
# Resolve the path as a compiler in the build directory would
path = os.path.join(self.env.get_build_dir(), path)
prefix_libpaths.add(path)
# Library paths are not always ordered in a meaningful way
#
# Instead of relying on pkg-config or pkgconf to provide -L flags in a
        # specific order, we reorder library paths ourselves, according to the
# order specified in PKG_CONFIG_PATH. See:
# https://github.com/mesonbuild/meson/issues/4271
#
# Only prefix_libpaths are reordered here because there should not be
# too many system_libpaths to cause library version issues.
pkg_config_path: T.List[str] = self.env.coredata.options[OptionKey('pkg_config_path', machine=self.for_machine)].value
pkg_config_path = self._convert_mingw_paths(pkg_config_path)
prefix_libpaths = sort_libpaths(prefix_libpaths, pkg_config_path)
system_libpaths = OrderedSet()
full_args = self._convert_mingw_paths(self._split_args(out))
for arg in full_args:
if arg.startswith(('-L-l', '-L-L')):
# These are D language arguments, not library paths
continue
if arg.startswith('-L') and arg[2:] not in prefix_libpaths:
system_libpaths.add(arg[2:])
# Use this re-ordered path list for library resolution
libpaths = list(prefix_libpaths) + list(system_libpaths)
# Track -lfoo libraries to avoid duplicate work
libs_found = OrderedSet()
# Track not-found libraries to know whether to add library paths
libs_notfound = []
libtype = LibType.STATIC if self.static else LibType.PREFER_SHARED
# Generate link arguments for this library
link_args = []
for lib in full_args:
if lib.startswith(('-L-l', '-L-L')):
# These are D language arguments, add them as-is
pass
elif lib.startswith('-L'):
# We already handled library paths above
continue
elif lib.startswith('-l'):
# Don't resolve the same -lfoo argument again
if lib in libs_found:
continue
if self.clib_compiler:
args = self.clib_compiler.find_library(lib[2:], self.env,
libpaths, libtype)
# If the project only uses a non-clib language such as D, Rust,
# C#, Python, etc, all we can do is limp along by adding the
# arguments as-is and then adding the libpaths at the end.
else:
args = None
if args is not None:
libs_found.add(lib)
# Replace -l arg with full path to library if available
# else, library is either to be ignored, or is provided by
# the compiler, can't be resolved, and should be used as-is
if args:
if not args[0].startswith('-l'):
lib = args[0]
else:
continue
else:
# Library wasn't found, maybe we're looking in the wrong
# places or the library will be provided with LDFLAGS or
# LIBRARY_PATH from the environment (on macOS), and many
# other edge cases that we can't account for.
#
# Add all -L paths and use it as -lfoo
if lib in libs_notfound:
continue
if self.static:
mlog.warning('Static library {!r} not found for dependency {!r}, may '
'not be statically linked'.format(lib[2:], self.name))
libs_notfound.append(lib)
elif lib.endswith(".la"):
shared_libname = self.extract_libtool_shlib(lib)
shared_lib = os.path.join(os.path.dirname(lib), shared_libname)
if not os.path.exists(shared_lib):
shared_lib = os.path.join(os.path.dirname(lib), ".libs", shared_libname)
if not os.path.exists(shared_lib):
                        raise DependencyException('Got a libtool-specific "%s" dependency '
                                                  'but we could not compute the actual shared '
                                                  'library path' % lib)
self.is_libtool = True
lib = shared_lib
if lib in link_args:
continue
link_args.append(lib)
# Add all -Lbar args if we have -lfoo args in link_args
if libs_notfound:
# Order of -L flags doesn't matter with ld, but it might with other
# linkers such as MSVC, so prepend them.
link_args = ['-L' + lp for lp in prefix_libpaths] + link_args
return link_args, raw_link_args
def _set_libs(self):
env = None
libcmd = ['--libs']
if self.static:
libcmd.append('--static')
libcmd.append(self.name)
# Force pkg-config to output -L fields even if they are system
# paths so we can do manual searching with cc.find_library() later.
env = os.environ.copy()
env['PKG_CONFIG_ALLOW_SYSTEM_LIBS'] = '1'
ret, out, err = self._call_pkgbin(libcmd, env=env)
if ret != 0:
raise DependencyException('Could not generate libs for %s:\n%s\n' %
(self.name, err))
# Also get the 'raw' output without -Lfoo system paths for adding -L
# args with -lfoo when a library can't be found, and also in
# gnome.generate_gir + gnome.gtkdoc which need -L -l arguments.
ret, out_raw, err_raw = self._call_pkgbin(libcmd)
if ret != 0:
raise DependencyException('Could not generate libs for %s:\n\n%s' %
(self.name, out_raw))
self.link_args, self.raw_link_args = self._search_libs(out, out_raw)
def get_pkgconfig_variable(self, variable_name: str, kwargs: T.Dict[str, T.Any]) -> str:
options = ['--variable=' + variable_name, self.name]
if 'define_variable' in kwargs:
definition = kwargs.get('define_variable', [])
if not isinstance(definition, list):
raise DependencyException('define_variable takes a list')
if len(definition) != 2 or not all(isinstance(i, str) for i in definition):
raise DependencyException('define_variable must be made up of 2 strings for VARIABLENAME and VARIABLEVALUE')
options = ['--define-variable=' + '='.join(definition)] + options
ret, out, err = self._call_pkgbin(options)
variable = ''
if ret != 0:
if self.required:
raise DependencyException('dependency %s not found:\n%s\n' %
(self.name, err))
else:
variable = out.strip()
# pkg-config doesn't distinguish between empty and non-existent variables
# use the variable list to check for variable existence
if not variable:
ret, out, _ = self._call_pkgbin(['--print-variables', self.name])
if not re.search(r'^' + variable_name + r'$', out, re.MULTILINE):
if 'default' in kwargs:
variable = kwargs['default']
else:
mlog.warning(f"pkgconfig variable '{variable_name}' not defined for dependency {self.name}.")
mlog.debug(f'Got pkgconfig variable {variable_name} : {variable}')
return variable
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG]
def check_pkgconfig(self, pkgbin):
if not pkgbin.found():
mlog.log(f'Did not find pkg-config by name {pkgbin.name!r}')
return None
try:
p, out = Popen_safe(pkgbin.get_command() + ['--version'])[0:2]
if p.returncode != 0:
mlog.warning('Found pkg-config {!r} but it failed when run'
''.format(' '.join(pkgbin.get_command())))
return None
except FileNotFoundError:
mlog.warning('We thought we found pkg-config {!r} but now it\'s not there. How odd!'
''.format(' '.join(pkgbin.get_command())))
return None
except PermissionError:
msg = 'Found pkg-config {!r} but didn\'t have permissions to run it.'.format(' '.join(pkgbin.get_command()))
if not self.env.machines.build.is_windows():
msg += '\n\nOn Unix-like systems this is often caused by scripts that are not executable.'
mlog.warning(msg)
return None
return out.strip()
def extract_field(self, la_file, fieldname):
with open(la_file) as f:
for line in f:
arr = line.strip().split('=')
if arr[0] == fieldname:
return arr[1][1:-1]
return None
def extract_dlname_field(self, la_file):
return self.extract_field(la_file, 'dlname')
def extract_libdir_field(self, la_file):
return self.extract_field(la_file, 'libdir')
def extract_libtool_shlib(self, la_file):
'''
Returns the path to the shared library
corresponding to this .la file
'''
dlname = self.extract_dlname_field(la_file)
if dlname is None:
return None
# Darwin uses absolute paths where possible; since the libtool files never
# contain absolute paths, use the libdir field
if self.env.machines[self.for_machine].is_darwin():
dlbasename = os.path.basename(dlname)
libdir = self.extract_libdir_field(la_file)
if libdir is None:
return dlbasename
return os.path.join(libdir, dlbasename)
# From the comments in extract_libtool(), older libtools had
# a path rather than the raw dlname
return os.path.basename(dlname)
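    # Hedged illustration: a .la file containing dlname='libfoo.so.0' and
    # libdir='/usr/lib' resolves here to '/usr/lib/libfoo.so.0' on Darwin
    # and to plain 'libfoo.so.0' elsewhere.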
def log_tried(self):
return self.type_name
def get_variable(self, *, cmake: T.Optional[str] = None, pkgconfig: T.Optional[str] = None,
configtool: T.Optional[str] = None, internal: T.Optional[str] = None,
default_value: T.Optional[str] = None,
pkgconfig_define: T.Optional[T.List[str]] = None) -> T.Union[str, T.List[str]]:
if pkgconfig:
kwargs = {}
if default_value is not None:
kwargs['default'] = default_value
if pkgconfig_define is not None:
kwargs['define_variable'] = pkgconfig_define
try:
return self.get_pkgconfig_variable(pkgconfig, kwargs)
except DependencyException:
pass
if default_value is not None:
return default_value
raise DependencyException(f'Could not get pkg-config variable and no default provided for {self!r}')
class CMakeDependency(ExternalDependency):
# The class's copy of the CMake path. Avoids having to search for it
# multiple times in the same Meson invocation.
class_cmakeinfo = PerMachine(None, None)
# Version string for the minimum CMake version
class_cmake_version = '>=3.4'
# CMake generators to try (empty for no generator)
class_cmake_generators = ['', 'Ninja', 'Unix Makefiles', 'Visual Studio 10 2010']
class_working_generator = None
def _gen_exception(self, msg):
return DependencyException(f'Dependency {self.name} not found: {msg}')
def _main_cmake_file(self) -> str:
return 'CMakeLists.txt'
def _extra_cmake_opts(self) -> T.List[str]:
return []
def _map_module_list(self, modules: T.List[T.Tuple[str, bool]], components: T.List[T.Tuple[str, bool]]) -> T.List[T.Tuple[str, bool]]:
# Map the input module list to something else
# This function will only be executed AFTER the initial CMake
# interpreter pass has completed. Thus variables defined in the
# CMakeLists.txt can be accessed here.
#
# Both the modules and components inputs contain the original lists.
return modules
def _map_component_list(self, modules: T.List[T.Tuple[str, bool]], components: T.List[T.Tuple[str, bool]]) -> T.List[T.Tuple[str, bool]]:
# Map the input components list to something else. This
# function will be executed BEFORE the initial CMake interpreter
# pass. Thus variables from the CMakeLists.txt can NOT be accessed.
#
# Both the modules and components inputs contain the original lists.
return components
def _original_module_name(self, module: str) -> str:
# Reverse the module mapping done by _map_module_list for
# one module
return module
def __init__(self, name: str, environment: Environment, kwargs, language: T.Optional[str] = None):
# Gather a list of all languages to support
self.language_list = [] # type: T.List[str]
if language is None:
compilers = None
if kwargs.get('native', False):
compilers = environment.coredata.compilers.build
else:
compilers = environment.coredata.compilers.host
candidates = ['c', 'cpp', 'fortran', 'objc', 'objcxx']
self.language_list += [x for x in candidates if x in compilers]
else:
self.language_list += [language]
# Add additional languages if required
if 'fortran' in self.language_list:
self.language_list += ['c']
# Ensure that the list is unique
self.language_list = list(set(self.language_list))
super().__init__('cmake', environment, kwargs, language=language)
self.name = name
self.is_libtool = False
# Store a copy of the CMake path on the object itself so it is
# stored in the pickled coredata and recovered.
self.cmakebin = None
self.cmakeinfo = None
# Where all CMake "build dirs" are located
self.cmake_root_dir = environment.scratch_dir
# T.List of successfully found modules
self.found_modules = []
# Initialize with None before the first return to avoid
# AttributeError exceptions in derived classes
self.traceparser = None # type: CMakeTraceParser
# TODO further evaluate always using MachineChoice.BUILD
self.cmakebin = CMakeExecutor(environment, CMakeDependency.class_cmake_version, self.for_machine, silent=self.silent)
if not self.cmakebin.found():
self.cmakebin = None
            msg = f'CMake binary for machine {self.for_machine} not found. Giving up.'
if self.required:
raise DependencyException(msg)
mlog.debug(msg)
return
# Setup the trace parser
self.traceparser = CMakeTraceParser(self.cmakebin.version(), self._get_build_dir())
cm_args = stringlistify(extract_as_list(kwargs, 'cmake_args'))
cm_args = check_cmake_args(cm_args)
if CMakeDependency.class_cmakeinfo[self.for_machine] is None:
CMakeDependency.class_cmakeinfo[self.for_machine] = self._get_cmake_info(cm_args)
self.cmakeinfo = CMakeDependency.class_cmakeinfo[self.for_machine]
if self.cmakeinfo is None:
raise self._gen_exception('Unable to obtain CMake system information')
package_version = kwargs.get('cmake_package_version', '')
if not isinstance(package_version, str):
raise DependencyException('Keyword "cmake_package_version" must be a string.')
components = [(x, True) for x in stringlistify(extract_as_list(kwargs, 'components'))]
modules = [(x, True) for x in stringlistify(extract_as_list(kwargs, 'modules'))]
modules += [(x, False) for x in stringlistify(extract_as_list(kwargs, 'optional_modules'))]
cm_path = stringlistify(extract_as_list(kwargs, 'cmake_module_path'))
cm_path = [x if os.path.isabs(x) else os.path.join(environment.get_source_dir(), x) for x in cm_path]
if cm_path:
cm_args.append('-DCMAKE_MODULE_PATH=' + ';'.join(cm_path))
if not self._preliminary_find_check(name, cm_path, self.cmakebin.get_cmake_prefix_paths(), environment.machines[self.for_machine]):
mlog.debug('Preliminary CMake check failed. Aborting.')
return
self._detect_dep(name, package_version, modules, components, cm_args)
def __repr__(self):
s = '<{0} {1}: {2} {3}>'
return s.format(self.__class__.__name__, self.name, self.is_found,
self.version_reqs)
def _get_cmake_info(self, cm_args):
mlog.debug("Extracting basic cmake information")
res = {}
# Try different CMake generators since specifying no generator may fail
# in cygwin for some reason
gen_list = []
# First try the last working generator
if CMakeDependency.class_working_generator is not None:
gen_list += [CMakeDependency.class_working_generator]
gen_list += CMakeDependency.class_cmake_generators
temp_parser = CMakeTraceParser(self.cmakebin.version(), self._get_build_dir())
toolchain = CMakeToolchain(self.env, self.for_machine, CMakeExecScope.DEPENDENCY, self._get_build_dir())
toolchain.write()
for i in gen_list:
mlog.debug('Try CMake generator: {}'.format(i if len(i) > 0 else 'auto'))
# Prepare options
cmake_opts = temp_parser.trace_args() + toolchain.get_cmake_args() + ['.']
cmake_opts += cm_args
if len(i) > 0:
cmake_opts = ['-G', i] + cmake_opts
# Run CMake
ret1, out1, err1 = self._call_cmake(cmake_opts, 'CMakePathInfo.txt')
# Current generator was successful
if ret1 == 0:
CMakeDependency.class_working_generator = i
break
mlog.debug(f'CMake failed to gather system information for generator {i} with error code {ret1}')
mlog.debug(f'OUT:\n{out1}\n\n\nERR:\n{err1}\n\n')
# Check if any generator succeeded
if ret1 != 0:
return None
try:
temp_parser.parse(err1)
except MesonException:
return None
def process_paths(l: T.List[str]) -> T.Set[str]:
if mesonlib.is_windows():
                # Cannot split on ':' on Windows because it's part of the drive letter
l = [x.split(os.pathsep) for x in l]
else:
# https://github.com/mesonbuild/meson/issues/7294
l = [re.split(r':|;', x) for x in l]
l = [x for sublist in l for x in sublist]
return set(l)
# Extract the variables and sanity check them
root_paths = process_paths(temp_parser.get_cmake_var('MESON_FIND_ROOT_PATH'))
root_paths.update(process_paths(temp_parser.get_cmake_var('MESON_CMAKE_SYSROOT')))
root_paths = sorted(root_paths)
root_paths = list(filter(lambda x: os.path.isdir(x), root_paths))
module_paths = process_paths(temp_parser.get_cmake_var('MESON_PATHS_LIST'))
rooted_paths = []
for j in [Path(x) for x in root_paths]:
for i in [Path(x) for x in module_paths]:
rooted_paths.append(str(j / i.relative_to(i.anchor)))
module_paths = sorted(module_paths.union(rooted_paths))
module_paths = list(filter(lambda x: os.path.isdir(x), module_paths))
archs = temp_parser.get_cmake_var('MESON_ARCH_LIST')
common_paths = ['lib', 'lib32', 'lib64', 'libx32', 'share']
for i in archs:
common_paths += [os.path.join('lib', i)]
res = {
'module_paths': module_paths,
'cmake_root': temp_parser.get_cmake_var('MESON_CMAKE_ROOT')[0],
'archs': archs,
'common_paths': common_paths
}
mlog.debug(' -- Module search paths: {}'.format(res['module_paths']))
mlog.debug(' -- CMake root: {}'.format(res['cmake_root']))
mlog.debug(' -- CMake architectures: {}'.format(res['archs']))
mlog.debug(' -- CMake lib search paths: {}'.format(res['common_paths']))
return res
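    # Illustrative sketch (hypothetical inputs, not from the original source):
    # on a POSIX host the nested process_paths() helper above splits CMake
    # path lists on both ':' and ';' and flattens them into a set, roughly:
    #
    #     >>> sorted(process_paths(['/usr/lib/cmake:/usr/share/cmake;/opt/cmake']))
    #     ['/opt/cmake', '/usr/lib/cmake', '/usr/share/cmake']
    #
    # On Windows only os.pathsep (';') is used, since ':' appears inside
    # drive letters such as 'C:\\'.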
@staticmethod
@functools.lru_cache(maxsize=None)
def _cached_listdir(path: str) -> T.Tuple[T.Tuple[str, str], ...]:
try:
return tuple((x, str(x).lower()) for x in os.listdir(path))
except OSError:
return ()
@staticmethod
@functools.lru_cache(maxsize=None)
def _cached_isdir(path: str) -> bool:
try:
return os.path.isdir(path)
except OSError:
return False
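    # Because both helpers above are wrapped in functools.lru_cache, repeated
    # probes of the same path during _preliminary_find_check() below are
    # answered from the cache instead of hitting the filesystem. A minimal
    # sketch (hypothetical path):
    #
    #     >>> CMakeDependency._cached_isdir('/usr/lib/cmake')  # stat() once
    #     True
    #     >>> CMakeDependency._cached_isdir('/usr/lib/cmake')  # served from cache
    #     True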
def _preliminary_find_check(self, name: str, module_path: T.List[str], prefix_path: T.List[str], machine: MachineInfo) -> bool:
lname = str(name).lower()
# Checks <path>, <path>/cmake, <path>/CMake
def find_module(path: str) -> bool:
for i in [path, os.path.join(path, 'cmake'), os.path.join(path, 'CMake')]:
if not self._cached_isdir(i):
continue
# Check the directory case insensitive
content = self._cached_listdir(i)
candidates = ['Find{}.cmake', '{}Config.cmake', '{}-config.cmake']
candidates = [x.format(name).lower() for x in candidates]
if any(x[1] in candidates for x in content):
return True
return False
# Search in <path>/(lib/<arch>|lib*|share) for cmake files
def search_lib_dirs(path: str) -> bool:
for i in [os.path.join(path, x) for x in self.cmakeinfo['common_paths']]:
if not self._cached_isdir(i):
continue
# Check <path>/(lib/<arch>|lib*|share)/cmake/<name>*/
cm_dir = os.path.join(i, 'cmake')
if self._cached_isdir(cm_dir):
content = self._cached_listdir(cm_dir)
content = list(filter(lambda x: x[1].startswith(lname), content))
for k in content:
if find_module(os.path.join(cm_dir, k[0])):
return True
# <path>/(lib/<arch>|lib*|share)/<name>*/
# <path>/(lib/<arch>|lib*|share)/<name>*/(cmake|CMake)/
content = self._cached_listdir(i)
content = list(filter(lambda x: x[1].startswith(lname), content))
for k in content:
if find_module(os.path.join(i, k[0])):
return True
return False
# Check the user provided and system module paths
for i in module_path + [os.path.join(self.cmakeinfo['cmake_root'], 'Modules')]:
if find_module(i):
return True
# Check the user provided prefix paths
for i in prefix_path:
if search_lib_dirs(i):
return True
# Check PATH
system_env = [] # type: T.List[str]
for i in os.environ.get('PATH', '').split(os.pathsep):
if i.endswith('/bin') or i.endswith('\\bin'):
i = i[:-4]
if i.endswith('/sbin') or i.endswith('\\sbin'):
i = i[:-5]
system_env += [i]
# Check the system paths
for i in self.cmakeinfo['module_paths'] + system_env:
if find_module(i):
return True
if search_lib_dirs(i):
return True
content = self._cached_listdir(i)
content = list(filter(lambda x: x[1].startswith(lname), content))
for k in content:
if search_lib_dirs(os.path.join(i, k[0])):
return True
# Mac framework support
if machine.is_darwin():
for j in ['{}.framework', '{}.app']:
    j = j.format(lname)
    # content holds (name, lowercased name) tuples, so match on the
    # lowercased element and use the original entry name in the path
    for k in content:
        if k[1] != j:
            continue
        if find_module(os.path.join(i, k[0], 'Resources')) or find_module(os.path.join(i, k[0], 'Version')):
            return True
# Check the environment path
env_path = os.environ.get(f'{name}_DIR')
if env_path and find_module(env_path):
return True
return False
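    # A short worked example (hypothetical dependency name): for name 'Foo',
    # find_module() above compares the lowercased directory entries against
    # the lowercased candidates, i.e. it accepts any of
    #
    #     FindFoo.cmake
    #     FooConfig.cmake
    #     Foo-config.cmake
    #
    # in <path>, <path>/cmake or <path>/CMake, regardless of case.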
def _detect_dep(self, name: str, package_version: str, modules: T.List[T.Tuple[str, bool]], components: T.List[T.Tuple[str, bool]], args: T.List[str]):
# Detect a dependency with CMake using the '--find-package' mode
# and the trace output (stderr)
#
# When the trace output is enabled CMake prints all functions with
# parameters to stderr as they are executed. Since CMake 3.4.0
# variables ("${VAR}") are also replaced in the trace output.
mlog.debug('\nDetermining dependency {!r} with CMake executable '
'{!r}'.format(name, self.cmakebin.executable_path()))
# Try different CMake generators since specifying no generator may fail
# in cygwin for some reason
gen_list = []
# First try the last working generator
if CMakeDependency.class_working_generator is not None:
gen_list += [CMakeDependency.class_working_generator]
gen_list += CMakeDependency.class_cmake_generators
# Map the components
comp_mapped = self._map_component_list(modules, components)
toolchain = CMakeToolchain(self.env, self.for_machine, CMakeExecScope.DEPENDENCY, self._get_build_dir())
toolchain.write()
for i in gen_list:
mlog.debug('Try CMake generator: {}'.format(i if len(i) > 0 else 'auto'))
# Prepare options
cmake_opts = []
cmake_opts += [f'-DNAME={name}']
cmake_opts += ['-DARCHS={}'.format(';'.join(self.cmakeinfo['archs']))]
cmake_opts += [f'-DVERSION={package_version}']
cmake_opts += ['-DCOMPS={}'.format(';'.join([x[0] for x in comp_mapped]))]
cmake_opts += args
cmake_opts += self.traceparser.trace_args()
cmake_opts += toolchain.get_cmake_args()
cmake_opts += self._extra_cmake_opts()
cmake_opts += ['.']
if len(i) > 0:
cmake_opts = ['-G', i] + cmake_opts
# Run CMake
ret1, out1, err1 = self._call_cmake(cmake_opts, self._main_cmake_file())
# Current generator was successful
if ret1 == 0:
CMakeDependency.class_working_generator = i
break
mlog.debug(f'CMake failed for generator {i} and package {name} with error code {ret1}')
mlog.debug(f'OUT:\n{out1}\n\n\nERR:\n{err1}\n\n')
# Check if any generator succeeded
if ret1 != 0:
return
try:
self.traceparser.parse(err1)
except CMakeException as e:
e = self._gen_exception(str(e))
if self.required:
raise
else:
self.compile_args = []
self.link_args = []
self.is_found = False
self.reason = e
return
# Whether the package is found or not is always stored in PACKAGE_FOUND
self.is_found = self.traceparser.var_to_bool('PACKAGE_FOUND')
if not self.is_found:
return
# Try to detect the version
vers_raw = self.traceparser.get_cmake_var('PACKAGE_VERSION')
if len(vers_raw) > 0:
self.version = vers_raw[0]
self.version = self.version.strip('"\' ')  # str.strip returns a new string
# Post-process the module list. Used in derived classes to modify the
# module list (append or prepend a string, etc.).
modules = self._map_module_list(modules, components)
autodetected_module_list = False
# Try guessing a CMake target if none is provided
if len(modules) == 0:
for i in self.traceparser.targets:
tg = i.lower()
lname = name.lower()
if f'{lname}::{lname}' == tg or lname == tg.replace('::', ''):
mlog.debug(f'Guessed CMake target \'{i}\'')
modules = [(i, True)]
autodetected_module_list = True
break
# Failed to guess a target --> try the old-style method
if len(modules) == 0:
incDirs = [x for x in self.traceparser.get_cmake_var('PACKAGE_INCLUDE_DIRS') if x]
defs = [x for x in self.traceparser.get_cmake_var('PACKAGE_DEFINITIONS') if x]
libs = [x for x in self.traceparser.get_cmake_var('PACKAGE_LIBRARIES') if x]
# Try to use old style variables if no module is specified
if len(libs) > 0:
self.compile_args = list(map(lambda x: f'-I{x}', incDirs)) + defs
self.link_args = libs
mlog.debug(f'using old-style CMake variables for dependency {name}')
mlog.debug(f'Include Dirs: {incDirs}')
mlog.debug(f'Compiler Definitions: {defs}')
mlog.debug(f'Libraries: {libs}')
return
# Even the old-style approach failed. Nothing else we can do here
self.is_found = False
raise self._gen_exception('CMake: failed to guess a CMake target for {}.\n'
'Try to explicitly specify one or more targets with the "modules" property.\n'
'Valid targets are:\n{}'.format(name, list(self.traceparser.targets.keys())))
# Set dependencies with CMake targets
# recognise arguments we should pass directly to the linker
reg_is_lib = re.compile(r'^(-l[a-zA-Z0-9_]+|-pthread|-delayload:[a-zA-Z0-9_\.]+|[a-zA-Z0-9_]+\.lib)$')
reg_is_maybe_bare_lib = re.compile(r'^[a-zA-Z0-9_]+$')
processed_targets = []
incDirs = []
compileDefinitions = []
compileOptions = []
libraries = []
for i, required in modules:
if i not in self.traceparser.targets:
if not required:
mlog.warning('CMake: Optional module', mlog.bold(self._original_module_name(i)), 'for', mlog.bold(name), 'was not found')
continue
raise self._gen_exception('CMake: invalid module {} for {}.\n'
'Try to explicitly specify one or more targets with the "modules" property.\n'
'Valid targets are:\n{}'.format(self._original_module_name(i), name, list(self.traceparser.targets.keys())))
targets = [i]
if not autodetected_module_list:
self.found_modules += [i]
while len(targets) > 0:
curr = targets.pop(0)
# Skip already processed targets
if curr in processed_targets:
continue
tgt = self.traceparser.targets[curr]
cfgs = []
cfg = ''
otherDeps = []
mlog.debug(tgt)
if 'INTERFACE_INCLUDE_DIRECTORIES' in tgt.properties:
incDirs += [x for x in tgt.properties['INTERFACE_INCLUDE_DIRECTORIES'] if x]
if 'INTERFACE_COMPILE_DEFINITIONS' in tgt.properties:
compileDefinitions += ['-D' + re.sub('^-D', '', x) for x in tgt.properties['INTERFACE_COMPILE_DEFINITIONS'] if x]
if 'INTERFACE_COMPILE_OPTIONS' in tgt.properties:
compileOptions += [x for x in tgt.properties['INTERFACE_COMPILE_OPTIONS'] if x]
if 'IMPORTED_CONFIGURATIONS' in tgt.properties:
cfgs = [x for x in tgt.properties['IMPORTED_CONFIGURATIONS'] if x]
cfg = cfgs[0]
if OptionKey('b_vscrt') in self.env.coredata.options:
is_debug = self.env.coredata.get_option(OptionKey('buildtype')) == 'debug'
if self.env.coredata.options[OptionKey('b_vscrt')].value in {'mdd', 'mtd'}:
is_debug = True
else:
is_debug = self.env.coredata.get_option(OptionKey('debug'))
if is_debug:
if 'DEBUG' in cfgs:
cfg = 'DEBUG'
elif 'RELEASE' in cfgs:
cfg = 'RELEASE'
else:
if 'RELEASE' in cfgs:
cfg = 'RELEASE'
if f'IMPORTED_IMPLIB_{cfg}' in tgt.properties:
libraries += [x for x in tgt.properties[f'IMPORTED_IMPLIB_{cfg}'] if x]
elif 'IMPORTED_IMPLIB' in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_IMPLIB'] if x]
elif f'IMPORTED_LOCATION_{cfg}' in tgt.properties:
libraries += [x for x in tgt.properties[f'IMPORTED_LOCATION_{cfg}'] if x]
elif 'IMPORTED_LOCATION' in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_LOCATION'] if x]
if 'INTERFACE_LINK_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['INTERFACE_LINK_LIBRARIES'] if x]
if f'IMPORTED_LINK_DEPENDENT_LIBRARIES_{cfg}' in tgt.properties:
otherDeps += [x for x in tgt.properties[f'IMPORTED_LINK_DEPENDENT_LIBRARIES_{cfg}'] if x]
elif 'IMPORTED_LINK_DEPENDENT_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['IMPORTED_LINK_DEPENDENT_LIBRARIES'] if x]
for j in otherDeps:
if j in self.traceparser.targets:
targets += [j]
elif reg_is_lib.match(j):
libraries += [j]
elif os.path.isabs(j) and os.path.exists(j):
libraries += [j]
elif self.env.machines.build.is_windows() and reg_is_maybe_bare_lib.match(j):
# On Windows, CMake library dependencies can be passed as bare library names,
# e.g. 'version' should translate into 'version.lib'. CMake brute-forces a
# combination of prefix/suffix combinations to find the right library, however
# as we do not have a compiler environment available to us, we cannot do the
# same, but must assume any bare argument passed which is not also a CMake
# target must be a system library we should try to link against
libraries += [f"{j}.lib"]
else:
mlog.warning('CMake: Dependency', mlog.bold(j), 'for', mlog.bold(name), 'target', mlog.bold(self._original_module_name(curr)), 'was not found')
processed_targets += [curr]
# Make sure all elements in the lists are unique and sorted
incDirs = sorted(set(incDirs))
compileDefinitions = sorted(set(compileDefinitions))
compileOptions = sorted(set(compileOptions))
libraries = sorted(set(libraries))
mlog.debug(f'Include Dirs: {incDirs}')
mlog.debug(f'Compiler Definitions: {compileDefinitions}')
mlog.debug(f'Compiler Options: {compileOptions}')
mlog.debug(f'Libraries: {libraries}')
self.compile_args = compileOptions + compileDefinitions + [f'-I{x}' for x in incDirs]
self.link_args = libraries
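    # Sketch of the property-to-argument mapping performed above, using a
    # hypothetical imported target (values are assumptions for illustration):
    #
    #     INTERFACE_INCLUDE_DIRECTORIES = ['/opt/foo/include']
    #     INTERFACE_COMPILE_DEFINITIONS = ['FOO_STATIC']
    #     IMPORTED_LOCATION_RELEASE     = ['/opt/foo/lib/libfoo.a']
    #
    # would produce
    #
    #     compile_args = ['-DFOO_STATIC', '-I/opt/foo/include']
    #     link_args    = ['/opt/foo/lib/libfoo.a']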
def _get_build_dir(self) -> Path:
build_dir = Path(self.cmake_root_dir) / f'cmake_{self.name}'
build_dir.mkdir(parents=True, exist_ok=True)
return build_dir
def _setup_cmake_dir(self, cmake_file: str) -> Path:
# Setup the CMake build environment and return the "build" directory
build_dir = self._get_build_dir()
# Remove old CMake cache so we can try out multiple generators
cmake_cache = build_dir / 'CMakeCache.txt'
cmake_files = build_dir / 'CMakeFiles'
if cmake_cache.exists():
cmake_cache.unlink()
shutil.rmtree(cmake_files.as_posix(), ignore_errors=True)
# Insert language parameters into the CMakeLists.txt and write new CMakeLists.txt
cmake_txt = mesondata['dependencies/data/' + cmake_file].data
# In general, some Fortran CMake find_package() also require C language enabled,
# even if nothing from C is directly used. An easy Fortran example that fails
# without C language is
# find_package(Threads)
# To make this general to any other language that might need it, we use a
# list of all enabled languages and expand it in the CMake
# project(... LANGUAGES ...) statement.
from ..cmake import language_map
cmake_language = [language_map[x] for x in self.language_list if x in language_map]
if not cmake_language:
cmake_language += ['NONE']
cmake_txt = textwrap.dedent("""
cmake_minimum_required(VERSION ${{CMAKE_VERSION}})
project(MesonTemp LANGUAGES {})
""").format(' '.join(cmake_language)) + cmake_txt
cm_file = build_dir / 'CMakeLists.txt'
cm_file.write_text(cmake_txt)
mlog.cmd_ci_include(cm_file.absolute().as_posix())
return build_dir
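    # For illustration (assuming language_map maps 'fortran' to 'Fortran'):
    # with self.language_list == ['fortran'] the generated CMakeLists.txt
    # starts with
    #
    #     cmake_minimum_required(VERSION ${CMAKE_VERSION})
    #     project(MesonTemp LANGUAGES Fortran)
    #
    # followed by the contents of the packaged cmake_file.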
def _call_cmake(self, args, cmake_file: str, env=None):
build_dir = self._setup_cmake_dir(cmake_file)
return self.cmakebin.call(args, build_dir, env=env)
@staticmethod
def get_methods():
return [DependencyMethods.CMAKE]
def log_tried(self):
return self.type_name
def log_details(self) -> str:
modules = [self._original_module_name(x) for x in self.found_modules]
modules = sorted(set(modules))
if modules:
return 'modules: ' + ', '.join(modules)
return ''
def get_variable(self, *, cmake: T.Optional[str] = None, pkgconfig: T.Optional[str] = None,
configtool: T.Optional[str] = None, internal: T.Optional[str] = None,
default_value: T.Optional[str] = None,
pkgconfig_define: T.Optional[T.List[str]] = None) -> T.Union[str, T.List[str]]:
if cmake and self.traceparser is not None:
try:
v = self.traceparser.vars[cmake]
except KeyError:
pass
else:
if len(v) == 1:
return v[0]
elif v:
return v
if default_value is not None:
return default_value
raise DependencyException(f'Could not get cmake variable and no default provided for {self!r}')
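# Usage sketch for CMakeDependency.get_variable() (hypothetical variable
# name): dep.get_variable(cmake='FOO_CONFIG_DIR', default_value='') returns
# a single string when the traced CMake variable has exactly one element,
# the whole list when it has several, and the default ('') when the variable
# was never set during detection.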
class DubDependency(ExternalDependency):
class_dubbin = None
def __init__(self, name, environment, kwargs):
super().__init__('dub', environment, kwargs, language='d')
self.name = name
self.compiler = super().get_compiler()
self.module_path = None
if 'required' in kwargs:
self.required = kwargs.get('required')
if DubDependency.class_dubbin is None:
self.dubbin = self._check_dub()
DubDependency.class_dubbin = self.dubbin
else:
self.dubbin = DubDependency.class_dubbin
if not self.dubbin:
if self.required:
raise DependencyException('DUB not found.')
self.is_found = False
return
mlog.debug('Determining dependency {!r} with DUB executable '
'{!r}'.format(name, self.dubbin.get_path()))
# we need to know the target architecture
arch = self.compiler.arch
# Ask dub for the package
ret, res = self._call_dubbin(['describe', name, '--arch=' + arch])
if ret != 0:
self.is_found = False
return
comp = self.compiler.get_id().replace('llvm', 'ldc').replace('gcc', 'gdc')
packages = []
description = json.loads(res)
for package in description['packages']:
packages.append(package['name'])
if package['name'] == name:
self.is_found = True
not_lib = True
if 'targetType' in package:
if package['targetType'] in ['library', 'sourceLibrary', 'staticLibrary', 'dynamicLibrary']:
not_lib = False
if not_lib:
mlog.error(mlog.bold(name), "found but it isn't a library")
self.is_found = False
return
self.module_path = self._find_right_lib_path(package['path'], comp, description, True, package['targetFileName'])
if not os.path.exists(self.module_path):
# check if the dependency was built for other archs
archs = [['x86_64'], ['x86'], ['x86', 'x86_mscoff']]
for a in archs:
description_a = copy.deepcopy(description)
description_a['architecture'] = a
arch_module_path = self._find_right_lib_path(package['path'], comp, description_a, True, package['targetFileName'])
if arch_module_path:
mlog.error(mlog.bold(name), "found but it wasn't compiled for", mlog.bold(arch))
self.is_found = False
return
mlog.error(mlog.bold(name), "found but it wasn't compiled with", mlog.bold(comp))
self.is_found = False
return
self.version = package['version']
self.pkg = package
if self.pkg['targetFileName'].endswith('.a'):
self.static = True
self.compile_args = []
for flag in self.pkg['dflags']:
self.compile_args.append(flag)  # dflags are compiler flags; link_args is (re)built below
for path in self.pkg['importPaths']:
self.compile_args.append('-I' + os.path.join(self.pkg['path'], path))
self.link_args = self.raw_link_args = []
for flag in self.pkg['lflags']:
self.link_args.append(flag)
self.link_args.append(os.path.join(self.module_path, self.pkg['targetFileName']))
# Handle dependencies
libs = []
def add_lib_args(field_name, target):
if field_name in target['buildSettings']:
for lib in target['buildSettings'][field_name]:
if lib not in libs:
libs.append(lib)
if os.name != 'nt':
pkgdep = PkgConfigDependency(lib, environment, {'required': 'true', 'silent': 'true'})
for arg in pkgdep.get_compile_args():
self.compile_args.append(arg)
for arg in pkgdep.get_link_args():
self.link_args.append(arg)
for arg in pkgdep.get_link_args(raw=True):
self.raw_link_args.append(arg)
for target in description['targets']:
if target['rootPackage'] in packages:
add_lib_args('libs', target)
add_lib_args(f'libs-{platform.machine()}', target)
for file in target['buildSettings']['linkerFiles']:
lib_path = self._find_right_lib_path(file, comp, description)
if lib_path:
self.link_args.append(lib_path)
else:
self.is_found = False
def get_compiler(self):
return self.compiler
def _find_right_lib_path(self, default_path, comp, description, folder_only=False, file_name=''):
module_path = lib_file_name = ''
if folder_only:
module_path = default_path
lib_file_name = file_name
else:
module_path = os.path.dirname(default_path)
lib_file_name = os.path.basename(default_path)
module_build_path = os.path.join(module_path, '.dub', 'build')
# If default_path is a path to lib file and
# directory of lib don't have subdir '.dub/build'
if not os.path.isdir(module_build_path) and os.path.isfile(default_path):
if folder_only:
return module_path
else:
return default_path
# Get D version implemented in the compiler
# gdc doesn't support this
ret, res = self._call_dubbin(['--version'])
if ret != 0:
mlog.error('Failed to run', mlog.bold(comp))
return ''
d_ver = re.search(r'v[0-9]\.[0-9][0-9][0-9]\.[0-9]', res)  # Ex.: v2.081.2
if d_ver is not None:
d_ver = d_ver.group().rsplit('.', 1)[0].replace('v', '').replace('.', '') # Fix structure. Ex.: 2081
else:
d_ver = '' # gdc
if not os.path.isdir(module_build_path):
return ''
# Ex.: library-debug-linux.posix-x86_64-ldc_2081-EF934983A3319F8F8FF2F0E107A363BA
build_name = '-{}-{}-{}-{}_{}'.format(description['buildType'], '.'.join(description['platform']), '.'.join(description['architecture']), comp, d_ver)
for entry in os.listdir(module_build_path):
if build_name in entry:
for file in os.listdir(os.path.join(module_build_path, entry)):
if file == lib_file_name:
if folder_only:
return os.path.join(module_build_path, entry)
else:
return os.path.join(module_build_path, entry, lib_file_name)
return ''
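    # Worked example of the directory-name matching above (values taken from
    # the inline comments, otherwise assumed): for buildType 'debug',
    # platform ['linux', 'posix'], architecture ['x86_64'], comp 'ldc' and
    # d_ver '2081', build_name is
    #
    #     '-debug-linux.posix-x86_64-ldc_2081'
    #
    # which matches entries such as
    # 'library-debug-linux.posix-x86_64-ldc_2081-EF934983A3319F8F8FF2F0E107A363BA'.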
def _call_dubbin(self, args, env=None):
p, out = Popen_safe(self.dubbin.get_command() + args, env=env)[0:2]
return p.returncode, out.strip()
def _call_compbin(self, args, env=None):
p, out = Popen_safe(self.compiler.get_exelist() + args, env=env)[0:2]
return p.returncode, out.strip()
def _check_dub(self):
dubbin = ExternalProgram('dub', silent=True)
if dubbin.found():
try:
p, out = Popen_safe(dubbin.get_command() + ['--version'])[0:2]
if p.returncode != 0:
mlog.warning('Found dub {!r} but couldn\'t run it'
''.format(' '.join(dubbin.get_command())))
# Set to False instead of None to signify that we've already
# searched for it and not found it
dubbin = False
except (FileNotFoundError, PermissionError):
dubbin = False
else:
dubbin = False
if dubbin:
mlog.log('Found DUB:', mlog.bold(dubbin.get_path()),
'(%s)' % out.strip())
else:
mlog.log('Found DUB:', mlog.red('NO'))
return dubbin
@staticmethod
def get_methods():
return [DependencyMethods.DUB]
class ExternalLibrary(ExternalDependency):
def __init__(self, name, link_args, environment, language, silent=False):
super().__init__('library', environment, {}, language=language)
self.name = name
self.language = language
self.is_found = False
if link_args:
self.is_found = True
self.link_args = link_args
if not silent:
if self.is_found:
mlog.log('Library', mlog.bold(name), 'found:', mlog.green('YES'))
else:
mlog.log('Library', mlog.bold(name), 'found:', mlog.red('NO'))
def get_link_args(self, language=None, **kwargs):
'''
External libraries detected using a compiler must only be used with
compatible code. For instance, Vala libraries (.vapi files) cannot be
used with C code, and not all Rust library types can be linked with
C-like code. Note that C++ libraries *can* be linked with C code with
a C++ linker (and vice-versa).
'''
# Using a vala library in a non-vala target, or a non-vala library in a vala target
# XXX: This should be extended to other non-C linkers such as Rust
if (self.language == 'vala' and language != 'vala') or \
(language == 'vala' and self.language != 'vala'):
return []
return super().get_link_args(**kwargs)
def get_partial_dependency(self, *, compile_args: bool = False,
link_args: bool = False, links: bool = False,
includes: bool = False, sources: bool = False):
# External library only has link_args, so ignore the rest of the
# interface.
new = copy.copy(self)
if not link_args:
new.link_args = []
return new
class ExtraFrameworkDependency(ExternalDependency):
system_framework_paths = None
def __init__(self, name, env, kwargs, language: T.Optional[str] = None):
paths = kwargs.get('paths', [])
super().__init__('extraframeworks', env, kwargs, language=language)
self.name = name
# Full path to framework directory
self.framework_path = None
if not self.clib_compiler:
raise DependencyException('No C-like compilers are available')
if self.system_framework_paths is None:
try:
self.system_framework_paths = self.clib_compiler.find_framework_paths(self.env)
except MesonException as e:
if 'non-clang' in str(e):
# Apple frameworks can only be found (and used) with the
# system compiler. It is not available so bail immediately.
self.is_found = False
return
raise
self.detect(name, paths)
def detect(self, name, paths):
if not paths:
paths = self.system_framework_paths
for p in paths:
mlog.debug(f'Looking for framework {name} in {p}')
# We need to know the exact framework path because it's used by the
# Qt5 dependency class, and for setting the include path. We also
# want to avoid searching in an invalid framework path which wastes
# time and can cause a false positive.
framework_path = self._get_framework_path(p, name)
if framework_path is None:
continue
# We want to prefer the specified paths (in order) over the system
# paths since these are "extra" frameworks.
# For example, Python2's framework is in /System/Library/Frameworks and
# Python3's framework is in /Library/Frameworks, but both are called
# Python.framework. We need to know for sure that the framework was
# found in the path we expect.
allow_system = p in self.system_framework_paths
args = self.clib_compiler.find_framework(name, self.env, [p], allow_system)
if args is None:
continue
self.link_args = args
self.framework_path = framework_path.as_posix()
self.compile_args = ['-F' + self.framework_path]
# We need to also add -I includes to the framework because all
# cross-platform projects such as OpenGL, Python, Qt, GStreamer,
# etc do not use "framework includes":
# https://developer.apple.com/library/archive/documentation/MacOSX/Conceptual/BPFrameworks/Tasks/IncludingFrameworks.html
incdir = self._get_framework_include_path(framework_path)
if incdir:
self.compile_args += ['-I' + incdir]
self.is_found = True
return
def _get_framework_path(self, path, name):
p = Path(path)
lname = name.lower()
for d in p.glob('*.framework/'):
if lname == d.name.rsplit('.', 1)[0].lower():
return d
return None
def _get_framework_latest_version(self, path):
versions = []
for each in path.glob('Versions/*'):
# macOS filesystems are usually case-insensitive
if each.name.lower() == 'current':
continue
versions.append(Version(each.name))
if len(versions) == 0:
# most system frameworks do not have a 'Versions' directory
return 'Headers'
return 'Versions/{}/Headers'.format(sorted(versions)[-1]._s)
def _get_framework_include_path(self, path):
# According to the spec, 'Headers' must always be a symlink to the
# Headers directory inside the currently-selected version of the
# framework, but sometimes frameworks are broken. Look in 'Versions'
# for the currently-selected version or pick the latest one.
trials = ('Headers', 'Versions/Current/Headers',
self._get_framework_latest_version(path))
for each in trials:
trial = path / each
if trial.is_dir():
return trial.as_posix()
return None
@staticmethod
def get_methods():
return [DependencyMethods.EXTRAFRAMEWORK]
def log_info(self):
return self.framework_path
def log_tried(self):
return 'framework'
class DependencyFactory:
"""Factory to get dependencies from multiple sources.
This class provides an initializer that takes a set of names and classes
for various kinds of dependencies. When the initialized object is called,
it returns a list of callables that return Dependency objects, to try in order.
:name: The name of the dependency. This will be passed as the name
    parameter of each dependency unless it is overridden on a per-type
    basis.
:methods: An ordered list of DependencyMethods. This is the order
dependencies will be returned in unless they are removed by the
_process_method function
:*_name: This will override the name passed to the corresponding class.
For example, if the name is 'zlib', but cmake calls the dependency
'Z', then using `cmake_name='Z'` will pass the name as 'Z' to cmake.
:*_class: A *type* or callable that creates a class, and has the
signature of an ExternalDependency
:system_class: If you pass DependencyMethods.SYSTEM in methods, you must
set this argument.
"""
def __init__(self, name: str, methods: T.List[DependencyMethods], *,
extra_kwargs: T.Optional[T.Dict[str, T.Any]] = None,
pkgconfig_name: T.Optional[str] = None,
pkgconfig_class: 'T.Type[PkgConfigDependency]' = PkgConfigDependency,
cmake_name: T.Optional[str] = None,
cmake_class: 'T.Type[CMakeDependency]' = CMakeDependency,
configtool_class: 'T.Optional[T.Type[ConfigToolDependency]]' = None,
framework_name: T.Optional[str] = None,
framework_class: 'T.Type[ExtraFrameworkDependency]' = ExtraFrameworkDependency,
system_class: 'T.Type[ExternalDependency]' = ExternalDependency):
if DependencyMethods.CONFIG_TOOL in methods and not configtool_class:
raise DependencyException('A configtool must have a custom class')
self.extra_kwargs = extra_kwargs or {}
self.methods = methods
self.classes = {
# Just attach the correct name right now, either the generic name
# or the method specific name.
DependencyMethods.EXTRAFRAMEWORK: functools.partial(framework_class, framework_name or name),
DependencyMethods.PKGCONFIG: functools.partial(pkgconfig_class, pkgconfig_name or name),
DependencyMethods.CMAKE: functools.partial(cmake_class, cmake_name or name),
DependencyMethods.SYSTEM: functools.partial(system_class, name),
DependencyMethods.CONFIG_TOOL: None,
}
if configtool_class is not None:
self.classes[DependencyMethods.CONFIG_TOOL] = functools.partial(configtool_class, name)
@staticmethod
def _process_method(method: DependencyMethods, env: Environment, for_machine: MachineChoice) -> bool:
"""Report whether a method is valid or not.
If the method is valid, return true, otherwise return false. This is
used in a list comprehension to filter methods that are not possible.
By default this only removes EXTRAFRAMEWORK dependencies on non-macOS platforms.
"""
# Extra frameworks are only valid for macOS and other apple products
if (method is DependencyMethods.EXTRAFRAMEWORK and
not env.machines[for_machine].is_darwin()):
return False
return True
def __call__(self, env: Environment, for_machine: MachineChoice,
kwargs: T.Dict[str, T.Any]) -> T.List['DependencyType']:
"""Return a list of Dependencies with the arguments already attached."""
methods = process_method_kw(self.methods, kwargs)
nwargs = self.extra_kwargs.copy()
nwargs.update(kwargs)
return [functools.partial(self.classes[m], env, nwargs) for m in methods
if self._process_method(m, env, for_machine)]
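# Usage sketch (an assumed registration, mirroring how factories are declared
# elsewhere in Meson): a dependency that pkg-config knows as 'netcdf' but
# whose CMake package is called 'NetCDF' could be registered as
#
#     netcdf_factory = DependencyFactory(
#         'netcdf',
#         [DependencyMethods.PKGCONFIG, DependencyMethods.CMAKE],
#         cmake_name='NetCDF',
#     )
#
# Calling netcdf_factory(env, for_machine, kwargs) then yields partials for
# PkgConfigDependency and CMakeDependency, to be tried in that order.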
def get_dep_identifier(name, kwargs) -> T.Tuple:
identifier = (name, )
for key, value in kwargs.items():
# 'version' is irrelevant for caching; the caller must check version matches
# 'native' is handled above with `for_machine`
# 'required' is irrelevant for caching; the caller handles it separately
# 'fallback' and 'allow_fallback' are not part of the cache because,
# once a dependency has been found through a fallback, it should
# be used for the rest of the Meson run.
# 'default_options' is only used in fallback case
if key in ('version', 'native', 'required', 'fallback', 'allow_fallback', 'default_options'):
continue
# All keyword arguments are strings, ints, or lists (or lists of lists)
if isinstance(value, list):
value = frozenset(listify(value))
identifier += (key, value)
return identifier
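# Illustrative example (hypothetical kwargs): for
#
#     get_dep_identifier('zlib', {'version': '>=1.2', 'static': True,
#                                 'modules': ['foo']})
#
# the identifier is ('zlib', 'static', True, 'modules', frozenset({'foo'})):
# 'version' is skipped, and list values are frozen so the tuple stays
# hashable for use as a cache key.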
display_name_map = {
'boost': 'Boost',
'cuda': 'CUDA',
'dub': 'DUB',
'gmock': 'GMock',
'gtest': 'GTest',
'hdf5': 'HDF5',
'llvm': 'LLVM',
'mpi': 'MPI',
'netcdf': 'NetCDF',
'openmp': 'OpenMP',
'wxwidgets': 'WxWidgets',
}
def find_external_dependency(name, env, kwargs):
assert name
required = kwargs.get('required', True)
if not isinstance(required, bool):
raise DependencyException('Keyword "required" must be a boolean.')
if not isinstance(kwargs.get('method', ''), str):
raise DependencyException('Keyword "method" must be a string.')
lname = name.lower()
if lname not in _packages_accept_language and 'language' in kwargs:
raise DependencyException(f'{name} dependency does not accept "language" keyword argument')
if not isinstance(kwargs.get('version', ''), (str, list)):
raise DependencyException('Keyword "Version" must be string or list.')
# display the dependency name with correct casing
display_name = display_name_map.get(lname, lname)
for_machine = MachineChoice.BUILD if kwargs.get('native', False) else MachineChoice.HOST
type_text = PerMachine('Build-time', 'Run-time')[for_machine] + ' dependency'
# build a list of dependency methods to try
candidates = _build_external_dependency_list(name, env, for_machine, kwargs)
pkg_exc = []
pkgdep = []
details = ''
for c in candidates:
# try this dependency method
try:
d = c()
d._check_version()
pkgdep.append(d)
except DependencyException as e:
pkg_exc.append(e)
mlog.debug(str(e))
else:
pkg_exc.append(None)
details = d.log_details()
if details:
details = '(' + details + ') '
if 'language' in kwargs:
details += 'for ' + d.language + ' '
# if the dependency was found
if d.found():
info = []
if d.version:
info.append(mlog.normal_cyan(d.version))
log_info = d.log_info()
if log_info:
info.append('(' + log_info + ')')
mlog.log(type_text, mlog.bold(display_name), details + 'found:', mlog.green('YES'), *info)
return d
# otherwise, the dependency could not be found
tried_methods = [d.log_tried() for d in pkgdep if d.log_tried()]
if tried_methods:
    tried = mlog.format_list(tried_methods)
else:
    tried = ''
mlog.log(type_text, mlog.bold(display_name), details + 'found:', mlog.red('NO'),
f'(tried {tried})' if tried else '')
if required:
# if an exception occurred with the first detection method, re-raise it
# (on the grounds that it came from the preferred dependency detection
# method)
if pkg_exc and pkg_exc[0]:
raise pkg_exc[0]
# we have a list of failed ExternalDependency objects, so we can report
# the methods we tried to find the dependency
raise DependencyException('Dependency "%s" not found' % (name) +
(', tried %s' % (tried) if tried else ''))
return NotFoundDependency(env)
def _build_external_dependency_list(name: str, env: Environment, for_machine: MachineChoice,
kwargs: T.Dict[str, T.Any]) -> T.List['DependencyType']:
# First check if the method is valid
if 'method' in kwargs and kwargs['method'] not in [e.value for e in DependencyMethods]:
raise DependencyException('method {!r} is invalid'.format(kwargs['method']))
# Is there a specific dependency detector for this dependency?
lname = name.lower()
if lname in packages:
# Create the list of dependency object constructors using a factory
# class method, if one exists, otherwise the list just consists of the
# constructor
if isinstance(packages[lname], type) and issubclass(packages[lname], Dependency):
dep = [functools.partial(packages[lname], env, kwargs)]
else:
dep = packages[lname](env, for_machine, kwargs)
return dep
candidates = []
# If it's explicitly requested, use the dub detection method (only)
if 'dub' == kwargs.get('method', ''):
candidates.append(functools.partial(DubDependency, name, env, kwargs))
return candidates
# If it's explicitly requested, use the pkgconfig detection method (only)
if 'pkg-config' == kwargs.get('method', ''):
candidates.append(functools.partial(PkgConfigDependency, name, env, kwargs))
return candidates
# If it's explicitly requested, use the CMake detection method (only)
if 'cmake' == kwargs.get('method', ''):
candidates.append(functools.partial(CMakeDependency, name, env, kwargs))
return candidates
# If it's explicitly requested, use the Extraframework detection method (only)
if 'extraframework' == kwargs.get('method', ''):
# On OSX, also try framework dependency detector
if env.machines[for_machine].is_darwin():
candidates.append(functools.partial(ExtraFrameworkDependency, name, env, kwargs))
return candidates
# Otherwise, just use the pkgconfig and cmake dependency detector
if 'auto' == kwargs.get('method', 'auto'):
candidates.append(functools.partial(PkgConfigDependency, name, env, kwargs))
# On OSX, also try framework dependency detector
if env.machines[for_machine].is_darwin():
candidates.append(functools.partial(ExtraFrameworkDependency, name, env, kwargs))
# Only use CMake as a last resort, since it might not work 100% (see #6113)
candidates.append(functools.partial(CMakeDependency, name, env, kwargs))
return candidates
def sort_libpaths(libpaths: T.List[str], refpaths: T.List[str]) -> T.List[str]:
"""Sort <libpaths> according to <refpaths>
It is intended to be used to sort -L flags returned by pkg-config.
Pkg-config can return flags in an arbitrary order, which cannot be relied on.
"""
if len(refpaths) == 0:
return list(libpaths)
def key_func(libpath):
common_lengths = []
for refpath in refpaths:
try:
common_path = os.path.commonpath([libpath, refpath])
except ValueError:
common_path = ''
common_lengths.append(len(common_path))
max_length = max(common_lengths)
max_index = common_lengths.index(max_length)
reversed_max_length = len(refpaths[max_index]) - max_length
return (max_index, reversed_max_length)
return sorted(libpaths, key=key_func)
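# A minimal sketch of the ordering (hypothetical paths):
#
#     >>> sort_libpaths(['/usr/lib', '/opt/foo/lib'],
#     ...               ['/opt/foo/lib/pkgconfig', '/usr/lib/pkgconfig'])
#     ['/opt/foo/lib', '/usr/lib']
#
# '/opt/foo/lib' sorts first because it shares its longest common prefix
# with an earlier reference path.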
def strip_system_libdirs(environment, for_machine: MachineChoice, link_args):
"""Remove -L<system path> arguments.
Leaving these in will break builds where a user has one version of a library
in the system path and wants to link against a different version outside of
the system path.
"""
exclude = {f'-L{p}' for p in environment.get_compiler_system_dirs(for_machine)}
return [l for l in link_args if l not in exclude]
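# Sketch (assuming the compiler reports /usr/lib as a system dir):
#
#     strip_system_libdirs(env, machine, ['-L/usr/lib', '-L/opt/foo/lib', '-lfoo'])
#
# returns ['-L/opt/foo/lib', '-lfoo']; only exact '-L<system dir>' entries
# are dropped, other link arguments pass through untouched.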
def process_method_kw(possible: T.Iterable[DependencyMethods], kwargs) -> T.List[DependencyMethods]:
method = kwargs.get('method', 'auto') # type: T.Union[DependencyMethods, str]
if isinstance(method, DependencyMethods):
return [method]
# TODO: try/except?
if method not in [e.value for e in DependencyMethods]:
raise DependencyException(f'method {method!r} is invalid')
method = DependencyMethods(method)
# This maps deprecated per-tool config methods to the new
# generic CONFIG_TOOL value.
if method in [DependencyMethods.SDLCONFIG, DependencyMethods.CUPSCONFIG,
DependencyMethods.PCAPCONFIG, DependencyMethods.LIBWMFCONFIG]:
mlog.warning(textwrap.dedent("""\
Configuration method {} has been deprecated in favor of
'config-tool'. This will be removed in a future version of
meson.""".format(method)))
method = DependencyMethods.CONFIG_TOOL
# Set the detection method. If the method is set to auto, use any available method.
# If method is set to a specific string, allow only that detection method.
if method == DependencyMethods.AUTO:
methods = list(possible)
elif method in possible:
methods = [method]
else:
raise DependencyException(
'Unsupported detection method: {}, allowed methods are {}'.format(
method.value,
mlog.format_list([x.value for x in [DependencyMethods.AUTO] + list(possible)])))
return methods
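# Illustrative behaviour (assuming possible == [DependencyMethods.PKGCONFIG,
# DependencyMethods.CMAKE]):
#
#     process_method_kw(possible, {'method': 'auto'})   # -> [PKGCONFIG, CMAKE]
#     process_method_kw(possible, {'method': 'cmake'})  # -> [CMAKE]
#     process_method_kw(possible, {'method': 'dub'})    # raises DependencyException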
if T.TYPE_CHECKING:
FactoryType = T.TypeVar('FactoryType', bound=T.Callable[..., T.List[T.Callable[[], 'Dependency']]])
def factory_methods(methods: T.Set[DependencyMethods]) -> T.Callable[['FactoryType'], 'FactoryType']:
"""Decorator for handling methods for dependency factory functions.
This helps to make factory functions self documenting
>>> @factory_methods([DependencyMethods.PKGCONFIG, DependencyMethods.CMAKE])
>>> def factory(env: Environment, for_machine: MachineChoice, kwargs: T.Dict[str, T.Any], methods: T.List[DependencyMethods]) -> T.List[T.Callable[[], 'Dependency']]:
>>> pass
"""
def inner(func: 'FactoryType') -> 'FactoryType':
@functools.wraps(func)
def wrapped(env: Environment, for_machine: MachineChoice, kwargs: T.Dict[str, T.Any]) -> T.List[T.Callable[[], 'Dependency']]:
return func(env, for_machine, kwargs, process_method_kw(methods, kwargs))
return T.cast('FactoryType', wrapped)
return inner
def detect_compiler(name: str, env: Environment, for_machine: MachineChoice,
language: T.Optional[str]) -> T.Optional['CompilerType']:
"""Given a language and environment find the compiler used."""
compilers = env.coredata.compilers[for_machine]
# Set the compiler for this dependency if a language is specified,
# else try to pick something that looks usable.
if language:
if language not in compilers:
m = name.capitalize() + ' requires a {0} compiler, but ' \
'{0} is not in the list of project languages'
raise DependencyException(m.format(language.capitalize()))
return compilers[language]
else:
for lang in clib_langs:
try:
return compilers[lang]
except KeyError:
continue
return None
|
pexip/meson
|
mesonbuild/dependencies/base.py
|
Python
|
apache-2.0
| 102,150
|
[
"NetCDF"
] |
8e2be49e6b557cec8fc0bf03e69fc25ef98de1b2583531a105a4eb70e9c0cf7d
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This package implements modules for input and output to and from QChem.
"""
|
dongsenfo/pymatgen
|
pymatgen/io/qchem/__init__.py
|
Python
|
mit
| 189
|
[
"pymatgen"
] |
eb1151064bfeee78a40de32525e686513f552bda7b7ca4b51184b08de345f066
|
from __future__ import unicode_literals
import json
import nexmo
import pytz
import six
from context_processors import GroupPermWrapper
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from decimal import Decimal
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core import mail
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test.utils import override_settings
from django.utils import timezone
from mock import patch, Mock
from smartmin.tests import SmartminTest
from temba.airtime.models import AirtimeTransfer
from temba.api.models import APIToken, Resthook
from temba.campaigns.models import Campaign, CampaignEvent
from temba.channels.models import Channel
from temba.contacts.models import Contact, ContactGroup, TEL_SCHEME, TWITTER_SCHEME
from temba.flows.models import Flow, ActionSet
from temba.locations.models import AdminBoundary
from temba.middleware import BrandingMiddleware
from temba.msgs.models import Label, Msg, INCOMING
from temba.orgs.models import UserSettings, NEXMO_SECRET, NEXMO_KEY
from temba.tests import TembaTest, MockResponse, MockTwilioClient, MockRequestValidator, FlowFileTest
from temba.triggers.models import Trigger
from temba.utils.email import link_components
from temba.utils import languages, dict_to_struct
from .models import Org, OrgEvent, TopUp, Invitation, Language, DAYFIRST, MONTHFIRST, CURRENT_EXPORT_VERSION
from .models import CreditAlert, ORG_CREDIT_OVER, ORG_CREDIT_LOW, ORG_CREDIT_EXPIRING
from .models import UNREAD_FLOW_MSGS, UNREAD_INBOX_MSGS, TopUpCredits
from .models import WHITELISTED, SUSPENDED, RESTORED
from .tasks import squash_topupcredits
class OrgContextProcessorTest(TembaTest):
def test_group_perms_wrapper(self):
administrators = Group.objects.get(name="Administrators")
editors = Group.objects.get(name="Editors")
viewers = Group.objects.get(name="Viewers")
administrators_wrapper = GroupPermWrapper(administrators)
self.assertTrue(administrators_wrapper['msgs']['msg_api'])
self.assertTrue(administrators_wrapper["msgs"]["msg_inbox"])
editors_wrapper = GroupPermWrapper(editors)
self.assertFalse(editors_wrapper["msgs"]["org_plan"])
self.assertTrue(editors_wrapper["msgs"]["msg_inbox"])
viewers_wrapper = GroupPermWrapper(viewers)
self.assertFalse(viewers_wrapper["msgs"]["msg_api"])
self.assertTrue(viewers_wrapper["msgs"]["msg_inbox"])
class OrgTest(TembaTest):
def test_get_org_users(self):
org_users = self.org.get_org_users()
self.assertTrue(self.user in org_users)
self.assertTrue(self.surveyor in org_users)
self.assertTrue(self.editor in org_users)
self.assertTrue(self.admin in org_users)
# should be ordered by email
self.assertEqual(self.admin, org_users[0])
self.assertEqual(self.editor, org_users[1])
self.assertEqual(self.surveyor, org_users[2])
self.assertEqual(self.user, org_users[3])
def test_get_unique_slug(self):
self.org.slug = 'allo'
self.org.save()
self.assertEqual(Org.get_unique_slug('foo'), 'foo')
self.assertEqual(Org.get_unique_slug('Which part?'), 'which-part')
self.assertEqual(Org.get_unique_slug('Allo'), 'allo-2')
def test_languages(self):
self.assertEqual(self.org.get_language_codes(), set())
self.org.set_languages(self.admin, ['eng', 'fre'], 'eng')
self.org.refresh_from_db()
self.assertEqual({l.name for l in self.org.languages.all()}, {"English", "French"})
self.assertEqual(self.org.primary_language.name, "English")
self.assertEqual(self.org.get_language_codes(), {'eng', 'fre'})
self.org.set_languages(self.admin, ['eng', 'kin'], 'kin')
self.org.refresh_from_db()
self.assertEqual({l.name for l in self.org.languages.all()}, {"English", "Kinyarwanda"})
self.assertEqual(self.org.primary_language.name, "Kinyarwanda")
self.assertEqual(self.org.get_language_codes(), {'eng', 'kin'})
def test_get_channel_countries(self):
self.assertEqual(self.org.get_channel_countries(), [])
self.org.connect_transferto('mylogin', 'api_token', self.admin)
self.assertEqual(self.org.get_channel_countries(), [dict(code='RW', name='Rwanda', currency_name='Rwanda Franc',
currency_code='RWF')])
Channel.create(self.org, self.user, 'US', 'A', None, "+12001112222", gcm_id="asdf", secret="asdf")
self.assertEqual(self.org.get_channel_countries(), [dict(code='RW', name='Rwanda', currency_name='Rwanda Franc',
currency_code='RWF'),
dict(code='US', name='United States',
currency_name='US Dollar', currency_code='USD')])
Channel.create(self.org, self.user, None, 'TT', name="Twitter Channel",
address="billy_bob", role="SR", scheme='twitter')
self.assertEqual(self.org.get_channel_countries(), [dict(code='RW', name='Rwanda', currency_name='Rwanda Franc',
currency_code='RWF'),
dict(code='US', name='United States',
currency_name='US Dollar', currency_code='USD')])
Channel.create(self.org, self.user, 'US', 'A', None, "+12001113333", gcm_id="qwer", secret="qwer")
self.assertEqual(self.org.get_channel_countries(), [dict(code='RW', name='Rwanda', currency_name='Rwanda Franc',
currency_code='RWF'),
dict(code='US', name='United States',
currency_name='US Dollar', currency_code='USD')])
def test_edit(self):
# use a manager now
self.login(self.admin)
# can we see the edit page
response = self.client.get(reverse('orgs.org_edit'))
self.assertEquals(200, response.status_code)
# update the name and slug of the organization
data = dict(name="Temba", timezone="Africa/Kigali", date_format=DAYFIRST, slug="nice temba")
response = self.client.post(reverse('orgs.org_edit'), data)
self.assertTrue('slug' in response.context['form'].errors)
data = dict(name="Temba", timezone="Africa/Kigali", date_format=MONTHFIRST, slug="nice-temba")
response = self.client.post(reverse('orgs.org_edit'), data)
self.assertEquals(302, response.status_code)
org = Org.objects.get(pk=self.org.pk)
self.assertEquals("Temba", org.name)
self.assertEquals("nice-temba", org.slug)
def test_recommended_channel(self):
self.org.timezone = pytz.timezone('Africa/Nairobi')
self.org.save()
self.assertEquals(self.org.get_recommended_channel(), 'africastalking')
self.org.timezone = pytz.timezone('America/Phoenix')
self.org.save()
self.assertEquals(self.org.get_recommended_channel(), 'twilio')
self.org.timezone = pytz.timezone('Asia/Jakarta')
self.org.save()
self.assertEquals(self.org.get_recommended_channel(), 'hub9')
self.org.timezone = pytz.timezone('Africa/Mogadishu')
self.org.save()
self.assertEquals(self.org.get_recommended_channel(), 'shaqodoon')
self.org.timezone = pytz.timezone('Europe/Amsterdam')
self.org.save()
self.assertEquals(self.org.get_recommended_channel(), 'nexmo')
self.org.timezone = pytz.timezone('Africa/Kigali')
self.org.save()
self.assertEquals(self.org.get_recommended_channel(), 'android')
def test_country(self):
country_url = reverse('orgs.org_country')
# can't see this page if not logged in
self.assertLoginRedirect(self.client.get(country_url))
# login as admin instead
self.login(self.admin)
response = self.client.get(country_url)
self.assertEquals(200, response.status_code)
# save with Rwanda as a country
response = self.client.post(country_url, dict(country=AdminBoundary.objects.get(name='Rwanda').pk))
# assert it has changed
org = Org.objects.get(pk=self.org.pk)
self.assertEqual("Rwanda", six.text_type(org.country))
self.assertEqual("RW", org.get_country_code())
# set our admin boundary name to something invalid
org.country.name = 'Fantasia'
org.country.save()
# getting our country code should now fall back to our channel
self.assertEqual('RW', org.get_country_code())
# clear it out
self.client.post(country_url, dict(country=''))
# assert it has been cleared
org = Org.objects.get(pk=self.org.pk)
self.assertFalse(org.country)
self.assertEquals('RW', org.get_country_code())
# remove all our channels so we no longer have a backdown
org.channels.all().delete()
org = Org.objects.get(pk=self.org.pk)
# now we really have no way to determine our country code
self.assertIsNone(org.get_country_code())
def test_plans(self):
self.contact = self.create_contact("Joe", "+250788123123")
self.create_msg(direction=INCOMING, contact=self.contact, text="Orange")
# check start and end date for this plan
self.assertEquals(timezone.now().date(), self.org.current_plan_start())
self.assertEquals(timezone.now().date() + relativedelta(months=1), self.org.current_plan_end())
# check our credits
self.login(self.admin)
response = self.client.get(reverse('orgs.org_home'))
self.assertContains(response, "999")
# view our topups
response = self.client.get(reverse('orgs.topup_list'))
# should say we have 1,000 credits too
self.assertContains(response, "999")
# and that we have 999 credits left on our topup
self.assertContains(response, "1 of 1,000 Credits Used")
# our receipt should show that the topup was free
with patch('stripe.Charge.retrieve') as stripe:
stripe.return_value = ''
response = self.client.get(reverse('orgs.topup_read', args=[TopUp.objects.filter(org=self.org).first().pk]))
self.assertContains(response, '1000 Credits')
def test_user_update(self):
update_url = reverse('orgs.user_edit')
login_url = reverse('users.user_login')
# no access if anonymous
response = self.client.get(update_url)
self.assertRedirect(response, login_url)
self.login(self.admin)
# change the user language
post_data = dict(language='pt-br', first_name='Admin', last_name='User', email='administrator@temba.com', current_password='Administrator')
response = self.client.post(update_url, post_data)
self.assertRedirect(response, reverse('orgs.org_home'))
# check that our user settings have changed
settings = self.admin.get_settings()
self.assertEquals('pt-br', settings.language)
def test_usersettings(self):
self.login(self.admin)
post_data = dict(tel='+250788382382')
self.client.post(reverse('orgs.usersettings_phone'), post_data)
self.assertEquals('+250 788 382 382', UserSettings.objects.get(user=self.admin).get_tel_formatted())
post_data = dict(tel='bad number')
response = self.client.post(reverse('orgs.usersettings_phone'), post_data)
self.assertEquals(response.context['form'].errors['tel'][0], 'Invalid phone number, try again.')
def test_org_suspension(self):
from temba.flows.models import FlowRun
self.login(self.admin)
self.org.set_suspended()
self.org.refresh_from_db()
self.assertEqual(True, self.org.is_suspended())
self.assertEqual(0, Msg.objects.all().count())
self.assertEqual(0, FlowRun.objects.all().count())
# while we are suspended, we can't send broadcasts
send_url = reverse('msgs.broadcast_send')
mark = self.create_contact('Mark', number='+12065551212')
post_data = dict(text="send me ur bank account login im ur friend.", omnibox="c-%s" % mark.uuid)
response = self.client.post(send_url, post_data, follow=True)
self.assertEquals('Sorry, your account is currently suspended. To enable sending messages, please contact support.',
response.context['form'].errors['__all__'][0])
# we also can't start flows
flow = self.create_flow()
post_data = dict(omnibox="c-%s" % mark.uuid, restart_participants='on')
response = self.client.post(reverse('flows.flow_broadcast', args=[flow.pk]), post_data, follow=True)
self.assertEquals('Sorry, your account is currently suspended. To enable sending messages, please contact support.',
response.context['form'].errors['__all__'][0])
# or use the api to do either
def postAPI(url, data):
response = self.client.post(url + ".json", json.dumps(data), content_type="application/json", HTTP_X_FORWARDED_HTTPS='https')
if response.content:
response.json = response.json()
return response
url = reverse('api.v1.broadcasts')
response = postAPI(url, dict(contacts=[mark.uuid], text="You are a distant cousin to a wealthy person."))
self.assertContains(response, "Sorry, your account is currently suspended. To enable sending messages, please contact support.", status_code=400)
url = reverse('api.v1.runs')
response = postAPI(url, dict(flow_uuid=flow.uuid, phone="+250788123123"))
self.assertContains(response, "Sorry, your account is currently suspended. To enable sending messages, please contact support.", status_code=400)
# still no messages or runs
self.assertEqual(0, Msg.objects.all().count())
self.assertEqual(0, FlowRun.objects.all().count())
# unsuspend our org and start a flow
self.org.set_restored()
post_data = dict(omnibox="c-%s" % mark.uuid, restart_participants='on')
response = self.client.post(reverse('flows.flow_broadcast', args=[flow.pk]), post_data, follow=True)
self.assertEqual(1, FlowRun.objects.all().count())
def test_webhook_headers(self):
update_url = reverse('orgs.org_webhook')
login_url = reverse('users.user_login')
# no access if anonymous
response = self.client.get(update_url)
self.assertRedirect(response, login_url)
self.login(self.admin)
response = self.client.get(update_url)
self.assertEquals(200, response.status_code)
# set a webhook with headers
post_data = response.context['form'].initial
post_data['webhook'] = 'http://webhooks.uniceflabs.org'
post_data['header_1_key'] = 'Authorization'
post_data['header_1_value'] = 'Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=='
response = self.client.post(update_url, post_data)
self.assertEquals(302, response.status_code)
self.assertRedirect(response, reverse('orgs.org_home'))
# check that our webhook settings have changed
org = Org.objects.get(pk=self.org.pk)
self.assertEquals('http://webhooks.uniceflabs.org', org.get_webhook_url())
self.assertDictEqual({'Authorization': 'Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=='}, org.get_webhook_headers())
def test_org_administration(self):
manage_url = reverse('orgs.org_manage')
update_url = reverse('orgs.org_update', args=[self.org.pk])
login_url = reverse('users.user_login')
# no access to anon
response = self.client.get(manage_url)
self.assertRedirect(response, login_url)
response = self.client.get(update_url)
self.assertRedirect(response, login_url)
# or admins
self.login(self.admin)
response = self.client.get(manage_url)
self.assertRedirect(response, login_url)
response = self.client.get(update_url)
self.assertRedirect(response, login_url)
# only superuser
self.login(self.superuser)
response = self.client.get(manage_url)
self.assertEquals(200, response.status_code)
self.assertNotContains(response, "(Suspended)")
self.org.set_suspended()
response = self.client.get(manage_url)
self.assertContains(response, "(Suspended)")
# should contain our test org
self.assertContains(response, "Temba")
# and can go to that org
response = self.client.get(update_url)
self.assertEquals(200, response.status_code)
# change to the trial plan
post_data = {
'name': 'Temba',
'brand': 'rapidpro.io',
'plan': 'TRIAL',
'language': '',
'country': '',
'primary_language': '',
'timezone': pytz.timezone("Africa/Kigali"),
'config': '{}',
'date_format': 'D',
'webhook': None,
'webhook_events': 0,
'parent': '',
'viewers': [self.user.id],
'editors': [self.editor.id],
'administrators': [self.admin.id],
'surveyors': [self.surveyor.id],
'surveyor_password': None
}
response = self.client.post(update_url, post_data)
self.assertEquals(302, response.status_code)
# restore
post_data['status'] = RESTORED
response = self.client.post(update_url, post_data)
self.org.refresh_from_db()
self.assertFalse(self.org.is_suspended())
# white list
post_data['status'] = WHITELISTED
response = self.client.post(update_url, post_data)
self.org.refresh_from_db()
self.assertTrue(self.org.is_whitelisted())
# suspend
post_data['status'] = SUSPENDED
response = self.client.post(update_url, post_data)
self.org.refresh_from_db()
self.assertTrue(self.org.is_suspended())
def test_accounts(self):
url = reverse('orgs.org_accounts')
self.login(self.admin)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'If you use the RapidPro Surveyor application to run flows offline')
Org.objects.create(name="Another Org", timezone="Africa/Kigali", country=self.country,
brand='rapidpro.io', created_by=self.user, modified_by=self.user,
surveyor_password='nyaruka')
response = self.client.post(url, dict(surveyor_password='nyaruka'))
self.org.refresh_from_db()
self.assertContains(response, 'This password is not valid. Choose a new password and try again.')
self.assertIsNone(self.org.surveyor_password)
# now try again, but with a unique password
response = self.client.post(url, dict(surveyor_password='unique password'))
self.org.refresh_from_db()
self.assertEqual('unique password', self.org.surveyor_password)
# add an extra editor
editor = self.create_user('EditorTwo')
self.org.editors.add(editor)
self.surveyor.delete()
# fetch it as a formax so we can inspect the summary
response = self.client.get(url, HTTP_X_FORMAX=1, HTTP_X_PJAX=1)
self.assertContains(response, '1 Administrator')
self.assertContains(response, '2 Editors')
self.assertContains(response, '1 Viewer')
self.assertContains(response, '0 Surveyors')
def test_refresh_tokens(self):
self.login(self.admin)
url = reverse('orgs.org_home')
response = self.client.get(url)
# admin should have a token
token = APIToken.objects.get(user=self.admin)
# and it should be on the page
self.assertContains(response, token.key)
# let's refresh it
self.client.post(reverse('api.apitoken_refresh'))
# visit our account page again
response = self.client.get(url)
# old token no longer there
self.assertNotContains(response, token.key)
# old token now inactive
token.refresh_from_db()
self.assertFalse(token.is_active)
# there is a new token for this user
new_token = APIToken.objects.get(user=self.admin, is_active=True)
self.assertNotEqual(new_token.key, token.key)
self.assertContains(response, new_token.key)
# can't refresh if logged in as viewer
self.login(self.user)
response = self.client.post(reverse('api.apitoken_refresh'))
self.assertLoginRedirect(response)
# or just not an org user
self.login(self.non_org_user)
response = self.client.post(reverse('api.apitoken_refresh'))
self.assertLoginRedirect(response)
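# test_manage_accounts covers changing user roles (which deactivates API tokens tied to removed roles),
# inviting new users by email and what happens when the admin removes themselves from the org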
@override_settings(SEND_EMAILS=True)
def test_manage_accounts(self):
url = reverse('orgs.org_manage_accounts')
self.login(self.admin)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# give users an API token and give admin and editor an additional surveyor-role token
APIToken.get_or_create(self.org, self.admin)
APIToken.get_or_create(self.org, self.editor)
APIToken.get_or_create(self.org, self.surveyor)
APIToken.get_or_create(self.org, self.admin, role=Group.objects.get(name="Surveyors"))
APIToken.get_or_create(self.org, self.editor, role=Group.objects.get(name="Surveyors"))
# we have 19 fields in the form including 16 checkboxes for the four users, an email field, a user group field
# and 'loc' field.
expected_fields = {'invite_emails', 'invite_group', 'loc'}
for user in (self.surveyor, self.user, self.editor, self.admin):
for group in ('administrators', 'editors', 'viewers', 'surveyors'):
expected_fields.add(group + '_%d' % user.pk)
self.assertEqual(set(response.context['form'].fields.keys()), expected_fields)
self.assertEqual(response.context['form'].initial, {
'administrators_%d' % self.admin.pk: True,
'editors_%d' % self.editor.pk: True,
'viewers_%d' % self.user.pk: True,
'surveyors_%d' % self.surveyor.pk: True
})
self.assertEqual(response.context['form'].fields['invite_emails'].initial, None)
self.assertEqual(response.context['form'].fields['invite_group'].initial, 'V')
# keep admin as admin, editor as editor, but make user an editor too, and remove surveyor
post_data = {
'administrators_%d' % self.admin.pk: 'on',
'editors_%d' % self.editor.pk: 'on',
'editors_%d' % self.user.pk: 'on',
'invite_emails': "",
'invite_group': "V"
}
response = self.client.post(url, post_data)
self.assertRedirect(response, reverse('orgs.org_manage_accounts'))
self.org.refresh_from_db()
self.assertEqual(set(self.org.administrators.all()), {self.admin})
self.assertEqual(set(self.org.editors.all()), {self.user, self.editor})
self.assertEqual(set(self.org.viewers.all()), set())
self.assertEqual(set(self.org.surveyors.all()), set())
# our surveyor's API token will have been deleted
self.assertEqual(self.admin.api_tokens.filter(is_active=True).count(), 2)
self.assertEqual(self.editor.api_tokens.filter(is_active=True).count(), 2)
self.assertEqual(self.surveyor.api_tokens.filter(is_active=True).count(), 0)
# next we leave existing roles unchanged, but try to invite a new user to be an admin with an invalid email address
post_data['invite_emails'] = "norkans7gmail.com"
post_data['invite_group'] = 'A'
response = self.client.post(url, post_data)
self.assertFormError(response, 'form', 'invite_emails', "One of the emails you entered is invalid.")
# try again with valid email
post_data['invite_emails'] = "norkans7@gmail.com"
response = self.client.post(url, post_data)
self.assertRedirect(response, reverse('orgs.org_manage_accounts'))
# an invitation is created
invitation = Invitation.objects.get()
self.assertEqual(invitation.org, self.org)
self.assertEqual(invitation.email, "norkans7@gmail.com")
self.assertEqual(invitation.user_group, "A")
# and sent by email
self.assertTrue(len(mail.outbox) == 1)
# pretend our invite was acted on
invitation.is_active = False
invitation.save()
# send another invitation, different group
post_data['invite_emails'] = "norkans7@gmail.com"
post_data['invite_group'] = 'E'
self.client.post(url, post_data)
# old invite should be updated
invitation.refresh_from_db()
self.assertEqual(invitation.user_group, 'E')
self.assertTrue(invitation.is_active)
# and new email sent
self.assertEqual(len(mail.outbox), 2)
# include multiple emails on the form
post_data['invite_emails'] = "norbert@temba.com,code@temba.com"
post_data['invite_group'] = 'A'
self.client.post(url, post_data)
# now 2 new invitations are created and sent
self.assertEqual(Invitation.objects.all().count(), 3)
self.assertEqual(len(mail.outbox), 4)
response = self.client.get(url)
# users ordered by email
self.assertEqual(list(response.context['org_users']), [self.admin, self.editor, self.user])
# invites ordered by email as well
self.assertEqual(response.context['invites'][0].email, 'code@temba.com')
self.assertEqual(response.context['invites'][1].email, 'norbert@temba.com')
self.assertEqual(response.context['invites'][2].email, 'norkans7@gmail.com')
# finally downgrade the editor to a surveyor and remove ourselves entirely from this org
response = self.client.post(url, {
'editors_%d' % self.user.pk: 'on',
'surveyors_%d' % self.editor.pk: 'on',
'invite_emails': "",
'invite_group': 'V'
})
# we should be redirected to chooser page
self.assertRedirect(response, reverse('orgs.org_choose'))
# and removed from this org
self.org.refresh_from_db()
self.assertEqual(set(self.org.administrators.all()), set())
self.assertEqual(set(self.org.editors.all()), {self.user})
self.assertEqual(set(self.org.viewers.all()), set())
self.assertEqual(set(self.org.surveyors.all()), {self.editor})
# editor will have lost their editor API token, but not their surveyor token
self.editor.refresh_from_db()
self.assertEqual([t.role.name for t in self.editor.api_tokens.filter(is_active=True)], ["Surveyors"])
# and all our API tokens for the admin are deleted
self.admin.refresh_from_db()
self.assertEqual(self.admin.api_tokens.filter(is_active=True).count(), 0)
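# test_join covers the invitation email contents and the join flow for each role, including expired invites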
@patch('temba.utils.email.send_temba_email')
def test_join(self, mock_send_temba_email):
def create_invite(group):
return Invitation.objects.create(org=self.org,
user_group=group,
email="norkans7@gmail.com",
created_by=self.admin,
modified_by=self.admin)
editor_invitation = create_invite('E')
editor_invitation.send_invitation()
email_args = mock_send_temba_email.call_args[0] # all positional args
self.assertEqual(email_args[0], "RapidPro Invitation")
self.assertIn('https://app.rapidpro.io/org/join/%s/' % editor_invitation.secret, email_args[1])
self.assertNotIn('{{', email_args[1])
self.assertIn('https://app.rapidpro.io/org/join/%s/' % editor_invitation.secret, email_args[2])
self.assertNotIn('{{', email_args[2])
editor_join_url = reverse('orgs.org_join', args=[editor_invitation.secret])
self.client.logout()
# if no user is logged in we redirect to the create_login page
response = self.client.get(editor_join_url)
self.assertEqual(302, response.status_code)
response = self.client.get(editor_join_url, follow=True)
self.assertEqual(response.request['PATH_INFO'], reverse('orgs.org_create_login', args=[editor_invitation.secret]))
# a user is already logged in
self.invited_editor = self.create_user("InvitedEditor")
self.login(self.invited_editor)
response = self.client.get(editor_join_url)
self.assertEqual(200, response.status_code)
self.assertEqual(self.org.pk, response.context['org'].pk)
# we have a form with no fields except 'loc'
self.assertEqual(1, len(response.context['form'].fields))
post_data = dict()
response = self.client.post(editor_join_url, post_data, follow=True)
self.assertEqual(200, response.status_code)
self.assertIn(self.invited_editor, self.org.editors.all())
self.assertFalse(Invitation.objects.get(pk=editor_invitation.pk).is_active)
roles = (('V', self.org.viewers), ('S', self.org.surveyors),
('A', self.org.administrators), ('E', self.org.editors))
# test it for each role
for role in roles:
invite = create_invite(role[0])
user = self.create_user('User%s' % role[0])
self.login(user)
response = self.client.post(reverse('orgs.org_join', args=[invite.secret]), follow=True)
self.assertEqual(200, response.status_code)
self.assertIsNotNone(role[1].filter(pk=user.pk).first())
# try an expired invite
invite = create_invite('S')
invite.is_active = False
invite.save()
expired_user = self.create_user("InvitedExpired")
self.login(expired_user)
response = self.client.post(reverse('orgs.org_join', args=[invite.secret]), follow=True)
self.assertEqual(200, response.status_code)
self.assertIsNone(self.org.surveyors.filter(pk=expired_user.pk).first())
def test_create_login(self):
admin_invitation = Invitation.objects.create(org=self.org,
user_group="A",
email="norkans7@gmail.com",
created_by=self.admin,
modified_by=self.admin)
admin_create_login_url = reverse('orgs.org_create_login', args=[admin_invitation.secret])
self.client.logout()
response = self.client.get(admin_create_login_url)
self.assertEquals(200, response.status_code)
self.assertEquals(self.org.pk, response.context['org'].pk)
# we have a form with 4 fields and one hidden 'loc'
self.assertEquals(5, len(response.context['form'].fields))
self.assertTrue('first_name' in response.context['form'].fields)
self.assertTrue('last_name' in response.context['form'].fields)
self.assertTrue('email' in response.context['form'].fields)
self.assertTrue('password' in response.context['form'].fields)
post_data = dict()
post_data['first_name'] = "Norbert"
post_data['last_name'] = "Kwizera"
post_data['email'] = "norkans7@gmail.com"
post_data['password'] = "norbertkwizeranorbert"
response = self.client.post(admin_create_login_url, post_data, follow=True)
self.assertEquals(200, response.status_code)
new_invited_user = User.objects.get(email="norkans7@gmail.com")
self.assertTrue(new_invited_user in self.org.administrators.all())
self.assertFalse(Invitation.objects.get(pk=admin_invitation.pk).is_active)
def test_surveyor_invite(self):
surveyor_invite = Invitation.objects.create(org=self.org,
user_group="S",
email="surveyor@gmail.com",
created_by=self.admin,
modified_by=self.admin)
admin_create_login_url = reverse('orgs.org_create_login', args=[surveyor_invite.secret])
self.client.logout()
post_data = dict(first_name='Surveyor', last_name='User', email='surveyor@gmail.com', password='password')
response = self.client.post(admin_create_login_url, post_data, follow=True)
self.assertEquals(200, response.status_code)
# as a surveyor we should have been rerouted
self.assertEquals(reverse('orgs.org_surveyor'), response._request.path)
self.assertFalse(Invitation.objects.get(pk=surveyor_invite.pk).is_active)
# make sure we are a surveyor
new_invited_user = User.objects.get(email="surveyor@gmail.com")
self.assertTrue(new_invited_user in self.org.surveyors.all())
# if we login, we should be rerouted too
self.client.logout()
response = self.client.post('/users/login/', {'username': 'surveyor@gmail.com', 'password': 'password'}, follow=True)
self.assertEquals(200, response.status_code)
self.assertEquals(reverse('orgs.org_surveyor'), response._request.path)
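# test_surveyor covers the two-step mobile surveyor signup: the org's surveyor password is checked first,
# then the account details are submitted together with that password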
def test_surveyor(self):
self.client.logout()
url = '%s?mobile=true' % reverse('orgs.org_surveyor')
# try creating a surveyor account with a bogus password
post_data = dict(surveyor_password='badpassword')
response = self.client.post(url, post_data)
self.assertContains(response, 'Invalid surveyor password, please check with your project leader and try again.')
# save a surveyor password
self.org.surveyor_password = 'nyaruka'
self.org.save()
# now lets try again
post_data = dict(surveyor_password='nyaruka')
response = self.client.post(url, post_data)
self.assertContains(response, 'Enter your details below to create your account.')
# now try creating an account on the second step without a surveyor_password
post_data = dict(first_name='Marshawn', last_name='Lynch',
password='beastmode24', email='beastmode@seahawks.com')
response = self.client.post(url, post_data)
self.assertContains(response, 'Enter your details below to create your account.')
# now do the same but with a valid surveyor_password
post_data = dict(first_name='Marshawn', last_name='Lynch',
password='beastmode24', email='beastmode@seahawks.com',
surveyor_password='nyaruka')
response = self.client.post(url, post_data)
self.assertTrue('token' in response.url)
self.assertTrue('beastmode' in response.url)
self.assertTrue('Temba' in response.url)
# try with a login that already exists
post_data = dict(first_name='Reused', last_name='Email',
password='mypassword1', email='beastmode@seahawks.com',
surveyor_password='nyaruka')
response = self.client.post(url, post_data)
self.assertContains(response, 'That email address is already used')
# try with a password that is too short
post_data = dict(first_name='Short', last_name='Password',
password='short', email='thomasrawls@seahawks.com',
surveyor_password='nyaruka')
response = self.client.post(url, post_data)
self.assertContains(response, 'Passwords must contain at least 8 letters')
# finally make sure our login works
success = self.client.login(username='beastmode@seahawks.com', password='beastmode24')
self.assertTrue(success)
# and that we only have the surveyor role
self.assertIsNotNone(self.org.surveyors.filter(username='beastmode@seahawks.com').first())
self.assertIsNone(self.org.administrators.filter(username='beastmode@seahawks.com').first())
self.assertIsNone(self.org.editors.filter(username='beastmode@seahawks.com').first())
self.assertIsNone(self.org.viewers.filter(username='beastmode@seahawks.com').first())
def test_choose(self):
self.client.logout()
choose_url = reverse('orgs.org_choose')
# have a second org
self.create_secondary_org()
self.login(self.admin)
response = self.client.get(reverse('orgs.org_home'))
self.assertEquals(response.context['org'], self.org)
# add self.admin to self.org2 viewers
self.org2.viewers.add(self.admin)
response = self.client.get(choose_url)
self.assertEquals(200, response.status_code)
self.assertTrue('organization' in response.context['form'].fields)
post_data = dict()
post_data['organization'] = self.org2.pk
response = self.client.post(choose_url, post_data, follow=True)
self.assertEquals(200, response.status_code)
response = self.client.get(reverse('orgs.org_home'))
self.assertEquals(response.context_data['org'], self.org2)
# a non-org user gets logged out
self.login(self.non_org_user)
response = self.client.get(choose_url)
self.assertRedirect(response, reverse('users.user_login'))
# superuser gets redirected to user management page
self.login(self.superuser)
response = self.client.get(choose_url, follow=True)
self.assertContains(response, "Organizations")
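# the topup create / manage / update pages are restricted to superusers; org admins get bounced to login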
def test_topup_admin(self):
self.login(self.admin)
topup = TopUp.objects.get()
# admins shouldn't be able to see the create / manage / update pages
manage_url = reverse('orgs.topup_manage') + "?org=%d" % self.org.id
self.assertRedirect(self.client.get(manage_url), '/users/login/')
create_url = reverse('orgs.topup_create') + "?org=%d" % self.org.id
self.assertRedirect(self.client.get(create_url), '/users/login/')
update_url = reverse('orgs.topup_update', args=[topup.pk])
self.assertRedirect(self.client.get(update_url), '/users/login/')
# log in as root
self.login(self.superuser)
# should list our one topup
response = self.client.get(manage_url)
self.assertEquals(1, len(response.context['object_list']))
# create a new one
post_data = dict(price='1000', credits='500', comment="")
response = self.client.post(create_url, post_data)
self.assertEquals(2, TopUp.objects.filter(org=self.org).count())
self.assertEquals(1500, self.org.get_credits_remaining())
# update one of our topups
post_data = dict(is_active=True, price='0', credits='5000', comment="", expires_on="2025-04-03 13:47:46")
response = self.client.post(update_url, post_data)
self.assertEquals(5500, self.org.get_credits_remaining())
def test_topup_model(self):
topup = TopUp.create(self.admin, price=None, credits=1000)
self.assertEqual(topup.get_price_display(), "")
topup.price = 0
topup.save()
self.assertEqual(topup.get_price_display(), "Free")
topup.price = 100
topup.save()
self.assertEqual(topup.get_price_display(), "$1.00")
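# test_topups walks through the credit accounting: totals, used and remaining counts and their caches,
# squashing of TopUpCredits, expiring credits, the low credits threshold and the multi-user/multi-org tiers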
def test_topups(self):
settings.BRANDING[settings.DEFAULT_BRAND]['tiers'] = dict(multi_user=100000, multi_org=1000000)
contact = self.create_contact("Michael Shumaucker", "+250788123123")
test_contact = Contact.get_test_contact(self.user)
welcome_topup = TopUp.objects.get()
def create_msgs(recipient, count):
for m in range(count):
self.create_msg(contact=recipient, direction='I', text="Test %d" % m)
create_msgs(contact, 10)
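# the low credits threshold of 150 works out to 15% of our 1000 total credits here; the first call computes
# it with a single query, the second is served from cache with none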
with self.assertNumQueries(1):
self.assertEquals(150, self.org.get_low_credits_threshold())
with self.assertNumQueries(0):
self.assertEquals(150, self.org.get_low_credits_threshold())
# we should have 1000 minus 10 credits for this org
with self.assertNumQueries(4):
self.assertEquals(990, self.org.get_credits_remaining()) # from db
with self.assertNumQueries(0):
self.assertEquals(1000, self.org.get_credits_total()) # from cache
self.assertEquals(10, self.org.get_credits_used())
self.assertEquals(990, self.org.get_credits_remaining())
self.assertEquals(10, welcome_topup.msgs.count())
self.assertEquals(10, TopUp.objects.get(pk=welcome_topup.pk).get_used())
# at this point we shouldn't have squashed any TopUpCredits, so we should have the same number of rows as credits used
self.assertEqual(10, TopUpCredits.objects.all().count())
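# squashing should collapse the 10 per-message TopUpCredits rows into a single aggregated row for our topup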
# now squash
squash_topupcredits()
# should only have one remaining
self.assertEqual(1, TopUpCredits.objects.all().count())
# reduce our credits on our topup to 15
TopUp.objects.filter(pk=welcome_topup.pk).update(credits=15)
self.org.update_caches(OrgEvent.topup_updated, None) # invalidates our credits remaining cache
self.assertEquals(15, self.org.get_credits_total())
self.assertEquals(5, self.org.get_credits_remaining())
# create 10 more messages, only 5 of which will get a topup
create_msgs(contact, 10)
self.assertEquals(15, TopUp.objects.get(pk=welcome_topup.pk).msgs.count())
self.assertEquals(15, TopUp.objects.get(pk=welcome_topup.pk).get_used())
self.assertFalse(self.org._calculate_active_topup())
with self.assertNumQueries(0):
self.assertEquals(15, self.org.get_credits_total())
self.assertEquals(20, self.org.get_credits_used())
self.assertEquals(-5, self.org.get_credits_remaining())
# again create 10 more messages, none of which will get a topup
create_msgs(contact, 10)
with self.assertNumQueries(0):
self.assertEquals(15, self.org.get_credits_total())
self.assertEquals(30, self.org.get_credits_used())
self.assertEquals(-15, self.org.get_credits_remaining())
self.assertEquals(15, TopUp.objects.get(pk=welcome_topup.pk).get_used())
# raise our topup to 20 credits and create another topup for 5
TopUp.objects.filter(pk=welcome_topup.pk).update(credits=20)
new_topup = TopUp.create(self.admin, price=0, credits=5)
self.org.update_caches(OrgEvent.topup_updated, None)
# apply topups which will max out both and reduce debt to 5
self.org.apply_topups()
self.assertEquals(20, welcome_topup.msgs.count())
self.assertEquals(20, TopUp.objects.get(pk=welcome_topup.pk).get_used())
self.assertEquals(5, new_topup.msgs.count())
self.assertEquals(5, TopUp.objects.get(pk=new_topup.pk).get_used())
self.assertEquals(25, self.org.get_credits_total())
self.assertEquals(30, self.org.get_credits_used())
self.assertEquals(-5, self.org.get_credits_remaining())
# create a message from our test contact, should not count against our totals
test_msg = self.create_msg(contact=test_contact, direction='I', text="Test")
self.assertIsNone(test_msg.topup_id)
self.assertEquals(30, self.org.get_credits_used())
# test special status
self.assertFalse(self.org.is_multi_user_tier())
self.assertFalse(self.org.is_multi_org_tier())
# add new topup with lots of credits
mega_topup = TopUp.create(self.admin, price=0, credits=100000)
self.org.update_caches(OrgEvent.topup_updated, None)
# after applying this, no non-test messages should be without a topup
self.org.apply_topups()
self.assertFalse(Msg.objects.filter(org=self.org, contact__is_test=False, topup=None))
self.assertFalse(Msg.objects.filter(org=self.org, contact__is_test=True).exclude(topup=None))
self.assertEquals(5, TopUp.objects.get(pk=mega_topup.pk).get_used())
# we aren't yet multi user since this topup was free
self.assertEquals(0, self.org.get_purchased_credits())
self.assertFalse(self.org.is_multi_user_tier())
self.assertEquals(100025, self.org.get_credits_total())
self.assertEquals(30, self.org.get_credits_used())
self.assertEquals(99995, self.org.get_credits_remaining())
# and new messages use the mega topup
msg = self.create_msg(contact=contact, direction='I', text="Test")
self.assertEquals(msg.topup, mega_topup)
self.assertEquals(6, TopUp.objects.get(pk=mega_topup.pk).get_used())
# but now it expires
yesterday = timezone.now() - relativedelta(days=1)
mega_topup.expires_on = yesterday
mega_topup.save(update_fields=['expires_on'])
self.org.update_caches(OrgEvent.topup_updated, None)
# new incoming messages should not be assigned a topup
msg = self.create_msg(contact=contact, direction='I', text="Test")
self.assertIsNone(msg.topup)
# check our totals
self.org.update_caches(OrgEvent.topup_updated, None)
with self.assertNumQueries(3):
self.assertEquals(0, self.org.get_purchased_credits())
self.assertEquals(31, self.org.get_credits_total())
self.assertEquals(32, self.org.get_credits_used())
self.assertEquals(-1, self.org.get_credits_remaining())
# all topups expired
TopUp.objects.all().update(expires_on=yesterday)
# we have credits expiring soon, and no other active credits
gift_topup = TopUp.create(self.admin, price=0, credits=100)
next_week = timezone.now() + relativedelta(days=7)
gift_topup.expires_on = next_week
gift_topup.save(update_fields=['expires_on'])
self.org.update_caches(OrgEvent.topup_updated, None)
self.org.apply_topups()
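# the one unassigned message gets applied to the gift topup, so 99 of its 100 credits are expiring soon;
# the low credits threshold drops to 15, again 15% of the active credits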
with self.assertNumQueries(3):
self.assertEquals(99, self.org.get_credits_expiring_soon())
with self.assertNumQueries(1):
self.assertEquals(15, self.org.get_low_credits_threshold())
with self.assertNumQueries(0):
self.assertEquals(99, self.org.get_credits_expiring_soon())
self.assertEquals(15, self.org.get_low_credits_threshold())
# some credits expire but more credits will remain active
later_active_topup = TopUp.create(self.admin, price=0, credits=200)
five_week_ahead = timezone.now() + relativedelta(days=35)
later_active_topup.expires_on = five_week_ahead
later_active_topup.save(update_fields=['expires_on'])
self.org.update_caches(OrgEvent.topup_updated, None)
self.org.apply_topups()
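# nothing counts as expiring soon now since the 200 later-expiring credits outlast the 100 expiring next
# week, and the threshold rises to 45, i.e. 15% of the 300 active credits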
with self.assertNumQueries(3):
self.assertEquals(0, self.org.get_credits_expiring_soon())
with self.assertNumQueries(1):
self.assertEquals(45, self.org.get_low_credits_threshold())
with self.assertNumQueries(0):
self.assertEquals(0, self.org.get_credits_expiring_soon())
self.assertEquals(45, self.org.get_low_credits_threshold())
# no expiring credits
gift_topup.expires_on = five_week_ahead
gift_topup.save(update_fields=['expires_on'])
self.org.update_caches(OrgEvent.topup_updated, None)
self.org.apply_topups()
with self.assertNumQueries(3):
self.assertEquals(0, self.org.get_credits_expiring_soon())
with self.assertNumQueries(1):
self.assertEquals(45, self.org.get_low_credits_threshold())
with self.assertNumQueries(0):
self.assertEquals(0, self.org.get_credits_expiring_soon())
self.assertEquals(45, self.org.get_low_credits_threshold())
# do not consider expired topup
gift_topup.expires_on = yesterday
gift_topup.save(update_fields=['expires_on'])
self.org.update_caches(OrgEvent.topup_updated, None)
self.org.apply_topups()
with self.assertNumQueries(3):
self.assertEquals(0, self.org.get_credits_expiring_soon())
with self.assertNumQueries(1):
self.assertEquals(30, self.org.get_low_credits_threshold())
with self.assertNumQueries(0):
self.assertEquals(0, self.org.get_credits_expiring_soon())
self.assertEquals(30, self.org.get_low_credits_threshold())
TopUp.objects.all().update(is_active=False)
self.org.update_caches(OrgEvent.topup_updated, None)
self.org.apply_topups()
with self.assertNumQueries(1):
self.assertEquals(0, self.org.get_low_credits_threshold())
with self.assertNumQueries(0):
self.assertEquals(0, self.org.get_low_credits_threshold())
# now buy some credits to make us multi user
TopUp.create(self.admin, price=100, credits=100000)
self.org.update_caches(OrgEvent.topup_updated, None)
self.assertTrue(self.org.is_multi_user_tier())
self.assertFalse(self.org.is_multi_org_tier())
# good deal!
TopUp.create(self.admin, price=100, credits=1000000)
self.org.update_caches(OrgEvent.topup_updated, None)
self.assertTrue(self.org.is_multi_user_tier())
self.assertTrue(self.org.is_multi_org_tier())
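# test_twilio_connect covers connecting a Twilio account, exchanging a secondary token for the primary one,
# updating the credentials from the account page and finally disconnecting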
@patch('temba.orgs.views.TwilioRestClient', MockTwilioClient)
@patch('temba.orgs.models.TwilioRestClient', MockTwilioClient)
@patch('twilio.util.RequestValidator', MockRequestValidator)
def test_twilio_connect(self):
with patch('temba.tests.MockTwilioClient.MockAccounts.get') as mock_get:
with patch('temba.tests.MockTwilioClient.MockApplications.list') as mock_apps_list:
org = self.org
connect_url = reverse("orgs.org_twilio_connect")
self.login(self.admin)
self.admin.set_org(self.org)
response = self.client.get(connect_url)
self.assertEquals(200, response.status_code)
self.assertTrue(response.context['form'])
self.assertEquals(len(response.context['form'].fields.keys()), 3)
self.assertIn('account_sid', response.context['form'].fields.keys())
self.assertIn('account_token', response.context['form'].fields.keys())
mock_get.return_value = MockTwilioClient.MockAccount('Full')
mock_apps_list.return_value = [MockTwilioClient.MockApplication("%s/%d" % (settings.TEMBA_HOST.lower(),
self.org.pk))]
# try posting without an account token
post_data = dict()
post_data['account_sid'] = "AccountSid"
response = self.client.post(connect_url, post_data)
self.assertEquals(response.context['form'].errors['account_token'][0], 'This field is required.')
# now add the account token and try again
post_data['account_token'] = "AccountToken"
# but with an unexpected exception
with patch('temba.tests.MockTwilioClient.__init__') as mock:
mock.side_effect = Exception('Unexpected')
response = self.client.post(connect_url, post_data)
self.assertEquals('The Twilio account SID and Token seem invalid. '
'Please check them again and retry.',
response.context['form'].errors['__all__'][0])
self.client.post(connect_url, post_data)
org.refresh_from_db()
self.assertEquals(org.config_json()['ACCOUNT_SID'], "AccountSid")
self.assertEquals(org.config_json()['ACCOUNT_TOKEN'], "AccountToken")
self.assertTrue(org.config_json()['APPLICATION_SID'])
# when the user submits the secondary token, we use it to get the primary one from the REST API
with patch('temba.tests.MockTwilioClient.MockAccounts.get') as mock_get_primary:
mock_get_primary.return_value = MockTwilioClient.MockAccount('Full', 'PrimaryAccountToken')
self.client.post(connect_url, post_data)
org.refresh_from_db()
self.assertEquals(org.config_json()['ACCOUNT_SID'], "AccountSid")
self.assertEquals(org.config_json()['ACCOUNT_TOKEN'], "PrimaryAccountToken")
self.assertTrue(org.config_json()['APPLICATION_SID'])
twilio_account_url = reverse('orgs.org_twilio_account')
response = self.client.get(twilio_account_url)
self.assertEquals("AccountSid", response.context['account_sid'])
org.refresh_from_db()
config = org.config_json()
self.assertEquals('AccountSid', config['ACCOUNT_SID'])
self.assertEquals('PrimaryAccountToken', config['ACCOUNT_TOKEN'])
# post without a sid or token, should get a form validation error
response = self.client.post(twilio_account_url, dict(disconnect='false'), follow=True)
self.assertEquals('[{"message": "You must enter your Twilio Account SID", "code": ""}]',
response.context['form'].errors['__all__'].as_json())
# all our twilio creds should remain the same
org.refresh_from_db()
config = org.config_json()
self.assertEquals(config['ACCOUNT_SID'], "AccountSid")
self.assertEquals(config['ACCOUNT_TOKEN'], "PrimaryAccountToken")
self.assertEquals(config['APPLICATION_SID'], "TwilioTestSid")
# now try with all required fields, and a bonus field we shouldn't change
self.client.post(twilio_account_url, dict(account_sid='AccountSid',
account_token='SecondaryToken',
disconnect='false',
name='DO NOT CHANGE ME'), follow=True)
# name shouldn't change
org.refresh_from_db()
self.assertEquals(org.name, "Temba")
# now disconnect our twilio connection
self.assertTrue(org.is_connected_to_twilio())
self.client.post(twilio_account_url, dict(disconnect='true'), follow=True)
org.refresh_from_db()
self.assertFalse(org.is_connected_to_twilio())
def test_has_airtime_transfers(self):
AirtimeTransfer.objects.filter(org=self.org).delete()
self.assertFalse(self.org.has_airtime_transfers())
contact = self.create_contact('Bob', number='+250788123123')
AirtimeTransfer.objects.create(org=self.org, recipient='+250788123123', amount='100',
contact=contact, created_by=self.admin, modified_by=self.admin)
self.assertTrue(self.org.has_airtime_transfers())
def test_transferto_model_methods(self):
org = self.org
org.refresh_from_db()
self.assertFalse(org.is_connected_to_transferto())
org.connect_transferto('login', 'token', self.admin)
org.refresh_from_db()
self.assertTrue(org.is_connected_to_transferto())
self.assertEqual(org.modified_by, self.admin)
org.remove_transferto_account(self.admin)
org.refresh_from_db()
self.assertFalse(org.is_connected_to_transferto())
self.assertEqual(org.modified_by, self.admin)
def test_transferto_account(self):
self.login(self.admin)
# connect transferTo
transferto_account_url = reverse('orgs.org_transfer_to_account')
with patch('temba.airtime.models.AirtimeTransfer.post_transferto_api_response') as mock_post_transferto_request:
mock_post_transferto_request.return_value = MockResponse(200, 'Unexpected content')
response = self.client.post(transferto_account_url, dict(account_login='login', airtime_api_token='token',
disconnect='false'))
self.assertContains(response, "Your TransferTo API key and secret seem invalid.")
self.assertFalse(self.org.is_connected_to_transferto())
mock_post_transferto_request.return_value = MockResponse(200, 'authentication_key=123\r\n'
'error_code=400\r\n'
'error_txt=Failed Authentication\r\n')
response = self.client.post(transferto_account_url, dict(account_login='login', airtime_api_token='token',
disconnect='false'))
self.assertContains(response, "Connecting to your TransferTo account failed "
"with error text: Failed Authentication")
self.assertFalse(self.org.is_connected_to_transferto())
mock_post_transferto_request.return_value = MockResponse(200, 'info_txt=pong\r\n'
'authentication_key=123\r\n'
'error_code=0\r\n'
'error_txt=Transaction successful\r\n')
response = self.client.post(transferto_account_url, dict(account_login='login', airtime_api_token='token',
disconnect='false'))
self.assertNoFormErrors(response)
# transferTo should be connected
self.org = Org.objects.get(pk=self.org.pk)
self.assertTrue(self.org.is_connected_to_transferto())
self.assertEqual(self.org.config_json()['TRANSFERTO_ACCOUNT_LOGIN'], 'login')
self.assertEqual(self.org.config_json()['TRANSFERTO_AIRTIME_API_TOKEN'], 'token')
response = self.client.get(transferto_account_url)
self.assertEqual(response.context['transferto_account_login'], 'login')
# and disconnect
response = self.client.post(transferto_account_url, dict(account_login='login', airtime_api_token='token',
disconnect='true'))
self.assertNoFormErrors(response)
self.org = Org.objects.get(pk=self.org.pk)
self.assertFalse(self.org.is_connected_to_transferto())
self.assertFalse(self.org.config_json()['TRANSFERTO_ACCOUNT_LOGIN'])
self.assertFalse(self.org.config_json()['TRANSFERTO_AIRTIME_API_TOKEN'])
mock_post_transferto_request.side_effect = Exception('foo')
response = self.client.post(transferto_account_url, dict(account_login='login', airtime_api_token='token',
disconnect='false'))
self.assertContains(response, "Your TransferTo API key and secret seem invalid.")
self.assertFalse(self.org.is_connected_to_transferto())
# no account connected, so the airtime transfer logs link should not be shown
response = self.client.get(transferto_account_url, HTTP_X_FORMAX=True)
self.assertNotContains(response, reverse('airtime.airtimetransfer_list'))
self.assertNotContains(response, "%s?disconnect=true" % reverse('orgs.org_transfer_to_account'))
response = self.client.get(transferto_account_url)
self.assertNotContains(response, reverse('airtime.airtimetransfer_list'))
self.assertNotContains(response, "%s?disconnect=true" % reverse('orgs.org_transfer_to_account'))
self.org.connect_transferto('login', 'token', self.admin)
# links not shown if request is not from formax
response = self.client.get(transferto_account_url)
self.assertNotContains(response, reverse('airtime.airtimetransfer_list'))
self.assertNotContains(response, "%s?disconnect=true" % reverse('orgs.org_transfer_to_account'))
# links shown for formax requests
response = self.client.get(transferto_account_url, HTTP_X_FORMAX=True)
self.assertContains(response, reverse('airtime.airtimetransfer_list'))
self.assertContains(response, "%s?disconnect=true" % reverse('orgs.org_transfer_to_account'))
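# test_resthooks covers creating a resthook, rejecting a duplicate slug, listing resthooks for select2 and
# deactivating one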
def test_resthooks(self):
# no hitting this page without auth
resthook_url = reverse('orgs.org_resthooks')
response = self.client.get(resthook_url)
self.assertLoginRedirect(response)
self.login(self.admin)
# get our resthook management page
response = self.client.get(resthook_url)
# shouldn't have any resthooks listed yet
self.assertFalse(response.context['current_resthooks'])
# ok, let's create one
self.client.post(resthook_url, dict(resthook='mother-registration'))
# should now have a resthook
resthook = Resthook.objects.get()
self.assertEqual(resthook.slug, 'mother-registration')
self.assertEqual(resthook.org, self.org)
self.assertEqual(resthook.created_by, self.admin)
# fetch our read page, should have our resthook
response = self.client.get(resthook_url)
self.assertTrue(response.context['current_resthooks'])
# let's try to create a repeat, should fail due to duplicate slug
response = self.client.post(resthook_url, dict(resthook='Mother-Registration'))
self.assertTrue(response.context['form'].errors)
# hit our list page used by select2, checking it lists our resthook
response = self.client.get(reverse('api.resthook_list') + "?_format=select2")
results = response.json()['results']
self.assertEqual(len(results), 1)
self.assertEqual(results[0], dict(text='mother-registration', id='mother-registration'))
# finally, let's remove that resthook
self.client.post(resthook_url, {'resthook_%d' % resthook.id: 'checked'})
resthook.refresh_from_db()
self.assertFalse(resthook.is_active)
# no more resthooks!
response = self.client.get(resthook_url)
self.assertFalse(response.context['current_resthooks'])
def test_smtp_server(self):
self.login(self.admin)
smtp_server_url = reverse('orgs.org_smtp_server')
self.org.refresh_from_db()
self.assertFalse(self.org.has_smtp_config())
response = self.client.post(smtp_server_url, dict(disconnect='false'), follow=True)
self.assertEquals('[{"message": "You must enter a from email", "code": ""}]',
response.context['form'].errors['__all__'].as_json())
response = self.client.post(smtp_server_url, dict(smtp_from_email='foobar.com',
disconnect='false'), follow=True)
self.assertEquals('[{"message": "Please enter a valid email address", "code": ""}]',
response.context['form'].errors['__all__'].as_json())
response = self.client.post(smtp_server_url, dict(smtp_from_email='foo@bar.com',
disconnect='false'), follow=True)
self.assertEquals('[{"message": "You must enter the SMTP host", "code": ""}]',
response.context['form'].errors['__all__'].as_json())
response = self.client.post(smtp_server_url, dict(smtp_from_email='foo@bar.com',
smtp_host='smtp.example.com',
disconnect='false'), follow=True)
self.assertEquals('[{"message": "You must enter the SMTP username", "code": ""}]',
response.context['form'].errors['__all__'].as_json())
response = self.client.post(smtp_server_url, dict(smtp_from_email='foo@bar.com',
smtp_host='smtp.example.com',
smtp_username='support@example.com',
disconnect='false'), follow=True)
self.assertEquals('[{"message": "You must enter the SMTP password", "code": ""}]',
response.context['form'].errors['__all__'].as_json())
response = self.client.post(smtp_server_url, dict(smtp_from_email='foo@bar.com',
smtp_host='smtp.example.com',
smtp_username='support@example.com',
smtp_password='secret',
disconnect='false'), follow=True)
self.assertEquals('[{"message": "You must enter the SMTP port", "code": ""}]',
response.context['form'].errors['__all__'].as_json())
response = self.client.post(smtp_server_url, dict(smtp_from_email='foo@bar.com',
smtp_host='smtp.example.com',
smtp_username='support@example.com',
smtp_password='secret',
smtp_port='465',
smtp_encryption='',
disconnect='false'), follow=True)
self.org.refresh_from_db()
self.assertTrue(self.org.has_smtp_config())
self.assertEquals(self.org.config_json()['SMTP_FROM_EMAIL'], 'foo@bar.com')
self.assertEquals(self.org.config_json()['SMTP_HOST'], 'smtp.example.com')
self.assertEquals(self.org.config_json()['SMTP_USERNAME'], 'support@example.com')
self.assertEquals(self.org.config_json()['SMTP_PASSWORD'], 'secret')
self.assertEquals(self.org.config_json()['SMTP_PORT'], '465')
self.assertEquals(self.org.config_json()['SMTP_ENCRYPTION'], '')
response = self.client.get(smtp_server_url)
self.assertEquals('foo@bar.com', response.context['flow_from_email'])
self.client.post(smtp_server_url, dict(smtp_from_email='support@example.com',
smtp_host='smtp.example.com',
smtp_username='support@example.com',
smtp_password='secret',
smtp_port='465',
smtp_encryption='T',
name="DO NOT CHANGE ME",
disconnect='false'), follow=True)
# name shouldn't change
self.org.refresh_from_db()
self.assertEquals(self.org.name, "Temba")
self.assertTrue(self.org.has_smtp_config())
self.client.post(smtp_server_url, dict(disconnect='true'), follow=True)
self.org.refresh_from_db()
self.assertFalse(self.org.has_smtp_config())
response = self.client.post(smtp_server_url, dict(smtp_from_email=' support@example.com',
smtp_host=' smtp.example.com ',
smtp_username=' support@example.com ',
smtp_password='secret ',
smtp_port='465 ',
smtp_encryption='T',
disconnect='false'), follow=True)
self.org.refresh_from_db()
self.assertTrue(self.org.has_smtp_config())
self.assertEquals(self.org.config_json()['SMTP_FROM_EMAIL'], 'support@example.com')
self.assertEquals(self.org.config_json()['SMTP_HOST'], 'smtp.example.com')
self.assertEquals(self.org.config_json()['SMTP_USERNAME'], 'support@example.com')
self.assertEquals(self.org.config_json()['SMTP_PASSWORD'], 'secret')
self.assertEquals(self.org.config_json()['SMTP_PORT'], '465')
self.assertEquals(self.org.config_json()['SMTP_ENCRYPTION'], 'T')
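# test_connect_nexmo covers connecting a Nexmo account with invalid and valid credentials, updating the
# keys from the account page and disconnecting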
@patch('nexmo.Client.create_application')
def test_connect_nexmo(self, mock_create_application):
mock_create_application.return_value = dict(id='app-id', keys=dict(private_key='private-key'))
self.login(self.admin)
# connect nexmo
connect_url = reverse('orgs.org_nexmo_connect')
# simulate invalid credentials
with patch('requests.get') as nexmo:
nexmo.return_value = MockResponse(401, '{"error-code": "401"}')
response = self.client.post(connect_url, dict(api_key='key', api_secret='secret'))
self.assertContains(response, "Your Nexmo API key and secret seem invalid.")
self.assertFalse(self.org.is_connected_to_nexmo())
# ok, now with a success
with patch('requests.get') as nexmo_get:
with patch('requests.post') as nexmo_post:
# believe it or not nexmo returns 'error-code' 200
nexmo_get.return_value = MockResponse(200, '{"error-code": "200"}')
nexmo_post.return_value = MockResponse(200, '{"error-code": "200"}')
self.client.post(connect_url, dict(api_key='key', api_secret='secret'))
# nexmo should now be connected
self.org = Org.objects.get(pk=self.org.pk)
self.assertTrue(self.org.is_connected_to_nexmo())
self.assertEquals(self.org.config_json()['NEXMO_KEY'], 'key')
self.assertEquals(self.org.config_json()['NEXMO_SECRET'], 'secret')
nexmo_account_url = reverse('orgs.org_nexmo_account')
response = self.client.get(nexmo_account_url)
self.assertEquals("key", response.context['api_key'])
self.org.refresh_from_db()
config = self.org.config_json()
self.assertEquals('key', config[NEXMO_KEY])
self.assertEquals('secret', config[NEXMO_SECRET])
# post without api token, should get validation error
response = self.client.post(nexmo_account_url, dict(disconnect='false'), follow=True)
self.assertEquals('[{"message": "You must enter your Nexmo Account API Key", "code": ""}]',
response.context['form'].errors['__all__'].as_json())
# nexmo config should remain the same
self.org.refresh_from_db()
config = self.org.config_json()
self.assertEquals('key', config[NEXMO_KEY])
self.assertEquals('secret', config[NEXMO_SECRET])
# now try with all required fields, and a bonus field we shouldn't change
self.client.post(nexmo_account_url, dict(api_key='other_key',
api_secret='secret-too',
disconnect='false',
name='DO NOT CHANGE ME'), follow=True)
# name shouldn't change
self.org.refresh_from_db()
self.assertEquals(self.org.name, "Temba")
# should change nexmo config
with patch('nexmo.Client.get_balance') as mock_get_balance:
mock_get_balance.return_value = 120
self.client.post(nexmo_account_url, dict(api_key='other_key',
api_secret='secret-too',
disconnect='false'), follow=True)
self.org.refresh_from_db()
config = self.org.config_json()
self.assertEquals('other_key', config[NEXMO_KEY])
self.assertEquals('secret-too', config[NEXMO_SECRET])
self.assertTrue(self.org.is_connected_to_nexmo())
self.client.post(nexmo_account_url, dict(disconnect='true'), follow=True)
self.org.refresh_from_db()
self.assertFalse(self.org.is_connected_to_nexmo())
# and disconnect
self.org.remove_nexmo_account(self.admin)
self.assertFalse(self.org.is_connected_to_nexmo())
self.assertFalse(self.org.config_json()['NEXMO_KEY'])
self.assertFalse(self.org.config_json()['NEXMO_SECRET'])
@patch('nexmo.Client.create_application')
def test_nexmo_configuration(self, mock_create_application):
mock_create_application.return_value = dict(id='app-id', keys=dict(private_key='private-key'))
self.login(self.admin)
nexmo_configuration_url = reverse('orgs.org_nexmo_configuration')
# try nexmo not connected
response = self.client.get(nexmo_configuration_url)
self.assertEqual(response.status_code, 302)
response = self.client.get(nexmo_configuration_url, follow=True)
self.assertEqual(response.request['PATH_INFO'], reverse('orgs.org_nexmo_connect'))
self.org.connect_nexmo('key', 'secret', self.admin)
with patch('temba.utils.nexmo.NexmoClient.update_account') as mock_update_account:
# try automatic nexmo settings update
mock_update_account.return_value = True
response = self.client.get(nexmo_configuration_url)
self.assertEqual(response.status_code, 302)
response = self.client.get(nexmo_configuration_url, follow=True)
self.assertEqual(response.request['PATH_INFO'], reverse('channels.channel_claim_nexmo'))
with patch('temba.utils.nexmo.NexmoClient.update_account') as mock_update_account:
mock_update_account.side_effect = [nexmo.Error, nexmo.Error]
response = self.client.get(nexmo_configuration_url)
self.assertEqual(response.status_code, 200)
response = self.client.get(nexmo_configuration_url, follow=True)
self.assertEqual(response.request['PATH_INFO'], reverse('orgs.org_nexmo_configuration'))
def test_connect_plivo(self):
self.login(self.admin)
# connect plivo
connect_url = reverse('orgs.org_plivo_connect')
# simulate invalid credentials
with patch('requests.get') as plivo_mock:
plivo_mock.return_value = MockResponse(401,
'Could not verify your access level for that URL.'
'\nYou have to login with proper credentials')
response = self.client.post(connect_url, dict(auth_id='auth-id', auth_token='auth-token'))
self.assertContains(response,
"Your Plivo AUTH ID and AUTH TOKEN seem invalid. Please check them again and retry.")
self.assertFalse(Channel.CONFIG_PLIVO_AUTH_ID in self.client.session)
self.assertFalse(Channel.CONFIG_PLIVO_AUTH_TOKEN in self.client.session)
# ok, now with a success
with patch('requests.get') as plivo_mock:
plivo_mock.return_value = MockResponse(200, json.dumps(dict()))
self.client.post(connect_url, dict(auth_id='auth-id', auth_token='auth-token'))
# plivo should be added to the session
self.assertEquals(self.client.session[Channel.CONFIG_PLIVO_AUTH_ID], 'auth-id')
self.assertEquals(self.client.session[Channel.CONFIG_PLIVO_AUTH_TOKEN], 'auth-token')
def test_download(self):
response = self.client.get('/org/download/messages/123/')
self.assertLoginRedirect(response)
self.login(self.admin)
response = self.client.get('/org/download/messages/123/')
self.assertRedirect(response, '/assets/download/message_export/123/')
response = self.client.get('/org/download/contacts/123/')
self.assertRedirect(response, '/assets/download/contact_export/123/')
response = self.client.get('/org/download/flows/123/')
self.assertRedirect(response, '/assets/download/results_export/123/')
def test_tiers(self):
# default is no tiers, everything is allowed, go crazy!
self.assertTrue(self.org.is_import_flows_tier())
self.assertTrue(self.org.is_multi_user_tier())
self.assertTrue(self.org.is_multi_org_tier())
# same when tiers are missing completely
del settings.BRANDING[settings.DEFAULT_BRAND]['tiers']
self.assertTrue(self.org.is_import_flows_tier())
self.assertTrue(self.org.is_multi_user_tier())
self.assertTrue(self.org.is_multi_org_tier())
# not enough credits with tiers enabled
settings.BRANDING[settings.DEFAULT_BRAND]['tiers'] = dict(import_flows=1, multi_user=100000, multi_org=1000000)
self.assertIsNone(self.org.create_sub_org('Sub Org A'))
self.assertFalse(self.org.is_import_flows_tier())
self.assertFalse(self.org.is_multi_user_tier())
self.assertFalse(self.org.is_multi_org_tier())
# not enough credits, but tiers disabled
settings.BRANDING[settings.DEFAULT_BRAND]['tiers'] = dict(import_flows=0, multi_user=0, multi_org=0)
self.assertIsNotNone(self.org.create_sub_org('Sub Org A'))
self.assertTrue(self.org.is_import_flows_tier())
self.assertTrue(self.org.is_multi_user_tier())
self.assertTrue(self.org.is_multi_org_tier())
# tiers enabled, but enough credits
settings.BRANDING[settings.DEFAULT_BRAND]['tiers'] = dict(import_flows=1, multi_user=100000, multi_org=1000000)
TopUp.create(self.admin, price=100, credits=1000000)
self.org.update_caches(OrgEvent.topup_updated, None)
self.assertIsNotNone(self.org.create_sub_org('Sub Org B'))
self.assertTrue(self.org.is_import_flows_tier())
self.assertTrue(self.org.is_multi_user_tier())
self.assertTrue(self.org.is_multi_org_tier())
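# test_sub_orgs covers creating a child org and allocating credits to it; each allocation is tracked with
# a Debit against the parent topup it was drawn from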
def test_sub_orgs(self):
from temba.orgs.models import Debit
settings.BRANDING[settings.DEFAULT_BRAND]['tiers'] = dict(multi_org=1000000)
# let's start with two topups
expires = timezone.now() + timedelta(days=400)
first_topup = TopUp.objects.filter(org=self.org).first()
second_topup = TopUp.create(self.admin, price=0, credits=1000, org=self.org, expires_on=expires)
sub_org = self.org.create_sub_org('Sub Org')
# we can't create sub orgs if the org isn't at the proper tier
self.assertIsNone(sub_org)
# lower the tier and try again
settings.BRANDING[settings.DEFAULT_BRAND]['tiers'] = dict(multi_org=0)
sub_org = self.org.create_sub_org('Sub Org')
# suborgs can't create suborgs
self.assertIsNone(sub_org.create_sub_org('Grandchild Org'))
# we should be linked to our parent with the same brand
self.assertEqual(self.org, sub_org.parent)
self.assertEqual(self.org.brand, sub_org.brand)
# our sub account should have zero credits
self.assertEqual(0, sub_org.get_credits_remaining())
# default values should be the same as parent
self.assertEqual(self.org.timezone, sub_org.timezone)
self.assertEqual(self.org.created_by, sub_org.created_by)
# now allocate some credits to our sub org
self.assertTrue(self.org.allocate_credits(self.admin, sub_org, 700))
self.assertEqual(700, sub_org.get_credits_remaining())
self.assertEqual(1300, self.org.get_credits_remaining())
# we should have a debit to track this transaction
debits = Debit.objects.filter(topup__org=self.org)
self.assertEqual(1, len(debits))
debit = debits.first()
self.assertEqual(700, debit.amount)
self.assertEqual(Debit.TYPE_ALLOCATION, debit.debit_type)
self.assertEqual(first_topup.expires_on, debit.beneficiary.expires_on)
# try allocating more than we have
self.assertFalse(self.org.allocate_credits(self.admin, sub_org, 1301))
self.assertEqual(700, sub_org.get_credits_remaining())
self.assertEqual(1300, self.org.get_credits_remaining())
self.assertEqual(700, self.org._calculate_credits_used())
# now allocate across our remaining topups
self.assertTrue(self.org.allocate_credits(self.admin, sub_org, 1200))
self.assertEqual(1900, sub_org.get_credits_remaining())
self.assertEqual(1900, self.org.get_credits_used())
self.assertEqual(100, self.org.get_credits_remaining())
# now recalculate our credit caches, we ought to still have the proper amounts
self.org._calculate_credit_caches()
sub_org._calculate_credit_caches()
self.assertEqual(1900, sub_org.get_credits_remaining())
self.assertEqual(100, self.org.get_credits_remaining())
# this creates two more debits, for a total of three
debits = Debit.objects.filter(topup__org=self.org).order_by('id')
self.assertEqual(3, len(debits))
# the last two debits should expire at same time as topup they were funded by
self.assertEqual(first_topup.expires_on, debits[1].topup.expires_on)
self.assertEqual(second_topup.expires_on, debits[2].topup.expires_on)
# allocate the exact number of credits remaining
self.org.allocate_credits(self.admin, sub_org, 100)
self.assertEqual(2000, sub_org.get_credits_remaining())
self.assertEqual(0, self.org.get_credits_remaining())
def test_sub_org_ui(self):
self.login(self.admin)
settings.BRANDING[settings.DEFAULT_BRAND]['tiers'] = dict(multi_org=1000000)
# set our org on the session
session = self.client.session
session['org_id'] = self.org.id
session.save()
response = self.client.get(reverse('orgs.org_home'))
self.assertNotContains(response, 'Manage Organizations')
# attempting to manage orgs should redirect
response = self.client.get(reverse('orgs.org_sub_orgs'))
self.assertRedirect(response, reverse('orgs.org_home'))
# creating a new sub org should also redirect
response = self.client.get(reverse('orgs.org_create_sub_org'))
self.assertRedirect(response, reverse('orgs.org_home'))
# make sure posting is gated too
new_org = dict(name='Sub Org', timezone=self.org.timezone, date_format=self.org.date_format)
response = self.client.post(reverse('orgs.org_create_sub_org'), new_org)
self.assertRedirect(response, reverse('orgs.org_home'))
# same thing with trying to transfer credits
response = self.client.get(reverse('orgs.org_transfer_credits'))
self.assertRedirect(response, reverse('orgs.org_home'))
# can't manage users either
response = self.client.get(reverse('orgs.org_manage_accounts_sub_org'))
self.assertRedirect(response, reverse('orgs.org_home'))
# zero out our tier
settings.BRANDING[settings.DEFAULT_BRAND]['tiers'] = dict(multi_org=0)
self.assertTrue(self.org.is_multi_org_tier())
response = self.client.get(reverse('orgs.org_home'))
self.assertContains(response, 'Manage Organizations')
# now we can manage our orgs
response = self.client.get(reverse('orgs.org_sub_orgs'))
self.assertEqual(200, response.status_code)
self.assertContains(response, 'Organizations')
# add a sub org
response = self.client.post(reverse('orgs.org_create_sub_org'), new_org)
self.assertRedirect(response, reverse('orgs.org_sub_orgs'))
sub_org = Org.objects.filter(name='Sub Org').first()
self.assertIsNotNone(sub_org)
self.assertIn(self.admin, sub_org.administrators.all())
# load the transfer credit page
response = self.client.get(reverse('orgs.org_transfer_credits'))
self.assertEqual(200, response.status_code)
# try to transfer more than we have
post_data = dict(from_org=self.org.id, to_org=sub_org.id, amount=1500)
response = self.client.post(reverse('orgs.org_transfer_credits'), post_data)
self.assertContains(response, "Pick a different organization to transfer from")
# now transfer some credits
post_data = dict(from_org=self.org.id, to_org=sub_org.id, amount=600)
response = self.client.post(reverse('orgs.org_transfer_credits'), post_data)
self.assertEqual(400, self.org.get_credits_remaining())
self.assertEqual(600, sub_org.get_credits_remaining())
# we can reach the manage accounts page too now
response = self.client.get('%s?org=%d' % (reverse('orgs.org_manage_accounts_sub_org'), sub_org.id))
self.assertEqual(200, response.status_code)
# edit our sub org's name
new_org['name'] = 'New Sub Org Name'
new_org['slug'] = 'new-sub-org-name'
response = self.client.post('%s?org=%s' % (reverse('orgs.org_edit_sub_org'), sub_org.pk), new_org)
self.assertIsNotNone(Org.objects.filter(name='New Sub Org Name').first())
# now we should see new topups on our sub org
session['org_id'] = sub_org.id
session.save()
response = self.client.get(reverse('orgs.topup_list'))
self.assertContains(response, '600 Credits')
class AnonOrgTest(TembaTest):
"""
Tests the case where our organization is marked as anonymous, that is, phone numbers are masked
for users.
"""
def setUp(self):
super(AnonOrgTest, self).setUp()
self.org.is_anon = True
self.org.save()
def test_contacts(self):
from temba.contacts.models import ContactURN
# are there real phone numbers on the contact list page?
contact = self.create_contact(None, "+250788123123")
self.login(self.admin)
masked = "%010d" % contact.pk
response = self.client.get(reverse('contacts.contact_list'))
# phone not in the list
self.assertNotContains(response, "788 123 123")
# but the id is
self.assertContains(response, masked)
self.assertContains(response, ContactURN.ANON_MASK_HTML)
# can't search for it
response = self.client.get(reverse('contacts.contact_list') + "?search=788")
# can't check for '788' since it appears in the search box, so check the rest of the number instead
self.assertNotContains(response, "123123")
# create a flow
flow = self.create_flow()
# start the contact down it
flow.start([], [contact])
# should have one SMS
self.assertEquals(1, Msg.objects.all().count())
# shouldn't show the number on the outgoing page
response = self.client.get(reverse('msgs.msg_outbox'))
self.assertNotContains(response, "788 123 123")
# create an incoming SMS, check our flow page
Msg.create_incoming(self.channel, contact.get_urn().urn, "Blue")
response = self.client.get(reverse('msgs.msg_flow'))
self.assertNotContains(response, "788 123 123")
self.assertContains(response, masked)
# send another, this will be in our inbox this time
Msg.create_incoming(self.channel, contact.get_urn().urn, "Where's the beef?")
response = self.client.get(reverse('msgs.msg_flow'))
self.assertNotContains(response, "788 123 123")
self.assertContains(response, masked)
# contact detail page
response = self.client.get(reverse('contacts.contact_read', args=[contact.uuid]))
self.assertNotContains(response, "788 123 123")
self.assertContains(response, masked)
class OrgCRUDLTest(TembaTest):
def test_org_grant(self):
grant_url = reverse('orgs.org_grant')
response = self.client.get(grant_url)
self.assertRedirect(response, '/users/login/')
self.user = self.create_user(username="tito")
self.login(self.user)
response = self.client.get(grant_url)
self.assertRedirect(response, '/users/login/')
granters = Group.objects.get(name='Granters')
self.user.groups.add(granters)
response = self.client.get(grant_url)
self.assertEquals(200, response.status_code)
# fill out the form
post_data = dict(email='john@carmack.com', first_name="John", last_name="Carmack",
name="Oculus", timezone="Africa/Kigali", credits="100000", password='dukenukem')
response = self.client.post(grant_url, post_data, follow=True)
self.assertContains(response, "created")
org = Org.objects.get(name="Oculus")
self.assertEquals(100000, org.get_credits_remaining())
# check user exists and is admin
User.objects.get(username="john@carmack.com")
self.assertTrue(org.administrators.filter(username="john@carmack.com"))
self.assertTrue(org.administrators.filter(username="tito"))
# try a new org with a user that already exists instead
del post_data['password']
post_data['name'] = "id Software"
response = self.client.post(grant_url, post_data, follow=True)
self.assertContains(response, "created")
org = Org.objects.get(name="id Software")
self.assertEquals(100000, org.get_credits_remaining())
self.assertTrue(org.administrators.filter(username="john@carmack.com"))
self.assertTrue(org.administrators.filter(username="tito"))
@patch("temba.orgs.views.OrgCRUDL.Signup.pre_process")
def test_new_signup_with_user_logged_in(self, mock_pre_process):
mock_pre_process.return_value = None
signup_url = reverse('orgs.org_signup')
self.user = self.create_user(username="tito")
self.login(self.user)
response = self.client.get(signup_url)
self.assertEqual(response.status_code, 200)
post_data = dict(first_name="Kellan", last_name="Alexander", email="kellan@example.com",
password="HeyThere", name="AlexCom", timezone="Africa/Kigali")
response = self.client.post(signup_url, post_data)
self.assertEqual(response.status_code, 302)
# should have a new user
user = User.objects.get(username="kellan@example.com")
self.assertEqual(user.first_name, "Kellan")
self.assertEqual(user.last_name, "Alexander")
self.assertEqual(user.email, "kellan@example.com")
self.assertTrue(user.check_password("HeyThere"))
self.assertTrue(user.api_token) # should be able to generate an API token
# should have a new org
org = Org.objects.get(name="AlexCom")
self.assertEqual(org.timezone, pytz.timezone("Africa/Kigali"))
# of which our user is an administrator
self.assertTrue(org.get_org_admins().filter(pk=user.pk))
# not the logged in user at the signup time
self.assertFalse(org.get_org_admins().filter(pk=self.user.pk))
def test_org_signup(self):
signup_url = reverse('orgs.org_signup')
response = self.client.get(signup_url)
self.assertEqual(response.status_code, 200)
self.assertIn('name', response.context['form'].fields)
# submit with missing fields
response = self.client.post(signup_url, {})
self.assertFormError(response, 'form', 'name', "This field is required.")
self.assertFormError(response, 'form', 'first_name', "This field is required.")
self.assertFormError(response, 'form', 'last_name', "This field is required.")
self.assertFormError(response, 'form', 'email', "This field is required.")
self.assertFormError(response, 'form', 'password', "This field is required.")
self.assertFormError(response, 'form', 'timezone', "This field is required.")
# submit with invalid password and email
post_data = dict(first_name="Eugene", last_name="Rwagasore", email="bad_email",
password="badpass", name="Your Face", timezone="Africa/Kigali")
response = self.client.post(signup_url, post_data)
self.assertFormError(response, 'form', 'email', "Enter a valid email address.")
self.assertFormError(response, 'form', 'password', "Passwords must contain at least 8 letters.")
# submit with valid data (long email)
post_data = dict(first_name="Eugene", last_name="Rwagasore", email="myal12345678901234567890@relieves.org",
password="HelloWorld1", name="Relieves World", timezone="Africa/Kigali")
response = self.client.post(signup_url, post_data)
self.assertEqual(response.status_code, 302)
# should have a new user
user = User.objects.get(username="myal12345678901234567890@relieves.org")
self.assertEqual(user.first_name, "Eugene")
self.assertEqual(user.last_name, "Rwagasore")
self.assertEqual(user.email, "myal12345678901234567890@relieves.org")
self.assertTrue(user.check_password("HelloWorld1"))
self.assertTrue(user.api_token) # should be able to generate an API token
# should have a new org
org = Org.objects.get(name="Relieves World")
self.assertEqual(org.timezone, pytz.timezone("Africa/Kigali"))
self.assertEqual(str(org), "Relieves World")
self.assertEqual(org.slug, "relieves-world")
# of which our user is an administrator
self.assertTrue(org.get_org_admins().filter(pk=user.pk))
# org should have 1000 credits
self.assertEqual(org.get_credits_remaining(), 1000)
# from a single welcome topup
topup = TopUp.objects.get(org=org)
self.assertEqual(topup.credits, 1000)
self.assertEqual(topup.price, 0)
# fake session set_org to make the test work
user.set_org(org)
# should now be able to go to channels page
response = self.client.get(reverse('channels.channel_claim'))
self.assertEquals(200, response.status_code)
# check that we have all the tabs
self.assertContains(response, reverse('msgs.msg_inbox'))
self.assertContains(response, reverse('flows.flow_list'))
self.assertContains(response, reverse('contacts.contact_list'))
self.assertContains(response, reverse('channels.channel_list'))
self.assertContains(response, reverse('orgs.org_home'))
post_data['name'] = "Relieves World Rwanda"
response = self.client.post(signup_url, post_data)
self.assertTrue('email' in response.context['form'].errors)
# if we hit /login we'll be taken back to the channel page
response = self.client.get(reverse('users.user_check_login'))
self.assertRedirect(response, reverse('orgs.org_choose'))
# but if we log out, same thing takes us to the login page
self.client.logout()
response = self.client.get(reverse('users.user_check_login'))
self.assertRedirect(response, reverse('users.user_login'))
# try going to the org home page, no dice
response = self.client.get(reverse('orgs.org_home'))
self.assertRedirect(response, reverse('users.user_login'))
# log in as the user
self.client.login(username="myal12345678901234567890@relieves.org", password="HelloWorld1")
response = self.client.get(reverse('orgs.org_home'))
self.assertEquals(200, response.status_code)
# try setting our webhook and subscribe to one of the events
response = self.client.post(reverse('orgs.org_webhook'), dict(webhook='http://fake.com/webhook.php', mt_sms=1))
self.assertRedirect(response, reverse('orgs.org_home'))
org = Org.objects.get(name="Relieves World")
self.assertEquals("http://fake.com/webhook.php", org.get_webhook_url())
self.assertTrue(org.is_notified_of_mt_sms())
self.assertFalse(org.is_notified_of_mo_sms())
self.assertFalse(org.is_notified_of_mt_call())
self.assertFalse(org.is_notified_of_mo_call())
self.assertFalse(org.is_notified_of_alarms())
# try changing our username, wrong password
post_data = dict(email='myal@wr.org', current_password='HelloWorld')
response = self.client.post(reverse('orgs.user_edit'), post_data)
self.assertEquals(200, response.status_code)
self.assertTrue('current_password' in response.context['form'].errors)
# bad new password
post_data = dict(email='myal@wr.org', current_password='HelloWorld1', new_password='passwor')
response = self.client.post(reverse('orgs.user_edit'), post_data)
self.assertEquals(200, response.status_code)
self.assertTrue('new_password' in response.context['form'].errors)
User.objects.create(username='bill@msn.com', email='bill@msn.com')
# dupe user
post_data = dict(email='bill@msn.com', current_password='HelloWorld1')
response = self.client.post(reverse('orgs.user_edit'), post_data)
self.assertEquals(200, response.status_code)
self.assertTrue('email' in response.context['form'].errors)
post_data = dict(email='myal@wr.org', first_name="Myal", last_name="Greene", language="en-us", current_password='HelloWorld1')
response = self.client.post(reverse('orgs.user_edit'), post_data)
self.assertRedirect(response, reverse('orgs.org_home'))
self.assertTrue(User.objects.get(username='myal@wr.org'))
self.assertTrue(User.objects.get(email='myal@wr.org'))
self.assertFalse(User.objects.filter(username='myal@relieves.org'))
self.assertFalse(User.objects.filter(email='myal@relieves.org'))
post_data['current_password'] = 'HelloWorld1'
post_data['new_password'] = 'Password123'
response = self.client.post(reverse('orgs.user_edit'), post_data)
self.assertRedirect(response, reverse('orgs.org_home'))
user = User.objects.get(username='myal@wr.org')
self.assertTrue(user.check_password('Password123'))
def test_org_timezone(self):
self.assertEqual(self.org.timezone, pytz.timezone('Africa/Kigali'))
Msg.create_incoming(self.channel, "tel:250788382382", "My name is Frank")
self.login(self.admin)
response = self.client.get(reverse('msgs.msg_inbox'), follow=True)
# Check the message datetime
created_on = response.context['object_list'][0].created_on.astimezone(self.org.timezone)
self.assertIn(created_on.strftime("%I:%M %p").lower().lstrip('0'), response.content)
# change the org timezone to "Africa/Nairobi"
self.org.timezone = pytz.timezone('Africa/Nairobi')
self.org.save()
response = self.client.get(reverse('msgs.msg_inbox'), follow=True)
        # check that the message datetime reflects the new org timezone
created_on = response.context['object_list'][0].created_on.astimezone(self.org.timezone)
self.assertIn(created_on.strftime("%I:%M %p").lower().lstrip('0'), response.content)
def test_urn_schemes(self):
# remove existing channels
Channel.objects.all().update(is_active=False, org=None)
self.assertEqual(set(), self.org.get_schemes(Channel.ROLE_SEND))
self.assertEqual(set(), self.org.get_schemes(Channel.ROLE_RECEIVE))
# add a receive only tel channel
Channel.create(self.org, self.user, 'RW', Channel.TYPE_TWILIO, "Nexmo", "0785551212", role="R", secret="45678", gcm_id="123")
self.org = Org.objects.get(pk=self.org.pk)
self.assertEqual(set(), self.org.get_schemes(Channel.ROLE_SEND))
self.assertEqual({TEL_SCHEME}, self.org.get_schemes(Channel.ROLE_RECEIVE))
# add a send/receive tel channel
Channel.create(self.org, self.user, 'RW', Channel.TYPE_TWILIO, "Twilio", "0785553434", role="SR", secret="56789", gcm_id="456")
self.org = Org.objects.get(pk=self.org.id)
self.assertEqual({TEL_SCHEME}, self.org.get_schemes(Channel.ROLE_SEND))
self.assertEqual({TEL_SCHEME}, self.org.get_schemes(Channel.ROLE_RECEIVE))
# add a twitter channel
Channel.create(self.org, self.user, None, Channel.TYPE_TWITTER, "Twitter")
self.org = Org.objects.get(pk=self.org.id)
self.assertEqual({TEL_SCHEME, TWITTER_SCHEME}, self.org.get_schemes(Channel.ROLE_SEND))
self.assertEqual({TEL_SCHEME, TWITTER_SCHEME}, self.org.get_schemes(Channel.ROLE_RECEIVE))
def test_login_case_not_sensitive(self):
login_url = reverse('users.user_login')
User.objects.create_superuser("superuser", "superuser@group.com", "superuser")
response = self.client.post(login_url, dict(username="superuser", password="superuser"))
self.assertEquals(response.status_code, 302)
response = self.client.post(login_url, dict(username="superuser", password="superuser"), follow=True)
self.assertEquals(response.request['PATH_INFO'], reverse('orgs.org_manage'))
response = self.client.post(login_url, dict(username="SUPeruser", password="superuser"))
self.assertEquals(response.status_code, 302)
response = self.client.post(login_url, dict(username="SUPeruser", password="superuser"), follow=True)
self.assertEquals(response.request['PATH_INFO'], reverse('orgs.org_manage'))
User.objects.create_superuser("withCAPS", "with_caps@group.com", "thePASSWORD")
response = self.client.post(login_url, dict(username="withcaps", password="thePASSWORD"))
self.assertEquals(response.status_code, 302)
response = self.client.post(login_url, dict(username="withcaps", password="thePASSWORD"), follow=True)
self.assertEquals(response.request['PATH_INFO'], reverse('orgs.org_manage'))
# passwords stay case sensitive
response = self.client.post(login_url, dict(username="withcaps", password="thepassword"), follow=True)
self.assertTrue('form' in response.context)
self.assertTrue(response.context['form'].errors)
def test_org_service(self):
# create a customer service user
self.csrep = self.create_user("csrep")
self.csrep.groups.add(Group.objects.get(name="Customer Support"))
self.csrep.is_staff = True
self.csrep.save()
service_url = reverse('orgs.org_service')
# without logging in, try to service our main org
response = self.client.post(service_url, dict(organization=self.org.id))
self.assertRedirect(response, '/users/login/')
# try logging in with a normal user
self.login(self.admin)
# same thing, no permission
response = self.client.post(service_url, dict(organization=self.org.id))
self.assertRedirect(response, '/users/login/')
# ok, log in as our cs rep
self.login(self.csrep)
# then service our org
response = self.client.post(service_url, dict(organization=self.org.id))
self.assertRedirect(response, '/msg/inbox/')
# create a new contact
response = self.client.post(reverse('contacts.contact_create'), data=dict(name='Ben Haggerty',
urn__tel__0='0788123123'))
self.assertNoFormErrors(response)
        # make sure that contact's created_by is our cs rep
contact = Contact.objects.get(urns__path='+250788123123', org=self.org)
self.assertEquals(self.csrep, contact.created_by)
# make sure we can manage topups as well
TopUp.objects.create(org=self.org, price=100, credits=1000, expires_on=timezone.now() + timedelta(days=30),
created_by=self.admin, modified_by=self.admin)
response = self.client.get(reverse('orgs.topup_manage') + "?org=%d" % self.org.id)
# i'd buy that for a dollar!
self.assertContains(response, '$1.00')
self.assertNotRedirect(response, '/users/login/')
# ok, now end our session
response = self.client.post(service_url, dict())
self.assertRedirect(response, '/org/manage/')
# can no longer go to inbox, asked to log in
response = self.client.get(reverse('msgs.msg_inbox'))
self.assertRedirect(response, '/users/login/')
class LanguageTest(TembaTest):
def test_languages(self):
url = reverse('orgs.org_languages')
self.login(self.admin)
# update our org with some language settings
response = self.client.post(url, dict(primary_lang='fre', languages='hat,arc'))
self.assertEqual(response.status_code, 302)
self.org.refresh_from_db()
self.assertEqual(self.org.primary_language.name, 'French')
self.assertIsNotNone(self.org.languages.filter(name='French'))
# everything after the paren should be stripped for aramaic
self.assertIsNotNone(self.org.languages.filter(name='Official Aramaic'))
# everything after the semi should be stripped for haitian
self.assertIsNotNone(self.org.languages.filter(name='Haitian'))
# check that the last load shows our new languages
response = self.client.get(url)
self.assertEqual(response.context['languages'], 'Haitian and Official Aramaic')
self.assertContains(response, 'fre')
self.assertContains(response, 'hat,arc')
# three translation languages
self.client.post(url, dict(primary_lang='fre', languages='hat,arc,spa'))
response = self.client.get(reverse('orgs.org_languages'))
self.assertEqual(response.context['languages'], 'Haitian, Official Aramaic and Spanish')
# one translation language
self.client.post(url, dict(primary_lang='fre', languages='hat'))
response = self.client.get(reverse('orgs.org_languages'))
self.assertEqual(response.context['languages'], 'Haitian')
# remove all languages
self.client.post(url, dict())
self.org.refresh_from_db()
self.assertIsNone(self.org.primary_language)
self.assertFalse(self.org.languages.all())
# search languages
response = self.client.get('%s?search=fre' % url)
results = response.json()['results']
self.assertEqual(len(results), 4)
# initial should do a match on code only
response = self.client.get('%s?initial=fre' % url)
results = response.json()['results']
self.assertEqual(len(results), 1)
def test_language_codes(self):
self.assertEquals('French', languages.get_language_name('fre'))
self.assertEquals('Creoles and pidgins, English based', languages.get_language_name('cpe'))
# should strip off anything after an open paren or semicolon
self.assertEquals('Official Aramaic', languages.get_language_name('arc'))
self.assertEquals('Haitian', languages.get_language_name('hat'))
# check that search returns results and in the proper order
matches = languages.search_language_names('Fre')
self.assertEquals(4, len(matches))
self.assertEquals('Creoles and pidgins, French-based', matches[0]['text'])
self.assertEquals('French', matches[1]['text'])
self.assertEquals('French, Middle (ca.1400-1600)', matches[2]['text'])
self.assertEquals('French, Old (842-ca.1400)', matches[3]['text'])
# try a language that doesn't exist
self.assertEquals(None, languages.get_language_name('klingon'))
def test_get_localized_text(self):
text_translations = dict(eng="Hello", esp="Hola")
# null case
self.assertEqual(Language.get_localized_text(None, None, "Hi"), "Hi")
# simple dictionary case
self.assertEqual(Language.get_localized_text(text_translations, ['eng'], "Hi"), "Hello")
# missing language case
self.assertEqual(Language.get_localized_text(text_translations, ['fre'], "Hi"), "Hi")
# secondary option
self.assertEqual(Language.get_localized_text(text_translations, ['fre', 'esp'], "Hi"), "Hola")
class BulkExportTest(TembaTest):
def test_get_dependencies(self):
# import a flow that triggers another flow
contact1 = self.create_contact("Marshawn", "+14255551212")
substitutions = dict(contact_id=contact1.id)
flow = self.get_flow('triggered', substitutions)
# read in the old version 8 raw json
old_json = json.loads(self.get_import_json('triggered', substitutions))
old_actions = old_json['flows'][1]['action_sets'][0]['actions']
# splice our actionset with old bits
actionset = flow.action_sets.all()[0]
actionset.actions = json.dumps(old_actions)
actionset.save()
# fake our version number back to 8
flow.version_number = 8
flow.save()
# now make sure a call to get dependencies succeeds and shows our flow
triggeree = Flow.objects.filter(name='Triggeree').first()
self.assertIn(triggeree, flow.get_dependencies()['flows'])
def test_trigger_flow(self):
self.import_file('triggered_flow')
flow = Flow.objects.filter(name='Trigger a Flow', org=self.org).first()
definition = flow.as_json()
actions = definition[Flow.ACTION_SETS][0]['actions']
self.assertEquals(1, len(actions))
self.assertEquals('Triggered Flow', actions[0]['flow']['name'])
def test_trigger_dependency(self):
        # tests exporting only a single flow (despite its dependencies) and makes sure we
        # don't include the triggers of dependent flows which weren't exported
self.import_file('parent_child_trigger')
parent = Flow.objects.filter(name='Parent Flow').first()
self.login(self.admin)
# export only the parent
post_data = dict(flows=[parent.pk], campaigns=[])
response = self.client.post(reverse('orgs.org_export'), post_data)
exported = response.json()
# shouldn't have any triggers
self.assertFalse(exported['triggers'])
def test_subflow_dependencies(self):
self.import_file('subflow')
parent = Flow.objects.filter(name='Parent Flow').first()
child = Flow.objects.filter(name='Child Flow').first()
self.assertIn(child, parent.get_dependencies()['flows'])
self.login(self.admin)
response = self.client.get(reverse('orgs.org_export'))
from bs4 import BeautifulSoup
soup = BeautifulSoup(response.content, "html.parser")
group = str(soup.findAll("div", {"class": "exportables bucket"})[0])
self.assertIn('Parent Flow', group)
self.assertIn('Child Flow', group)
def test_flow_export_dynamic_group(self):
flow = self.get_flow('favorites')
# get one of our flow actionsets, change it to an AddToGroupAction
actionset = ActionSet.objects.filter(flow=flow).order_by('y').first()
# replace the actions
from temba.flows.models import AddToGroupAction
actionset.set_actions_dict([AddToGroupAction([dict(uuid='123', name="Other Group"), '@contact.name']).as_json()])
actionset.save()
# now let's export!
self.login(self.admin)
post_data = dict(flows=[flow.pk], campaigns=[])
response = self.client.post(reverse('orgs.org_export'), post_data)
exported = response.json()
# try to import the flow
flow.delete()
response.json()
Flow.import_flows(exported, self.org, self.admin)
# make sure the created flow has the same action set
flow = Flow.objects.filter(name="%s" % flow.name).first()
actionset = ActionSet.objects.filter(flow=flow).order_by('y').first()
self.assertTrue('@contact.name' in actionset.get_actions()[0].groups)
def test_missing_flows_on_import(self):
# import a flow that starts a missing flow
self.import_file('start_missing_flow')
# the flow that kicks off our missing flow
flow = Flow.objects.get(name='Start Missing Flow')
# make sure our missing flow is indeed not there
self.assertIsNone(Flow.objects.filter(name='Missing Flow').first())
# these two actionsets only have a single action that starts the missing flow
# therefore they should not be created on import
self.assertIsNone(ActionSet.objects.filter(flow=flow, y=160, x=90).first())
self.assertIsNone(ActionSet.objects.filter(flow=flow, y=233, x=395).first())
# should have this actionset, but only one action now since one was removed
other_actionset = ActionSet.objects.filter(flow=flow, y=145, x=731).first()
self.assertEquals(1, len(other_actionset.get_actions()))
# now make sure it does the same thing from an actionset
self.import_file('start_missing_flow_from_actionset')
self.assertIsNotNone(Flow.objects.filter(name='Start Missing Flow').first())
self.assertIsNone(Flow.objects.filter(name='Missing Flow').first())
def test_import(self):
self.login(self.admin)
# try importing without having purchased credits
settings.BRANDING[settings.DEFAULT_BRAND]['tiers'] = dict(import_flows=1, multi_user=100000, multi_org=1000000)
post_data = dict(import_file=open('%s/test_flows/new_mother.json' % settings.MEDIA_ROOT, 'rb'))
response = self.client.post(reverse('orgs.org_import'), post_data)
self.assertEquals(response.context['form'].errors['import_file'][0], 'Sorry, import is a premium feature')
# now purchase some credits and try again
TopUp.objects.create(org=self.org, price=1, credits=10000,
expires_on=timezone.now() + timedelta(days=30),
created_by=self.admin, modified_by=self.admin)
# force our cache to reload
self.org.get_credits_total(force_dirty=True)
self.org.update_caches(OrgEvent.topup_updated, None)
self.assertTrue(self.org.get_purchased_credits() > 0)
# now try again with purchased credits, but our file is too old
post_data = dict(import_file=open('%s/test_flows/too_old.json' % settings.MEDIA_ROOT, 'rb'))
response = self.client.post(reverse('orgs.org_import'), post_data)
self.assertEquals(response.context['form'].errors['import_file'][0], 'This file is no longer valid. Please export a new version and try again.')
# simulate an unexpected exception during import
with patch('temba.triggers.models.Trigger.import_triggers') as validate:
validate.side_effect = Exception('Unexpected Error')
post_data = dict(import_file=open('%s/test_flows/new_mother.json' % settings.MEDIA_ROOT, 'rb'))
response = self.client.post(reverse('orgs.org_import'), post_data)
self.assertEquals(response.context['form'].errors['import_file'][0], 'Sorry, your import file is invalid.')
# trigger import failed, new flows that were added should get rolled back
self.assertIsNone(Flow.objects.filter(org=self.org, name='New Mother').first())
def test_import_campaign_with_translations(self):
self.import_file('campaign_import_with_translations')
campaign = Campaign.objects.all().first()
event = campaign.events.all().first()
action_set = event.flow.action_sets.order_by('-y').first()
actions = action_set.get_actions_dict()
action_msg = actions[0]['msg']
event_msg = json.loads(event.message)
self.assertEqual(event_msg['swa'], 'hello')
self.assertEqual(event_msg['eng'], 'Hey')
# base language for this flow is 'swa' despite our org languages being unset
self.assertEqual(event.flow.base_language, 'swa')
self.assertEqual(action_msg['swa'], 'hello')
self.assertEqual(action_msg['eng'], 'Hey')
def test_export_import(self):
def assert_object_counts():
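            # expected object counts after importing the_clinic; called again
            # after each re-import to verify that importing is idempotent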
self.assertEquals(8, Flow.objects.filter(org=self.org, is_active=True, is_archived=False, flow_type='F').count())
self.assertEquals(2, Flow.objects.filter(org=self.org, is_active=True, is_archived=False, flow_type='M').count())
self.assertEquals(1, Campaign.objects.filter(org=self.org, is_archived=False).count())
self.assertEquals(4, CampaignEvent.objects.filter(campaign__org=self.org, event_type='F').count())
self.assertEquals(2, CampaignEvent.objects.filter(campaign__org=self.org, event_type='M').count())
self.assertEquals(2, Trigger.objects.filter(org=self.org, trigger_type='K', is_archived=False).count())
self.assertEquals(1, Trigger.objects.filter(org=self.org, trigger_type='C', is_archived=False).count())
self.assertEquals(1, Trigger.objects.filter(org=self.org, trigger_type='M', is_archived=False).count())
self.assertEquals(3, ContactGroup.user_groups.filter(org=self.org).count())
self.assertEquals(1, Label.label_objects.filter(org=self.org).count())
# import all our bits
self.import_file('the_clinic')
# check that the right number of objects successfully imported for our app
assert_object_counts()
# let's update some stuff
confirm_appointment = Flow.objects.get(name='Confirm Appointment')
confirm_appointment.expires_after_minutes = 60
confirm_appointment.save()
action_set = confirm_appointment.action_sets.order_by('-y').first()
actions = action_set.get_actions_dict()
actions[0]['msg']['base'] = 'Thanks for nothing'
action_set.set_actions_dict(actions)
action_set.save()
trigger = Trigger.objects.filter(keyword='patient').first()
trigger.flow = confirm_appointment
trigger.save()
message_flow = Flow.objects.filter(flow_type='M', campaignevent__offset=-1).order_by('pk').first()
action_set = message_flow.action_sets.order_by('-y').first()
actions = action_set.get_actions_dict()
self.assertEquals("Hi there, just a quick reminder that you have an appointment at The Clinic at @contact.next_appointment. If you can't make it please call 1-888-THE-CLINIC.", actions[0]['msg']['base'])
actions[0]['msg'] = 'No reminders for you!'
action_set.set_actions_dict(actions)
action_set.save()
# now reimport
self.import_file('the_clinic')
# our flow should get reset from the import
confirm_appointment = Flow.objects.get(pk=confirm_appointment.pk)
action_set = confirm_appointment.action_sets.order_by('-y').first()
actions = action_set.get_actions_dict()
self.assertEquals("Thanks, your appointment at The Clinic has been confirmed for @contact.next_appointment. See you then!", actions[0]['msg']['base'])
# same with our trigger
trigger = Trigger.objects.filter(keyword='patient').first()
self.assertEquals(Flow.objects.filter(name='Register Patient').first(), trigger.flow)
# our old campaign message flow should be inactive now
self.assertTrue(Flow.objects.filter(pk=message_flow.pk, is_active=False))
# find our new message flow, and see that the original message is there
message_flow = Flow.objects.filter(flow_type='M', campaignevent__offset=-1, is_active=True).order_by('pk').first()
action_set = Flow.objects.get(pk=message_flow.pk).action_sets.order_by('-y').first()
actions = action_set.get_actions_dict()
self.assertEquals("Hi there, just a quick reminder that you have an appointment at The Clinic at @contact.next_appointment. If you can't make it please call 1-888-THE-CLINIC.", actions[0]['msg']['base'])
# and we should have the same number of items as after the first import
assert_object_counts()
# see that everything shows up properly on our export page
self.login(self.admin)
response = self.client.get(reverse('orgs.org_export'))
self.assertContains(response, 'Register Patient')
self.assertContains(response, 'Catch All')
self.assertContains(response, 'Missed Call')
self.assertContains(response, 'Start Notifications')
self.assertContains(response, 'Stop Notifications')
self.assertContains(response, 'Confirm Appointment')
self.assertContains(response, 'Appointment Followup')
# our campaign
self.assertContains(response, 'Appointment Schedule')
# now let's export!
post_data = dict(flows=[f.pk for f in Flow.objects.filter(flow_type='F')],
campaigns=[c.pk for c in Campaign.objects.all()])
response = self.client.post(reverse('orgs.org_export'), post_data)
exported = response.json()
self.assertEquals(CURRENT_EXPORT_VERSION, exported.get('version', 0))
self.assertEquals('https://app.rapidpro.io', exported.get('site', None))
self.assertEquals(8, len(exported.get('flows', [])))
self.assertEquals(4, len(exported.get('triggers', [])))
self.assertEquals(1, len(exported.get('campaigns', [])))
# set our org language to english
self.org.set_languages(self.admin, ['eng', 'fre'], 'eng')
# finally let's try importing our exported file
self.org.import_app(exported, self.admin, site='http://app.rapidpro.io')
assert_object_counts()
message_flow = Flow.objects.filter(flow_type='M', campaignevent__offset=-1, is_active=True).order_by('pk').first()
# make sure the base language is set to 'base', not 'eng'
self.assertEqual(message_flow.base_language, 'base')
# let's rename a flow and import our export again
flow = Flow.objects.get(name='Confirm Appointment')
flow.name = "A new flow"
flow.save()
campaign = Campaign.objects.all().first()
campaign.name = "A new campagin"
campaign.save()
group = ContactGroup.user_groups.filter(name='Pending Appointments').first()
group.name = "A new group"
group.save()
# it should fall back on ids and not create new objects even though the names changed
self.org.import_app(exported, self.admin, site='http://app.rapidpro.io')
assert_object_counts()
        # and our objects should have the same names as before
self.assertEquals('Confirm Appointment', Flow.objects.get(pk=flow.pk).name)
self.assertEquals('Appointment Schedule', Campaign.objects.all().first().name)
self.assertEquals('Pending Appointments', ContactGroup.user_groups.get(pk=group.pk).name)
# let's rename our objects again
flow.name = "A new name"
flow.save()
campaign.name = "A new campagin"
campaign.save()
group.name = "A new group"
group.save()
        # now import the same export but pretend it's from a different site
self.org.import_app(exported, self.admin, site='http://temba.io')
# the newly named objects won't get updated in this case and we'll create new ones instead
self.assertEquals(9, Flow.objects.filter(org=self.org, is_archived=False, flow_type='F').count())
self.assertEquals(2, Campaign.objects.filter(org=self.org, is_archived=False).count())
self.assertEquals(4, ContactGroup.user_groups.filter(org=self.org).count())
# now archive a flow
register = Flow.objects.filter(name='Register Patient').first()
register.is_archived = True
register.save()
# default view shouldn't show archived flows
response = self.client.get(reverse('orgs.org_export'))
self.assertNotContains(response, 'Register Patient')
        # with the archived flag on, it should be there
response = self.client.get("%s?archived=1" % reverse('orgs.org_export'))
self.assertContains(response, 'Register Patient')
# delete our flow, and reimport
confirm_appointment.delete()
self.org.import_app(exported, self.admin, site='https://app.rapidpro.io')
# make sure we have the previously exported expiration
confirm_appointment = Flow.objects.get(name='Confirm Appointment')
self.assertEquals(60, confirm_appointment.expires_after_minutes)
# now delete a flow
register = Flow.objects.filter(name='Register Patient').first()
register.is_active = False
register.save()
# default view shouldn't show deleted flows
response = self.client.get(reverse('orgs.org_export'))
self.assertNotContains(response, 'Register Patient')
        # even with the archived flag on, deleted flows should not show up
response = self.client.get("%s?archived=1" % reverse('orgs.org_export'))
self.assertNotContains(response, 'Register Patient')
class CreditAlertTest(TembaTest):
def test_check_org_credits(self):
self.joe = self.create_contact("Joe Blow", "123")
self.create_msg(contact=self.joe)
with self.settings(HOSTNAME="rapidpro.io", SEND_EMAILS=True):
with patch('temba.orgs.models.Org.get_credits_remaining') as mock_get_credits_remaining:
mock_get_credits_remaining.return_value = -1
# no alert yet
self.assertFalse(CreditAlert.objects.all())
CreditAlert.check_org_credits()
# one alert created and sent
self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
alert_type=ORG_CREDIT_OVER).count())
self.assertEquals(1, len(mail.outbox))
# alert email is for out of credits type
sent_email = mail.outbox[0]
self.assertEqual(len(sent_email.to), 1)
self.assertTrue('RapidPro account for Temba' in sent_email.body)
self.assertTrue('is out of credit.' in sent_email.body)
# no new alert if one is sent and no new email
CreditAlert.check_org_credits()
self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
alert_type=ORG_CREDIT_OVER).count())
self.assertEquals(1, len(mail.outbox))
# reset alerts
CreditAlert.reset_for_org(self.org)
self.assertFalse(CreditAlert.objects.filter(org=self.org, is_active=True))
# can resend a new alert
CreditAlert.check_org_credits()
self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
alert_type=ORG_CREDIT_OVER).count())
self.assertEquals(2, len(mail.outbox))
mock_get_credits_remaining.return_value = 10
with patch('temba.orgs.models.Org.has_low_credits') as mock_has_low_credits:
mock_has_low_credits.return_value = True
self.assertFalse(CreditAlert.objects.filter(org=self.org, alert_type=ORG_CREDIT_LOW))
CreditAlert.check_org_credits()
# low credit alert created and email sent
self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
alert_type=ORG_CREDIT_LOW).count())
self.assertEquals(3, len(mail.outbox))
# email sent
sent_email = mail.outbox[2]
self.assertEqual(len(sent_email.to), 1)
self.assertTrue('RapidPro account for Temba' in sent_email.body)
self.assertTrue('is running low on credits' in sent_email.body)
# no new alert if one is sent and no new email
CreditAlert.check_org_credits()
self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
alert_type=ORG_CREDIT_LOW).count())
self.assertEquals(3, len(mail.outbox))
# reset alerts
CreditAlert.reset_for_org(self.org)
self.assertFalse(CreditAlert.objects.filter(org=self.org, is_active=True))
# can resend a new alert
CreditAlert.check_org_credits()
self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
alert_type=ORG_CREDIT_LOW).count())
self.assertEquals(4, len(mail.outbox))
mock_has_low_credits.return_value = False
                    with patch('temba.orgs.models.Org.get_credits_expiring_soon') as mock_get_credits_expiring_soon:
                        mock_get_credits_expiring_soon.return_value = 0
self.assertFalse(CreditAlert.objects.filter(org=self.org, alert_type=ORG_CREDIT_EXPIRING))
CreditAlert.check_org_credits()
# no alert since no expiring credits
self.assertFalse(CreditAlert.objects.filter(org=self.org, alert_type=ORG_CREDIT_EXPIRING))
                        mock_get_credits_expiring_soon.return_value = 200
CreditAlert.check_org_credits()
# expiring credit alert created and email sent
self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
alert_type=ORG_CREDIT_EXPIRING).count())
self.assertEquals(5, len(mail.outbox))
# email sent
sent_email = mail.outbox[4]
self.assertEqual(len(sent_email.to), 1)
self.assertTrue('RapidPro account for Temba' in sent_email.body)
self.assertTrue('expiring credits in less than one month.' in sent_email.body)
# no new alert if one is sent and no new email
CreditAlert.check_org_credits()
self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
alert_type=ORG_CREDIT_EXPIRING).count())
self.assertEquals(5, len(mail.outbox))
# reset alerts
CreditAlert.reset_for_org(self.org)
self.assertFalse(CreditAlert.objects.filter(org=self.org, is_active=True))
# can resend a new alert
CreditAlert.check_org_credits()
self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
alert_type=ORG_CREDIT_EXPIRING).count())
self.assertEquals(6, len(mail.outbox))
class UnreadCountTest(FlowFileTest):
def test_unread_count_test(self):
flow = self.get_flow('favorites')
# create a trigger for 'favs'
Trigger.objects.create(org=self.org, flow=flow, keyword='favs', created_by=self.admin, modified_by=self.admin)
# start our flow by firing an incoming message
contact = self.create_contact('Anakin Skywalker', '+12067791212')
msg = self.create_msg(contact=contact, text="favs")
# process it
Msg.process_message(msg)
# our flow unread count should have gone up
self.assertEquals(1, flow.get_and_clear_unread_responses())
# cleared by the first call
self.assertEquals(0, flow.get_and_clear_unread_responses())
        # at this point our flow should have started... go to our trigger list page to see if our context is correct
self.login(self.admin)
trigger_list = reverse('triggers.trigger_list')
response = self.client.get(trigger_list)
self.assertEquals(0, response.context['msgs_unread_count'])
self.assertEquals(1, response.context['flows_unread_count'])
# answer another question in the flow
msg = self.create_msg(contact=contact, text="red")
Msg.process_message(msg)
response = self.client.get(trigger_list)
self.assertEquals(0, response.context['msgs_unread_count'])
self.assertEquals(2, response.context['flows_unread_count'])
# finish the flow and send a message outside it
msg = self.create_msg(contact=contact, text="primus")
Msg.process_message(msg)
msg = self.create_msg(contact=contact, text="nic")
Msg.process_message(msg)
msg = self.create_msg(contact=contact, text="Hello?")
Msg.process_message(msg)
response = self.client.get(trigger_list)
self.assertEquals(4, response.context['flows_unread_count'])
self.assertEquals(1, response.context['msgs_unread_count'])
# visit the msg pane
response = self.client.get(reverse('msgs.msg_inbox'))
self.assertEquals(4, response.context['flows_unread_count'])
self.assertEquals(0, response.context['msgs_unread_count'])
# now the flow list pane
response = self.client.get(reverse('flows.flow_list'))
self.assertEquals(0, response.context['flows_unread_count'])
self.assertEquals(0, response.context['msgs_unread_count'])
# make sure a test contact doesn't update our counts
test_contact = self.create_contact("Test Contact", "+12065551214", is_test=True)
msg = self.create_msg(contact=test_contact, text="favs")
Msg.process_message(msg)
# assert our counts weren't updated
self.assertEquals(0, self.org.get_unread_msg_count(UNREAD_INBOX_MSGS))
self.assertEquals(0, self.org.get_unread_msg_count(UNREAD_FLOW_MSGS))
# wasn't counted for the individual flow
self.assertEquals(0, flow.get_and_clear_unread_responses())
class EmailContextProcessorsTest(SmartminTest):
def setUp(self):
super(EmailContextProcessorsTest, self).setUp()
self.admin = self.create_user("Administrator")
self.middleware = BrandingMiddleware()
def test_link_components(self):
self.request = Mock(spec=HttpRequest)
self.request.get_host.return_value = "rapidpro.io"
response = self.middleware.process_request(self.request)
self.assertIsNone(response)
self.assertEquals(link_components(self.request, self.admin), dict(protocol="https", hostname="app.rapidpro.io"))
with self.settings(HOSTNAME="rapidpro.io"):
forget_url = reverse('users.user_forget')
post_data = dict()
post_data['email'] = 'nouser@nouser.com'
response = self.client.post(forget_url, post_data, follow=True)
self.assertEquals(1, len(mail.outbox))
sent_email = mail.outbox[0]
self.assertEqual(len(sent_email.to), 1)
self.assertEqual(sent_email.to[0], 'nouser@nouser.com')
            # we have the domain of the rapidpro.io brand
self.assertTrue('app.rapidpro.io' in sent_email.body)
class TestStripeCredits(TembaTest):
@patch('stripe.Customer.create')
@patch('stripe.Charge.create')
@override_settings(SEND_EMAILS=True)
def test_add_credits(self, charge_create, customer_create):
customer_create.return_value = dict_to_struct('Customer', dict(id='stripe-cust-1'))
charge_create.return_value = \
dict_to_struct('Charge', dict(id='stripe-charge-1',
card=dict_to_struct('Card', dict(last4='1234', type='Visa', name='Rudolph'))))
settings.BRANDING[settings.DEFAULT_BRAND]['bundles'] = (dict(cents="2000", credits=1000, feature=""),)
self.org.add_credits('2000', 'stripe-token', self.admin)
        self.assertEqual(2000, self.org.get_credits_total())
# assert we saved our charge info
topup = self.org.topups.last()
self.assertEqual('stripe-charge-1', topup.stripe_charge)
# and we saved our stripe customer info
org = Org.objects.get(id=self.org.id)
self.assertEqual('stripe-cust-1', org.stripe_customer)
        # assert we sent our confirmation email
self.assertEqual(1, len(mail.outbox))
email = mail.outbox[0]
self.assertEquals("RapidPro Receipt", email.subject)
self.assertTrue('Rudolph' in email.body)
self.assertTrue('Visa' in email.body)
self.assertTrue('$20' in email.body)
@patch('stripe.Customer.create')
def test_add_credits_fail(self, customer_create):
customer_create.side_effect = ValueError("Invalid customer token")
with self.assertRaises(ValidationError):
self.org.add_credits('2000', 'stripe-token', self.admin)
# assert no email was sent
self.assertEqual(0, len(mail.outbox))
# and no topups created
self.assertEqual(1, self.org.topups.all().count())
self.assertEqual(1000, self.org.get_credits_total())
def test_add_credits_invalid_bundle(self):
with self.assertRaises(ValidationError):
self.org.add_credits('-10', 'stripe-token', self.admin)
# assert no email was sent
self.assertEqual(0, len(mail.outbox))
# and no topups created
self.assertEqual(1, self.org.topups.all().count())
self.assertEqual(1000, self.org.get_credits_total())
@patch('stripe.Customer.retrieve')
@patch('stripe.Charge.create')
@override_settings(SEND_EMAILS=True)
def test_add_credits_existing_customer(self, charge_create, customer_retrieve):
self.org.stripe_customer = 'stripe-cust-1'
self.org.save()
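        # stub out just enough of the Stripe Customer/Card API surface for
        # add_credits() to exercise the existing-customer code path below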
class MockCard(object):
def __init__(self):
self.id = 'stripe-card-1'
def delete(self):
pass
class MockCards(object):
def all(self):
return dict_to_struct('MockCardData', dict(data=[MockCard(), MockCard()]))
def create(self, card):
return MockCard()
class MockCustomer(object):
def __init__(self):
self.id = 'stripe-cust-1'
self.cards = MockCards()
def save(self):
pass
customer_retrieve.return_value = MockCustomer()
charge_create.return_value = \
dict_to_struct('Charge', dict(id='stripe-charge-1',
card=dict_to_struct('Card', dict(last4='1234', type='Visa', name='Rudolph'))))
settings.BRANDING[settings.DEFAULT_BRAND]['bundles'] = (dict(cents="2000", credits=1000, feature=""),)
self.org.add_credits('2000', 'stripe-token', self.admin)
        self.assertEqual(2000, self.org.get_credits_total())
# assert we saved our charge info
topup = self.org.topups.last()
self.assertEqual('stripe-charge-1', topup.stripe_charge)
# and we saved our stripe customer info
org = Org.objects.get(id=self.org.id)
self.assertEqual('stripe-cust-1', org.stripe_customer)
        # assert we sent our confirmation email
self.assertEqual(1, len(mail.outbox))
email = mail.outbox[0]
self.assertEquals("RapidPro Receipt", email.subject)
self.assertTrue('Rudolph' in email.body)
self.assertTrue('Visa' in email.body)
self.assertTrue('$20' in email.body)
class ParsingTest(TembaTest):
def test_parse_decimal(self):
self.assertEqual(self.org.parse_decimal("Not num"), None)
self.assertEqual(self.org.parse_decimal("00.123"), Decimal("0.123"))
self.assertEqual(self.org.parse_decimal("6e33"), None)
self.assertEqual(self.org.parse_decimal("6e5"), Decimal("600000"))
self.assertEqual(self.org.parse_decimal("9999999999999999999999999"), None)
self.assertEqual(self.org.parse_decimal(""), None)
self.assertEqual(self.org.parse_decimal("NaN"), None)
self.assertEqual(self.org.parse_decimal("Infinity"), None)
|
tsotetsi/textily-web
|
temba/orgs/tests.py
|
Python
|
agpl-3.0
| 142,450
|
[
"VisIt"
] |
144c3a48f19d5406d464e37e06fa63c554a84cee81b9bd673437598abc789df5
|
#!/usr/bin/env python3
"""K. Miernik 2012
k.a.miernik@gmail.com
Distributed under GNU General Public Licence v3
Gaussian peak fitting class
"""
import math
import numpy as np
import os
import sys
import time
from lmfit import minimize, Parameters, report_errors
from Pyspectr.exceptions import GeneralError as GeneralError
class PeakFitter:
def __init__(self, peaks, baseline, plot_name):
self.plot_name = plot_name
self.params = Parameters()
self.peaks = peaks
self.baseline = baseline
if baseline == 'linear':
self.params.add('a0')
self.params.add('a1')
elif baseline == 'quadratic':
self.params.add('a0')
self.params.add('a1')
self.params.add('a2', value=0.0)
else:
raise GeneralError("Unknown background type {}".format(baseline))
for peak_index in range(len(self.peaks)):
self.params.add('x{}'.format(peak_index))
self.params.add('s{}'.format(peak_index))
self.params.add('A{}'.format(peak_index))
if self.peaks[peak_index].get('model') == 'gauss_l':
self.params.add('sL{}'.format(peak_index))
def _gauss(self, params, data_x, peak_index):
"""Gaussian function
"""
s = params['s{}'.format(peak_index)].value
mu = params['x{}'.format(peak_index)].value
A = params['A{}'.format(peak_index)].value
return ( A / (math.sqrt(2 * math.pi) * s) *
np.exp(-0.5 * ((data_x - mu) * (data_x - mu))
/ math.pow(s, 2)) )
def _gauss_lskew(self, params, data_x, peak_index):
"""Left skewed gaussian
"""
s = params['s{}'.format(peak_index)].value
mu = params['x{}'.format(peak_index)].value
A = params['A{}'.format(peak_index)].value
sL = params['sL{}'.format(peak_index)].value
y = []
for x in data_x:
if x < mu:
d = 2 * math.pow(s, 2) * (1 + sL / s * (mu - x))
else:
d = 2 * math.pow(s, 2)
y.append(A / (math.sqrt(2 * math.pi) * s) *
math.exp(-0.5 * math.pow(x - mu, 2) / d) )
return np.array(y)
def _linear(self, params, data_x):
a0 = params['a0'].value
a1 = params['a1'].value
return a0 + a1 * data_x
def _quadratic(self, params, data_x):
a0 = params['a0'].value
a1 = params['a1'].value
a2 = params['a2'].value
return a0 + a1 * data_x + a2 * data_x * data_x
    def restrict_width(self, smin, smax):
        for i, peak in enumerate(self.peaks):
            self.params['s{}'.format(i)].value = (smax + smin) / 2
            self.params['s{}'.format(i)].min = smin
            self.params['s{}'.format(i)].max = smax
def fit_func(self, params, data_x):
"""
Function used in residuals function to be fitted. Combines all peaks and
baseline
"""
y = np.zeros((len(data_x)))
if self.baseline == 'linear':
y += self._linear(params, data_x)
elif self.baseline == 'quadratic':
y += self._quadratic(params, data_x)
for peak_index in range(len(self.peaks)):
if (self.peaks[peak_index].get('model') is None or
self.peaks[peak_index].get('model') == 'gauss'):
y += self._gauss(params, data_x, peak_index)
elif self.peaks[peak_index].get('model') == 'gauss_l':
y += self._gauss_lskew(params, data_x, peak_index)
return y
def residual(self, params, data_x, data_y, data_dy):
"""Residuals to minimize
"""
model = self.fit_func(params, data_x)
return (data_y - model) / data_dy
def find_area(self, data_x, peak_index):
if (self.peaks[peak_index].get('model') is None or
self.peaks[peak_index].get('model') == 'gauss'):
yp = self._gauss(self.params, data_x, peak_index)
elif self.peaks[peak_index].get('model') == 'gauss_l':
yp = self._gauss_lskew(self.params, data_x, peak_index)
        return np.sum(yp)
def _initialize(self, data_x, data_y):
for i, peak in enumerate(self.peaks):
E = float(peak.get('E'))
model = peak.get('model')
self.params['x{}'.format(i)].value = E
self.params['x{}'.format(i)].min = data_x[0]
self.params['x{}'.format(i)].max = data_x[-1]
self.params['s{}'.format(i)].value = 0.85
self.params['s{}'.format(i)].vary = True
self.params['A{}'.format(i)].value = data_y[int(E - data_x[0])]
if model == "gauss_l":
self.params['sL{}'.format(i)].value = 0.1
self.params['sL{}'.format(i)].min = 0.0
self.params['sL{}'.format(i)].max = 2.0
x0 = np.average(data_x[0:5])
y0 = np.average(data_y[0:5])
x1 = np.average(data_x[-6:-1])
y1 = np.average(data_y[-6:-1])
self.params['a1'].value = (y1 - y0) / (x1 - x0)
self.params['a0'].value = y0 - x0 * self.params['a1'].value
def fit(self, data_x, data_y, data_dy, show='plot', pause=0):
"""
Fit peaks in the data, returns x_axis points, baseline (background)
and fit (peaks) data points. The parameters of the fit (peaks parameters)
can be extracted from params variable.
"""
self._initialize(data_x, data_y)
        # lmfit no longer alters the params directly. Instead, it makes a copy
        # and reports those. Historical code used self.params, now uses
        # result.params (dvm 2018-05-10).
result = minimize(self.residual, self.params,
args=(data_x, data_y, data_dy))
x = np.linspace(data_x[0], data_x[-1], 1000)
y0 = self.fit_func(result.params, x)
if self.baseline == 'linear':
yb = self._linear(result.params, data_x)
elif self.baseline == 'quadratic':
yb = self._quadratic(result.params, data_x)
self.params = result.params
functions = {'x_axis' : x, 'baseline': yb, 'fit': y0}
return functions
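
# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal, hedged example of driving PeakFitter: one Gaussian peak near
# channel 100 on a linear background. The peak-dict keys ('E', 'model')
# follow the conventions read by _initialize() above; the data are synthetic.
if __name__ == '__main__':
    x = np.arange(50.0, 150.0)
    y = 5.0 + 0.1 * x + 40.0 * np.exp(-0.5 * ((x - 100.0) / 2.0)**2)
    dy = np.sqrt(np.clip(y, 1.0, None))  # Poisson-like uncertainties
    fitter = PeakFitter([{'E': 100, 'model': 'gauss'}], 'linear', 'demo')
    curves = fitter.fit(x, y, dy)
    print('fitted centroid: {:.2f}'.format(fitter.params['x0'].value))
    print('area of peak 0: {:.1f}'.format(fitter.find_area(x, 0)))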
|
ntbrewer/Pyspectr
|
Pyspectr/peak_fitter.py
|
Python
|
gpl-3.0
| 6,244
|
[
"Gaussian"
] |
98ab0559519eb2f276bffa8f883db7ba3a704d5d758446aae65af345bc612e48
|
#!/usr/bin/python
import sys
import argparse
import multiprocessing
import logging
import vcf
import random
import math
import pysam
def annotate_vcfs(bam, chromosomes, vcfs):
func_logger = logging.getLogger("%s-%s" % (annotate_vcfs.__name__, multiprocessing.current_process()))
random.seed(0)
# Load indexed BAM file
sam_file = pysam.Samfile(bam.name, "rb")
if not chromosomes:
func_logger.info("Chromosome list unspecified. Inferring from the BAMs")
chromosomes += list(sam_file.references)
chromosomes = sorted(list(set(chromosomes)))
func_logger.info("Chromosome list inferred as %s" % (str(chromosomes)))
if not chromosomes or len(chromosomes) == 0:
func_logger.error("Chromosome list empty")
return None
# Read through samfile and get some statistics
# hard code this for now
read_limit = 1000
# this is temporary, needs to read the reference to be sensible
# TODODODODODO!!!
num_read = 0.0
cover_sum = 0.0
template_list = list()
first_chr = sam_file.getrname(0)
for i in xrange(0, read_limit):
loc = random.randint(0, 30000000)
alignments = sam_file.fetch(first_chr, loc, loc + 1)
curr_num = 0
for aln in alignments:
if aln.mapq < 18:
continue
curr_num += 1
cover_sum += 1
template_list.append(abs(aln.tlen))
if curr_num > 0:
num_read += 1
template_list.sort()
num_template = float(len(template_list))
low_bound = int(math.floor(num_template * 0.05))
upp_bound = int(math.ceil(num_template * 0.95))
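    # trimmed statistics: only the central 90% of sorted template lengths
    # enter the mean/sd estimates, so a few chimeric or mis-mapped pairs
    # cannot skew them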
insert_count = 0
insert_sum = 0.0
insert_sq_sum = 0.0
for i in xrange(low_bound, upp_bound):
insert_count += 1
insert_sum += template_list[i]
insert_sq_sum += template_list[i] * template_list[i]
mean_coverage = cover_sum / num_read
mean_insert_size = insert_sum / insert_count
sd_insert_size = math.sqrt((insert_sq_sum / insert_count) - (mean_insert_size * mean_insert_size))
func_logger.info("Estimated coverage mean: {0:.2f}".format(mean_coverage))
func_logger.info("Estimated template size mean: {0:.2f}".format(mean_insert_size))
func_logger.info("Estimated template size sd: {0:.2f}".format(sd_insert_size))
func_logger.info("Estimated template size Q5: {0:.2f}".format(template_list[low_bound]))
func_logger.info("Estimated template size Q95: {0:.2f}".format(template_list[upp_bound - 1]))
template_upper_bound = mean_insert_size + (3 * sd_insert_size)
template_lower_bound = mean_insert_size - (3 * sd_insert_size)
# Read though VCF one line at a time
for inVCF in vcfs:
vcf_reader = vcf.Reader(open(inVCF.name))
vcf_template_reader = vcf.Reader(open(inVCF.name))
vcf_writer = vcf.Writer(open("anno_" + inVCF.name, 'w'), vcf_template_reader)
num_processed = 0
for vcf_record in vcf_reader:
if vcf_record.CHROM not in chromosomes:
continue
num_processed += 1
if num_processed % 100 == 0:
func_logger.info("{0} read from {1}".format(num_processed, inVCF.name))
# get the interval that corresponds to the SV
if vcf_record.INFO['SVTYPE'] == 'INS':
breakpoints = (vcf_record.start, vcf_record.start + 1)
else:
if 'END' in vcf_record.INFO:
breakpoints = (vcf_record.start, vcf_record.INFO['END'])
else:
breakpoints = (vcf_record.start, vcf_record.start + abs(int(vcf_record.INFO['SVLEN'][0])))
process_variant = True
if breakpoints[1] - breakpoints[0] > 1000000:
process_variant = False
if process_variant:
# get reads between breakpoints
                # sample num_repeat random positions with replacement
unique_coverage = 0.0
total_coverage = 0.0
num_forward = 0.0
bases_aligned = 0.0
total_bases = 0.0
end_bases_aligned = 0.0
end_total_bases = 0.0
num_discordant_high = 0.0
num_discordant_low = 0.0
num_repeat = 10
for i in xrange(0, num_repeat):
loc = random.randint(breakpoints[0], breakpoints[1])
alignments = sam_file.fetch(vcf_record.CHROM, loc, loc + 1)
for rec in alignments:
if rec.mapq >= 18:
unique_coverage += 1
if not rec.is_reverse:
num_forward += 1
total_bases += rec.rlen
bases_aligned += rec.qlen
total_coverage += 1
# compute number of discordant
                for loc in [int(max(breakpoints[0] - sd_insert_size, 0)), int(breakpoints[1] + sd_insert_size)]:
alignments = sam_file.fetch(vcf_record.CHROM, loc, loc + 1)
for rec in alignments:
if rec.mapq >= 18:
if abs(rec.tlen) > template_upper_bound:
num_discordant_high += 1
if abs(rec.tlen) < template_lower_bound:
num_discordant_low += 1
end_total_bases += rec.rlen
end_bases_aligned += rec.qlen
# get coverage between the breakpoints
vcf_record.INFO["AA_UNIQ_COV"] = (unique_coverage / num_repeat) / mean_coverage
vcf_record.INFO["AA_TOTAL_COV"] = (total_coverage / num_repeat) / mean_coverage
# get strand bias
if unique_coverage > 0.0:
vcf_record.INFO["AA_TOTAL_STRAND"] = (num_forward / unique_coverage - 0.5) ** 2
# get mapping quality stats
if total_coverage > 0.0:
vcf_record.INFO["AA_PROP_REPEAT"] = unique_coverage / total_coverage
# get clipped reads stats
if total_bases > 0.0:
vcf_record.INFO["AA_PROP_ALIGNED"] = bases_aligned / total_bases
if end_total_bases > 0.0:
vcf_record.INFO["AA_END_PROP_ALIGNED"] = end_bases_aligned / end_total_bases
# get discordant reads stats
vcf_record.INFO["AA_DISCORDANT_HIGH"] = num_discordant_high
vcf_record.INFO["AA_DISCORDANT_LOW"] = num_discordant_low
# get supplementary alignment stats
# Skip this for now
vcf_writer.write_record(vcf_record)
vcf_writer.close()
if __name__ == "__main__":
FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(
description="Annotate VCF with additional useful features",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--bam", help="BAM file", required=True, type=file)
parser.add_argument("--chromosomes", nargs="+", help="Chromosomes", default=[])
parser.add_argument("--vcfs", nargs="+", help="Input VCF files", type=file)
args = parser.parse_args()
logger.info("Command-line: " + " ".join(sys.argv))
annotate_vcfs(args.bam, args.chromosomes, args.vcfs)
logger.info("All done!")
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/MetaSV-0.5-py2.7.egg/EGG-INFO/scripts/annotate_vcf_bam.py
|
Python
|
apache-2.0
| 7,657
|
[
"pysam"
] |
29744e7f3edd24d8af2c446e30d7261ae44bd65801afc6c3901ecb08ecf3d7de
|
"""TIP3P potential, constraints and dynamics."""
from math import pi, sin, cos
import numpy as np
import ase.units as units
from ase.parallel import world
from ase.md.md import MolecularDynamics
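# TIP3P parameters: hydrogen partial charge (in units of e), O-O Lennard-Jones
# sigma (Angstrom) and epsilon (converted from kcal/mol to eV), plus the rigid
# monomer geometry: O-H bond length (Angstrom) and H-O-H angle (radians).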
qH = 0.417
sigma0 = 3.15061
epsilon0 = 0.1521 * units.kcal / units.mol
rOH = 0.9572
thetaHOH = 104.52 / 180 * pi
class TIP3P:
def __init__(self, rc=9.0, width=1.0):
self.energy = None
self.forces = None
self.rc1 = rc - width
self.rc2 = rc
def get_spin_polarized(self):
return False
def update(self, atoms):
if (self.energy is None or
len(self.numbers) != len(atoms) or
(self.numbers != atoms.get_atomic_numbers()).any()):
self.calculate(atoms)
elif ((self.positions != atoms.get_positions()).any() or
(self.pbc != atoms.get_pbc()).any() or
(self.cell != atoms.get_cell()).any()):
self.calculate(atoms)
def calculation_required(self, atoms, quantities):
if len(quantities) == 0:
return False
return (self.energy is None or
len(self.numbers) != len(atoms) or
(self.numbers != atoms.get_atomic_numbers()).any() or
(self.positions != atoms.get_positions()).any() or
(self.pbc != atoms.get_pbc()).any() or
(self.cell != atoms.get_cell()).any())
def get_potential_energy(self, atoms):
self.update(atoms)
return self.energy
def get_forces(self, atoms):
self.update(atoms)
return self.forces.copy()
def get_stress(self, atoms):
raise NotImplementedError
def calculate(self, atoms):
self.positions = atoms.get_positions().copy()
self.cell = atoms.get_cell().copy()
self.pbc = atoms.get_pbc().copy()
natoms = len(atoms)
nH2O = natoms // 3
assert self.pbc.all()
C = self.cell.diagonal()
assert not (self.cell - np.diag(C)).any()
assert (C >= 2 * self.rc2).all()
self.numbers = atoms.get_atomic_numbers()
Z = self.numbers.reshape((-1, 3))
assert (Z[:, 1:] == 1).all() and (Z[:, 0] == 8).all()
R = self.positions.reshape((nH2O, 3, 3))
RO = R[:, 0]
self.energy = 0.0
self.forces = np.zeros((natoms, 3))
if world is None:
mya = range(nH2O - 1)
else:
rank = world.rank
size = world.size
            assert nH2O % (2 * size) == 0
            n = nH2O // 2 // size
mya = (range(rank * n, (rank + 1) * n) +
range((size - rank - 1) * n, (size - rank) * n))
q = np.empty(3)
q[:] = qH * (units.Hartree * units.Bohr)**0.5
q[0] *= -2
for a in mya:
DOO = (RO[a + 1:] - RO[a] + 0.5 * C) % C - 0.5 * C
dOO = (DOO**2).sum(axis=1)**0.5
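            # smooth cutoff: the weight f falls from 1 to 0 between rc1 and
            # rc2 via a cubic switching polynomial; dfdd holds its derivative,
            # used below to correct the forces for the varying weight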
x1 = dOO > self.rc1
x2 = dOO < self.rc2
f = np.zeros(nH2O - a - 1)
f[x2] = 1.0
dfdd = np.zeros(nH2O - a - 1)
x12 = np.logical_and(x1, x2)
d = (dOO[x12] - self.rc1) / (self.rc2 - self.rc1)
f[x12] -= d**2 * (3.0 - 2.0 * d)
dfdd[x12] -= 6.0 / (self.rc2 - self.rc1) * d * (1.0 - d)
y = (sigma0 / dOO)**6
y2 = y**2
e = 4 * epsilon0 * (y2 - y)
self.energy += np.dot(e, f)
dedd = 24 * epsilon0 * (2 * y2 - y) / dOO * f - e * dfdd
F = (dedd / dOO)[:, np.newaxis] * DOO
self.forces[(a + 1) * 3::3] += F
self.forces[a * 3] -= F.sum(axis=0)
for i in range(3):
D = (R[a + 1:] - R[a, i] + 0.5 * C) % C - 0.5 * C
d = (D**2).sum(axis=2)**0.5
e = q[i] * q / d
self.energy += np.dot(f, e).sum()
F = (e / d**2 * f[:, np.newaxis])[:, :, np.newaxis] * D
F[:, 0] -= (e.sum(axis=1) * dfdd / dOO)[:, np.newaxis] * DOO
self.forces[(a + 1) * 3:] += F.reshape((-1, 3))
self.forces[a * 3 + i] -= F.sum(axis=0).sum(axis=0)
if world is not None:
self.energy = world.sum(self.energy)
world.sum(self.forces)
class H2OConstraint:
"""Constraint object for a rigid H2O molecule."""
def __init__(self, r=rOH, theta=thetaHOH, iterations=23, masses=None):
self.r = r
self.theta = theta
self.iterations = iterations
self.m = masses
def set_masses(self, masses):
self.m = masses
def adjust_positions(self, old, new):
bonds = [(0, 1, self.r), (0, 2, self.r)]
if self.theta:
bonds.append((1, 2, sin(self.theta / 2) * self.r * 2))
for iter in range(self.iterations):
for i, j, r in bonds:
D = old[i::3] - old[j::3]
m1 = self.m[i]
m2 = self.m[j]
a = new[i::3]
b = new[j::3]
B = a - b
x = (D**2).sum(axis=1)
y = (D * B).sum(axis=1)
z = (B**2).sum(axis=1) - r**2
k = m1 * m2 / (m1 + m2) * ((y**2 - x * z)**0.5 - y) / x
k.shape = (-1, 1)
a += k / m1 * D
b -= k / m2 * D
def adjust_forces(self, positions, forces):
pass
def copy(self):
return H2OConstraint(self.r, self.theta, self.iterations, self.m)
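# Editorial sketch (not part of the original module): a self-contained numpy
# check of the single-bond SHAKE-style update that H2OConstraint.adjust_positions
# applies above.  For an isolated bond, one iteration restores the target
# length r exactly; all numbers below are illustrative.
def _check_single_bond_constraint(r=0.9572, m1=15.999, m2=1.008):
    old_i = np.zeros(3)
    old_j = np.array([r, 0.0, 0.0])             # old positions satisfy |D| = r
    a = old_i + np.array([0.01, -0.02, 0.0])    # perturbed new positions
    b = old_j + np.array([-0.03, 0.01, 0.02])
    D = old_i - old_j                           # same quantities as above
    B = a - b
    x = np.dot(D, D)
    y = np.dot(D, B)
    z = np.dot(B, B) - r**2
    k = m1 * m2 / (m1 + m2) * ((y**2 - x * z)**0.5 - y) / x
    a += k / m1 * D
    b -= k / m2 * D
    assert abs(np.linalg.norm(a - b) - r) < 1e-10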
class Verlet(MolecularDynamics):
def step(self, f):
atoms = self.atoms
m = atoms.get_masses()[:, np.newaxis]
v = self.atoms.get_velocities()
r0 = atoms.get_positions()
r = r0 + self.dt * v + self.dt**2 * f / m
atoms.set_positions(r)
r = atoms.get_positions()
v = (r - r0) / self.dt
self.atoms.set_velocities(v)
return atoms.get_forces()
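# Editorial sketch (not part of the original file): the same update rule as
# Verlet.step above -- r = r0 + dt*v + dt**2*f/m, then v = (r - r0)/dt --
# applied to a 1D harmonic oscillator.  (In Verlet.step, atoms.set_positions
# lets any constraint adjust r before velocities are recomputed; this toy
# omits that.)  The scheme is symplectic, so the energy stays bounded near
# its initial value of 0.5 over many steps.
def _check_verlet_scheme(steps=10000, dt=0.01, m=1.0, k=1.0):
    r, v = 1.0, 0.0
    for _ in range(steps):
        f = -k * r                   # harmonic force
        r0 = r
        r = r0 + dt * v + dt**2 * f / m
        v = (r - r0) / dt            # velocities recovered from positions
    energy = 0.5 * m * v**2 + 0.5 * k * r**2
    assert abs(energy - 0.5) < 0.01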
|
slabanja/ase
|
ase/calculators/tip3p.py
|
Python
|
gpl-2.0
| 5,975
|
[
"ASE"
] |
87c562cffbfd8e3e83f9b19187386e819d46892664dc4768bac23321563d651b
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from functools import update_wrapper, partial
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
from camelot.view.art import Icon
from camelot.view.model_thread import post, object_thread, model_function
from camelot.view.search import create_entity_search_query_decorator
from camelot.view.controls.decorated_line_edit import DecoratedLineEdit
from camelot.core.utils import ugettext as _
from camelot.core.utils import variant_to_pyobject
from camelot.core.utils import create_constant_function
from customeditor import CustomEditor, set_background_color_palette
import logging
logger = logging.getLogger('camelot.view.controls.editors.many2oneeditor')
class Many2OneEditor( CustomEditor ):
"""Widget for editing many 2 one relations"""
new_icon = Icon('tango/16x16/actions/document-new.png')
search_icon = Icon('tango/16x16/actions/system-search.png')
arrow_down_key_pressed = QtCore.pyqtSignal()
class CompletionsModel(QtCore.QAbstractListModel):
def __init__(self, parent=None):
QtCore.QAbstractListModel.__init__(self, parent)
self._completions = []
def setCompletions(self, completions):
self._completions = completions
self.layoutChanged.emit()
def data(self, index, role):
if role == Qt.DisplayRole:
return QtCore.QVariant(self._completions[index.row()][0])
elif role == Qt.EditRole:
return QtCore.QVariant(self._completions[index.row()][1])
return QtCore.QVariant()
def rowCount(self, index=None):
return len(self._completions)
def columnCount(self, index=None):
return 1
def __init__(self,
admin=None,
parent=None,
editable=True,
field_name='manytoone',
**kwargs):
        """:param admin: The Admin interface for the object on the one
        side of the relation
        """
CustomEditor.__init__(self, parent)
self.setObjectName( field_name )
self.admin = admin
self.entity_set = False
self._editable = editable
self._entity_representation = ''
self.entity_instance_getter = None
self._last_highlighted_entity_getter = None
self.layout = QtGui.QHBoxLayout()
self.layout.setSpacing(0)
self.layout.setContentsMargins( 0, 0, 0, 0)
# Search button
self.search_button = QtGui.QToolButton()
self.search_button.setAutoRaise(True)
self.search_button.setFocusPolicy(Qt.ClickFocus)
self.search_button.setFixedHeight(self.get_height())
self.search_button.clicked.connect(self.searchButtonClicked)
self.search_button.setIcon(
Icon('tango/16x16/actions/edit-clear.png').getQIcon()
)
self.search_button.setToolTip(unicode(_('clear')))
# Open button
self.open_button = QtGui.QToolButton()
self.open_button.setAutoRaise(True)
self.open_button.setFocusPolicy(Qt.ClickFocus)
self.open_button.setFixedHeight(self.get_height())
self.open_button.clicked.connect(self.openButtonClicked)
self.open_button.setIcon( self.new_icon.getQIcon() )
self.open_button.setToolTip(unicode(_('new')))
# Search input
self.search_input = DecoratedLineEdit(self)
self.search_input.set_background_text(_('Search...'))
self.search_input.textEdited.connect(self.textEdited)
self.search_input.set_minimum_width( 20 )
self.search_input.arrow_down_key_pressed.connect(self.on_arrow_down_key_pressed)
# suppose garbage was entered, we need to refresh the content
self.search_input.editingFinished.connect( self.search_input_editing_finished )
self.setFocusProxy(self.search_input)
# Search Completer
self.completer = QtGui.QCompleter()
self.completions_model = self.CompletionsModel(self.completer)
self.completer.setModel(self.completions_model)
self.completer.setCaseSensitivity(Qt.CaseInsensitive)
self.completer.setCompletionMode(
QtGui.QCompleter.UnfilteredPopupCompletion
)
#self.completer.activated.connect(self.completionActivated)
#self.completer.highlighted.connect(self.completion_highlighted)
self.completer.activated[QtCore.QModelIndex].connect(self.completionActivated)
self.completer.highlighted[QtCore.QModelIndex].connect(self.completion_highlighted)
self.search_input.setCompleter(self.completer)
# Setup layout
self.layout.addWidget(self.search_input)
self.layout.addWidget(self.search_button)
self.layout.addWidget(self.open_button)
self.setLayout(self.layout)
def set_field_attributes(self, editable = True,
background_color = None,
tooltip = None, **kwargs):
self.set_editable(editable)
set_background_color_palette( self.search_input, background_color )
self.search_input.setToolTip(unicode(tooltip or ''))
def set_editable(self, editable):
self._editable = editable
self.search_input.setEnabled(editable)
self.search_button.setEnabled(editable)
def on_arrow_down_key_pressed(self):
self.arrow_down_key_pressed.emit()
def textEdited(self, text):
self._last_highlighted_entity_getter = None
text = self.search_input.user_input()
def create_search_completion(text):
return lambda: self.search_completions(text)
post(
create_search_completion(unicode(text)),
self.display_search_completions
)
self.completer.complete()
@model_function
def search_completions(self, text):
"""Search for object that match text, to fill the list of completions
:return: a list of tuples of (object_representation, object_getter)
"""
search_decorator = create_entity_search_query_decorator(
self.admin, text
)
if search_decorator:
sresult = [
(unicode(e), create_constant_function(e))
for e in search_decorator(self.admin.entity.query).limit(20)
]
return text, sresult
return text, []
def display_search_completions(self, prefix_and_completions):
assert object_thread( self )
prefix, completions = prefix_and_completions
self.completions_model.setCompletions(completions)
self.completer.setCompletionPrefix(prefix)
self.completer.complete()
def completionActivated(self, index):
object_getter = index.data(Qt.EditRole)
self.setEntity(variant_to_pyobject(object_getter))
def completion_highlighted(self, index ):
object_getter = index.data(Qt.EditRole)
pyob = variant_to_pyobject(object_getter)
self._last_highlighted_entity_getter = pyob
def openButtonClicked(self):
if self.entity_set:
return self.createFormView()
else:
return self.createNew()
def createSelectView(self):
from camelot.view.action_steps.select_object import SelectDialog
select_dialog = SelectDialog( self.admin, self )
select_dialog.exec_()
        if select_dialog.object_getter is not None:
self.select_object( select_dialog.object_getter )
def returnPressed(self):
if not self.entity_set:
self.createSelectView()
def searchButtonClicked(self):
if self.entity_set:
self.setEntity(lambda:None)
else:
self.createSelectView()
def trashButtonClicked(self):
self.setEntity(lambda:None)
def createNew(self):
assert object_thread( self )
@model_function
def get_has_subclasses():
return len(self.admin.get_subclass_tree())
post(get_has_subclasses, self.show_new_view)
def show_new_view(self, has_subclasses):
assert object_thread( self )
from camelot.view.workspace import show_top_level
selected = QtGui.QDialog.Accepted
admin = self.admin
if has_subclasses:
from camelot.view.controls.inheritance import SubclassDialog
select_subclass = SubclassDialog(self, self.admin)
select_subclass.setWindowTitle(_('select'))
selected = select_subclass.exec_()
admin = select_subclass.selected_subclass
if selected:
form = admin.create_new_view()
form.entity_created_signal.connect( self.select_object )
show_top_level( form, self )
def createFormView(self):
if self.entity_instance_getter:
def get_admin_and_title():
obj = self.entity_instance_getter()
admin = self.admin.get_related_admin(obj.__class__)
return admin, ''
post(get_admin_and_title, self.show_form_view)
def show_form_view(self, admin_and_title):
from camelot.view.workspace import show_top_level
admin, title = admin_and_title
def create_collection_getter(instance_getter):
return lambda:[instance_getter()]
from camelot.view.proxy.collection_proxy import CollectionProxy
model = CollectionProxy(
admin,
create_collection_getter(self.entity_instance_getter),
admin.get_fields
)
model.dataChanged.connect(self.dataChanged)
form = admin.create_form_view(title, model, 0)
# @todo : dirty trick to keep reference
#self.__form = form
show_top_level( form, self )
def dataChanged(self, index1, index2):
self.setEntity(self.entity_instance_getter, False)
def search_input_editing_finished(self):
if not self.entity_set:
# Only try to 'guess' what the user meant when no entity is set
# to avoid inappropriate removal of data, (eg when the user presses
# Esc, editingfinished will be called as well, and we should not
# overwrite the current entity set)
if self._last_highlighted_entity_getter:
self.setEntity(self._last_highlighted_entity_getter)
elif not self.entity_set and self.completions_model.rowCount()==1:
# There is only one possible option
index = self.completions_model.index(0,0)
entity_getter = variant_to_pyobject(index.data(Qt.EditRole))
self.setEntity(entity_getter)
self.search_input.set_user_input(self._entity_representation)
def set_value(self, value):
""":param value: either ValueLoading, or a function that returns None
or the entity to be shown in the editor"""
self._last_highlighted_entity_getter = None
value = CustomEditor.set_value(self, value)
if value:
self.setEntity(value, propagate = False)
def get_value(self):
""":return: a function that returns the selected entity or ValueLoading
or None"""
value = CustomEditor.get_value(self)
if not value:
value = self.entity_instance_getter
return value
@QtCore.pyqtSlot(tuple)
def set_instance_representation(self, representation_and_propagate):
"""Update the gui"""
((desc, pk), propagate) = representation_and_propagate
self._entity_representation = desc
self.search_input.set_user_input(desc)
if pk != False:
self.open_button.setIcon(
Icon('tango/16x16/places/folder.png').getQIcon()
)
self.open_button.setToolTip(unicode(_('open')))
self.open_button.setEnabled(True)
self.search_button.setIcon(
Icon('tango/16x16/actions/edit-clear.png').getQIcon()
)
self.search_button.setToolTip(unicode(_('clear')))
self.entity_set = True
else:
self.open_button.setIcon( self.new_icon.getQIcon() )
self.open_button.setToolTip(unicode(_('new')))
self.open_button.setEnabled(self._editable)
self.search_button.setIcon( self.search_icon.getQIcon() )
self.search_button.setToolTip(_('Search'))
self.entity_set = False
if propagate:
self.editingFinished.emit()
def setEntity(self, entity_instance_getter, propagate=True):
self.entity_instance_getter = entity_instance_getter
def get_instance_representation( entity_instance_getter, propagate ):
"""Get a representation of the instance
:return: (unicode, pk) its unicode representation and its primary
key or ('', False) if the instance was None"""
entity = entity_instance_getter()
if entity and hasattr(entity, 'id'):
return ((unicode(entity), entity.id), propagate)
elif entity:
return ((unicode(entity), False), propagate)
return ((None, False), propagate)
post( update_wrapper( partial( get_instance_representation,
entity_instance_getter,
propagate ),
get_instance_representation ),
self.set_instance_representation)
def select_object( self, entity_instance_getter ):
self.setEntity(entity_instance_getter)
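# Editorial sketch (not part of the original module): a minimal standalone
# demonstration of the completer wiring used by Many2OneEditor above -- a
# line edit whose QCompleter pops up unfiltered, case-insensitive completions
# from a simple list model.  The sample names are illustrative.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    edit = QtGui.QLineEdit()
    completer = QtGui.QCompleter()
    model = QtGui.QStringListModel(['Alice', 'Albert', 'Bob'], completer)
    completer.setModel(model)
    completer.setCaseSensitivity(Qt.CaseInsensitive)
    completer.setCompletionMode(QtGui.QCompleter.UnfilteredPopupCompletion)
    edit.setCompleter(completer)
    edit.show()
    sys.exit(app.exec_())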
|
jeroendierckx/Camelot
|
camelot/view/controls/editors/many2oneeditor.py
|
Python
|
gpl-2.0
| 14,748
|
[
"VisIt"
] |
3b08c6484b8f7646e887e4a40bbc0c6662f9dc274da3968d501c22c3285094d9
|
import numpy as np
from gpaw import debug
from gpaw.io.tar import Reader, Writer
from gpaw.utilities import is_contiguous
from gpaw.analyse.observers import Observer
from gpaw.transformers import Transformer
from gpaw.tddft import attosec_to_autime, eV_to_aufrequency
# -------------------------------------------------------------------
class DensityFourierTransform(Observer):
def __init__(self, timestep, frequencies, width=None, interval=1):
"""
Parameters
----------
timestep: float
Time step in attoseconds (10^-18 s), e.g., 4.0 or 8.0
frequencies: NumPy array or list of floats
Frequencies in eV for Fourier transforms
width: float or None
Width of Gaussian envelope in eV, otherwise no envelope
interval: int
Number of timesteps between calls (used when attaching)
"""
Observer.__init__(self, interval)
self.timestep = interval * timestep * attosec_to_autime # autime
self.omega_w = np.asarray(frequencies) * eV_to_aufrequency # autime^(-1)
if width is None:
self.sigma = None
else:
self.sigma = width * eV_to_aufrequency # autime^(-1)
self.nw = len(self.omega_w)
self.dtype = complex # np.complex128 really, but hey...
self.Fnt_wsG = None
self.Fnt_wsg = None
self.Ant_sG = None
self.Ant_sg = None
def initialize(self, paw, allocate=True):
self.allocated = False
assert hasattr(paw, 'time') and hasattr(paw, 'niter'), 'Use TDDFT!'
self.time = paw.time
self.niter = paw.niter
self.world = paw.wfs.world
self.gd = paw.density.gd
self.finegd = paw.density.finegd
self.nspins = paw.density.nspins
self.stencil = paw.input_parameters.stencils[1] # i.e. tar['InterpolationStencil']
self.interpolator = paw.density.interpolator
self.cinterpolator = Transformer(self.gd, self.finegd, self.stencil, \
dtype=self.dtype, allocate=False)
self.phase_cd = np.ones((3, 2), dtype=complex)
self.Ant_sG = paw.density.nt_sG.copy() # TODO in allocate instead?
# Attach to PAW-type object
paw.attach(self, self.interval, density=paw.density)
if allocate:
self.allocate()
def allocate(self):
if not self.allocated:
self.Fnt_wsG = self.gd.zeros((self.nw, self.nspins), \
dtype=self.dtype)
self.Fnt_wsg = None
#self.Ant_sG = ...
self.Ant_sg = None
self.gamma_w = np.ones(self.nw, dtype=complex) * self.timestep
self.cinterpolator.allocate()
self.allocated = True
if debug:
assert is_contiguous(self.Fnt_wsG, self.dtype)
def interpolate_fourier_transform(self):
if self.Fnt_wsg is None:
self.Fnt_wsg = self.finegd.empty((self.nw, self.nspins), \
dtype=self.dtype)
if self.dtype == float:
intapply = self.interpolator.apply
else:
intapply = lambda Fnt_G, Fnt_g: self.cinterpolator.apply(Fnt_G, \
Fnt_g, self.phase_cd)
for w in range(self.nw):
for s in range(self.nspins):
intapply(self.Fnt_wsG[w,s], self.Fnt_wsg[w,s])
def interpolate_average(self):
if self.Ant_sg is None:
self.Ant_sg = self.finegd.empty(self.nspins, dtype=float)
for s in range(self.nspins):
self.interpolator.apply(self.Ant_sG[s], self.Ant_sg[s])
def update(self, density):
# Update time
# t[N] = t[N-1] + dt[N-1] #TODO better time-convention?
self.time += self.timestep
# Complex exponential with/without finite-width envelope
f_w = np.exp(1.0j*self.omega_w*self.time)
if self.sigma is not None:
f_w *= np.exp(-self.time**2*self.sigma**2/2.0)
# Update Fourier transformed density components
# Fnt_wG[N] = Fnt_wG[N-1] + 1/sqrt(pi) * (nt_G[N]-avg_nt_G[N-1]) \
# * (f[N]*t[N] - gamma[N-1]) * dt[N]/(t[N]+dt[N])
for w in range(self.nw):
self.Fnt_wsG[w] += 1/np.pi**0.5 * (density.nt_sG - self.Ant_sG) \
* (f_w[w]*self.time - self.gamma_w[w]) * self.timestep \
/ (self.time + self.timestep)
# Update the cumulative phase factors
# gamma[N] = gamma[N-1] + f[N]*dt[N]
self.gamma_w += f_w * self.timestep
# If dt[N] = dt for all N and sigma = 0, then this simplifies to:
# gamma[N] = Sum_{n=0}^N exp(i*omega*n*dt) * dt
# = (1 - exp(i*omega*(N+1)*dt)) / (1 - exp(i*omega*dt)) * dt
# Update average density
# Ant_G[N] = (t[N]*Ant_G[N-1] + nt_G[N]*dt[N])/(t[N]+dt[N])
self.Ant_sG = (self.time*self.Ant_sG + density.nt_sG*self.timestep) \
/ (self.time + self.timestep)
def get_fourier_transform(self, frequency=0, spin=0, gridrefinement=1):
if gridrefinement == 1:
return self.Fnt_wsG[frequency, spin]
elif gridrefinement == 2:
if self.Fnt_wsg is None:
self.interpolate_fourier_transform()
return self.Fnt_wsg[frequency, spin]
else:
raise NotImplementedError('Arbitrary refinement not implemented')
def get_average(self, spin=0, gridrefinement=1):
if gridrefinement == 1:
return self.Ant_sG[spin]
elif gridrefinement == 2:
if self.Ant_sg is None:
self.interpolate_average()
return self.Ant_sg[spin]
else:
raise NotImplementedError('Arbitrary refinement not implemented')
def read(self, filename, idiotproof=True):
if idiotproof and not filename.endswith('.ftd'):
raise IOError('Filename must end with `.ftd`.')
tar = Reader(filename)
# Test data type
dtype = {'Float':float, 'Complex':complex}[tar['DataType']]
if dtype != self.dtype:
raise IOError('Data is an incompatible type.')
# Test time
time = tar['Time']
if idiotproof and abs(time-self.time) >= 1e-9:
raise IOError('Timestamp is incompatible with calculator.')
# Test timestep (non-critical)
timestep = tar['TimeStep']
if abs(timestep - self.timestep) > 1e-12:
print 'Warning: Time-step has been altered. (%lf -> %lf)' \
% (self.timestep, timestep)
self.timestep = timestep
# Test dimensions
nw = tar.dimension('nw')
nspins = tar.dimension('nspins')
ng = (tar.dimension('ngptsx'), tar.dimension('ngptsy'), \
tar.dimension('ngptsz'),)
if (nw != self.nw or nspins != self.nspins or
(ng != self.gd.get_size_of_global_array()).any()):
raise IOError('Data has incompatible shapes.')
# Test width (non-critical)
sigma = tar['Width']
if ((sigma is None)!=(self.sigma is None) or # float <-> None
(sigma is not None and self.sigma is not None and \
abs(sigma - self.sigma) > 1e-12)): # float -> float
print 'Warning: Width has been altered. (%s -> %s)' \
% (self.sigma, sigma)
self.sigma = sigma
# Read frequencies
self.omega_w[:] = tar.get('Frequency')
# Read cumulative phase factors
self.gamma_w[:] = tar.get('PhaseFactor')
# Read average densities on master and distribute
for s in range(self.nspins):
all_Ant_G = tar.get('Average', s)
self.gd.distribute(all_Ant_G, self.Ant_sG[s])
# Read fourier transforms on master and distribute
for w in range(self.nw):
for s in range(self.nspins):
all_Fnt_G = tar.get('FourierTransform', w, s)
self.gd.distribute(all_Fnt_G, self.Fnt_wsG[w,s])
# Close for good measure
tar.close()
def write(self, filename, idiotproof=True):
if idiotproof and not filename.endswith('.ftd'):
raise IOError('Filename must end with `.ftd`.')
master = self.world.rank == 0
# Open writer on master and set parameters/dimensions
if master:
tar = Writer(filename)
tar['DataType'] = {float:'Float', complex:'Complex'}[self.dtype]
tar['Time'] = self.time
tar['TimeStep'] = self.timestep #non-essential
tar['Width'] = self.sigma
tar.dimension('nw', self.nw)
tar.dimension('nspins', self.nspins)
# Create dimensions for varioius netCDF variables:
ng = self.gd.get_size_of_global_array()
tar.dimension('ngptsx', ng[0])
tar.dimension('ngptsy', ng[1])
tar.dimension('ngptsz', ng[2])
# Write frequencies
tar.add('Frequency', ('nw',), self.omega_w, dtype=float)
# Write cumulative phase factors
tar.add('PhaseFactor', ('nw',), self.gamma_w, dtype=self.dtype)
# Collect average densities on master and write
if master:
tar.add('Average', ('nspins', 'ngptsx', 'ngptsy',
'ngptsz', ), dtype=float)
for s in range(self.nspins):
big_Ant_G = self.gd.collect(self.Ant_sG[s])
if master:
tar.fill(big_Ant_G)
# Collect fourier transforms on master and write
if master:
tar.add('FourierTransform', ('nw', 'nspins', 'ngptsx', 'ngptsy', \
'ngptsz', ), dtype=self.dtype)
for w in range(self.nw):
for s in range(self.nspins):
big_Fnt_G = self.gd.collect(self.Fnt_wsG[w,s])
if master:
tar.fill(big_Fnt_G)
# Close to flush changes
if master:
tar.close()
# Make sure slaves don't return before master is done
self.world.barrier()
def dump(self, filename):
if debug:
assert is_contiguous(self.Fnt_wsG, self.dtype)
assert is_contiguous(self.Ant_sG, float)
all_Fnt_wsG = self.gd.collect(self.Fnt_wsG)
all_Ant_sG = self.gd.collect(self.Ant_sG)
if self.world.rank == 0:
all_Fnt_wsG.dump(filename)
all_Ant_sG.dump(filename+'_avg') # crude but easy
self.omega_w.dump(filename+'_omega') # crude but easy
self.gamma_w.dump(filename+'_gamma') # crude but easy
def load(self, filename):
if self.world.rank == 0:
all_Fnt_wsG = np.load(filename)
all_Ant_sG = np.load(filename+'_avg') # crude but easy
else:
all_Fnt_wsG = None
all_Ant_sG = None
if debug:
assert all_Fnt_wsG is None or is_contiguous(all_Fnt_wsG, self.dtype)
assert all_Ant_sG is None or is_contiguous(all_Ant_sG, float)
if not self.allocated:
self.allocate()
self.gd.distribute(all_Fnt_wsG, self.Fnt_wsG)
self.gd.distribute(all_Ant_sG, self.Ant_sG)
self.omega_w = np.load(filename+'_omega') # crude but easy
self.gamma_w = np.load(filename+'_gamma') # crude but easy
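# Editorial sketch (not part of the original module): a scalar sanity check
# of the running time-average recurrence used in update() above,
#   A[N] = (t[N]*A[N-1] + n[N]*dt) / (t[N] + dt),
# which for a fixed dt reduces to the plain arithmetic mean of the samples.
def _check_running_average():
    dt = 4.0
    samples = [0.3, 0.7, 0.2, 0.9, 0.5]
    time, avg = 0.0, samples[0]     # Ant_sG starts as a copy of the density
    for nt in samples[1:]:
        time += dt                  # update() advances the clock first
        avg = (time * avg + nt * dt) / (time + dt)
    assert abs(avg - sum(samples) / len(samples)) < 1e-12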
|
qsnake/gpaw
|
gpaw/tddft/fourier.py
|
Python
|
gpl-3.0
| 11,388
|
[
"GPAW",
"Gaussian",
"NetCDF"
] |
c1d814a163b38b45413d6657e9054cb4533da8f1f212687d0763aa9a4a637214
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
NIST physical constants
https://physics.nist.gov/cuu/Constants/
https://physics.nist.gov/cuu/Constants/Table/allascii.txt
'''
LIGHT_SPEED = 137.03599967994 # http://physics.nist.gov/cgi-bin/cuu/Value?alph
# BOHR = .529 177 210 92(17) e-10m # http://physics.nist.gov/cgi-bin/cuu/Value?bohrrada0
BOHR = 0.52917721092 # Angstroms
BOHR_SI = BOHR * 1e-10
ALPHA = 7.2973525664e-3 # http://physics.nist.gov/cgi-bin/cuu/Value?alph
G_ELECTRON = 2.00231930436182 # http://physics.nist.gov/cgi-bin/cuu/Value?gem
E_MASS = 9.10938356e-31 # kg https://physics.nist.gov/cgi-bin/cuu/Value?me
AVOGADRO = 6.022140857e23 # https://physics.nist.gov/cgi-bin/cuu/Value?na
ATOMIC_MASS = 1e-3/AVOGADRO
PROTON_MASS = 1.672621898e-27 # kg https://physics.nist.gov/cgi-bin/cuu/Value?mp
PROTON_MASS_AU = PROTON_MASS/ATOMIC_MASS
BOHR_MAGNETON = 927.4009994e-26 # J/T http://physics.nist.gov/cgi-bin/cuu/Value?mub
NUC_MAGNETON = BOHR_MAGNETON * E_MASS / PROTON_MASS
PLANCK = 6.626070040e-34 # J*s http://physics.nist.gov/cgi-bin/cuu/Value?h
HBAR = PLANCK/(2*3.141592653589793) # https://physics.nist.gov/cgi-bin/cuu/Value?hbar
#HARTREE2J = 4.359744650e-18 # J https://physics.nist.gov/cgi-bin/cuu/Value?hrj
HARTREE2J = HBAR**2/(E_MASS*BOHR_SI**2)
HARTREE2EV = 27.21138602 # eV https://physics.nist.gov/cgi-bin/cuu/Value?threv
E_CHARGE = 1.6021766208e-19 # C https://physics.nist.gov/cgi-bin/cuu/Value?e
LIGHT_SPEED_SI = 299792458 # https://physics.nist.gov/cgi-bin/cuu/Value?c
DEBYE = 3.335641e-30 # C*m = 1e-18/LIGHT_SPEED_SI https://cccbdb.nist.gov/debye.asp
AU2DEBYE = E_CHARGE * BOHR*1e-10 / DEBYE # 2.541746
AUEFG = 9.71736235660e21 # V/m^2 https://physics.nist.gov/cgi-bin/cuu/Value?auefg
AU2TESLA = HBAR/(BOHR_SI**2 * E_CHARGE)
BOLTZMANN = 1.38064852e-23 # J/K https://physics.nist.gov/cgi-bin/cuu/Value?k
HARTREE2WAVENUMBER = 1e-2 * HARTREE2J / (LIGHT_SPEED_SI * PLANCK) # 2.194746313702e5
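# Editorial cross-check (not part of the original module): the derived values
# above agree with the tabulated NIST numbers to the quoted precision.
assert abs(HARTREE2J / E_CHARGE - HARTREE2EV) < 1e-4  # ~27.21138602 eV
assert abs(AU2DEBYE - 2.541746) < 1e-4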
|
gkc1000/pyscf
|
pyscf/data/nist.py
|
Python
|
apache-2.0
| 2,567
|
[
"Avogadro",
"PySCF"
] |
33010f2ff0fd9df989ae19ab3bb0e9aa1af1a5ed726586c069fba2a420fd51ac
|
from collections import OrderedDict
import pytest
from diffraction import Crystal, Site
CALCITE_ATOMIC_SITES = OrderedDict([
("Ca1", ["Ca2+", [0, 0, 0]]),
("C1", ["C4+", [0, 0, 0.25]]),
("O1", ["O2-", [0.25706, 0, 0.25]])
])
class TestCreatingFromSequence:
def test_can_create_from_sequence(self):
calcite = Crystal([4.99, 4.99, 17.002, 90, 90, 120], "R -3 c H")
assert calcite.a == 4.99
assert calcite.b == 4.99
assert calcite.c == 17.002
assert calcite.alpha == 90
assert calcite.beta == 90
assert calcite.gamma == 120
assert calcite.space_group == "R -3 c H"
class TestCreatingFromMapping:
def test_can_create_crystal_from_dictionary(self):
crystal_info = {"a": 4.99, "b": 4.99, "c": 17.002,
"alpha": 90, "beta": 90, "gamma": 120,
"space_group": "R -3 c H"}
calcite = Crystal.from_dict(crystal_info)
assert calcite.a == 4.99
assert calcite.b == 4.99
assert calcite.c == 17.002
assert calcite.alpha == 90
assert calcite.beta == 90
assert calcite.gamma == 120
assert calcite.space_group == "R -3 c H"
def test_error_if_lattice_parameter_missing_from_dict(self):
crystal_info = {"a": 4.99, "c": 17.002,
"alpha": 90, "beta": 90, "gamma": 120,
"space_group": "R -3 c H"}
with pytest.raises(ValueError) as exception_info:
Crystal.from_dict(crystal_info)
assert str(exception_info.value) == "Parameter: 'b' missing from input dictionary"
def test_error_if_space_group_missing_from_dict(self):
crystal_info = {"a": 4.99, "b": 4.99, "c": 17.002,
"alpha": 90, "beta": 90, "gamma": 120}
with pytest.raises(ValueError) as exception_info:
Crystal.from_dict(crystal_info)
assert str(exception_info.value) == \
"Parameter: 'space_group' missing from input dictionary"
def test_atomic_sites_loaded_if_given(self):
crystal_info = {"a": 4.99, "b": 4.99, "c": 17.002,
"alpha": 90, "beta": 90, "gamma": 120,
"space_group": "R -3 c H", "sites": CALCITE_ATOMIC_SITES}
calcite = Crystal.from_dict(crystal_info)
expected_sites = {name: Site(ion, position)
for name, (ion, position) in CALCITE_ATOMIC_SITES.items()}
assert calcite.sites == expected_sites
class TestCreatingFromCIF:
def test_can_create_crystal_from_single_datablock_cif(self):
calcite = Crystal.from_cif("tests/functional/static/valid_cifs/calcite_icsd.cif")
assert calcite.a == 4.99
assert calcite.b == 4.99
assert calcite.c == 17.002
assert calcite.alpha == 90
assert calcite.beta == 90
assert calcite.gamma == 120
assert calcite.space_group == "R -3 c H"
expected_sites = {name: Site(ion, position)
for name, (ion, position) in CALCITE_ATOMIC_SITES.items()}
assert calcite.sites == expected_sites
def test_error_if_lattice_parameter_is_missing_from_cif(selfs):
with pytest.raises(ValueError) as exception_info:
Crystal.from_cif(
"tests/functional/static/invalid_cifs/calcite_icsd_missing_lattice_parameter.cif")
assert str(exception_info.value) == \
"Parameter: 'cell_length_b' missing from input CIF"
def test_error_datablock_not_given_for_multi_data_block_cif(self):
with pytest.raises(TypeError) as exception_info:
Crystal.from_cif("tests/functional/static/valid_cifs/multi_data_block.cif")
assert str(exception_info.value) == \
("__init__() missing keyword argument: 'data_block'. "
"Required when input CIF has multiple data blocks.")
def test_can_create_crystal_from_multi_data_block_cif(self):
CHFeNOS = Crystal.from_cif(
"tests/functional/static/valid_cifs/multi_data_block.cif",
data_block="data_CSD_CIF_ACAKOF")
assert CHFeNOS.a == 6.1250
assert CHFeNOS.b == 9.2460
assert CHFeNOS.c == 10.147
assert CHFeNOS.alpha == 77.16
assert CHFeNOS.beta == 83.44
assert CHFeNOS.gamma == 80.28
assert CHFeNOS.space_group == "P -1"
class TestAddingAtomicSites:
def test_can_add_sites_one_by_one(self):
calcite = Crystal([4.99, 4.99, 17.002, 90, 90, 120], "R -3 c H")
assert calcite.sites == {}
calcite.add_sites({"Ca1": CALCITE_ATOMIC_SITES["Ca1"]})
calcite.add_sites({"C1": CALCITE_ATOMIC_SITES["C1"]})
calcite.add_sites({"O1": CALCITE_ATOMIC_SITES["O1"]})
expected_sites = {name: Site(ion, position)
for name, (ion, position) in CALCITE_ATOMIC_SITES.items()}
assert calcite.sites == expected_sites
def test_adding_multiple_sites_at_once(self):
calcite = Crystal([4.99, 4.99, 17.002, 90, 90, 120], "R -3 c H")
calcite.add_sites(CALCITE_ATOMIC_SITES)
expected_sites = {name: Site(ion, position)
for name, (ion, position) in CALCITE_ATOMIC_SITES.items()}
assert calcite.sites == expected_sites
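# Editorial sketch (not itself a test): the minimal end-to-end use of the API
# exercised above -- build a Crystal, attach a site, read it back.
def example_usage():
    calcite = Crystal([4.99, 4.99, 17.002, 90, 90, 120], "R -3 c H")
    calcite.add_sites({"Ca1": ["Ca2+", [0, 0, 0]]})
    assert calcite.sites == {"Ca1": Site("Ca2+", [0, 0, 0])}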
|
noahwaterfieldprice/diffraction
|
tests/functional/create_crystal_test.py
|
Python
|
gpl-2.0
| 5,308
|
[
"CRYSTAL"
] |
90330f6756760ab2998e4b4927d950db2083ab77f8686b97b8ef1b85e2b47c31
|
import ovh
import ConfigParser
import string
import warnings
from random import choice
from prettytable import PrettyTable
class EmailManager :
''' This class uses the ovh Python API and provide some
functionalities to interact with email accounts
Arguments:
niceoutput Optional. If True (default), prints out better looking tables
Properties:
client: ovh.Client() object
Methods:
list_emails List all the domain-associated email accounts
add_emails Add the emails from the dictionary given as argument
remove_emails Remove the emails listed in the dictionary given as argument
'''
client = ovh.Client()
parser = ConfigParser.SafeConfigParser()
parser.read('ovh.conf')
DOMAIN = parser.get('ovh-eu', 'domain')
def __init__(self,niceoutput = True):
''' Constructor. Checks for token validity and if not present or invalid prompt the user
for getting it '''
self.niceoutput = niceoutput
if not(self.__check_token()):
self.__get_token()
def __check_token(self):
print 'Checking Token...'
try:
self.client.get('/me/api/credential')
return True
except ovh.APIError as e:
print "API Error ({0})\n".format(e)
return False
def __get_token(self):
access_rules = [
{'method': 'GET', 'path': '/me/api/credential'},
{'method': 'GET', 'path': '/email/domain*'},
{'method': 'POST', 'path': '/email/domain*'},
{'method': 'PUT', 'path': '/email/domain*'},
{'method': 'DELETE', 'path': '/email/domain*'}
]
validation = self.client.request_consumerkey(access_rules)
print "To access OVH Api you must validate. Please visit the following\
link:\n %s" % validation['validationUrl']
raw_input('Press Enter when done...')
self.parser.set('ovh-eu', 'consumer_key', validation['consumerKey'])
with open('ovh.conf','wb') as configfile:
self.parser.write(configfile)
def __get_emails(self):
accounts=self.client.get('/email/domain/{0}/account'.format(self.DOMAIN))
accountData = []
for account in accounts:
accountData.append(self.client.get('/email/domain/{0}/account/{1}'.format(self.DOMAIN,\
account)))
return accountData
def list_emails(self):
accounts=self.__get_emails()
if not(self.niceoutput):
for account in accounts:
print account['accountName']+'@'+account['domain']
else:
tab = PrettyTable(["Account Name","Description","Size","Blocked"])
tab.align["City name"] = "c"
for account in accounts:
tab.add_row([
account['accountName']+'@'+account['domain'],
account['description'],
account['size'],
account['isBlocked']
])
print tab
def add_emails(self,emails):
print 'Adding emails...'
for i,email in enumerate(emails):
# If password is not set
if not(email['password']):
password = self.__mkpassword()
emails[i]['password'] = password
email['password'] = password
self.__add_email(email['address'], email['password'], email['description'])
return emails
def remove_emails(self,emails):
print 'Removing emails...'
for email in emails:
self.__remove_email(email['address'])
def __add_email(self,email,password,desc=None):
#Checking if email already present
accounts = self.__get_emails()
if email in [account['accountName']+'@'+account['domain'] for account in accounts]:
warnings.warn('{email} is already there!'.format(email=email),RuntimeWarning)
else:
self.client.post('/email/domain/{0}/account'.format(self.DOMAIN),
accountName=email.split('@')[0],
description = desc,
password = password,
size = 5E9
)
print email+' added!'
def __remove_email(self,email):
#Checking if email is present
accounts = self.__get_emails()
if not(email in [account['accountName']+'@'+account['domain'] for account in accounts]):
warnings.warn('{email} cannot be deleted: not present!'.format(email=email),\
RuntimeWarning)
else:
self.client.delete('/email/domain/{0}/account/{1}'.format(self.DOMAIN,email.split('@')[0]))
print email+' removed!'
def __mkpassword(self,size=18):
chars = string.ascii_letters+string.digits
return ''.join(choice(chars) for _ in range(size))
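# Editorial usage sketch (not part of the original module): requires a valid
# ovh.conf for the 'ovh-eu' endpoint with a 'domain' entry and network
# access; the address below is illustrative.
if __name__ == '__main__':
    manager = EmailManager()
    manager.list_emails()
    created = manager.add_emails([{'address': 'info@example.org',
                                   'password': None,        # auto-generated
                                   'description': 'info box'}])
    manager.remove_emails(created)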
|
rubendibattista/ovh-python-email-manager
|
ovhem/em.py
|
Python
|
bsd-2-clause
| 5,365
|
[
"VisIt"
] |
eeb2344758f0d9129a81054701d232c2bab06bf5285aeba0cbb2c6e3f46f5832
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
A web crawler for networks’ international calling costs.
"""
from __future__ import print_function
import json
import os
import re
import sys
import time
import textwrap
import argparse
import logging
import coloredlogs
try:
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
except ImportError as imp_err:
raise ImportError('Failed to import \'selenium\':\n' + str(imp_err))
from __init__ import __version__, OperatorWebSite
SCRIPT = os.path.basename(__file__)
LOG_FILE = SCRIPT + '.log'
script_args = None
# Check Python version
if sys.version_info < (2, 6):
print('%s requires python version >= 2.6' % SCRIPT, file=sys.stderr)
sys.exit(os.EX_CONFIG)
def is_int(value):
""" Reports whether the value represents an int. """
try:
int(value)
return True
except ValueError:
return False
def is_float(value):
""" Reports whether the value represents a float. """
try:
float(value)
return True
except ValueError:
return False
def is_number(value):
""" Reports whether the value represents a number. """
return is_int(value) or is_float(value)
def process_actions(zone, operator_obj, sleep_time):
""" Processes all the required actions. """
res = None
# Dict containing a list of additional
# arguments for each available action
additional_args = {
'type_zone': {'zone': zone}}
# Process each action
for action_data in operator_obj.get_actions():
action_name = action_data.keys()[0]
path = action_data.values()[0]
action_args = {'path': path}
# Add additional action arguments
if action_name in additional_args:
key = additional_args[action_name].keys()[0]
value = additional_args[action_name].values()[0]
action_args[key] = value
# Execute the requested web driver action
method = operator_obj.get_attr(operator_obj, action_name)
# Stop processing the current zone
# if the method execution fails
res = method(action_args)
if res is None:
logging.error('Action \'%s\' failed, skipping zone \'%s\'',
action_name, zone)
break
logging.debug('Sleeping %s seconds ', str(sleep_time))
time.sleep(sleep_time)
return res
def log(msg, not_new_line=None):
""" Logging function. """
# Do not log in quiet mode or if
# a JSON document is required
if script_args.quiet or script_args.json:
return
# Print to STDOUT
if script_args.out is None:
if not_new_line:
print(msg, end='')
else:
print(msg)
# Print to file
else:
if not_new_line:
print(msg, file=script_args.out, end='')
else:
print(msg + os.linesep, file=script_args.out)
def process_data(data):
""" Parse the JSON object containing the data. """
driver = None
try:
# Chrome driver
driver = webdriver.Chrome()
# Process each operator
for operator in data['operators']:
name = operator['name']
url = operator['url']
logging.info('Operator: %s', name)
logging.info('URL: %s', url)
log('Operator:\t%s\nURL:\t\t%s' % (name, url))
# Visit the URL
driver.get(url)
# Create the operator web site object
operator_obj = OperatorWebSite(driver, operator)
# Dict containing the list of zones
# with their respective costs
costs = dict()
# Process each zone
log('Country zones:\t')
for zone in operator_obj.get_zones():
logging.info('Zone: %s\t', zone)
log('\t\t{}'.format(zone).ljust(30), not_new_line=True)
cost = process_actions(
zone, operator_obj, operator['sleep_time'])
# Check if the result is a number
if is_number(cost):
logging.info('Cost: %s', cost)
log('%s' % cost.rjust(10))
costs[zone] = cost
else:
logging.error('Cost does not appear to be a number')
# Add the costs to the output object
if script_args.json is not None:
operator["costs"] = costs
    except WebDriverException as err:
        logging.error('Web driver error: %s', err)
        sys.exit(os.EX_OSERR)
finally:
# Close and quit the browser
if driver is not None:
logging.debug('Closing web driver')
driver.close()
def load_data(file_name):
""" Load the data file. """
parsed_data = None
# Check whether the file exists
if not os.path.isfile(file_name):
logging.error('File \'%s\' does not exist', file_name)
sys.exit(os.EX_NOINPUT)
# Open the file and load its data
try:
data_file = open(file_name, 'r')
except (IOError, OSError) as err:
raise err
else:
try:
# Load the data file into a JSON object
parsed_data = json.loads(data_file.read())
except ValueError as err:
logging.error('Invalid JSON: %s', err)
finally:
data_file.close()
return parsed_data
def init_log():
""" Initialise the logging. """
level = script_args.log_level
log_dir = os.path.abspath(script_args.log_dir)
logger = logging.getLogger(__name__)
log_format = (
'[%(asctime)s] [%(levelname)s] '
'[%(name)s] [%(funcName)s():%(lineno)s] '
'[PID:%(process)d] %(message)s')
if not os.path.isdir(log_dir):
logging.error('Logging directory \'%s\' does not exist', log_dir)
sys.exit(os.EX_IOERR)
    if not log_dir.endswith('/'):
        log_dir += "/"
# Define the logging stream
stream = open(log_dir + LOG_FILE, 'w+')
log_levels = {
'unset': logging.NOTSET,
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}
log_level = log_levels[level]
coloredlogs.install(
level=log_level,
fmt=log_format,
datefmt='%d/%m/%Y %H:%M:%S',
stream=stream)
log('Logging to \'%s\' at level \'%s\'' % (log_dir + LOG_FILE, level))
return logger
def init_out_file():
""" Open the output file. """
script_args.out = os.path.abspath(script_args.out)
log('Printing output to \'%s\'' % script_args.out)
if script_args.out is not None:
try:
            if script_args.json:
                if not script_args.out.endswith('.json'):
                    script_args.out += ".json"
script_args.out = open(script_args.out, 'w')
except IOError:
logging.exception(
'Could not open output file \'%s\'', script_args.out)
sys.exit(os.EX_IOERR)
def get_args():
""" Get the command-line arguments. """
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.RawTextHelpFormatter,
description=__doc__)
    # Command-line arguments; --data is required
parser.add_argument(
'--data',
metavar='[file]',
type=str,
required=True,
help=textwrap.dedent("""\
File containing the operator URL,
the list of country zones and the file
structure for the Selenium driver."""))
parser.add_argument(
'-h',
'--help',
action='help',
default=argparse.SUPPRESS,
help=textwrap.dedent("""\
Show this help message and exit."""))
parser.add_argument(
'--json',
action='store_true',
help=textwrap.dedent("""\
Write the results using a JSON format."""))
parser.add_argument(
'--log-dir',
metavar='[dir]',
type=str,
default='/tmp/',
help=textwrap.dedent("""\
Write log file (.log) to a specific
folder (default /tmp)."""))
parser.add_argument(
'--log-level',
metavar='[level]',
default='info',
type=str,
help=textwrap.dedent("""\
Log levels: unset, debug, info, warning,
error, critical (default info)."""))
parser.add_argument(
'-o',
'--out',
metavar='[of]',
type=str,
help='Write output to file (default STDOUT).')
parser.add_argument(
'-q',
'--quiet',
action='store_true',
help='Run in quiet mode.')
parser.add_argument(
'-v',
'--version',
action='version',
version='%(prog)s {0}'.format(__version__),
help='Show version number.')
return parser.parse_args()
def main():
""" Main. """
try:
global script_args
script_args = get_args()
init_log()
if script_args.out is not None:
init_out_file()
data = load_data(os.path.abspath(script_args.data))
process_data(data)
# Print the output in JSON format
if script_args.json:
out_file = script_args.out
if script_args.out is None:
out_file = sys.stdout
json.dump(data, fp=out_file,
indent=4, encoding='utf-8')
if script_args.out is not None:
script_args.out.close()
# Ctrl-C
except KeyboardInterrupt, err:
raise err
except SystemExit, err:
raise err
except:
print('Unexpected error:', sys.exc_info()[0])
raise
return os.EX_OK
if __name__ == '__main__':
sys.exit(main())
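# Editorial note (not part of the original script): a hypothetical --data
# file.  Only 'operators', 'name', 'url' and 'sleep_time' are read directly
# in this module; the 'zones' and 'actions' keys are assumptions about what
# OperatorWebSite (defined in __init__.py, not shown) consumes, and the
# action name 'read_cost' is invented for illustration ('type_zone' is real).
#
# {
#     "operators": [
#         {
#             "name": "ExampleTel",
#             "url": "https://example.org/tariffs",
#             "sleep_time": 2,
#             "zones": ["Zone 1", "Zone 2"],
#             "actions": [
#                 {"type_zone": "//input[@id='zone']"},
#                 {"read_cost": "//span[@id='cost']"}
#             ]
#         }
#     ]
# }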
|
luigi-riefolo/network_crawler
|
network_crawler/network_crawler.py
|
Python
|
mit
| 9,914
|
[
"VisIt"
] |
d62ee6454ab65d37a4529561694306e7629383d6fae80be545f4a66fd080b2f5
|
#neuro.py - a basic set of neural network functions
#these are almost entirely based on the functions
#provided in the textbook (Data Science from Scratch)
#This is not an efficient nor robust implementation.
#For educational purposes only.
from __future__ import division
import numpy as np
import math as math
import random
from linear_algebra import dot
import Image
import matplotlib.pyplot as mplot
import sys
weights=[]
def sigmoid(t):
return 1 / (1 + math.exp(-t))
def neuron_output(weights, inputs):
return sigmoid(dot(weights, inputs))
def predict(neural_network, input_vector):
return feed_forward(neural_network, input_vector)[-1][0]
def feed_forward(neural_network, input_vector):
"""takes in a neural network
(represented as a list of lists of lists of weights)
and returns the output from forward-propagating the input"""
outputs = []
# process one layer at a time
for layer in neural_network:
input_with_bias = input_vector + [1] # add a bias input
output = [neuron_output(neuron, input_with_bias) # compute the output
for neuron in layer] # for each neuron
outputs.append(output) # and remember it
# then the input to the next layer is the output of this one
input_vector = output
#print outputs
return outputs
def backpropagate(network, input_vector, targets):
output_layer=network[-1]
hidden_outputs, outputs = feed_forward(network, input_vector)
# the output * (1 - output) is from the derivative of sigmoid
output_deltas = [output * (1 - output) * (output - target)
for output, target in zip(outputs, targets)]
# adjust weights for output layer, one neuron at a time
for i, output_neuron in enumerate(network[-1]):
# focus on the ith output layer neuron
for j, hidden_output in enumerate(hidden_outputs + [1]):
# adjust the jth weight based on both
# this neuron's delta and its jth input
output_neuron[j] -= output_deltas[i] * hidden_output
# back-propagate errors to hidden layer
hidden_deltas = [hidden_output * (1 - hidden_output) *
dot(output_deltas, [n[i] for n in output_layer])
for i, hidden_output in enumerate(hidden_outputs)]
# adjust weights for hidden layer, one neuron at a time
for i, hidden_neuron in enumerate(network[0]):
for j, input in enumerate(input_vector + [1]):
hidden_neuron[j] -= hidden_deltas[i] * input
def train(network, input_vector, targets, reps):
for __ in range(reps):
for input, target in zip(input_vector,targets):
backpropagate(network, input, target)
def setup_network(inputs):
input_size = len(inputs[0])
num_hidden = 5
output_size = 1
    hidden_layer = [[random.uniform(-1, 1) for __ in range(input_size + 1)]
                    for __ in range(num_hidden)]
    # each output neuron has one weight per hidden neuron, plus a bias weight
    output_layer = [[random.uniform(-1, 1) for __ in range(num_hidden + 1)]
                     for __ in range(output_size)]
# the network starts out with random weights
network = [hidden_layer, output_layer]
return network
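# Editorial sketch (not part of the original module): training the network on
# XOR with the helpers above.  Convergence depends on the random seed and the
# number of repetitions; the values here are illustrative.
if __name__ == '__main__':
    random.seed(0)
    xs = [[0, 0], [0, 1], [1, 0], [1, 1]]
    ys = [[0], [1], [1], [0]]               # XOR targets
    network = setup_network(xs)
    train(network, xs, ys, 10000)
    for x in xs:
        print x, round(predict(network, x), 2)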
|
armandosrz/DataScience-343
|
Neuro/neuro.py
|
Python
|
apache-2.0
| 3,296
|
[
"NEURON"
] |
34262d3f7c3a1f679aa3cfe580be35e3ac683d8f51f59451b13dcac628cfaff0
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-delete-user
# Author : Adrian Casajus
########################################################################
"""
Remove User from Configuration
"""
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... User ...' % Script.scriptName,
'Arguments:',
' User: User name' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
from DIRAC import exit as DIRACExit
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
if len( args ) < 1:
Script.showHelp()
choice = raw_input( "Are you sure you want to delete user/s %s? yes/no [no]: " % ", ".join( args ) )
choice = choice.lower()
if choice not in ( "yes", "y" ):
print("Delete aborted")
DIRACExit( 0 )
for user in args:
if not diracAdmin.csDeleteUser( user ):
errorList.append( ( "delete user", "Cannot delete user %s" % user ) )
exitCode = 255
if not exitCode:
result = diracAdmin.csCommitChanges()
if not result[ 'OK' ]:
errorList.append( ( "commit", result[ 'Message' ] ) )
exitCode = 255
for error in errorList:
print("ERROR %s: %s" % error)
DIRACExit(exitCode)
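# Example session (illustrative):
#   $ dirac-admin-delete-user john.doe jane.roe
#   Are you sure you want to delete user/s john.doe, jane.roe? yes/no [no]: yes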
|
fstagni/DIRAC
|
Interfaces/scripts/dirac-admin-delete-user.py
|
Python
|
gpl-3.0
| 1,587
|
[
"DIRAC"
] |
f0b00fd5acf4f77fac4dfe7e3c5acfe2104637aa6fdcd462d9c4946be60c811a
|
""" JobMonitoringHandler is the implementation of the JobMonitoring service
in the DISET framework
The following methods are available in the Service interface
"""
__RCSID__ = "$Id$"
from types import IntType, LongType, ListType, DictType, StringTypes, StringType, NoneType, BooleanType
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC import S_OK, S_ERROR
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.WorkloadManagementSystem.Service.JobPolicy import JobPolicy, RIGHT_GET_INFO
import DIRAC.Core.Utilities.Time as Time
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
# These are global instances of the DB classes
gJobDB = False
gJobLoggingDB = False
gTaskQueueDB = False
SUMMARY = ['JobType', 'Site', 'JobName', 'Owner', 'SubmissionTime',
'LastUpdateTime', 'Status', 'MinorStatus', 'ApplicationStatus']
SUMMARY = []
PRIMARY_SUMMARY = []
FINAL_STATES = ['Done', 'Completed', 'Stalled', 'Failed', 'Killed']
def initializeJobMonitoringHandler( serviceInfo ):
global gJobDB, gJobLoggingDB, gTaskQueueDB
gJobDB = JobDB()
gJobLoggingDB = JobLoggingDB()
gTaskQueueDB = TaskQueueDB()
return S_OK()
class JobMonitoringHandler( RequestHandler ):
def initialize( self ):
credDict = self.getRemoteCredentials()
self.ownerDN = credDict['DN']
self.ownerGroup = credDict['group']
operations = Operations( group = self.ownerGroup )
self.globalJobsInfo = operations.getValue( '/Services/JobMonitoring/GlobalJobsInfo', True )
self.jobPolicy = JobPolicy( self.ownerDN, self.ownerGroup, self.globalJobsInfo )
self.jobPolicy.setJobDB( gJobDB )
return S_OK()
##############################################################################
types_getApplicationStates = []
@staticmethod
def export_getApplicationStates ():
""" Return Distinct Values of ApplicationStatus job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'ApplicationStatus' )
##############################################################################
types_getJobTypes = []
@staticmethod
def export_getJobTypes ():
""" Return Distinct Values of JobType job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'JobType' )
##############################################################################
types_getOwners = []
@staticmethod
def export_getOwners ():
"""
Return Distinct Values of Owner job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'Owner' )
##############################################################################
types_getProductionIds = []
@staticmethod
def export_getProductionIds ():
"""
Return Distinct Values of ProductionId job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'JobGroup' )
##############################################################################
types_getJobGroups = []
@staticmethod
def export_getJobGroups( condDict = None, cutDate = None ):
"""
Return Distinct Values of ProductionId job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'JobGroup', condDict,
newer = cutDate )
##############################################################################
types_getSites = []
@staticmethod
def export_getSites ():
"""
Return Distinct Values of Site job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'Site' )
##############################################################################
types_getStates = []
@staticmethod
def export_getStates ():
"""
Return Distinct Values of Status job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'Status' )
##############################################################################
types_getMinorStates = []
@staticmethod
def export_getMinorStates ():
"""
Return Distinct Values of Minor Status job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'MinorStatus' )
##############################################################################
types_getJobs = []
@staticmethod
def export_getJobs ( attrDict = None, cutDate = None ):
"""
Return list of JobIds matching the condition given in attrDict
"""
# queryDict = {}
# if attrDict:
# if type ( attrDict ) != DictType:
# return S_ERROR( 'Argument must be of Dict Type' )
# for attribute in self.queryAttributes:
# # Only those Attribute in self.queryAttributes can be used
# if attrDict.has_key(attribute):
# queryDict[attribute] = attrDict[attribute]
return gJobDB.selectJobs( attrDict, newer = cutDate )
##############################################################################
types_getCounters = [ ListType ]
@staticmethod
def export_getCounters( attrList, attrDict = None, cutDate = '' ):
"""
Retrieve list of distinct attributes values from attrList
with attrDict as condition.
For each set of distinct values, count number of occurences.
Return a list. Each item is a list with 2 items, the list of distinct
attribute values and the counter
"""
# Check that Attributes in attrList and attrDict, they must be in
# self.queryAttributes.
# for attr in attrList:
# try:
# self.queryAttributes.index(attr)
# except:
# return S_ERROR( 'Requested Attribute not Allowed: %s.' % attr )
#
# for attr in attrDict:
# try:
# self.queryAttributes.index(attr)
# except:
# return S_ERROR( 'Condition Attribute not Allowed: %s.' % attr )
cutDate = str( cutDate )
if not attrDict:
attrDict = {}
return gJobDB.getCounters( 'Jobs', attrList, attrDict, newer = cutDate, timeStamp = 'LastUpdateTime' )
##############################################################################
types_getCurrentJobCounters = [ ]
@staticmethod
def export_getCurrentJobCounters( attrDict = None ):
""" Get job counters per Status with attrDict selection. Final statuses are given for
the last day.
"""
if not attrDict:
attrDict = {}
result = gJobDB.getCounters( 'Jobs', ['Status'], attrDict, timeStamp = 'LastUpdateTime' )
if not result['OK']:
return result
last_update = Time.dateTime() - Time.day
resultDay = gJobDB.getCounters( 'Jobs', ['Status'], attrDict, newer = last_update,
timeStamp = 'LastUpdateTime' )
if not resultDay['OK']:
return resultDay
resultDict = {}
for statusDict, count in result['Value']:
status = statusDict['Status']
resultDict[status] = count
if status in FINAL_STATES:
resultDict[status] = 0
for statusDayDict, ccount in resultDay['Value']:
if status == statusDayDict['Status']:
resultDict[status] = ccount
break
return S_OK( resultDict )
##############################################################################
types_getJobStatus = [ IntType ]
@staticmethod
def export_getJobStatus ( jobID ):
return gJobDB.getJobAttribute( jobID, 'Status' )
##############################################################################
types_getJobOwner = [ IntType ]
@staticmethod
def export_getJobOwner ( jobID ):
return gJobDB.getJobAttribute( jobID, 'Owner' )
##############################################################################
types_getJobSite = [ IntType ]
@staticmethod
def export_getJobSite ( jobID ):
return gJobDB.getJobAttribute( jobID, 'Site' )
##############################################################################
types_getJobJDL = [ IntType, BooleanType ]
@staticmethod
def export_getJobJDL( jobID, original ):
return gJobDB.getJobJDL( jobID, original = original )
##############################################################################
types_getJobLoggingInfo = [ IntType ]
@staticmethod
def export_getJobLoggingInfo( jobID ):
return gJobLoggingDB.getJobLoggingInfo( jobID )
##############################################################################
types_getJobsParameters = [ ListType, ListType ]
@staticmethod
def export_getJobsParameters ( jobIDs, parameters ):
if not ( jobIDs and parameters ) :
return S_OK( {} )
return gJobDB.getAttributesForJobList( jobIDs, parameters )
##############################################################################
types_getJobsStatus = [ ListType ]
@staticmethod
def export_getJobsStatus ( jobIDs ):
if not jobIDs:
return S_OK( {} )
return gJobDB.getAttributesForJobList( jobIDs, ['Status'] )
##############################################################################
types_getJobsMinorStatus = [ ListType ]
@staticmethod
def export_getJobsMinorStatus ( jobIDs ):
return gJobDB.getAttributesForJobList( jobIDs, ['MinorStatus'] )
##############################################################################
types_getJobsApplicationStatus = [ ListType ]
@staticmethod
def export_getJobsApplicationStatus ( jobIDs ):
return gJobDB.getAttributesForJobList( jobIDs, ['ApplicationStatus'] )
##############################################################################
types_getJobsSites = [ ListType ]
@staticmethod
def export_getJobsSites ( jobIDs ):
return gJobDB.getAttributesForJobList( jobIDs, ['Site'] )
##############################################################################
types_getJobSummary = [ IntType ]
@staticmethod
def export_getJobSummary( jobID ):
return gJobDB.getJobAttributes( jobID, SUMMARY )
##############################################################################
types_getJobPrimarySummary = [ IntType ]
@staticmethod
def export_getJobPrimarySummary( jobID ):
return gJobDB.getJobAttributes( jobID, PRIMARY_SUMMARY )
##############################################################################
types_getJobsSummary = [ ListType ]
@staticmethod
def export_getJobsSummary( jobIDs ):
if not jobIDs:
return S_ERROR( 'JobMonitoring.getJobsSummary: Received empty job list' )
result = gJobDB.getAttributesForJobList( jobIDs, SUMMARY )
# return result
restring = str( result['Value'] )
return S_OK( restring )
##############################################################################
types_getJobPageSummaryWeb = [DictType, ListType, IntType, IntType]
def export_getJobPageSummaryWeb( self, selectDict, sortList, startItem, maxItems, selectJobs = True ):
""" Get the summary of the job information for a given page in the
job monitor in a generic format
"""
resultDict = {}
startDate = selectDict.get( 'FromDate', None )
if startDate:
del selectDict['FromDate']
# For backward compatibility
if startDate is None:
startDate = selectDict.get( 'LastUpdate', None )
if startDate:
del selectDict['LastUpdate']
endDate = selectDict.get( 'ToDate', None )
if endDate:
del selectDict['ToDate']
result = self.jobPolicy.getControlledUsers( RIGHT_GET_INFO )
if not result['OK']:
return S_ERROR( 'Failed to evaluate user rights' )
if result['Value'] != 'ALL':
selectDict[ ( 'Owner', 'OwnerGroup' ) ] = result['Value']
# Sorting instructions. Only one for the moment.
if sortList:
orderAttribute = sortList[0][0] + ":" + sortList[0][1]
else:
orderAttribute = None
statusDict = {}
result = gJobDB.getCounters( 'Jobs', ['Status'], selectDict,
newer = startDate,
older = endDate,
timeStamp = 'LastUpdateTime' )
nJobs = 0
if result['OK']:
for stDict, count in result['Value']:
nJobs += count
statusDict[stDict['Status']] = count
resultDict['TotalRecords'] = nJobs
if nJobs == 0:
return S_OK( resultDict )
resultDict['Extras'] = statusDict
if selectJobs:
iniJob = startItem
if iniJob >= nJobs:
return S_ERROR( 'Item number out of range' )
result = gJobDB.selectJobs( selectDict, orderAttribute = orderAttribute,
newer = startDate, older = endDate, limit = ( maxItems, iniJob ) )
if not result['OK']:
return S_ERROR( 'Failed to select jobs: ' + result['Message'] )
summaryJobList = result['Value']
if not self.globalJobsInfo:
validJobs, _invalidJobs, _nonauthJobs, _ownJobs = self.jobPolicy.evaluateJobRights( summaryJobList,
RIGHT_GET_INFO )
summaryJobList = validJobs
result = gJobDB.getAttributesForJobList( summaryJobList, SUMMARY )
if not result['OK']:
return S_ERROR( 'Failed to get job summary: ' + result['Message'] )
summaryDict = result['Value']
# Evaluate last sign of life time
for jobID, jobDict in summaryDict.items():
if jobDict['HeartBeatTime'] == 'None':
jobDict['LastSignOfLife'] = jobDict['LastUpdateTime']
else:
lastTime = Time.fromString( jobDict['LastUpdateTime'] )
hbTime = Time.fromString( jobDict['HeartBeatTime'] )
if ( hbTime - lastTime ) > ( lastTime - lastTime ) or jobDict['Status'] == "Stalled":
jobDict['LastSignOfLife'] = jobDict['HeartBeatTime']
else:
jobDict['LastSignOfLife'] = jobDict['LastUpdateTime']
tqDict = {}
result = gTaskQueueDB.getTaskQueueForJobs( summaryJobList )
if result['OK']:
tqDict = result['Value']
# If no jobs can be selected after the properties check
if not summaryDict.keys():
return S_OK( resultDict )
# prepare the standard structure now
key = summaryDict.keys()[0]
paramNames = summaryDict[key].keys()
records = []
for jobID, jobDict in summaryDict.items():
jParList = []
for pname in paramNames:
jParList.append( jobDict[pname] )
jParList.append( tqDict.get( jobID, 0 ) )
records.append( jParList )
resultDict['ParameterNames'] = paramNames + ['TaskQueueID']
resultDict['Records'] = records
return S_OK( resultDict )
##############################################################################
types_getJobStats = [ StringTypes, DictType ]
@staticmethod
def export_getJobStats ( attribute, selectDict ):
""" Get job statistics distribution per attribute value with a given selection
"""
startDate = selectDict.get( 'FromDate', None )
if startDate:
del selectDict['FromDate']
# For backward compatibility
if startDate is None:
startDate = selectDict.get( 'LastUpdate', None )
if startDate:
del selectDict['LastUpdate']
endDate = selectDict.get( 'ToDate', None )
if endDate:
del selectDict['ToDate']
result = gJobDB.getCounters( 'Jobs', [attribute], selectDict,
newer = startDate,
older = endDate,
timeStamp = 'LastUpdateTime' )
resultDict = {}
if result['OK']:
for cDict, count in result['Value']:
resultDict[cDict[attribute]] = count
return S_OK( resultDict )
##############################################################################
types_getJobsPrimarySummary = [ ListType ]
@staticmethod
def export_getJobsPrimarySummary ( jobIDs ):
return gJobDB.getAttributesForJobList( jobIDs, PRIMARY_SUMMARY )
##############################################################################
types_getJobParameter = [ [StringType, IntType, LongType] , StringTypes ]
@staticmethod
def export_getJobParameter( jobID, parName ):
return gJobDB.getJobParameters( jobID, [parName] )
##############################################################################
types_getJobParameters = [ [IntType, LongType] ]
@staticmethod
def export_getJobParameters( jobID ):
return gJobDB.getJobParameters( jobID )
##############################################################################
types_traceJobParameter = [ StringTypes, [IntType, StringType, LongType, ListType], StringTypes, [StringType, NoneType], [StringType, NoneType] ]
@staticmethod
def export_traceJobParameter( site, localID, parameter, date, until ):
return gJobDB.traceJobParameter( site, localID, parameter, date, until )
##############################################################################
types_traceJobParameters = [ StringTypes, [IntType, StringType, LongType, ListType], [ListType, NoneType], [ListType, NoneType], [StringType, NoneType], [StringType, NoneType] ]
@staticmethod
def export_traceJobParameters( site, localID, parameterList, attributeList, date, until ):
return gJobDB.traceJobParameters( site, localID, parameterList, attributeList, date, until )
##############################################################################
types_getAtticJobParameters = [ [IntType, LongType] ]
@staticmethod
def export_getAtticJobParameters( jobID, parameters = None, rescheduleCycle = -1 ):
if not parameters:
parameters = []
return gJobDB.getAtticJobParameters( jobID, parameters, rescheduleCycle )
##############################################################################
types_getJobAttributes = [ IntType ]
@staticmethod
def export_getJobAttributes( jobID ):
return gJobDB.getJobAttributes( jobID )
##############################################################################
types_getJobAttribute = [ IntType, StringTypes ]
@staticmethod
def export_getJobAttribute( jobID, attribute ):
return gJobDB.getJobAttribute( jobID, attribute )
##############################################################################
types_getSiteSummary = [ ]
@staticmethod
def export_getSiteSummary():
return gJobDB.getSiteSummary()
##############################################################################
types_getJobHeartBeatData = [ IntType ]
@staticmethod
def export_getJobHeartBeatData( jobID ):
return gJobDB.getHeartBeatData( jobID )
##############################################################################
types_getInputData = [ [IntType, LongType] ]
@staticmethod
def export_getInputData( jobID ):
""" Get input data for the specified jobs
"""
return gJobDB.getInputData( jobID )
##############################################################################
types_getOwnerGroup = []
@staticmethod
def export_getOwnerGroup ():
"""
Return Distinct Values of OwnerGroup from the JobsDB
"""
return gJobDB.getDistinctJobAttributes( 'OwnerGroup' )
|
vmendez/DIRAC
|
WorkloadManagementSystem/Service/JobMonitoringHandler.py
|
Python
|
gpl-3.0
| 19,011
|
[
"DIRAC"
] |
2d3779752c3f5dcd216391290255c10add6ea67491e3236e2da7ea8f3c2feb75
|
#!/usr/bin/env python
"""
==================
ModEM
==================
# Generate data file for ModEM
# by Paul Soeffky 2013
# revised by LK 2014
# revised by JP 2014
# edited by AK 2016
"""
import os
import matplotlib.cm as cm
import matplotlib.colorbar as mcb
import matplotlib.colors as colors
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.widgets as widgets
import numpy as np
import scipy.interpolate as spi
import scipy.stats as stats
from matplotlib.colors import Normalize
from matplotlib.patches import Ellipse
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import mtpy.analysis.pt as mtpt
import mtpy.core.mt as mt
import mtpy.core.z as mtz
import mtpy.imaging.mtcolors as mtcl
import mtpy.imaging.mtplottools as mtplottools
import mtpy.modeling.ws3dinv as ws
import mtpy.utils.exceptions as mtex
import mtpy.utils.gis_tools
try:
from evtk.hl import gridToVTK, pointsToVTK
except ImportError:
print ('If you want to write a vtk file for 3d viewing, you need to download '
'and install evtk from https://bitbucket.org/pauloh/pyevtk')
epsg_dict = {28350:['+proj=utm +zone=50 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',50],
28351:['+proj=utm +zone=51 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',51],
28352:['+proj=utm +zone=52 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',52],
28353:['+proj=utm +zone=53 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',53],
28354:['+proj=utm +zone=54 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',54],
28355:['+proj=utm +zone=55 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',55],
28356:['+proj=utm +zone=56 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',56],
3112:['+proj=lcc +lat_1=-18 +lat_2=-36 +lat_0=0 +lon_0=134 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',0],
4326:['+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs',0],
4204:['+proj=longlat +ellps=intl +no_defs', 0]}
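# Usage sketch (illustrative, not part of the original module): each entry
# maps an EPSG code to a proj4 string and, for UTM projections, the zone
# number, e.g.
#   proj4_string, utm_zone = epsg_dict[28354]   # GDA94 / MGA zone 54
# Non-UTM entries (3112, 4326, 4204) carry 0 as a placeholder zone.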
#==============================================================================
class Data(object):
"""
Data will read and write .dat files for ModEM and convert a WS data file
to ModEM format.
.. note:: the data is interpolated onto the given periods such that all
stations invert for the same periods. The interpolation is
a linear interpolation of each of the real and imaginary parts
of the impedance tensor and induction tensor.
See mtpy.core.mt.MT.interpolate for more details
Arguments
------------
**edi_list** : list
list of full paths to .edi files you want to invert for
====================== ====================================================
Attributes/Key Words Description
====================== ====================================================
_dtype internal variable defining the data type of
data_array
_t_shape internal variable defining shape of tipper array in
_dtype
_z_shape internal variable defining shape of Z array in
_dtype
center_position (east, north, elev) for center point of station
array. All stations are relative to this location
for plotting purposes.
comp_index_dict dictionary for index values of component of Z and T
station_locations numpy.ndarray structured to store station
location values. Keys are:
* station --> station name
* east --> UTM east (m)
* north --> UTM north (m)
* lat --> latitude in decimal degrees
* lon --> longitude in decimal degrees
* elev --> elevation (m)
* zone --> UTM zone
* rel_east -- > relative east location to
center_position (m)
* rel_north --> relative north location to
center_position (m)
data_array numpy.ndarray (num_stations) structured to store
data. keys are:
* station --> station name
* lat --> latitude in decimal degrees
* lon --> longitude in decimal degrees
* elev --> elevation (m)
* rel_east -- > relative east location to
center_position (m)
* rel_north --> relative north location to
center_position (m)
* east --> UTM east (m)
* north --> UTM north (m)
* zone --> UTM zone
* z --> impedance tensor array with shape
(num_freq, 2, 2)
* z_err --> impedance tensor error array with
shape (num_freq, 2, 2)
* tip --> Tipper array with shape
(num_freq, 1, 2)
* tip_err --> Tipper error array with shape
(num_freq, 1, 2)
data_fn full path to data file
data_period_list period list from all the data
edi_list list of full paths to edi files
error_egbert percentage to multiply sqrt(Z_xy*Zyx) by.
*default* is 3 as prescribed by Egbert & Kelbert
error_floor percentage to set the error floor at, anything below
this number will be set to error_floor.
*default* is 10
error_tipper absolute tipper error, all tipper error will be
set to this value unless you specify error_type as
'floor' or 'floor_egbert'.
*default* is .05 for 5%
error_type [ 'floor' | 'value' | 'egbert' ]
*default* is 'egbert'
* 'floor' sets the error floor to error_floor
* 'value' sets error to error_value
* 'egbert' sets error to
error_egbert * sqrt(abs(zxy*zyx))
* 'floor_egbert' sets error floor to
error_egbert * sqrt(abs(zxy*zyx))
error_value percentage to multiply Z by to set error
*default* is 5 for 5% of Z as error
fn_basename basename of data file. *default* is 'ModEM_Data.dat'
header_strings strings for header of data file following the format
outlined in the ModEM documentation
inv_comp_dict dictionary of inversion components
inv_mode inversion mode, options are: *default* is '1'
* '1' --> for 'Full_Impedance' and
'Full_Vertical_Components'
* '2' --> 'Full_Impedance'
* '3' --> 'Off_Diagonal_Impedance' and
'Full_Vertical_Components'
* '4' --> 'Off_Diagonal_Impedance'
* '5' --> 'Full_Vertical_Components'
* '6' --> 'Full_Interstation_TF'
* '7' --> 'Off_Diagonal_Rho_Phase'
inv_mode_dict dictionary for inversion modes
max_num_periods maximum number of periods
mt_dict dictionary of mtpy.core.mt.MT objects with keys
being station names
period_dict dictionary of period index for period_list
period_list list of periods to invert for
period_max maximum value of period to invert for
period_min minimum value of period to invert for
rotate_angle Angle to rotate data to assuming 0 is N and E is 90
save_path path to save data file to
units [ [V/m]/[T] | [mV/km]/[nT] | Ohm ] units of Z
*default* is [mV/km]/[nT]
wave_sign [ + | - ] sign of time dependent wave.
*default* is '+' as positive downwards.
====================== ====================================================
========================== ================================================
Methods Description
========================== ================================================
convert_ws3dinv_data_file convert a ws3dinv file to ModEM format,
**Note** this doesn't include tipper data and
you need a station location file like the one
output by mtpy.modeling.ws3dinv
get_data_from_edi get data from given .edi files and fill
attributes accordingly
get_mt_dict get a dictionary of mtpy.core.mt.MT objects
with keys being station names
get_period_list get a list of periods to invert for
get_station_locations get station locations and relative locations
filling in station_locations
read_data_file read in a ModEM data file and fill attributes
data_array, station_locations, period_list, mt_dict
write_data_file write a ModEM data file
========================== ================================================
:Example 1 --> create inversion period list: ::
>>> import os
>>> import mtpy.modeling.modem as modem
>>> edi_path = r"/home/mt/edi_files"
>>> edi_list = [os.path.join(edi_path, edi) \
for edi in os.listdir(edi_path)\
if edi.find('.edi') > 0]
>>> md = modem.Data(edi_list, period_min=.1, period_max=300,\
max_num_periods=12)
>>> md.write_data_file(save_path=r"/home/modem/inv1")
:Example 2 --> set inversion period list from data: ::
>>> import os
>>> import mtpy.modeling.modem as modem
>>> edi_path = r"/home/mt/edi_files"
>>> edi_list = [os.path.join(edi_path, edi) \
for edi in os.listdir(edi_path)\
if edi.find('.edi') > 0]
>>> md = modem.Data(edi_list)
>>> #get period list from an .edi file
>>> mt_obj1 = modem.mt.MT(edi_list[0])
>>> inv_period_list = 1./mt_obj1.Z.freq
>>> #invert for every third period in inv_period_list
>>> inv_period_list = inv_period_list[np.arange(0, len(inv_period_list), 3)]
>>> md.period_list = inv_period_list
>>> md.write_data_file(save_path=r"/home/modem/inv1")
:Example 3 --> change error values: ::
>>> import mtpy.modeling.modem as modem
>>> mdr = modem.Data()
>>> mdr.read_data_file(r"/home/modem/inv1/ModEM_Data.dat")
>>> mdr.error_type = 'floor'
>>> mdr.error_floor = 10
>>> mdr.error_tipper = .03
>>> mdr.write_data_file(save_path=r"/home/modem/inv2")
:Example 4 --> change inversion type: ::
>>> import mtpy.modeling.modem as modem
>>> mdr = modem.Data()
>>> mdr.read_data_file(r"/home/modem/inv1/ModEM_Data.dat")
>>> mdr.inv_mode = '3'
>>> mdr.write_data_file(save_path=r"/home/modem/inv2")
:Example 5 --> create mesh first then data file: ::
>>> import mtpy.modeling.modem as modem
>>> import os
>>> #1) make a list of all .edi files that will be inverted for
>>> edi_path = r"/home/EDI_Files"
>>> edi_list = [os.path.join(edi_path, edi)
for edi in os.listdir(edi_path)
>>> ... if edi.find('.edi') > 0]
>>> #2) make a grid from the stations themselves with 200m cell spacing
>>> mmesh = modem.Model(edi_list=edi_list, cell_size_east=200,
>>> ... cell_size_north=200)
>>> mmesh.make_mesh()
>>> # check to see if the mesh is what you think it should be
>>> mmesh.plot_mesh()
>>> # all is good write the mesh file
>>> mmesh.write_model_file(save_path=r"/home/modem/Inv1")
>>> # create data file
>>> md = modem.Data(edi_list, station_locations=mmesh.station_locations)
>>> md.write_data_file(save_path=r"/home/modem/Inv1")
:Example 6 --> rotate data: ::
>>> md.rotation_angle = 60
>>> md.write_data_file(save_path=r"/home/modem/Inv1")
>>> # or
>>> md.write_data_file(save_path=r"/home/modem/Inv1", \
rotation_angle=60)
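:Example 7 --> illustrative 'egbert' error calculation (sketch added for
clarity; the values are assumed, not from the original docstring): ::
>>> import numpy as np
>>> z_xy, z_yx = 10 - 5j, -12 + 6j
>>> error_egbert = 3.0
>>> abs_err = np.sqrt(abs(z_xy * z_yx)) * error_egbert / 100.
>>> # mirrors the error_egbert * sqrt(|Zxy * Zyx|) rule applied in
>>> # write_data_file when error_type is 'egbert' or 'floor_egbert'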
"""
def __init__(self, edi_list=None, **kwargs):
self.edi_list = edi_list
self.error_type = kwargs.pop('error_type', 'egbert')
self.error_floor = kwargs.pop('error_floor', 5.0)
self.error_value = kwargs.pop('error_value', 5.0)
self.error_egbert = kwargs.pop('error_egbert', 3.0)
self.error_tipper = kwargs.pop('error_tipper', .05)
self.wave_sign_impedance = kwargs.pop('wave_sign_impedance', '+')
self.wave_sign_tipper = kwargs.pop('wave_sign_tipper', '+')
self.units = kwargs.pop('units', '[mV/km]/[nT]')
self.inv_mode = kwargs.pop('inv_mode', '1')
self.period_list = kwargs.pop('period_list', None)
self.period_step = kwargs.pop('period_step', 1)
self.period_min = kwargs.pop('period_min', None)
self.period_max = kwargs.pop('period_max', None)
self.period_buffer = kwargs.pop('period_buffer', None)
self.max_num_periods = kwargs.pop('max_num_periods', None)
self.data_period_list = None
self.fn_basename = kwargs.pop('fn_basename', 'ModEM_Data.dat')
self.save_path = kwargs.pop('save_path', os.getcwd())
self.formatting = kwargs.pop('format', '1')
self._rotation_angle = kwargs.pop('rotation_angle', 0.0)
self._set_rotation_angle(self._rotation_angle)
self._station_locations = None
self.center_position = np.array([0.0, 0.0])
self.epsg = kwargs.pop('epsg',None)
self.data_array = None
self.mt_dict = None
self.data_fn = kwargs.pop('data_fn','ModEM_Data.dat')
self._z_shape = (1, 2, 2)
self._t_shape = (1, 1, 2)
self._dtype = [('station', '|S10'),
('lat', np.float),
('lon', np.float),
('elev', np.float),
('rel_east', np.float),
('rel_north', np.float),
('east', np.float),
('north', np.float),
('zone', '|S4'),
('z', (np.complex, self._z_shape)),
('z_err', (np.complex, self._z_shape)),
('tip', (np.complex, self._t_shape)),
('tip_err', (np.complex, self._t_shape))]
self.inv_mode_dict = {'1':['Full_Impedance', 'Full_Vertical_Components'],
'2':['Full_Impedance'],
'3':['Off_Diagonal_Impedance',
'Full_Vertical_Components'],
'4':['Off_Diagonal_Impedance'],
'5':['Full_Vertical_Components'],
'6':['Full_Interstation_TF'],
'7':['Off_Diagonal_Rho_Phase']}
self.inv_comp_dict = {'Full_Impedance':['zxx', 'zxy', 'zyx', 'zyy'],
'Off_Diagonal_Impedance':['zxy', 'zyx'],
'Full_Vertical_Components':['tx', 'ty']}
self.comp_index_dict = {'zxx': (0, 0), 'zxy':(0, 1), 'zyx':(1, 0),
'zyy':(1, 1), 'tx':(0, 0), 'ty':(0, 1)}
self.header_strings = \
['# Created using MTpy error {0} of {1:.0f}%, data rotated {2:.1f} deg clockwise from N\n'.format(
self.error_type, self.error_floor, self._rotation_angle),
'# Period(s) Code GG_Lat GG_Lon X(m) Y(m) Z(m) Component Real Imag Error\n']
#size of a utm grid
self._utm_grid_size_north = 888960.0
self._utm_grid_size_east = 640000.0
self._utm_cross = False
self._utm_ellipsoid = 23
def _set_dtype(self, z_shape, t_shape):
"""
reset dtype
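Illustrative sketch (sizes are assumed, not from the original docstring): ::
>>> self._set_dtype((20, 2, 2), (20, 1, 2))
>>> d_arr = np.zeros(5, dtype=self._dtype)
>>> d_arr[0]['z'].shape    # impedance block for one station -> (20, 2, 2)
>>> d_arr[0]['tip'].shape  # tipper block for one station -> (20, 1, 2)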
"""
self._z_shape = z_shape
self._t_shape = t_shape
self._dtype = [('station', '|S10'),
('lat', np.float),
('lon', np.float),
('elev', np.float),
('rel_east', np.float),
('rel_north', np.float),
('east', np.float),
('north', np.float),
('zone', '|S4'),
('z', (np.complex, self._z_shape)),
('z_err', (np.complex, self._z_shape)),
('tip', (np.complex, self._t_shape)),
('tip_err', (np.complex, self._t_shape))]
def _set_header_string(self):
"""
reset the header string for file
"""
h_str = '# Created using MTpy error {0} of {1:.0f}%, data rotated {2:.1f}_deg clockwise from N\n'
if self.error_type == 'egbert':
self.header_strings[0] = h_str.format(self.error_type,
self.error_egbert,
self._rotation_angle)
elif self.error_type == 'floor':
self.header_strings[0] = h_str.format(self.error_type,
self.error_floor,
self._rotation_angle)
elif self.error_type == 'value':
self.header_strings[0] = h_str.format(self.error_type,
self.error_value,
self._rotation_angle)
def get_mt_dict(self):
"""
get mt_dict from edi file list
"""
if self.edi_list is None:
raise ModEMError('edi_list is None, please input a list of '
'.edi files containing the full path')
if len(self.edi_list) == 0:
raise ModEMError('edi_list is empty, please input a list of '
'.edi files containing the full path' )
self.mt_dict = {}
for edi in self.edi_list:
mt_obj = mt.MT(edi)
self.mt_dict[mt_obj.station] = mt_obj
def get_relative_station_locations(self):
"""
get station locations from edi files
"""
utm_zones_dict = {'M':9, 'L':8, 'K':7, 'J':6, 'H':5, 'G':4, 'F':3,
'E':2, 'D':1, 'C':0, 'N':10, 'P':11, 'Q':12, 'R':13,
'S':14, 'T':15, 'U':16, 'V':17, 'W':18, 'X':19}
# get center position of the stations in lat and lon
self.center_position[0] = self.data_array['lat'].mean()
self.center_position[1] = self.data_array['lon'].mean()
#--> need to convert lat and lon to east and north
for c_arr in self.data_array:
if c_arr['lat'] != 0.0 and c_arr['lon'] != 0.0:
c_arr['zone'], c_arr['east'], c_arr['north'] = \
mtpy.utils.gis_tools.ll_to_utm(self._utm_ellipsoid,
c_arr['lat'],
c_arr['lon'])
#--> need to check to see if all stations are in the same zone
utm_zone_list = list(set(self.data_array['zone']))
#if there are more than one zone, figure out which zone is the odd ball
utm_zone_dict = dict([(utmzone, 0) for utmzone in utm_zone_list])
if len(utm_zone_list) != 1:
self._utm_cross = True
for c_arr in self.data_array:
utm_zone_dict[c_arr['zone']] += 1
#flip keys and values so the key is the number of zones and
# the value is the utm zone
utm_zone_dict = dict([(utm_zone_dict[key], key)
for key in utm_zone_dict.keys()])
#get the main utm zone as the one with the most stations in it
main_utm_zone = utm_zone_dict[max(utm_zone_dict.keys())]
#Get a list of index values where utm zones are not the
#same as the main zone
diff_zones = np.where(self.data_array['zone'] != main_utm_zone)[0]
for c_index in diff_zones:
c_arr = self.data_array[c_index]
c_utm_zone = c_arr['zone']
print '{0} utm_zone is {1} and does not match {2}'.format(
c_arr['station'], c_arr['zone'], main_utm_zone)
zone_shift = 1-abs(utm_zones_dict[c_utm_zone[-1]]-\
utm_zones_dict[main_utm_zone[-1]])
#--> check to see if the zone is in the same latitude
#if odd ball zone is north of main zone, add 888960 m
if zone_shift > 1:
north_shift = self._utm_grid_size_north*zone_shift
print ('--> adding {0:.2f}'.format(north_shift)+\
' meters N to place station in ' +\
'proper coordinates relative to all other ' +\
'stations.')
c_arr['north'] += north_shift
#if odd ball zone is south of main zone, subtract 888960 m
elif zone_shift < -1:
north_shift = self._utm_grid_size_north*zone_shift
print ('--> subtracting {0:.2f}'.format(north_shift)+\
' meters N to place station in ' +\
'proper coordinates relative to all other ' +\
'stations.')
c_arr['north'] -= north_shift
#--> if zone is shifted east or west
if int(c_utm_zone[0:-1]) > int(main_utm_zone[0:-1]):
east_shift = self._utm_grid_size_east*\
abs(int(c_utm_zone[0:-1])-int(main_utm_zone[0:-1]))
print ('--> adding {0:.2f}'.format(east_shift)+\
' meters E to place station in ' +\
'proper coordinates relative to all other ' +\
'stations.')
c_arr['east'] += east_shift
elif int(c_utm_zone[0:-1]) < int(main_utm_zone[0:-1]):
east_shift = self._utm_grid_size_east*\
abs(int(c_utm_zone[0:-1])-int(main_utm_zone[0:-1]))
print ('--> subtracting {0:.2f}'.format(east_shift)+\
' meters E to place station in ' +\
'proper coordinates relative to all other ' +\
'stations.')
c_arr['east'] -= east_shift
#remove the average distance to get coordinates in a relative space
self.data_array['rel_east'] = self.data_array['east']-\
self.data_array['east'].mean()
self.data_array['rel_north'] = self.data_array['north']-\
self.data_array['north'].mean()
#--> rotate grid if necessary
#to do this rotate the station locations because ModEM assumes the
#input mesh is a lateral grid.
#needs to be 90 - because North is assumed to be 0 but the rotation
#matrix assumes that E is 0.
if self.rotation_angle != 0:
cos_ang = np.cos(np.deg2rad(self.rotation_angle))
sin_ang = np.sin(np.deg2rad(self.rotation_angle))
rot_matrix = np.matrix(np.array([[cos_ang, sin_ang],
[-sin_ang, cos_ang]]))
coords = np.array([self.data_array['rel_east'],
self.data_array['rel_north']])
#rotate the relative station locations
new_coords = np.array(np.dot(rot_matrix, coords))
self.data_array['rel_east'][:] = new_coords[0, :]
self.data_array['rel_north'][:] = new_coords[1, :]
print 'Rotated stations by {0:.1f} deg clockwise from N'.format(
self.rotation_angle)
#translate the stations so they are relative to 0,0
east_center = (self.data_array['rel_east'].max()-
np.abs(self.data_array['rel_east'].min()))/2
north_center = (self.data_array['rel_north'].max()-
np.abs(self.data_array['rel_north'].min()))/2
#remove the average distance to get coordinates in a relative space
self.data_array['rel_east'] -= east_center
self.data_array['rel_north'] -= north_center
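# Illustrative sketch of the rotation convention used above (values are
# assumed, not part of the original module): with the angle measured
# clockwise from north, a station at rel_east=0, rel_north=1000 rotated
# by 90 deg ends up due east of the grid centre:
#   ang = np.deg2rad(90.)
#   rot = np.array([[np.cos(ang), np.sin(ang)], [-np.sin(ang), np.cos(ang)]])
#   np.dot(rot, [0., 1000.])   # -> approximately [1000., 0.] = [rel_east, rel_north]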
def get_period_list(self):
"""
make a period list to invert for
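Illustrative sketch (values are assumed, not from the original docstring): ::
>>> import numpy as np
>>> pmin, pmax = np.log10(0.01), np.log10(1000.)
>>> period_list = np.logspace(pmin, pmax, num=6)
>>> # 6 log-spaced periods between 0.01 s and 1000 s, mirroring how
>>> # period_min, period_max and max_num_periods are combined below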
"""
if self.mt_dict is None:
self.get_mt_dict()
if self.period_list is not None:
print '-'*50
print 'Inverting for periods:'
for per in self.period_list:
print ' {0:<12.6f}'.format(per)
print '-'*50
return
data_period_list = []
for s_key in sorted(self.mt_dict.keys()):
mt_obj = self.mt_dict[s_key]
data_period_list.extend(list(1./mt_obj.Z.freq))
self.data_period_list = np.array(sorted(list(set(data_period_list)),
reverse=False))
if self.period_min is not None:
if self.period_max is None:
raise ModEMError('Need to input period_max')
if self.period_max is not None:
if self.period_min is None:
raise ModEMError('Need to input period_min')
if self.period_min is not None and self.period_max is not None:
if self.max_num_periods is None:
raise ModEMError('Need to input number of periods to use')
min_index = np.where(self.data_period_list >= self.period_min)[0][0]
max_index = np.where(self.data_period_list <= self.period_max)[0][-1]
pmin = np.log10(self.data_period_list[min_index])
pmax = np.log10(self.data_period_list[max_index])
self.period_list = np.logspace(pmin, pmax, num=self.max_num_periods)
print '-'*50
print 'Inverting for periods:'
for per in self.period_list:
print ' {0:<12.6f}'.format(per)
print '-'*50
if self.period_list is None:
raise ModEMError('Need to input period_min, period_max, '
'max_num_periods or a period_list')
def _set_rotation_angle(self, rotation_angle):
"""
on set rotation angle rotate mt_dict and data_array,
"""
if self._rotation_angle == rotation_angle:
return
print 'Changing rotation angle from {0:.1f} to {1:.1f}'.format(
self._rotation_angle, rotation_angle)
self._rotation_angle = -self._rotation_angle+rotation_angle
if self.rotation_angle == 0:
return
print 'Changing rotation angle from {0:.1f} to {1:.1f}'.format(
self._rotation_angle, rotation_angle)
self._rotation_angle = rotation_angle
if self.data_array is None:
return
if self.mt_dict is None:
return
for mt_key in sorted(self.mt_dict.keys()):
mt_obj = self.mt_dict[mt_key]
mt_obj.Z.rotate(self._rotation_angle)
mt_obj.Tipper.rotate(self._rotation_angle)
print 'Data rotated to align with {0:.1f} deg clockwise from N'.format(
self._rotation_angle)
print '*'*70
print ' If you want to rotate station locations as well use the'
print ' command Data.get_relative_station_locations() '
print ' if stations have not already been rotated in Model'
print '*'*70
self._fill_data_array()
def _get_rotation_angle(self):
return self._rotation_angle
rotation_angle = property(fget=_get_rotation_angle,
fset=_set_rotation_angle,
doc="""Rotate data assuming N=0, E=90""")
def _fill_data_array(self):
"""
fill the data array from mt_dict
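Illustrative sketch of the period_buffer check (values are assumed): ::
>>> period_buffer = 2.0
>>> iperiod, nearest_data_period = 10., 4.
>>> max(nearest_data_period / iperiod, iperiod / nearest_data_period) < period_buffer
False
>>> # the 10 s interpolation target is dropped because the closest
>>> # measured period (4 s) is more than a factor of 2 away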
"""
if self.period_list is None:
self.get_period_list()
ns = len(self.mt_dict.keys())
nf = len(self.period_list)
d_array = False
if self.data_array is not None:
d_arr_copy = self.data_array.copy()
d_array = True
self._set_dtype((nf, 2, 2), (nf, 1, 2))
self.data_array = np.zeros(ns, dtype=self._dtype)
rel_distance = True
for ii, s_key in enumerate(sorted(self.mt_dict.keys())):
mt_obj = self.mt_dict[s_key]
if d_array is True:
try:
d_index = np.where(d_arr_copy['station'] == s_key)[0][0]
self.data_array[ii]['station'] = s_key
self.data_array[ii]['lat'] = d_arr_copy[d_index]['lat']
self.data_array[ii]['lon'] = d_arr_copy[d_index]['lon']
self.data_array[ii]['east'] = d_arr_copy[d_index]['east']
self.data_array[ii]['north'] = d_arr_copy[d_index]['north']
self.data_array[ii]['elev'] = d_arr_copy[d_index]['elev']
self.data_array[ii]['rel_east'] = d_arr_copy[d_index]['rel_east']
self.data_array[ii]['rel_north'] = d_arr_copy[d_index]['rel_north']
except IndexError:
print 'Could not find {0} in data_array'.format(s_key)
else:
self.data_array[ii]['station'] = mt_obj.station
self.data_array[ii]['lat'] = mt_obj.lat
self.data_array[ii]['lon'] = mt_obj.lon
self.data_array[ii]['east'] = mt_obj.east
self.data_array[ii]['north'] = mt_obj.north
self.data_array[ii]['elev'] = mt_obj.elev
try:
self.data_array[ii]['rel_east'] = mt_obj.grid_east
self.data_array[ii]['rel_north'] = mt_obj.grid_north
rel_distance = False
except AttributeError:
pass
# interpolate each station onto the period list
# check bounds of period list
interp_periods = self.period_list[np.where(
(self.period_list >= 1./mt_obj.Z.freq.max()) &
(self.period_list <= 1./mt_obj.Z.freq.min()))]
# if specified, apply a buffer so that interpolation doesn't stretch too far over periods
if type(self.period_buffer) in [float,int]:
interp_periods_new = []
dperiods = 1./mt_obj.Z.freq
for iperiod in interp_periods:
# find nearest data period
difference = np.abs(iperiod-dperiods)
nearestdperiod = dperiods[difference == np.amin(difference)][0]
if max(nearestdperiod/iperiod, iperiod/nearestdperiod) < self.period_buffer:
interp_periods_new.append(iperiod)
interp_periods = np.array(interp_periods_new)
interp_z, interp_t = mt_obj.interpolate(1./interp_periods)
for kk, ff in enumerate(interp_periods):
jj = np.where(self.period_list == ff)[0][0]
self.data_array[ii]['z'][jj] = interp_z.z[kk, :, :]
self.data_array[ii]['z_err'][jj] = interp_z.z_err[kk, :, :]
if mt_obj.Tipper.tipper is not None:
self.data_array[ii]['tip'][jj] = interp_t.tipper[kk, :, :]
self.data_array[ii]['tip_err'][jj] = \
interp_t.tipper_err[kk, :, :]
if rel_distance is False:
self.get_relative_station_locations()
def _set_station_locations(self, station_locations):
"""
take a station_locations array and populate data_array
"""
if self.data_array is None:
self.get_mt_dict()
self.get_period_list()
self._fill_data_array()
for s_arr in station_locations:
try:
d_index = np.where(self.data_array['station'] ==
s_arr['station'])[0][0]
except IndexError:
print 'Could not find {0} in data_array'.format(s_arr['station'])
d_index = None
if d_index is not None:
self.data_array[d_index]['lat'] = s_arr['lat']
self.data_array[d_index]['lon'] = s_arr['lon']
self.data_array[d_index]['east'] = s_arr['east']
self.data_array[d_index]['north'] = s_arr['north']
self.data_array[d_index]['elev'] = s_arr['elev']
self.data_array[d_index]['rel_east'] = s_arr['rel_east']
self.data_array[d_index]['rel_north'] = s_arr['rel_north']
def _get_station_locations(self):
"""
extract station locations from data array
"""
if self.data_array is None:
return None
station_locations = self.data_array[['station', 'lat', 'lon',
'north', 'east', 'elev',
'rel_north', 'rel_east']]
return station_locations
station_locations = property(_get_station_locations,
_set_station_locations,
doc="""location of stations""")
# def compute_inv_error(self, comp, data_value, data_error):
# """
# compute the error from the given parameters
# """
# #compute relative error
# if comp.find('t') == 0:
# if 'floor' in self.error_type:
# abs_err = max(self.error_tipper,
# data_error)
# else:
# abs_err = self.error_tipper
# elif comp.find('z') == 0:
# if self.error_type == 'floor':
# abs_err = max(data_error,
# (self.error_floor/100.)*abs(data_value))
#
# elif self.error_type == 'value':
# abs_err = abs(data_value)*self.error_value/100.
#
# elif self.error_type == 'egbert':
# d_zxy = self.data_array[ss]['z'][ff, 0, 1]
# d_zyx = self.data_array[ss]['z'][ff, 1, 0]
# abs_err = np.sqrt(abs(d_zxy*d_zyx))*\
# self.error_egbert/100.
# elif self.error_type == 'floor_egbert':
# abs_err = self.data_array[ss][c_key+'_err'][ff, z_ii, z_jj]
# d_zxy = self.data_array[ss]['z'][ff, 0, 1]
# d_zyx = self.data_array[ss]['z'][ff, 1, 0]
# if abs_err < np.sqrt(abs(d_zxy*d_zyx))*self.error_egbert/100.:
# abs_err = np.sqrt(abs(d_zxy*d_zyx))*self.error_egbert/100.
#
#
# if abs_err == 0.0:
# abs_err = 1e3
# print('''error at {0} is 0 for period {1} \n
# for {2}({3}, {4}) set to 1e3\n
# data = {5:.4e}+j{6:.4e}'''.format(
# sta, per, comp, z_ii, z_jj, zz.real,
# zz.imag))
# if self.units == 'ohm':
# abs_err /= 796.
def write_data_file(self, save_path=None, fn_basename=None,
rotation_angle=None, compute_error=True, fill=True):
"""
write data file for ModEM
will save file as save_path/fn_basename
Arguments:
------------
**save_path** : string
directory path to save data file to.
*default* is cwd
**fn_basename** : string
basename to save data file as
*default* is 'ModEM_Data.dat'
**rotation_angle** : float
angle to rotate the data by assuming N = 0,
E = 90. *default* is 0.0
Outputs:
----------
**data_fn** : string
full path to created data file
:Example: ::
>>> import os
>>> import mtpy.modeling.modem as modem
>>> edi_path = r"/home/mt/edi_files"
>>> edi_list = [os.path.join(edi_path, edi) \
for edi in os.listdir(edi_path)\
if edi.find('.edi') > 0]
>>> md = modem.Data(edi_list, period_min=.1, period_max=300,\
max_num_periods=12)
>>> md.write_data_file(save_path=r"/home/modem/inv1")
"""
if save_path is not None:
self.save_path = save_path
if fn_basename is not None:
self.fn_basename = fn_basename
self.data_fn = os.path.join(self.save_path, self.fn_basename)
self.get_period_list()
#rotate data if desired
if rotation_angle is not None:
self.rotation_angle = rotation_angle
#be sure to fill in data array
if fill is True:
self._fill_data_array()
# get relative station locations in grid coordinates
self.get_relative_station_locations()
#reset the header string to be informational
self._set_header_string()
# number of periods - subtract periods with all zero components
nper = len(np.where(np.mean(np.mean(np.mean(np.abs(self.data_array['z']),axis=0),axis=1),axis=1)>0)[0])
dlines = []
for inv_mode in self.inv_mode_dict[self.inv_mode]:
dlines.append(self.header_strings[0])
dlines.append(self.header_strings[1])
dlines.append('> {0}\n'.format(inv_mode))
if inv_mode.find('Impedance') > 0:
dlines.append('> exp({0}i\omega t)\n'.format(self.wave_sign_impedance))
dlines.append('> {0}\n'.format(self.units))
elif inv_mode.find('Vertical') >=0:
dlines.append('> exp({0}i\omega t)\n'.format(self.wave_sign_tipper))
dlines.append('> []\n')
dlines.append('> 0\n') #orientation, need to add at some point
dlines.append('> {0: >10.6f} {1:>10.6f}\n'.format(
self.center_position[0], self.center_position[1]))
dlines.append('> {0} {1}\n'.format(self.data_array['z'].shape[1],
self.data_array['z'].shape[0]))
for ss in range(self.data_array['z'].shape[0]):
for ff in range(self.data_array['z'].shape[1]):
for comp in self.inv_comp_dict[inv_mode]:
#index values for component with in the matrix
z_ii, z_jj = self.comp_index_dict[comp]
#get the correct key for data array according to comp
if comp.find('z') == 0:
c_key = 'z'
elif comp.find('t') == 0:
c_key = 'tip'
#get the value for that component at that frequency
zz = self.data_array[ss][c_key][ff, z_ii, z_jj]
if zz.real != 0.0 and zz.imag != 0.0 and \
zz.real != 1e32 and zz.imag != 1e32:
if self.formatting == '1':
per = '{0:<12.5e}'.format(self.period_list[ff])
sta = '{0:>7}'.format(self.data_array[ss]['station'])
lat = '{0:> 9.3f}'.format(self.data_array[ss]['lat'])
lon = '{0:> 9.3f}'.format(self.data_array[ss]['lon'])
eas = '{0:> 12.3f}'.format(self.data_array[ss]['rel_east'])
nor = '{0:> 12.3f}'.format(self.data_array[ss]['rel_north'])
ele = '{0:> 12.3f}'.format(self.data_array[ss]['elev'])
com = '{0:>4}'.format(comp.upper())
if self.units == 'ohm':
rea = '{0:> 14.6e}'.format(zz.real/796.)
ima = '{0:> 14.6e}'.format(zz.imag/796.)
else:
rea = '{0:> 14.6e}'.format(zz.real)
ima = '{0:> 14.6e}'.format(zz.imag)
elif self.formatting == '2':
per = '{0:<14.6e}'.format(self.period_list[ff])
sta = '{0:<10}'.format(self.data_array[ss]['station'])
lat = '{0:> 14.6f}'.format(self.data_array[ss]['lat'])
lon = '{0:> 14.6f}'.format(self.data_array[ss]['lon'])
eas = '{0:> 12.3f}'.format(self.data_array[ss]['rel_east'])
nor = '{0:> 15.3f}'.format(self.data_array[ss]['rel_north'])
ele = '{0:> 10.3f}'.format(self.data_array[ss]['elev'])
com = '{0:>12}'.format(comp.upper())
if self.units == 'ohm':
rea = '{0:> 17.6e}'.format(zz.real/796.)
ima = '{0:> 17.6e}'.format(zz.imag/796.)
else:
rea = '{0:> 17.6e}'.format(zz.real)
ima = '{0:> 17.6e}'.format(zz.imag)
if compute_error:
#compute relative error
if comp.find('t') == 0:
if 'floor' in self.error_type:
abs_err = max(self.error_tipper,
self.data_array[ss]['tip_err'][ff,0,z_ii])
else:
abs_err = self.error_tipper
elif comp.find('z') == 0:
if self.error_type == 'floor':
rel_err = self.data_array[ss][c_key+'_err'][ff, z_ii, z_jj]/\
abs(zz)
if rel_err < self.error_floor/100.:
rel_err = self.error_floor/100.
abs_err = rel_err*abs(zz)
elif self.error_type == 'value':
abs_err = abs(zz)*self.error_value/100.
elif self.error_type == 'egbert':
d_zxy = self.data_array[ss]['z'][ff, 0, 1]
d_zyx = self.data_array[ss]['z'][ff, 1, 0]
abs_err = np.sqrt(abs(d_zxy*d_zyx))*\
self.error_egbert/100.
elif self.error_type == 'floor_egbert':
abs_err = self.data_array[ss][c_key+'_err'][ff, z_ii, z_jj]
d_zxy = self.data_array[ss]['z'][ff, 0, 1]
d_zyx = self.data_array[ss]['z'][ff, 1, 0]
if abs(d_zxy) == 0.0:
d_zxy = 1E3
if abs(d_zyx) == 0.0:
d_zyx = 1e3
eg_err = np.sqrt(abs(d_zxy*d_zyx))*self.error_egbert/100.
if abs_err < eg_err:
abs_err = np.sqrt(abs(d_zxy*d_zyx))*self.error_egbert/100.
else:
pass
if abs_err == 0.0:
abs_err = 1e3
print('''error at {0} is 0 for period {1} \n
for {2}({3}, {4}) set to 1e3\n
data = {5:.4e}+j{6:.4e}'''.format(
sta, per, comp, z_ii, z_jj, zz.real,
zz.imag))
if self.units == 'ohm':
abs_err /= 796.
else:
abs_err = self.data_array[ss][c_key+'_err'][ff, z_ii, z_jj].real
if c_key.find('z') >= 0 and self.units == 'ohm':
abs_err /= 796.
abs_err = '{0:> 14.6e}'.format(abs(abs_err))
#make sure that x==north, y==east, z==+down
dline = ''.join([per, sta, lat, lon, nor, eas, ele,
com, rea, ima, abs_err, '\n'])
dlines.append(dline)
dfid = open(self.data_fn, 'w')
dfid.writelines(dlines)
dfid.close()
print 'Wrote ModEM data file to {0}'.format(self.data_fn)
def convert_ws3dinv_data_file(self, ws_data_fn, station_fn=None,
save_path=None, fn_basename=None):
"""
convert a ws3dinv data file into ModEM format
Arguments:
------------
**ws_data_fn** : string
full path to WS data file
**station_fn** : string
full path to station info file output by
mtpy.modeling.ws3dinv. Or you can create one using
mtpy.modeling.ws3dinv.WSStation
**save_path** : string
directory path to save data file to.
*default* is cwd
**fn_basename** : string
basename to save data file as
*default* is 'ModEM_Data.dat'
Outputs:
-----------
**data_fn** : string
full path to created data file
:Example: ::
>>> import mtpy.modeling.modem as modem
>>> mdr = modem.Data()
>>> mdr.convert_ws3dinv_data_file(r"/home/ws3dinv/inv1/WSData.dat",
station_fn=r"/home/ws3dinv/inv1/WS_Station_Locations.txt")
"""
if os.path.isfile(ws_data_fn) == False:
raise ws.WSInputError('Did not find {0}, check path'.format(ws_data_fn))
if save_path is not None:
self.save_path = save_path
else:
self.save_path = os.path.dirname(ws_data_fn)
if fn_basename is not None:
self.fn_basename = fn_basename
#--> get data from data file
wsd = ws.WSData()
wsd.read_data_file(ws_data_fn, station_fn=station_fn)
ns = wsd.data['station'].shape[0]
nf = wsd.period_list.shape[0]
self.period_list = wsd.period_list.copy()
self._set_dtype((nf, 2, 2), (nf, 1, 2))
self.data_array = np.zeros(ns, dtype=self._dtype)
#--> fill data array
for ii, d_arr in enumerate(wsd.data):
self.data_array[ii]['station'] = d_arr['station']
self.data_array[ii]['rel_east'] = d_arr['east']
self.data_array[ii]['rel_north'] = d_arr['north']
self.data_array[ii]['z'][:] = d_arr['z_data']
self.data_array[ii]['z_err'][:] = d_arr['z_data_err'].real*\
d_arr['z_err_map'].real
self.data_array[ii]['station'] = d_arr['station']
self.data_array[ii]['lat'] = 0.0
self.data_array[ii]['lon'] = 0.0
self.data_array[ii]['rel_east'] = d_arr['east']
self.data_array[ii]['rel_north'] = d_arr['north']
self.data_array[ii]['elev'] = 0.0
#need to change the inversion mode to be the same as the ws_data file
if self.data_array['z'].all() == 0.0:
if self.data_array['tip'].all() == 0.0:
self.inv_mode = '4'
else:
self.inv_mode = '3'
else:
if self.data_array['tip'].all() == 0.0:
self.inv_mode = '2'
else:
self.inv_mode = '1'
#-->write file
self.write_data_file()
def read_data_file(self, data_fn=None, center_utm = None):
"""
read ModEM data file
inputs:
data_fn = full path to data file name
center_utm = option to provide real world coordinates of the center of
the grid for putting the data and model back into
utm/grid coordinates, format [east_0, north_0, z_0]
Fills attributes:
* data_array
* period_list
* mt_dict
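Illustrative example (paths and coordinates are assumed): ::
>>> import mtpy.modeling.modem as modem
>>> mdr = modem.Data()
>>> mdr.read_data_file(r"/home/modem/inv1/ModEM_Data.dat",
center_utm=[500000., 7000000., 0.])
>>> # data_array['east'] / ['north'] are then rel_east / rel_north
>>> # shifted by the given real-world centre coordinates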
"""
if data_fn is not None:
self.data_fn = data_fn
self.save_path = os.path.dirname(self.data_fn)
self.fn_basename = os.path.basename(self.data_fn)
if self.data_fn is None:
raise ModEMError('data_fn is None, enter a data file to read.')
elif os.path.isfile(self.data_fn) is False:
raise ModEMError('Could not find {0}, check path'.format(self.data_fn))
dfid = open(self.data_fn, 'r')
dlines = dfid.readlines()
dfid.close()
header_list = []
metadata_list = []
data_list = []
period_list = []
station_list = []
read_impedance = False
read_tipper = False
for dline in dlines:
if dline.find('#') == 0:
header_list.append(dline.strip())
elif dline.find('>') == 0:
metadata_list.append(dline[1:].strip())
if dline.lower().find('ohm') > 0:
self.units = 'ohm'
elif dline.lower().find('mv') > 0:
self.units = '[mV/km]/[nT]'
elif dline.lower().find('vertical') > 0:
read_tipper = True
read_impedance = False
elif dline.lower().find('impedance') > 0:
read_impedance = True
read_tipper = False
if dline.find('exp') > 0:
if read_impedance is True:
self.wave_sign_impedance = dline[dline.find('(')+1]
elif read_tipper is True:
self.wave_sign_tipper = dline[dline.find('(')+1]
elif len(dline[1:].strip().split()) == 2:
value_list = [float(value) for value in
dline[1:].strip().split()]
if value_list[0]%1 == 0 and value_list[1]%1 == 0:
n_periods = value_list[0]
n_stations = value_list[1]
else:
self.center_position = np.array(value_list)
else:
dline_list = dline.strip().split()
if len(dline_list) == 11:
for ii, d_str in enumerate(dline_list):
if ii != 1:
try:
dline_list[ii] = float(d_str.strip())
except ValueError:
pass
# be sure the station name is a string
else:
dline_list[ii] = d_str.strip()
period_list.append(dline_list[0])
station_list.append(dline_list[1])
data_list.append(dline_list)
#try to find rotation angle
h_list = header_list[0].split()
for hh, h_str in enumerate(h_list):
if h_str.find('_deg') > 0:
try:
self._rotation_angle = float(h_str[0:h_str.find('_deg')])
print ('Set rotation angle to {0:.1f} '.format(
self._rotation_angle)+'deg clockwise from N')
except ValueError:
pass
self.period_list = np.array(sorted(set(period_list)))
station_list = sorted(set(station_list))
#make a period dictionary to with key as period and value as index
period_dict = dict([(per, ii) for ii, per in enumerate(self.period_list)])
#--> need to sort the data into a useful fashion such that each station
# is an mt object
data_dict = {}
z_dummy = np.zeros((len(self.period_list), 2, 2), dtype='complex')
t_dummy = np.zeros((len(self.period_list), 1, 2), dtype='complex')
index_dict = {'zxx': (0, 0), 'zxy':(0, 1), 'zyx':(1, 0), 'zyy':(1, 1),
'tx':(0, 0), 'ty':(0, 1)}
#dictionary for true false if station data (lat, lon, elev, etc)
#has been filled already so we don't rewrite it each time
tf_dict = {}
for station in station_list:
data_dict[station] = mt.MT()
data_dict[station].Z = mtz.Z(z_array=z_dummy.copy(),
z_err_array=z_dummy.copy().real,
freq=1./self.period_list)
data_dict[station].Tipper = mtz.Tipper(tipper_array=t_dummy.copy(),
tipper_err_array=t_dummy.copy().real,
freq=1./self.period_list)
#make sure that the station data starts out with false to fill
#the data later
tf_dict[station] = False
#fill in the data for each station
for dd in data_list:
#get the period index from the data line
p_index = period_dict[dd[0]]
#get the component index from the data line
ii, jj = index_dict[dd[7].lower()]
#if the station data has not been filled yet, fill it
if tf_dict[dd[1]] == False:
data_dict[dd[1]].lat = dd[2]
data_dict[dd[1]].lon = dd[3]
data_dict[dd[1]].grid_north = dd[4]
data_dict[dd[1]].grid_east = dd[5]
data_dict[dd[1]].grid_elev = dd[6]
data_dict[dd[1]].station = dd[1]
tf_dict[dd[1]] = True
#fill in the impedance tensor with appropriate values
if dd[7].find('Z') == 0:
z_err = dd[10]
if self.wave_sign_impedance == '+':
z_value = dd[8]+1j*dd[9]
elif self.wave_sign_impedance == '-':
z_value = dd[8]-1j*dd[9]
if self.units == 'ohm':
z_value *= 796.
z_err *= 796.
data_dict[dd[1]].Z.z[p_index, ii, jj] = z_value
data_dict[dd[1]].Z.z_err[p_index, ii, jj] = z_err
#fill in tipper with appropriate values
elif dd[7].find('T') == 0:
if self.wave_sign_tipper == '+':
data_dict[dd[1]].Tipper.tipper[p_index, ii, jj] = dd[8]+1j*dd[9]
elif self.wave_sign_tipper == '-':
data_dict[dd[1]].Tipper.tipper[p_index, ii, jj] = dd[8]-1j*dd[9]
data_dict[dd[1]].Tipper.tipper_err[p_index, ii, jj] = dd[10]
#make mt_dict an attribute for easier manipulation later
self.mt_dict = data_dict
ns = len(self.mt_dict.keys())
nf = len(self.period_list)
self._set_dtype((nf, 2, 2), (nf, 1, 2))
self.data_array = np.zeros(ns, dtype=self._dtype)
#Be sure to calculate invariants and phase tensor for each station
for ii, s_key in enumerate(sorted(self.mt_dict.keys())):
mt_obj = self.mt_dict[s_key]
self.mt_dict[s_key].zinv.compute_invariants()
self.mt_dict[s_key].pt.set_z_object(mt_obj.Z)
self.mt_dict[s_key].Tipper.compute_amp_phase()
self.mt_dict[s_key].Tipper.compute_mag_direction()
self.data_array[ii]['station'] = mt_obj.station
self.data_array[ii]['lat'] = mt_obj.lat
self.data_array[ii]['lon'] = mt_obj.lon
self.data_array[ii]['east'] = mt_obj.east
self.data_array[ii]['north'] = mt_obj.north
self.data_array[ii]['elev'] = mt_obj.grid_elev
self.data_array[ii]['rel_east'] = mt_obj.grid_east
self.data_array[ii]['rel_north'] = mt_obj.grid_north
self.data_array[ii]['z'][:] = mt_obj.Z.z
self.data_array[ii]['z_err'][:] = mt_obj.Z.z_err
self.data_array[ii]['tip'][:] = mt_obj.Tipper.tipper
self.data_array[ii]['tip_err'][:] = mt_obj.Tipper.tipper_err
# option to provide real world coordinates in eastings/northings
# (ModEM data file contains real world center in lat/lon but projection
# is not provided so utm is assumed, causing errors when points cross
# utm zones. And lat/lon cut off to 3 d.p. causing errors in smaller areas)
if center_utm is not None:
self.data_array['east'] = self.data_array['rel_east'] + center_utm[0]
self.data_array['north'] = self.data_array['rel_north'] + center_utm[1]
def write_vtk_station_file(self, vtk_save_path=None,
vtk_fn_basename='ModEM_stations'):
"""
write a vtk file for station locations. For now this in relative
coordinates.
Arguments:
-------------
**vtk_save_path** : string
directory to save vtk file to.
*default* is Model.save_path
**vtk_fn_basename** : string
filename basename of vtk file
*default* is ModEM_stations, evtk will add
on the extension .vtu
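Illustrative example (paths are assumed, not from the original docstring): ::
>>> import mtpy.modeling.modem as modem
>>> mdr = modem.Data()
>>> mdr.read_data_file(r"/home/modem/inv1/ModEM_Data.dat")
>>> mdr.write_vtk_station_file(vtk_save_path=r"/home/modem/inv1",
vtk_fn_basename='ModEM_stations')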
"""
if vtk_save_path is None:
vtk_fn = os.path.join(self.save_path, vtk_fn_basename)
else:
vtk_fn = os.path.join(vtk_save_path, vtk_fn_basename)
pointsToVTK(vtk_fn,
self.station_locations['rel_north']/1000,
self.station_locations['rel_east']/1000,
-self.station_locations['elev']/1000,
data={'elevation':self.station_locations['elev']})
print '--> Wrote station file to {0}'.format(vtk_fn)
print '-'*50
#==============================================================================
# mesh class
#==============================================================================
class Model(object):
"""
make and read a FE mesh grid
The mesh assumes the coordinate system where:
x == North
y == East
z == + down
All dimensions are in meters.
:Example 1 --> create mesh first then data file: ::
>>> import mtpy.modeling.modem as modem
>>> import os
>>> #1) make a list of all .edi files that will be inverted for
>>> edi_path = r"/home/EDI_Files"
>>> edi_list = [os.path.join(edi_path, edi)
for edi in os.listdir(edi_path)
>>> ... if edi.find('.edi') > 0]
>>> #2) make a grid from the stations themselves with 200m cell spacing
>>> mmesh = modem.Model(edi_list=edi_list, cell_size_east=200,
>>> ... cell_size_north=200)
>>> mmesh.make_mesh()
>>> # check to see if the mesh is what you think it should be
>>> mmesh.plot_mesh()
>>> # all is good write the mesh file
>>> msmesh.write_model_file(save_path=r"/home/modem/Inv1")
>>> # create data file
>>> md = modem.Data(edi_list, station_locations=mmesh.station_locations)
>>> md.write_data_file(save_path=r"/home/modem/Inv1")
:Example 2 --> create data file first then model file: ::
>>> import mtpy.modeling.modem as modem
>>> import os
>>> #1) make a list of all .edi files that will be inverted for
>>> edi_path = r"/home/EDI_Files"
>>> edi_list = [os.path.join(edi_path, edi)
for edi in os.listdir(edi_path)
>>> ... if edi.find('.edi') > 0]
>>> #2) create data file
>>> md = modem.Data(edi_list)
>>> md.write_data_file(save_path=r"/home/modem/Inv1")
>>> #3) make a grid from the stations themselves with 200m cell spacing
>>> mmesh = modem.Model(edi_list=edi_list, cell_size_east=200,
cell_size_north=200,
station_locations=md.station_locations)
>>> mmesh.make_mesh()
>>> # check to see if the mesh is what you think it should be
>>> mmesh.plot_mesh()
>>> # all is good write the mesh file
>>> msmesh.write_model_file(save_path=r"/home/modem/Inv1")
:Example 3 --> Rotate Mesh: ::
>>> mmesh.mesh_rotation_angle = 60
>>> mmesh.make_mesh()
.. note:: ModEM assumes all coordinates are relative to North and East, and
does not accommodate mesh rotations, therefore, here the rotation
is of the stations, which essentially does the same thing. You
will need to rotate your data to align with the 'new' coordinate
system.
==================== ======================================================
Attributes Description
==================== ======================================================
cell_size_east mesh block width in east direction
*default* is 500
cell_size_north mesh block width in north direction
*default* is 500
edi_list list of .edi files to invert for
grid_east overall distance of grid nodes in east direction
grid_north overall distance of grid nodes in north direction
grid_z overall distance of grid nodes in z direction
model_fn full path to initial file name
n_layers total number of vertical layers in model
nodes_east relative distance between nodes in east direction
nodes_north relative distance between nodes in north direction
nodes_z relative distance between nodes in z direction
pad_east number of cells for padding on E and W sides
*default* is 7
pad_north number of cells for padding on S and N sides
*default* is 7
pad_root_east padding cells E & W will be pad_root_east**(x)
pad_root_north padding cells N & S will be pad_root_north**(x)
pad_z number of cells for padding at bottom
*default* is 4
res_list list of resistivity values for starting model
res_model starting resistivity model
mesh_rotation_angle Angle to rotate the grid to. Angle is measured
positive clockwise assuming North is 0 and East is 90.
*default* is None
save_path path to save file to
station_fn full path to station file
station_locations location of stations
title title in initial file
z1_layer first layer thickness
z_bottom absolute bottom of the model *default* is 300,000
z_target_depth Depth of deepest target, *default* is 50,000
_utm_grid_size_east size of a UTM grid in east direction.
*default* is 640000 meters
_utm_grid_size_north size of a UTM grid in north direction.
*default* is 888960 meters
==================== ======================================================
.. note:: If the survey steps across multiple UTM zones, then a
distance will be added to the stations to place them in
the correct location. This distance is
_utm_grid_size_north and _utm_grid_size_east. You should
adjust these parameters to place the locations in the proper spot
as grid distances and overlaps change over the globe.
==================== ======================================================
Methods Description
==================== ======================================================
make_mesh makes a mesh from the given specifications
plot_mesh plots mesh to make sure everything is good
write_initial_file writes an initial model file that includes the mesh
==================== ======================================================
"""
def __init__(self, edi_list=None, **kwargs):
self.edi_list = edi_list
# size of cells within station area in meters
self.cell_size_east = kwargs.pop('cell_size_east', 500)
self.cell_size_north = kwargs.pop('cell_size_north', 500)
#padding cells on either side
self.pad_east = kwargs.pop('pad_east', 7)
self.pad_north = kwargs.pop('pad_north', 7)
self.pad_z = kwargs.pop('pad_z', 4)
#root of padding cells
self.pad_stretch_h= kwargs.pop('pad_stretch_h', 1.2)
self.pad_stretch_v= kwargs.pop('pad_stretch_v', 1.2)
self.z1_layer = kwargs.pop('z1_layer', 10)
self.z_target_depth = kwargs.pop('z_target_depth', 50000)
self.z_bottom = kwargs.pop('z_bottom', 300000)
#number of vertical layers
self.n_layers = kwargs.pop('n_layers', 30)
#strike angle to rotate grid to
self.mesh_rotation_angle = kwargs.pop('mesh_rotation_angle', 0)
#--> attributes to be calculated
#station information
self.station_locations = kwargs.pop('station_locations', None)
#grid nodes
self.nodes_east = None
self.nodes_north = None
self.nodes_z = None
#grid locations
self.grid_east = None
self.grid_north = None
self.grid_z = None
#size of a utm grid
self._utm_grid_size_north = 888960.0
self._utm_grid_size_east = 640000.0
self._utm_cross = False
self._utm_ellipsoid = 23
#resistivity model
self.res_model = None
self.grid_center = None
#initial file stuff
self.model_fn = kwargs.pop('model_fn', None)
self.save_path = kwargs.pop('save_path', None)
self.model_fn_basename = kwargs.pop('model_fn_basename',
'ModEM_Model.ws')
if self.model_fn is not None:
self.save_path = os.path.dirname(self.model_fn)
self.model_fn_basename = os.path.basename(self.model_fn)
self.title = 'Model File written by MTpy.modeling.modem'
self.res_scale = kwargs.pop('res_scale', 'loge')
def get_station_locations(self):
"""
get the station locations from lats and lons
"""
utm_zones_dict = {'M':9, 'L':8, 'K':7, 'J':6, 'H':5, 'G':4, 'F':3,
'E':2, 'D':1, 'C':0, 'N':10, 'P':11, 'Q':12, 'R':13,
'S':14, 'T':15, 'U':16, 'V':17, 'W':18, 'X':19}
#if station locations are not input read from the edi files
if self.station_locations is None:
if self.edi_list is None:
raise AttributeError('edi_list is None, need to input a list of '
'edi files to read in.')
n_stations = len(self.edi_list)
if n_stations == 0:
raise ModEMError('No .edi files in edi_list, please check '
'file locations.')
#make a structured array to put station location information into
self.station_locations = np.zeros(n_stations,
dtype=[('station','|S10'),
('lat', np.float),
('lon', np.float),
('east', np.float),
('north', np.float),
('zone', '|S4'),
('rel_east', np.float),
('rel_north', np.float),
('elev', np.float)])
#get station locations in meters
for ii, edi in enumerate(self.edi_list):
mt_obj = mt.MT(edi)
self.station_locations[ii]['lat'] = mt_obj.lat
self.station_locations[ii]['lon'] = mt_obj.lon
self.station_locations[ii]['station'] = mt_obj.station
self.station_locations[ii]['east'] = mt_obj.east
self.station_locations[ii]['north'] = mt_obj.north
self.station_locations[ii]['elev'] = mt_obj.elev
self.station_locations[ii]['zone'] = mt_obj.utm_zone
#--> need to convert lat and lon to east and north
for c_arr in self.station_locations:
if c_arr['lat'] != 0.0 and c_arr['lon'] != 0.0:
c_arr['zone'], c_arr['east'], c_arr['north'] = \
mtpy.utils.gis_tools.ll_to_utm(self._utm_ellipsoid,
c_arr['lat'],
c_arr['lon'])
#--> need to check to see if all stations are in the same zone
utm_zone_list = list(set(self.station_locations['zone']))
#if there are more than one zone, figure out which zone is the odd ball
utm_zone_dict = dict([(utmzone, 0) for utmzone in utm_zone_list])
if len(utm_zone_list) != 1:
self._utm_cross = True
for c_arr in self.station_locations:
utm_zone_dict[c_arr['zone']] += 1
#flip keys and values so the key is the number of zones and
# the value is the utm zone
utm_zone_dict = dict([(utm_zone_dict[key], key)
for key in utm_zone_dict.keys()])
#get the main utm zone as the one with the most stations in it
main_utm_zone = utm_zone_dict[max(utm_zone_dict.keys())]
#Get a list of index values where utm zones are not the
#same as the main zone
diff_zones = np.where(self.station_locations['zone'] != main_utm_zone)[0]
for c_index in diff_zones:
c_arr = self.station_locations[c_index]
c_utm_zone = c_arr['zone']
print '{0} utm_zone is {1} and does not match {2}'.format(
c_arr['station'], c_arr['zone'], main_utm_zone)
zone_shift = 1-abs(utm_zones_dict[c_utm_zone[-1]]-\
utm_zones_dict[main_utm_zone[-1]])
#--> check to see if the zone is in the same latitude
#if odd ball zone is north of main zone, add 888960 m
if zone_shift > 1:
north_shift = self._utm_grid_size_north*zone_shift
print ('--> adding {0:.2f}'.format(north_shift)+\
' meters N to place station in ' +\
'proper coordinates relative to all other ' +\
'stations.')
c_arr['north'] += north_shift
#if odd ball zone is south of main zone, subtract 888960 m
elif zone_shift < -1:
north_shift = self._utm_grid_size_north*zone_shift
print ('--> subtracting {0:.2f}'.format(north_shift)+\
' meters N to place station in ' +\
'proper coordinates relative to all other ' +\
'stations.')
c_arr['north'] -= north_shift
#--> if zone is shifted east or west
if int(c_utm_zone[0:-1]) > int(main_utm_zone[0:-1]):
east_shift = self._utm_grid_size_east*\
abs(int(c_utm_zone[0:-1])-int(main_utm_zone[0:-1]))
print ('--> adding {0:.2f}'.format(east_shift)+\
' meters E to place station in ' +\
'proper coordinates relative to all other ' +\
'stations.')
c_arr['east'] += east_shift
elif int(c_utm_zone[0:-1]) < int(main_utm_zone[0:-1]):
east_shift = self._utm_grid_size_east*\
abs(int(c_utm_zone[0:-1])-int(main_utm_zone[0:-1]))
print ('--> subtracting {0:.2f}'.format(east_shift)+\
' meters E to place station in ' +\
'proper coordinates relative to all other ' +\
'stations.')
c_arr['east'] -= east_shift
#remove the average distance to get coordinates in a relative space
self.station_locations['rel_east'] = self.station_locations['east']-\
self.station_locations['east'].mean()
self.station_locations['rel_north'] = self.station_locations['north']-\
self.station_locations['north'].mean()
#--> rotate grid if necessary
#to do this rotate the station locations because ModEM assumes the
#input mesh is a lateral grid.
#needs to be 90 - because North is assumed to be 0 but the rotation
#matrix assumes that E is 0.
if self.mesh_rotation_angle != 0:
cos_ang = np.cos(np.deg2rad(self.mesh_rotation_angle))
sin_ang = np.sin(np.deg2rad(self.mesh_rotation_angle))
rot_matrix = np.matrix(np.array([[cos_ang, sin_ang],
[-sin_ang, cos_ang]]))
coords = np.array([self.station_locations['rel_east'],
self.station_locations['rel_north']])
#rotate the relative station locations
new_coords = np.array(np.dot(rot_matrix, coords))
self.station_locations['rel_east'][:] = new_coords[0, :]
self.station_locations['rel_north'][:] = new_coords[1, :]
print 'Rotated stations by {0:.1f} deg clockwise from N'.format(
self.mesh_rotation_angle)
#translate the stations so they are relative to 0,0
east_center = (self.station_locations['rel_east'].max()-
np.abs(self.station_locations['rel_east'].min()))/2
north_center = (self.station_locations['rel_north'].max()-
np.abs(self.station_locations['rel_north'].min()))/2
#remove the average distance to get coordinates in a relative space
self.station_locations['rel_east'] -= east_center
self.station_locations['rel_north'] -= north_center
def make_mesh(self, update_data_center=False):
"""
create finite element mesh according to parameters set.
The mesh is built by first finding the center of the station area.
Then cells are added in the north and east direction with width
cell_size_east and cell_size_north to the extremeties of the station
area. Padding cells are then added to extend the model to reduce
edge effects. The number of padding cells is set by pad_east and pad_north,
and their width grows by the factor pad_stretch_h. The station
locations are then computed as the center of the nearest cell as
required by the code.
The vertical cells are built to increase in size exponentially with
depth. The first cell thickness is z1_layer and should be
about 1/10th the shortest skin depth. The layers then increase
on a log scale to z_target_depth. Then the model is
padded with pad_z number of cells to extend the depth of the model.
The east-west padding, for example, is added as
add_size = np.round(cell_size_east*pad_stretch_h*ii, -2) for ii = 1..pad_east.
..note:: If the survey steps across multiple UTM zones, then a
distance will be added to the stations to place them in
the correct location. This distance is
_utm_grid_size_north and _utm_grid_size_east. You should change
these parameters to place the locations in the proper spot
as grid distances and overlaps change over the globe.
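A minimal usage sketch is given below; the import path, edi file locations
and the keyword names passed to Model are illustrative assumptions.
:Example: ::
>>> import mtpy.modeling.modem as modem
>>> edi_list = [r"/home/MT/EDI/mt01.edi", r"/home/MT/EDI/mt02.edi"]
>>> model_obj = modem.Model(edi_list=edi_list, cell_size_east=500,
>>>                         cell_size_north=500, pad_east=7)
>>> model_obj.make_mesh()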
"""
self.get_station_locations()
#find the edges of the grid
west = self.station_locations['rel_east'].min()-(1.5*self.cell_size_east)
east = self.station_locations['rel_east'].max()+(1.5*self.cell_size_east)
south = self.station_locations['rel_north'].min()-(1.5*self.cell_size_north)
north = self.station_locations['rel_north'].max()+(1.5*self.cell_size_north)
west = np.round(west, -2)
east= np.round(east, -2)
south= np.round(south, -2)
north = np.round(north, -2)
#-------make a grid around the stations from the parameters above------
#--> make grid in east-west direction
#cells within station area
east_gridr = np.arange(start=west, stop=east+self.cell_size_east,
step=self.cell_size_east)
#padding cells in the east-west direction
for ii in range(1, self.pad_east+1):
east_0 = float(east_gridr[-1])
west_0 = float(east_gridr[0])
add_size = np.round(self.cell_size_east*self.pad_stretch_h*ii, -2)
pad_w = west_0-add_size
pad_e = east_0+add_size
east_gridr = np.insert(east_gridr, 0, pad_w)
east_gridr = np.append(east_gridr, pad_e)
#--> need to make sure none of the stations lie on the nodes
for s_east in sorted(self.station_locations['rel_east']):
try:
node_index = np.where(abs(s_east-east_gridr) <
.02*self.cell_size_east)[0][0]
if s_east-east_gridr[node_index] > 0:
east_gridr[node_index] -= .02*self.cell_size_east
elif s_east-east_gridr[node_index] < 0:
east_gridr[node_index] += .02*self.cell_size_east
except IndexError:
continue
#--> make grid in north-south direction
#N-S cells with in station area
north_gridr = np.arange(start=south, stop=north+self.cell_size_north,
step=self.cell_size_north)
#padding cells in the north-south direction
for ii in range(1, self.pad_north+1):
south_0 = float(north_gridr[0])
north_0 = float(north_gridr[-1])
add_size = np.round(self.cell_size_north*self.pad_stretch_h*ii, -2)
pad_s = south_0-add_size
pad_n = north_0+add_size
north_gridr = np.insert(north_gridr, 0, pad_s)
north_gridr = np.append(north_gridr, pad_n)
#--> need to make sure none of the stations lie on the nodes
for s_north in sorted(self.station_locations['rel_north']):
try:
node_index = np.where(abs(s_north-north_gridr) <
.02*self.cell_size_north)[0][0]
if s_north-north_gridr[node_index] > 0:
north_gridr[node_index] -= .02*self.cell_size_north
elif s_north-north_gridr[node_index] < 0:
north_gridr[node_index] += .02*self.cell_size_north
except IndexError:
continue
#--> make depth grid
log_z = np.logspace(np.log10(self.z1_layer),
np.log10(self.z_target_depth-np.logspace(np.log10(self.z1_layer),
np.log10(self.z_target_depth),
num=self.n_layers)[-2]),
num=self.n_layers-self.pad_z)
z_nodes = np.array([zz-zz%10**np.floor(np.log10(zz)) for zz in
log_z])
#padding cells in the vertical direction
for ii in range(1, self.pad_z+1):
z_0 = np.float(z_nodes[-2])
pad_d = np.round(z_0*self.pad_stretch_v*ii, -2)
z_nodes = np.append(z_nodes, pad_d)
#make an array of absolute values
z_grid = np.array([z_nodes[:ii+1].sum() for ii in range(z_nodes.shape[0])])
#---Need to make an array of the individual cell dimensions for
# modem
east_nodes = east_gridr.copy()
nx = east_gridr.shape[0]
east_nodes[:nx/2] = np.array([abs(east_gridr[ii]-east_gridr[ii+1])
for ii in range(int(nx/2))])
east_nodes[nx/2:] = np.array([abs(east_gridr[ii]-east_gridr[ii+1])
for ii in range(int(nx/2)-1, nx-1)])
north_nodes = north_gridr.copy()
ny = north_gridr.shape[0]
north_nodes[:ny/2] = np.array([abs(north_gridr[ii]-north_gridr[ii+1])
for ii in range(int(ny/2))])
north_nodes[ny/2:] = np.array([abs(north_gridr[ii]-north_gridr[ii+1])
for ii in range(int(ny/2)-1, ny-1)])
#--put the grids into coordinates relative to the center of the grid
east_grid = east_nodes.copy()
east_grid[:int(nx/2)] = -np.array([east_nodes[ii:int(nx/2)].sum()
for ii in range(int(nx/2))])
east_grid[int(nx/2):] = np.array([east_nodes[int(nx/2):ii+1].sum()
for ii in range(int(nx/2), nx)])-\
east_nodes[int(nx/2)]
north_grid = north_nodes.copy()
north_grid[:int(ny/2)] = -np.array([north_nodes[ii:int(ny/2)].sum()
for ii in range(int(ny/2))])
north_grid[int(ny/2):] = np.array([north_nodes[int(ny/2):ii+1].sum()
for ii in range(int(ny/2),ny)])-\
north_nodes[int(ny/2)]
#compute grid center
center_east = -east_nodes.__abs__().sum()/2
center_north = -north_nodes.__abs__().sum()/2
center_z = 0
self.grid_center = np.array([center_north, center_east, center_z])
#make nodes attributes
self.nodes_east = east_nodes
self.nodes_north = north_nodes
self.nodes_z = z_nodes
self.grid_east = east_grid
self.grid_north = north_grid
self.grid_z = z_grid
#--> print out useful information
print '-'*15
print ' Number of stations = {0}'.format(len(self.station_locations))
print ' Dimensions: '
print ' e-w = {0}'.format(east_grid.shape[0])
print ' n-s = {0}'.format(north_grid.shape[0])
print ' z = {0} (without 7 air layers)'.format(z_grid.shape[0])
print ' Extensions: '
print ' e-w = {0:.1f} (m)'.format(east_nodes.__abs__().sum())
print ' n-s = {0:.1f} (m)'.format(north_nodes.__abs__().sum())
print ' 0-z = {0:.1f} (m)'.format(self.nodes_z.__abs__().sum())
print ' Stations rotated by: {0:.1f} deg clockwise positive from N'.format(self.mesh_rotation_angle)
print ''
print ' ** Note ModEM does not accommodate mesh rotations, it assumes'
print ' all coordinates are aligned to geographic N, E'
print ' therefore rotating the stations will have a similar effect'
print ' as rotating the mesh.'
print '-'*15
if self._utm_cross is True:
print '{0} {1} {2}'.format('-'*25, 'NOTE', '-'*25)
print ' Survey crosses UTM zones, be sure that stations'
print ' are properly located, if they are not, adjust parameters'
print ' _utm_grid_size_east and _utm_grid_size_north.'
print ' these are in meters and represent the utm grid size'
print ' Example: '
print ' >>> modem_model._utm_grid_size_east = 644000'
print ' >>> modem_model.make_mesh()'
print ''
print '-'*56
def plot_mesh(self, east_limits=None, north_limits=None, z_limits=None,
**kwargs):
"""
Arguments:
----------
**east_limits** : tuple (xmin,xmax)
plot min and max distances in meters for the
E-W direction. If None, the east_limits
will be set to furthest stations east and west.
*default* is None
**north_limits** : tuple (ymin,ymax)
plot min and max distances in meters for the
N-S direction. If None, the north_limits
will be set to furthest stations north and south.
*default* is None
**z_limits** : tuple (zmin,zmax)
plot min and max distances in meters for the
vertical direction. If None, the z_limits is
set to (z_target_depth, -200). Z is positive down
*default* is None
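A minimal usage sketch (assumes make_mesh or read_model_file has already
been called on the Model object; the limits are illustrative):
:Example: ::
>>> model_obj.make_mesh()
>>> model_obj.plot_mesh(east_limits=(-20000, 20000),
>>>                     north_limits=(-20000, 20000))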
"""
fig_size = kwargs.pop('fig_size', [6, 6])
fig_dpi = kwargs.pop('fig_dpi', 300)
fig_num = kwargs.pop('fig_num', 1)
station_marker = kwargs.pop('station_marker', 'v')
marker_color = kwargs.pop('station_color', 'b')
marker_size = kwargs.pop('marker_size', 2)
line_color = kwargs.pop('line_color', 'k')
line_width = kwargs.pop('line_width', .5)
plt.rcParams['figure.subplot.hspace'] = .3
plt.rcParams['figure.subplot.wspace'] = .3
plt.rcParams['figure.subplot.left'] = .12
plt.rcParams['font.size'] = 7
fig = plt.figure(fig_num, figsize=fig_size, dpi=fig_dpi)
plt.clf()
#make a rotation matrix to rotate data
#cos_ang = np.cos(np.deg2rad(self.mesh_rotation_angle))
#sin_ang = np.sin(np.deg2rad(self.mesh_rotation_angle))
#turns out ModEM has not accomodated rotation of the grid, so for
#now we will not rotate anything.
cos_ang = 1
sin_ang = 0
#--->plot map view
ax1 = fig.add_subplot(1, 2, 1, aspect='equal')
#plot station locations
plot_east = self.station_locations['rel_east']
plot_north = self.station_locations['rel_north']
ax1.scatter(plot_east,
plot_north,
marker=station_marker,
c=marker_color,
s=marker_size)
east_line_xlist = []
east_line_ylist = []
north_min = self.grid_north.min()
north_max = self.grid_north.max()
for xx in self.grid_east:
east_line_xlist.extend([xx*cos_ang+north_min*sin_ang,
xx*cos_ang+north_max*sin_ang])
east_line_xlist.append(None)
east_line_ylist.extend([-xx*sin_ang+north_min*cos_ang,
-xx*sin_ang+north_max*cos_ang])
east_line_ylist.append(None)
ax1.plot(east_line_xlist,
east_line_ylist,
lw=line_width,
color=line_color)
north_line_xlist = []
north_line_ylist = []
east_max = self.grid_east.max()
east_min = self.grid_east.min()
for yy in self.grid_north:
north_line_xlist.extend([east_min*cos_ang+yy*sin_ang,
east_max*cos_ang+yy*sin_ang])
north_line_xlist.append(None)
north_line_ylist.extend([-east_min*sin_ang+yy*cos_ang,
-east_max*sin_ang+yy*cos_ang])
north_line_ylist.append(None)
ax1.plot(north_line_xlist,
north_line_ylist,
lw=line_width,
color=line_color)
if east_limits == None:
ax1.set_xlim(plot_east.min()-10*self.cell_size_east,
plot_east.max()+10*self.cell_size_east)
else:
ax1.set_xlim(east_limits)
if north_limits == None:
ax1.set_ylim(plot_north.min()-10*self.cell_size_north,
plot_north.max()+10*self.cell_size_north)
else:
ax1.set_ylim(north_limits)
ax1.set_ylabel('Northing (m)', fontdict={'size':9,'weight':'bold'})
ax1.set_xlabel('Easting (m)', fontdict={'size':9,'weight':'bold'})
##----plot depth view
ax2 = fig.add_subplot(1, 2, 2, aspect='auto', sharex=ax1)
#plot the grid
east_line_xlist = []
east_line_ylist = []
for xx in self.grid_east:
east_line_xlist.extend([xx, xx])
east_line_xlist.append(None)
east_line_ylist.extend([0,
self.grid_z.max()])
east_line_ylist.append(None)
ax2.plot(east_line_xlist,
east_line_ylist,
lw=line_width,
color=line_color)
z_line_xlist = []
z_line_ylist = []
for zz in self.grid_z:
z_line_xlist.extend([self.grid_east.min(),
self.grid_east.max()])
z_line_xlist.append(None)
z_line_ylist.extend([zz, zz])
z_line_ylist.append(None)
ax2.plot(z_line_xlist,
z_line_ylist,
lw=line_width,
color=line_color)
#--> plot stations
ax2.scatter(plot_east,
[0]*self.station_locations.shape[0],
marker=station_marker,
c=marker_color,
s=marker_size)
if z_limits == None:
ax2.set_ylim(self.z_target_depth, -200)
else:
ax2.set_ylim(z_limits)
if east_limits == None:
ax1.set_xlim(plot_east.min()-10*self.cell_size_east,
plot_east.max()+10*self.cell_size_east)
else:
ax1.set_xlim(east_limits)
ax2.set_ylabel('Depth (m)', fontdict={'size':9, 'weight':'bold'})
ax2.set_xlabel('Easting (m)', fontdict={'size':9, 'weight':'bold'})
plt.show()
def write_model_file(self, **kwargs):
"""
will write an initial file for ModEM.
Note that x is assumed to be S --> N, y is assumed to be W --> E and
z is positive downwards. This means that index [0, 0, 0] is the
southwest corner of the first layer. Therefore if you build a model
by hand the layer block will look as it should in map view.
Also, the xgrid, ygrid and zgrid are assumed to be the relative
distance between neighboring nodes. This is needed because wsinv3d
builds the model from the bottom SW corner assuming the cell width
from the init file.
Key Word Arguments:
----------------------
**nodes_north** : np.array(nx)
block dimensions (m) in the N-S direction.
**Note** that the code reads the grid assuming that
index=0 is the southern most point.
**nodes_east** : np.array(ny)
block dimensions (m) in the E-W direction.
**Note** that the code reads in the grid assuming that
index=0 is the western most point.
**nodes_z** : np.array(nz)
block dimensions (m) in the vertical direction.
This is positive downwards.
**save_path** : string
Path to where the initial file will be saved
to savepath/model_fn_basename
**model_fn_basename** : string
basename to save file to
*default* is ModEM_Model.ws
file is saved at savepath/model_fn_basename
**title** : string
Title that goes into the first line
*default* is Model File written by MTpy.modeling.modem
**res_model** : np.array((nx,ny,nz))
Prior resistivity model.
.. note:: again that the modeling code
assumes that the first row it reads in is the southern
most row and the first column it reads in is the
western most column. Similarly, the first plane it
reads in is the Earth's surface.
**res_scale** : [ 'loge' | 'log' | 'log10' | 'linear' ]
scale of resistivity. In the ModEM code it
converts everything to Loge,
*default* is 'loge'
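A minimal usage sketch (the save path is illustrative; make_mesh is assumed
to have been run so the node arrays exist):
:Example: ::
>>> model_obj.make_mesh()
>>> model_obj.write_model_file(save_path=r"/home/MT/ModEM/Inv1",
>>>                            model_fn_basename='ModEM_Model.ws',
>>>                            res_model=100.)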
"""
keys = ['nodes_east', 'nodes_north', 'nodes_z', 'title',
'res_model', 'save_path', 'model_fn', 'model_fn_basename']
for key in keys:
try:
setattr(self, key, kwargs[key])
except KeyError:
if self.__dict__[key] is None:
pass
if self.save_path is not None:
self.model_fn = os.path.join(self.save_path,
self.model_fn_basename)
if self.model_fn is None:
if self.save_path is None:
self.save_path = os.getcwd()
self.model_fn = os.path.join(self.save_path,
self.model_fn_basename)
elif os.path.isdir(self.save_path) == True:
self.model_fn = os.path.join(self.save_path,
self.model_fn_basename)
else:
self.model_fn = self.save_path
self.save_path = os.path.dirname(self.save_path)
if self.res_model is None or type(self.res_model) is float or\
type(self.res_model) is int:
res_model = np.zeros((self.nodes_north.shape[0],
self.nodes_east.shape[0],
self.nodes_z.shape[0]))
if self.res_model is None:
res_model[:, :, :] = 100.0
self.res_model = res_model
else:
res_model[:, :, :] = self.res_model
self.res_model = res_model
#--> write file
ifid = file(self.model_fn, 'w')
ifid.write('# {0}\n'.format(self.title.upper()))
ifid.write('{0:>5}{1:>5}{2:>5}{3:>5} {4}\n'.format(self.nodes_north.shape[0],
self.nodes_east.shape[0],
self.nodes_z.shape[0],
0,
self.res_scale.upper()))
#write S --> N node block
for ii, nnode in enumerate(self.nodes_north):
ifid.write('{0:>12.3f}'.format(abs(nnode)))
ifid.write('\n')
#write W --> E node block
for jj, enode in enumerate(self.nodes_east):
ifid.write('{0:>12.3f}'.format(abs(enode)))
ifid.write('\n')
#write top --> bottom node block
for kk, zz in enumerate(self.nodes_z):
ifid.write('{0:>12.3f}'.format(abs(zz)))
ifid.write('\n')
#write the resistivity in log e format
if self.res_scale.lower() == 'loge':
write_res_model = np.log(self.res_model[::-1, :, :])
elif self.res_scale.lower() == 'log' or \
self.res_scale.lower() == 'log10':
write_res_model = np.log10(self.res_model[::-1, :, :])
elif self.res_scale.lower() == 'linear':
write_res_model = self.res_model[::-1, :, :]
#write out the layers from resmodel
for zz in range(self.nodes_z.shape[0]):
ifid.write('\n')
for ee in range(self.nodes_east.shape[0]):
for nn in range(self.nodes_north.shape[0]):
ifid.write('{0:>13.5E}'.format(write_res_model[nn, ee, zz]))
ifid.write('\n')
if self.grid_center is None:
#compute grid center
center_east = -self.nodes_east.__abs__().sum()/2
center_north = -self.nodes_north.__abs__().sum()/2
center_z = 0
self.grid_center = np.array([center_north, center_east, center_z])
ifid.write('\n{0:>16.3f}{1:>16.3f}{2:>16.3f}\n'.format(self.grid_center[0],
self.grid_center[1], self.grid_center[2]))
if self.mesh_rotation_angle is None:
ifid.write('{0:>9.3f}\n'.format(0))
else:
ifid.write('{0:>9.3f}\n'.format(self.mesh_rotation_angle))
ifid.close()
print 'Wrote file to: {0}'.format(self.model_fn)
def read_model_file(self, model_fn=None):
"""
read an initial file and return the pertinent information including
grid positions in coordinates relative to the center point (0,0) and
starting model.
Note that the way the model file is output, it seems that the
blocks are set up as
ModEM: WS:
---------- -----
0-----> N_north 0-------->N_east
| |
| |
V V
N_east N_north
Arguments:
----------
**model_fn** : full path to initializing file.
Outputs:
--------
**nodes_north** : np.array(nx)
array of nodes in S --> N direction
**nodes_east** : np.array(ny)
array of nodes in the W --> E direction
**nodes_z** : np.array(nz)
array of nodes in vertical direction positive downwards
**res_model** : np.ndarray((nx, ny, nz))
array of the starting model resistivity values in Ohm-m
**res_list** : list
list of resistivity values in the model
**title** : string
title string
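A minimal usage sketch (the model file path is illustrative):
:Example: ::
>>> import mtpy.modeling.modem as modem
>>> model_obj = modem.Model()
>>> model_obj.read_model_file(r"/home/MT/ModEM/Inv1/ModEM_Model.ws")
>>> model_obj.res_model.shape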
"""
if model_fn is not None:
self.model_fn = model_fn
if self.model_fn is None:
raise ModEMError('model_fn is None, input a model file name')
if os.path.isfile(self.model_fn) is False:
raise ModEMError('Cannot find {0}, check path'.format(self.model_fn))
self.save_path = os.path.dirname(self.model_fn)
ifid = file(self.model_fn, 'r')
ilines = ifid.readlines()
ifid.close()
self.title = ilines[0].strip()
#get size of dimensions, remembering that x is N-S, y is E-W, z is + down
nsize = ilines[1].strip().split()
n_north = int(nsize[0])
n_east = int(nsize[1])
n_z = int(nsize[2])
log_yn = nsize[4]
#get nodes
self.nodes_north = np.array([np.float(nn)
for nn in ilines[2].strip().split()])
self.nodes_east = np.array([np.float(nn)
for nn in ilines[3].strip().split()])
self.nodes_z = np.array([np.float(nn)
for nn in ilines[4].strip().split()])
self.res_model = np.zeros((n_north, n_east, n_z))
#get model
count_z = 0
line_index= 6
count_e = 0
while count_z < n_z:
iline = ilines[line_index].strip().split()
#blank lines split the depth blocks, use those as a marker to
#set the layer number and start a new block
if len(iline) == 0:
count_z += 1
count_e = 0
line_index += 1
#each line in the block is a line of N-->S values for an east value
else:
north_line = np.array([float(nres) for nres in
ilines[line_index].strip().split()])
# Need to be sure that the resistivity array matches
# with the grids, such that the first index is the
# furthest south
self.res_model[:, count_e, count_z] = north_line[::-1]
count_e += 1
line_index += 1
#--> get grid center and rotation angle
if len(ilines) > line_index:
for iline in ilines[line_index:]:
ilist = iline.strip().split()
#grid center
if len(ilist) == 3:
self.grid_center = np.array(ilist, dtype=np.float)
#rotation angle
elif len(ilist) == 1:
self.rotation_angle = np.float(ilist[0])
else:
pass
#--> make sure the resistivity units are in linear Ohm-m
if log_yn.lower() == 'loge':
self.res_model = np.e**self.res_model
elif log_yn.lower() == 'log' or log_yn.lower() == 'log10':
self.res_model = 10**self.res_model
#put the grids into coordinates relative to the center of the grid
self.grid_north = np.array([self.nodes_north[0:ii].sum()
for ii in range(n_north + 1)])
self.grid_east = np.array([self.nodes_east[0:ii].sum()
for ii in range(n_east + 1)])
self.grid_z = np.array([self.nodes_z[:ii+1].sum()
for ii in range(n_z + 1)])
# center the grids
if self.grid_center is not None:
self.grid_north += self.grid_center[0]
self.grid_east += self.grid_center[1]
self.grid_z += self.grid_center[2]
self.cell_size_east = stats.mode(self.nodes_east)[0][0]
self.cell_size_north = stats.mode(self.nodes_north)[0][0]
self.pad_east = np.where(self.nodes_east[0:int(self.nodes_east.size/2)]
!= self.cell_size_east)[0][-1]
self.pad_north = np.where(self.nodes_north[0:int(self.nodes_north.size/2)]
!= self.cell_size_north)[0][-1]
def read_ws_model_file(self, ws_model_fn):
"""
reads in a WS3INV3D model file
"""
ws_model_obj = ws.WSModel(ws_model_fn)
ws_model_obj.read_model_file()
#set similar attributes
for ws_key in ws_model_obj.__dict__.keys():
for md_key in self.__dict__.keys():
if ws_key == md_key:
setattr(self, ws_key, ws_model_obj.__dict__[ws_key])
#compute grid center
center_east = -self.nodes_east.__abs__().sum()/2
center_north = -self.nodes_north.__abs__().sum()/2
center_z = 0
self.grid_center = np.array([center_north, center_east, center_z])
def write_vtk_file(self, vtk_save_path=None,
vtk_fn_basename='ModEM_model_res'):
"""
write a vtk file to view in Paraview or other
Arguments:
-------------
**vtk_save_path** : string
directory to save vtk file to.
*default* is Model.save_path
**vtk_fn_basename** : string
filename basename of vtk file
*default* is ModEM_model_res, evtk will add
on the extension .vtr
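A minimal usage sketch (assumes a model has been read in and that the
evtk package providing gridToVTK is installed; paths are illustrative):
:Example: ::
>>> model_obj.read_model_file(r"/home/MT/ModEM/Inv1/ModEM_Model.ws")
>>> model_obj.write_vtk_file(vtk_save_path=r"/home/MT/ModEM/Inv1",
>>>                          vtk_fn_basename='ModEM_model_res')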
"""
if vtk_save_path is None:
vtk_fn = os.path.join(self.save_path, vtk_fn_basename)
else:
vtk_fn = os.path.join(vtk_save_path, vtk_fn_basename)
gridToVTK(vtk_fn,
self.grid_north/1000.,
self.grid_east/1000.,
self.grid_z/1000.,
pointData={'resistivity':self.res_model})
print '-'*50
print '--> Wrote model file to {0}\n'.format(vtk_fn)
print '='*26
print ' model dimensions = {0}'.format(self.res_model.shape)
print ' * north {0}'.format(self.grid_north.shape[0])
print ' * east {0}'.format(self.grid_east.shape[0])
print ' * depth {0}'.format(self.grid_z.shape[0])
print '='*26
#==============================================================================
# Control File for inversion
#==============================================================================
class Control_Inv(object):
"""
read and write control file for how the inversion starts and how it is run
"""
def __init__(self, **kwargs):
self.output_fn = kwargs.pop('output_fn', 'MODULAR_NLCG')
self.lambda_initial = kwargs.pop('lambda_initial', 10)
self.lambda_step = kwargs.pop('lambda_step', 10)
self.model_search_step = kwargs.pop('model_search_step', 1)
self.rms_reset_search = kwargs.pop('rms_reset_search', 2.0e-3)
self.rms_target = kwargs.pop('rms_target', 1.05)
self.lambda_exit = kwargs.pop('lambda_exit', 1.0e-4)
self.max_iterations = kwargs.pop('max_iterations', 100)
self.save_path = kwargs.pop('save_path', os.getcwd())
self.fn_basename = kwargs.pop('fn_basename', 'control.inv')
self.control_fn = kwargs.pop('control_fn', os.path.join(self.save_path,
self.fn_basename))
self._control_keys = ['Model and data output file name',
'Initial damping factor lambda',
'To update lambda divide by',
'Initial search step in model units',
'Restart when rms diff is less than',
'Exit search when rms is less than',
'Exit when lambda is less than',
'Maximum number of iterations']
self._control_dict = dict([(key, value)
for key, value in zip(self._control_keys,
[self.output_fn, self.lambda_initial,
self.lambda_step, self.model_search_step,
self.rms_reset_search, self.rms_target,
self.lambda_exit, self.max_iterations])])
self._string_fmt_dict = dict([(key, value)
for key, value in zip(self._control_keys,
['<', '<.1f', '<.1f', '<.1f', '<.1e',
'<.2f', '<.1e', '<.0f'])])
def write_control_file(self, control_fn=None, save_path=None,
fn_basename=None):
"""
write control file
Arguments:
------------
**control_fn** : string
full path to save control file to
*default* is save_path/fn_basename
**save_path** : string
directory path to save control file to
*default* is cwd
**fn_basename** : string
basename of control file
*default* is control.inv
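A minimal usage sketch (the save path is illustrative):
:Example: ::
>>> import mtpy.modeling.modem as modem
>>> ctrl_obj = modem.Control_Inv(rms_target=1.05, max_iterations=200)
>>> ctrl_obj.write_control_file(save_path=r"/home/MT/ModEM/Inv1")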
"""
if control_fn is not None:
self.save_path = os.path.dirname(control_fn)
self.fn_basename = os.path.basename(control_fn)
if save_path is not None:
self.save_path = save_path
if fn_basename is not None:
self.fn_basename = fn_basename
self.control_fn = os.path.join(self.save_path, self.fn_basename)
self._control_dict = dict([(key, value)
for key, value in zip(self._control_keys,
[self.output_fn, self.lambda_initial,
self.lambda_step, self.model_search_step,
self.rms_reset_search, self.rms_target,
self.lambda_exit, self.max_iterations])])
clines = []
for key in self._control_keys:
value = self._control_dict[key]
str_fmt = self._string_fmt_dict[key]
clines.append('{0:<35}: {1:{2}}\n'.format(key, value, str_fmt))
cfid = file(self.control_fn, 'w')
cfid.writelines(clines)
cfid.close()
print 'Wrote ModEM control file to {0}'.format(self.control_fn)
def read_control_file(self, control_fn=None):
"""
read in a control file
"""
if control_fn is not None:
self.control_fn = control_fn
if self.control_fn is None:
raise mtex.MTpyError_file_handling('control_fn is None, input '
'control file')
if os.path.isfile(self.control_fn) is False:
raise mtex.MTpyError_file_handling('Could not find {0}'.format(
self.control_fn))
self.save_path = os.path.dirname(self.control_fn)
self.fn_basename = os.path.basename(self.control_fn)
cfid = file(self.control_fn, 'r')
clines = cfid.readlines()
cfid.close()
for cline in clines:
clist = cline.strip().split(':')
if len(clist) == 2:
try:
self._control_dict[clist[0].strip()] = float(clist[1])
except ValueError:
self._control_dict[clist[0].strip()] = clist[1]
#set attributes
attr_list = ['output_fn', 'lambda_initial','lambda_step',
'model_search_step','rms_reset_search','rms_target',
'lambda_exit','max_iterations']
for key, kattr in zip(self._control_keys, attr_list):
setattr(self, kattr, self._control_dict[key])
#==============================================================================
# Control File for the forward solver
#==============================================================================
class Control_Fwd(object):
"""
read and write the control file for the forward solver.
This file controls how the forward and adjoint solutions are computed.
"""
def __init__(self, **kwargs):
self.num_qmr_iter = kwargs.pop('num_qmr_iter', 40)
self.max_num_div_calls = kwargs.pop('max_num_div_calls', 20)
self.max_num_div_iters = kwargs.pop('max_num_div_iters', 100)
self.misfit_tol_fwd = kwargs.pop('misfit_tol_fwd', 1.0e-7)
self.misfit_tol_adj = kwargs.pop('misfit_tol_adj', 1.0e-7)
self.misfit_tol_div = kwargs.pop('misfit_tol_div', 1.0e-5)
self.save_path = kwargs.pop('save_path', os.getcwd())
self.fn_basename = kwargs.pop('fn_basename', 'control.fwd')
self.control_fn = kwargs.pop('control_fn', os.path.join(self.save_path,
self.fn_basename))
self._control_keys = ['Number of QMR iters per divergence correction',
'Maximum number of divergence correction calls',
'Maximum number of divergence correction iters',
'Misfit tolerance for EM forward solver',
'Misfit tolerance for EM adjoint solver',
'Misfit tolerance for divergence correction']
self._control_dict = dict([(key, value)
for key, value in zip(self._control_keys,
[self.num_qmr_iter,
self.max_num_div_calls,
self.max_num_div_iters,
self.misfit_tol_fwd,
self.misfit_tol_adj,
self.misfit_tol_div])])
self._string_fmt_dict = dict([(key, value)
for key, value in zip(self._control_keys,
['<.0f', '<.0f', '<.0f', '<.1e', '<.1e',
'<.1e'])])
def write_control_file(self, control_fn=None, save_path=None,
fn_basename=None):
"""
write control file
Arguments:
------------
**control_fn** : string
full path to save control file to
*default* is save_path/fn_basename
**save_path** : string
directory path to save control file to
*default* is cwd
**fn_basename** : string
basename of control file
*default* is control.fwd
"""
if control_fn is not None:
self.save_path = os.path.dirname(control_fn)
self.fn_basename = os.path.basename(control_fn)
if save_path is not None:
self.save_path = save_path
if fn_basename is not None:
self.fn_basename = fn_basename
self.control_fn = os.path.join(self.save_path, self.fn_basename)
self._control_dict = dict([(key, value)
for key, value in zip(self._control_keys,
[self.num_qmr_iter,
self.max_num_div_calls,
self.max_num_div_iters,
self.misfit_tol_fwd,
self.misfit_tol_adj,
self.misfit_tol_div])])
clines = []
for key in self._control_keys:
value = self._control_dict[key]
str_fmt = self._string_fmt_dict[key]
clines.append('{0:<47}: {1:{2}}\n'.format(key, value, str_fmt))
cfid = file(self.control_fn, 'w')
cfid.writelines(clines)
cfid.close()
print 'Wrote ModEM control file to {0}'.format(self.control_fn)
def read_control_file(self, control_fn=None):
"""
read in a control file
"""
if control_fn is not None:
self.control_fn = control_fn
if self.control_fn is None:
raise mtex.MTpyError_file_handling('control_fn is None, input '
'control file')
if os.path.isfile(self.control_fn) is False:
raise mtex.MTpyError_file_handling('Could not find {0}'.format(
self.control_fn))
self.save_path = os.path.dirname(self.control_fn)
self.fn_basename = os.path.basename(self.control_fn)
cfid = file(self.control_fn, 'r')
clines = cfid.readlines()
cfid.close()
for cline in clines:
clist = cline.strip().split(':')
if len(clist) == 2:
try:
self._control_dict[clist[0].strip()] = float(clist[1])
except ValueError:
self._control_dict[clist[0].strip()] = clist[1]
#set attributes
attr_list = ['num_qmr_iter','max_num_div_calls', 'max_num_div_iters',
'misfit_tol_fwd', 'misfit_tol_adj', 'misfit_tol_div']
for key, kattr in zip(self._control_keys, attr_list):
setattr(self, kattr, self._control_dict[key])
#==============================================================================
# covariance
#==============================================================================
class Covariance(object):
"""
read and write covariance files
"""
def __init__(self, grid_dimensions=None, **kwargs):
self.grid_dimensions = grid_dimensions
self.smoothing_east = kwargs.pop('smoothing_east', 0.3)
self.smoothing_north = kwargs.pop('smoothing_north', 0.3)
self.smoothing_z = kwargs.pop('smoothing_z', 0.3)
self.smoothing_num = kwargs.pop('smoothing_num', 1)
self.exception_list = kwargs.pop('exception_list', [])
self.mask_arr = kwargs.pop('mask_arr', None)
self.save_path = kwargs.pop('save_path', os.getcwd())
self.cov_fn_basename = kwargs.pop('cov_fn_basename', 'covariance.cov')
self.cov_fn = kwargs.pop('cov_fn', None)
self._header_str = '\n'.join(['+{0}+'.format('-'*77),
'| This file defines model covariance for a recursive autoregression scheme. |',
'| The model space may be divided into distinct areas using integer masks. |',
'| Mask 0 is reserved for air; mask 9 is reserved for ocean. Smoothing between |',
'| air, ocean and the rest of the model is turned off automatically. You can |',
'| also define exceptions to override smoothing between any two model areas. |',
'| To turn off smoothing set it to zero. This header is 16 lines long. |',
'| 1. Grid dimensions excluding air layers (Nx, Ny, NzEarth) |',
'| 2. Smoothing in the X direction (NzEarth real values) |',
'| 3. Smoothing in the Y direction (NzEarth real values) |',
'| 4. Vertical smoothing (1 real value) |',
'| 5. Number of times the smoothing should be applied (1 integer >= 0) |',
'| 6. Number of exceptions (1 integer >= 0) |',
'| 7. Exceptions in the form e.g. 2 3 0 (to turn off smoothing between 3 & 4) |',
'| 8. Two integer layer indices and Nx x Ny block of masks, repeated as needed.|',
'+{0}+'.format('-'*77)])
def write_covariance_file(self, cov_fn=None, save_path=None,
cov_fn_basename=None, model_fn=None,
sea_water=0.3, air=1e12):
"""
write a covariance file
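A minimal usage sketch (file paths are illustrative; passing model_fn lets
the grid dimensions and mask be read from an existing model file):
:Example: ::
>>> import mtpy.modeling.modem as modem
>>> cov_obj = modem.Covariance(smoothing_east=0.4, smoothing_north=0.4)
>>> cov_obj.write_covariance_file(cov_fn=r"/home/MT/ModEM/Inv1/covariance.cov",
>>>                               model_fn=r"/home/MT/ModEM/Inv1/ModEM_Model.ws")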
"""
if model_fn is not None:
mod_obj = Model()
mod_obj.read_model_file(model_fn)
print 'Reading {0}'.format(model_fn)
self.grid_dimensions = mod_obj.res_model.shape
self.mask_arr = np.ones_like(mod_obj.res_model)
self.mask_arr[np.where(mod_obj.res_model > air*.9)] = 0
self.mask_arr[np.where((mod_obj.res_model < sea_water*1.1) &
(mod_obj.res_model > sea_water*.9))] = 9
if self.grid_dimensions is None:
raise ModEMError('Grid dimensions are None, input as (Nx, Ny, Nz)')
if cov_fn is not None:
self.cov_fn = cov_fn
else:
if save_path is not None:
self.save_path = save_path
if cov_fn_basename is not None:
self.cov_fn_basename = cov_fn_basename
self.cov_fn = os.path.join(self.save_path, self.cov_fn_basename)
clines = [self._header_str]
clines.append('\n\n')
#--> grid dimensions
clines.append(' {0:<10}{1:<10}{2:<10}\n'.format(self.grid_dimensions[0],
self.grid_dimensions[1],
self.grid_dimensions[2]))
clines.append('\n')
#--> smoothing in north direction
n_smooth_line = ''
for zz in range(self.grid_dimensions[0]):
n_smooth_line += ' {0:<5.1f}'.format(self.smoothing_north)
clines.append(n_smooth_line+'\n')
#--> smoothing in east direction
e_smooth_line = ''
for zz in range(self.grid_dimensions[1]):
e_smooth_line += ' {0:<5.1f}'.format(self.smoothing_east)
clines.append(e_smooth_line+'\n')
#--> smoothing in vertical direction
clines.append(' {0:<5.1f}\n'.format(self.smoothing_z))
clines.append('\n')
#--> number of times to apply smoothing
clines.append(' {0:<2.0f}\n'.format(self.smoothing_num))
clines.append('\n')
#--> exceptions
clines.append(' {0:<.0f}\n'.format(len(self.exception_list)))
for exc in self.exception_list:
clines.append('{0:<5.0f}{1:<5.0f}{2:<5.0f}\n'.format(exc[0],
exc[1],
exc[2]))
clines.append('\n')
clines.append('\n')
#--> mask array
if self.mask_arr is None:
self.mask_arr = np.ones((self.grid_dimensions[0],
self.grid_dimensions[1],
self.grid_dimensions[2]))
for zz in range(self.mask_arr.shape[2]):
clines.append(' {0:<8.0f}{0:<8.0f}\n'.format(zz+1))
for nn in range(self.mask_arr.shape[0]):
cline = ''
for ee in range(self.mask_arr.shape[1]):
cline += '{0:^3.0f}'.format(self.mask_arr[nn, ee, zz])
clines.append(cline+'\n')
cfid = file(self.cov_fn, 'w')
cfid.writelines(clines)
cfid.close()
print 'Wrote covariance file to {0}'.format(self.cov_fn)
#==============================================================================
# Add in elevation to the model
#==============================================================================
#--> read in ascii dem file
def read_dem_ascii(ascii_fn, cell_size=500, model_center=(0, 0), rot_90=0):
"""
read in dem which is ascii format
The ascii format is assumed to be:
ncols 3601
nrows 3601
xllcorner -119.00013888889
yllcorner 36.999861111111
cellsize 0.00027777777777778
NODATA_value -9999
elevation data W --> E
N
|
V
S
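A minimal usage sketch (the dem path is illustrative; with rot_90=0 the
return order is east, north, elevation):
:Example: ::
>>> from mtpy.modeling.modem import read_dem_ascii
>>> e_east, e_north, elev = read_dem_ascii(r"/home/MT/dem.asc",
>>>                                        cell_size=500,
>>>                                        model_center=(0, 0),
>>>                                        rot_90=0)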
"""
dfid = file(ascii_fn, 'r')
d_dict = {}
for ii in range(6):
dline = dfid.readline()
dline = dline.strip().split()
key = dline[0].strip().lower()
value = float(dline[1].strip())
d_dict[key] = value
x0 = d_dict['xllcorner']
y0 = d_dict['yllcorner']
nx = int(d_dict['ncols'])
ny = int(d_dict['nrows'])
cs = d_dict['cellsize']
# read in the elevation data
elevation = np.zeros((nx, ny))
for ii in range(1, int(ny)+2):
dline = dfid.readline()
if len(str(dline)) > 1:
#needs to be backwards because first line is the furthest north row.
elevation[:, -ii] = np.array(dline.strip().split(' '), dtype='float')
else:
break
dfid.close()
# create lat and lon arrays from the dem file
lon = np.arange(x0, x0+cs*(nx), cs)
lat = np.arange(y0, y0+cs*(ny), cs)
# calculate the lower left and upper right corners of the grid in meters
ll_en = mtpy.utils.gis_tools.ll_to_utm(23, lat[0], lon[0])
ur_en = mtpy.utils.gis_tools.ll_to_utm(23, lat[-1], lon[-1])
# estimate cell sizes for each dem measurement
d_east = abs(ll_en[1]-ur_en[1])/nx
d_north = abs(ll_en[2]-ur_en[2])/ny
# calculate the number of new cells according to the given cell size
# if the given cell size and cs are similar int could make the value 0,
# hence the need to make it one if it is 0.
num_cells = max([1, int(cell_size/np.mean([d_east, d_north]))])
# make easting and northing arrays in meters corresponding to lat and lon
east = np.arange(ll_en[1], ur_en[1], d_east)
north = np.arange(ll_en[2], ur_en[2], d_north)
#resample the data accordingly
new_east = east[np.arange(0, east.shape[0], num_cells)]
new_north = north[np.arange(0, north.shape[0], num_cells)]
new_x, new_y = np.meshgrid(np.arange(0, east.shape[0], num_cells),
np.arange(0, north.shape[0], num_cells),
indexing='ij')
elevation = elevation[new_x, new_y]
# estimate the shift of the DEM to relative model coordinates
shift_east = new_east.mean()-model_center[0]
shift_north = new_north.mean()-model_center[1]
# shift the easting and northing arrays accordingly so the DEM and model
# are collocated.
new_east = (new_east-new_east.mean())+shift_east
new_north = (new_north-new_north.mean())+shift_north
# need to rotate because the dem may have been read in backwards
if rot_90 == 1 or rot_90 == 3:
elevation = np.rot90(elevation, rot_90)
return new_north, new_east, elevation
else:
elevation = np.rot90(elevation, rot_90)
return new_east, new_north, elevation
def interpolate_elevation(elev_east, elev_north, elevation, model_east,
model_north, pad=3):
"""
interpolate the elevation onto the model grid.
Arguments:
---------------
*elev_east* : np.ndarray(num_east_nodes)
easting grid for elevation model
*elev_north* : np.ndarray(num_north_nodes)
northing grid for elevation model
*elevation* : np.ndarray(num_east_nodes, num_north_nodes)
elevation model assumes x is east, y is north
Units are meters
*model_east* : np.ndarray(num_east_nodes_model)
relative easting grid of resistivity model
*model_north* : np.ndarray(num_north_nodes_model)
relative northing grid of resistivity model
*pad* : int
number of cells to repeat elevation model by. So for pad=3,
then the interpolated elevation model onto the resistivity
model grid will have the outer 3 cells will be repeats of
the adjacent cell. This is to extend the elevation model
to the resistivity model because most elevation models will
not cover the entire area.
Returns:
--------------
*interp_elev* : np.ndarray(num_north_nodes_model, num_east_nodes_model)
the elevation model interpolated onto the resistivity
model grid.
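A minimal usage sketch (continues from read_dem_ascii above; model_obj is
assumed to be a Model instance with grid_east and grid_north defined):
:Example: ::
>>> m_elev = interpolate_elevation(e_east, e_north, elev,
>>>                                model_obj.grid_east,
>>>                                model_obj.grid_north, pad=3)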
"""
# need to line up the elevation with the model
grid_east, grid_north = np.broadcast_arrays(elev_east[:, None],
elev_north[None, :])
# interpolate onto the model grid
interp_elev = spi.griddata((grid_east.ravel(), grid_north.ravel()),
elevation.ravel(),
(model_east[:, None],
model_north[None, :]),
method='linear',
fill_value=elevation.mean())
interp_elev[0:pad, pad:-pad] = interp_elev[pad, pad:-pad]
interp_elev[-pad:, pad:-pad] = interp_elev[-pad-1, pad:-pad]
interp_elev[:, 0:pad] = interp_elev[:, pad].repeat(pad).reshape(
interp_elev[:, 0:pad].shape)
interp_elev[:, -pad:] = interp_elev[:, -pad-1].repeat(pad).reshape(
interp_elev[:, -pad:].shape)
# transpose the modeled elevation to align with x=N, y=E
interp_elev = interp_elev.T
return interp_elev
def make_elevation_model(interp_elev, model_nodes_z, elevation_cell=30,
pad=3, res_air=1e12, fill_res=100, res_sea=0.3):
"""
Take the elevation data of the interpolated elevation model and map that
onto the resistivity model by adding elevation cells to the existing model.
..note:: If there are large elevation gains, the elevation cell size
might need to be increased.
Arguments:
-------------
*interp_elev* : np.ndarray(num_nodes_north, num_nodes_east)
elevation model that has been interpolated onto the
resistivity model grid. Units are in meters.
*model_nodes_z* : np.ndarray(num_z_nodes_of_model)
vertical nodes of the resistivity model without
topography. Note these are the nodes given in
relative thickness, not the grid, which is total
depth. Units are meters.
*elevation_cell* : float
height of elevation cells to be added on. These
are assumed to be the same at all elevations.
Units are in meters
*pad* : int
number of cells to look for maximum and minimum elevation.
So if you only want elevations within the survey area,
set pad equal to the number of padding cells of the
resistivity model grid.
*res_air* : float
resistivity of air. Default is 1E12 Ohm-m
*fill_res* : float
resistivity value of subsurface in Ohm-m.
Returns:
-------------
*elevation_model* : np.ndarray(num_north_nodes, num_east_nodes,
num_elev_nodes+num_z_nodes)
Model grid with elevation mapped onto it.
Where anything above the surface will be given the
value of res_air, everything else will be fill_res
*new_nodes_z* : np.ndarray(num_z_nodes+num_elev_nodes)
a new array of vertical nodes, where any nodes smaller
than elevation_cell will be set to elevation_cell.
This can be input into a modem.Model object to
rewrite the model file.
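A minimal usage sketch (continues from interpolate_elevation above; the
output file name is illustrative):
:Example: ::
>>> elev_model, new_nodes_z = make_elevation_model(m_elev,
>>>                                                model_obj.nodes_z,
>>>                                                elevation_cell=30)
>>> model_obj.nodes_z = new_nodes_z
>>> model_obj.res_model = elev_model
>>> model_obj.write_model_file(model_fn_basename='ModEM_Model_topo.rho')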
"""
# calculate the max elevation within survey area
elev_max = interp_elev[pad:-pad, pad:-pad].max()
# need to set sea level to 0 elevation
elev_min = max([0, interp_elev[pad:-pad, pad:-pad].min()])
# scale the interpolated elevations to fit within elev_max, elev_min
interp_elev[np.where(interp_elev > elev_max)] = elev_max
#interp_elev[np.where(interp_elev < elev_min)] = elev_min
# calculate the number of elevation cells needed
num_elev_cells = int((elev_max-elev_min)/elevation_cell)
print 'Number of elevation cells: {0}'.format(num_elev_cells)
# find sea level if it is there
if elev_min < 0:
sea_level_index = num_elev_cells-abs(int((elev_min)/elevation_cell))-1
else:
sea_level_index = num_elev_cells-1
print 'Sea level index is {0}'.format(sea_level_index)
# make an array of just the elevation for the model
# north is first index, east is second, vertical is third
elevation_model = np.ones((interp_elev.shape[0],
interp_elev.shape[1],
num_elev_cells+model_nodes_z.shape[0]))
elevation_model[:, :, :] = fill_res
# fill in elevation model with air values. Remember Z is positive down, so
# the top of the model is the highest point and index 0 is highest
# elevation
for nn in range(interp_elev.shape[0]):
for ee in range(interp_elev.shape[1]):
# need to test for ocean
if interp_elev[nn, ee] < 0:
# fill in from bottom to sea level, then rest with air
elevation_model[nn, ee, 0:sea_level_index] = res_air
dz = sea_level_index+abs(int((interp_elev[nn, ee])/elevation_cell))+1
elevation_model[nn, ee, sea_level_index:dz] = res_sea
else:
dz = int((elev_max-interp_elev[nn, ee])/elevation_cell)
elevation_model[nn, ee, 0:dz] = res_air
# make new z nodes array
new_nodes_z = np.append(np.repeat(elevation_cell, num_elev_cells),
model_nodes_z)
new_nodes_z[np.where(new_nodes_z < elevation_cell)] = elevation_cell
return elevation_model, new_nodes_z
def add_topography_to_model(dem_ascii_fn, model_fn, model_center=(0,0),
rot_90=0, cell_size=500, elev_cell=30):
"""
Add topography to an existing model from a dem in ascii format.
The ascii format is assumed to be:
ncols 3601
nrows 3601
xllcorner -119.00013888889
yllcorner 36.999861111111
cellsize 0.00027777777777778
NODATA_value -9999
elevation data W --> E
N
|
V
S
Arguments:
-------------
*dem_ascii_fn* : string
full path to ascii dem file
*model_fn* : string
full path to existing ModEM model file
*model_center* : (east, north) in meters
Sometimes the center of the DEM and the center of the
model don't line up. Use this parameter to line
everything up properly.
*rot_90* : [ 0 | 1 | 2 | 3 ]
rotate the elevation model by rot_90*90 degrees. Sometimes
the elevation model is flipped depending on your coordinate
system.
*cell_size* : float (meters)
horizontal cell size of grid to interpolate elevation
onto. This should be smaller or equal to the input
model cell size to be sure there is no spatial aliasing
*elev_cell* : float (meters)
vertical size of each elevation cell. This value should
be about 1/10th the smallest skin depth.
Returns:
---------------
*new_model_fn* : string
full path to model file that contains topography
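A minimal usage sketch (file paths are illustrative):
:Example: ::
>>> from mtpy.modeling.modem import add_topography_to_model
>>> add_topography_to_model(r"/home/MT/dem.asc",
>>>                         r"/home/MT/ModEM/Inv1/ModEM_Model.ws",
>>>                         model_center=(0, 0), rot_90=0,
>>>                         cell_size=500, elev_cell=30)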
"""
### 1.) read in the dem and center it onto the resistivity model
e_east, e_north, elevation = read_dem_ascii(dem_ascii_fn, cell_size=cell_size,
model_center=model_center,
rot_90=rot_90)
m_obj = Model()
m_obj.read_model_file(model_fn)
### 2.) interpolate the elevation model onto the model grid
m_elev = interpolate_elevation(e_east, e_north, elevation,
m_obj.grid_east, m_obj.grid_north, pad=3)
### 3.) make a resistivity model that incorporates topography
mod_elev, elev_nodes_z = make_elevation_model(m_elev, m_obj.nodes_z,
elevation_cell=elev_cell)
### 4.) write new model file
m_obj.nodes_z = elev_nodes_z
m_obj.res_model = mod_elev
m_obj.write_model_file(model_fn_basename='{0}_topo.rho'.format(
os.path.basename(m_obj.model_fn)[0:-4]))
def change_data_elevation(data_fn, model_fn, new_data_fn=None, res_air=1e12):
"""
At each station in the data file rewrite the elevation, so the station is
on the surface, not floating in air.
Arguments:
------------------
*data_fn* : string
full path to a ModEM data file
*model_fn* : string
full path to ModEM model file that has elevation
incorporated.
*new_data_fn* : string
full path to new data file name. If None, then
new file name will add _elev.dat to input filename
*res_air* : float
resistivity of air. Default is 1E12 Ohm-m
Returns:
-------------
*new_data_fn* : string
full path to new data file.
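A minimal usage sketch (file paths are illustrative; the model file is
assumed to already contain topography):
:Example: ::
>>> from mtpy.modeling.modem import change_data_elevation
>>> new_dfn = change_data_elevation(r"/home/MT/ModEM/Inv1/ModEM_data.dat",
>>>                                 r"/home/MT/ModEM/Inv1/ModEM_Model_topo.rho")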
"""
d_obj = Data()
d_obj.read_data_file(data_fn)
m_obj = Model()
m_obj.read_model_file(model_fn)
for key in d_obj.mt_dict.keys():
mt_obj = d_obj.mt_dict[key]
e_index = np.where(m_obj.grid_east > mt_obj.grid_east)[0][0]
n_index = np.where(m_obj.grid_north > mt_obj.grid_north)[0][0]
z_index = np.where(m_obj.res_model[n_index, e_index, :] < res_air*.9)[0][0]
s_index = np.where(d_obj.data_array['station']==key)[0][0]
d_obj.data_array[s_index]['elev'] = m_obj.grid_z[z_index]
mt_obj.grid_elev = m_obj.grid_z[z_index]
if new_data_fn is None:
new_dfn = '{0}{1}'.format(data_fn[:-4], '_elev.dat')
else:
new_dfn=new_data_fn
d_obj.write_data_file(save_path=os.path.dirname(new_dfn),
fn_basename=os.path.basename(new_dfn),
compute_error=False,
fill=False)
return new_dfn
#==============================================================================
# Manipulate the model to test structures or create a starting model
#==============================================================================
class ModelManipulator(Model):
"""
will plot a ModEM model file so the user can manipulate the
resistivity values relatively easily. At the moment only plotted
in map view.
:Example: ::
>>> import mtpy.modeling.modem as modem
>>> model_fn = r"/home/MT/ModEM/Inv1/ModEM_Model.ws"
>>> mm = modem.ModelManipulator(model_fn=model_fn)
=================== =======================================================
Buttons Description
=================== =======================================================
'=' increase depth to next vertical node (deeper)
'-' decrease depth to next vertical node (shallower)
'q' quit the plot, rewrites initial file when pressed
'a' copies the above horizontal layer to the present layer
'b' copies the below horizontal layer to present layer
'u' undo previous change
=================== =======================================================
=================== =======================================================
Attributes Description
=================== =======================================================
ax1 matplotlib.axes instance for mesh plot of the model
ax2 matplotlib.axes instance of colorbar
cb matplotlib.colorbar instance for colorbar
cid_depth matplotlib.canvas.connect for depth
cmap matplotlib.colormap instance
cmax maximum value of resistivity for colorbar. (linear)
cmin minimum value of resistivity for colorbar (linear)
data_fn full path to data file
depth_index integer value of depth slice for plotting
dpi resolution of figure in dots-per-inch
dscale depth scaling, computed internally
east_line_xlist list of east mesh lines for faster plotting
east_line_ylist list of east mesh lines for faster plotting
fdict dictionary of font properties
fig matplotlib.figure instance
fig_num number of figure instance
fig_size size of figure in inches
font_size size of font in points
grid_east location of east nodes in relative coordinates
grid_north location of north nodes in relative coordinates
grid_z location of vertical nodes in relative coordinates
initial_fn full path to initial file
m_height mean height of horizontal cells
m_width mean width of horizontal cells
map_scale [ 'm' | 'km' ] scale of map
mesh_east np.meshgrid of east, north
mesh_north np.meshgrid of east, north
mesh_plot matplotlib.axes.pcolormesh instance
model_fn full path to model file
new_initial_fn full path to new initial file
nodes_east spacing between east nodes
nodes_north spacing between north nodes
nodes_z spacing between vertical nodes
north_line_xlist list of coordinates of north nodes for faster plotting
north_line_ylist list of coordinates of north nodes for faster plotting
plot_yn [ 'y' | 'n' ] plot on instantiation
radio_res matplotlib.widget.radio instance for change resistivity
rect_selector matplotlib.widget.rect_selector
res np.ndarray(nx, ny, nz) for model in linear resistivity
res_copy copy of res for undo
res_dict dictionary of segmented resistivity values
res_list list of resistivity values for model linear scale
res_model np.ndarray(nx, ny, nz) of resistivity values from
res_list (linear scale)
res_model_int np.ndarray(nx, ny, nz) of integer values corresponding
to res_list for initial model
res_value current resistivty value of radio_res
save_path path to save initial file to
station_east station locations in east direction
station_north station locations in north direction
xlimits limits of plot in e-w direction
ylimits limits of plot in n-s direction
=================== =======================================================
"""
def __init__(self, model_fn=None, data_fn=None, **kwargs):
#be sure to initialize Model
Model.__init__(self, model_fn=model_fn, **kwargs)
self.data_fn = data_fn
self.model_fn_basename = kwargs.pop('model_fn_basename',
'ModEM_Model_rw.ws')
if self.model_fn is not None:
self.save_path = os.path.dirname(self.model_fn)
elif self.data_fn is not None:
self.save_path = os.path.dirname(self.data_fn)
else:
self.save_path = os.getcwd()
#station locations in relative coordinates read from data file
self.station_east = None
self.station_north = None
#--> set map scale
self.map_scale = kwargs.pop('map_scale', 'km')
self.m_width = 100
self.m_height = 100
#--> scale the map coordinates
if self.map_scale=='km':
self.dscale = 1000.
if self.map_scale=='m':
self.dscale = 1.
#figure attributes
self.fig = None
self.ax1 = None
self.ax2 = None
self.cb = None
self.east_line_xlist = None
self.east_line_ylist = None
self.north_line_xlist = None
self.north_line_ylist = None
#make a default resistivity list to change values
self._res_sea = 0.3
self._res_air = 1E12
self.res_dict = None
self.res_list = kwargs.pop('res_list', None)
if self.res_list is None:
self.set_res_list(np.array([self._res_sea, 1, 10, 50, 100, 500,
1000, 5000],
dtype=np.float))
#set initial resistivity value
self.res_value = self.res_list[0]
self.cov_arr = None
#--> set map limits
self.xlimits = kwargs.pop('xlimits', None)
self.ylimits = kwargs.pop('ylimits', None)
self.font_size = kwargs.pop('font_size', 7)
self.fig_dpi = kwargs.pop('fig_dpi', 300)
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.cmap = kwargs.pop('cmap', cm.jet_r)
self.depth_index = kwargs.pop('depth_index', 0)
self.fdict = {'size':self.font_size+2, 'weight':'bold'}
self.subplot_wspace = kwargs.pop('subplot_wspace', .3)
self.subplot_hspace = kwargs.pop('subplot_hspace', .0)
self.subplot_right = kwargs.pop('subplot_right', .8)
self.subplot_left = kwargs.pop('subplot_left', .01)
self.subplot_top = kwargs.pop('subplot_top', .93)
self.subplot_bottom = kwargs.pop('subplot_bottom', .1)
#plot on initialization
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn=='y':
self.get_model()
self.plot()
def set_res_list(self, res_list):
"""
on setting res_list also set the res_dict to correspond
"""
self.res_list = res_list
#make a dictionary of values to write to file.
self.res_dict = dict([(res, ii)
for ii, res in enumerate(self.res_list,1)])
if self.fig is not None:
plt.close()
self.plot()
#---read files-------------------------------------------------------------
def get_model(self):
"""
reads in the model file and sets the attributes:
-res_model
-grid_north
-grid_east
-grid_z
-res_list if initial file
"""
#--> read in model file
self.read_model_file()
self.cov_arr = np.ones_like(self.res_model)
#--> read in data file if given
if self.data_fn is not None:
md_data = Data()
md_data.read_data_file(self.data_fn)
#get station locations
self.station_east = md_data.station_locations['rel_east']
self.station_north = md_data.station_locations['rel_north']
#get cell block sizes
self.m_height = np.median(self.nodes_north[5:-5])/self.dscale
self.m_width = np.median(self.nodes_east[5:-5])/self.dscale
#make a copy of original in case there are unwanted changes
self.res_copy = self.res_model.copy()
#---plot model-------------------------------------------------------------
def plot(self):
"""
plots the model with:
-a radio dial for depth slice
-radio dial for resistivity value
"""
# set plot properties
plt.rcParams['font.size'] = self.font_size
plt.rcParams['figure.subplot.left'] = self.subplot_left
plt.rcParams['figure.subplot.right'] = self.subplot_right
plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
plt.rcParams['figure.subplot.top'] = self.subplot_top
font_dict = {'size':self.font_size+2, 'weight':'bold'}
#make sure there is a model to plot
if self.res_model is None:
self.get_model()
self.cmin = np.floor(np.log10(min(self.res_list)))
self.cmax = np.ceil(np.log10(max(self.res_list)))
#-->Plot properties
plt.rcParams['font.size'] = self.font_size
#need to add an extra row and column to east and north to make sure
#all is plotted see pcolor for details.
plot_east = np.append(self.grid_east, self.grid_east[-1]*1.25)/self.dscale
plot_north = np.append(self.grid_north, self.grid_north[-1]*1.25)/self.dscale
#make a mesh grid for plotting
#the 'ij' makes sure the resulting grid is in east, north
self.mesh_east, self.mesh_north = np.meshgrid(plot_east,
plot_north,
indexing='ij')
self.fig = plt.figure(self.fig_num, self.fig_size, dpi=self.fig_dpi)
plt.clf()
self.ax1 = self.fig.add_subplot(1, 1, 1, aspect='equal')
#transpose to make x--east and y--north
plot_res = np.log10(self.res_model[:,:,self.depth_index].T)
self.mesh_plot = self.ax1.pcolormesh(self.mesh_east,
self.mesh_north,
plot_res,
cmap=self.cmap,
vmin=self.cmin,
vmax=self.cmax)
#on plus or minus change depth slice
self.cid_depth = \
self.mesh_plot.figure.canvas.mpl_connect('key_press_event',
self._on_key_callback)
#plot the stations
if self.station_east is not None:
for ee, nn in zip(self.station_east, self.station_north):
self.ax1.text(ee/self.dscale, nn/self.dscale,
'*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':self.font_size-2,
'weight':'bold'})
#set axis properties
if self.xlimits is not None:
self.ax1.set_xlim(self.xlimits)
else:
self.ax1.set_xlim(xmin=self.grid_east.min()/self.dscale,
xmax=self.grid_east.max()/self.dscale)
if self.ylimits is not None:
self.ax1.set_ylim(self.ylimits)
else:
self.ax1.set_ylim(ymin=self.grid_north.min()/self.dscale,
ymax=self.grid_north.max()/self.dscale)
#self.ax1.xaxis.set_minor_locator(MultipleLocator(100*1./dscale))
#self.ax1.yaxis.set_minor_locator(MultipleLocator(100*1./dscale))
self.ax1.set_ylabel('Northing ('+self.map_scale+')',
fontdict=self.fdict)
self.ax1.set_xlabel('Easting ('+self.map_scale+')',
fontdict=self.fdict)
depth_title = self.grid_z[self.depth_index]/self.dscale
self.ax1.set_title('Depth = {:.3f} '.format(depth_title)+\
'('+self.map_scale+')',
fontdict=self.fdict)
#plot the grid if desired
self.east_line_xlist = []
self.east_line_ylist = []
for xx in self.grid_east:
self.east_line_xlist.extend([xx/self.dscale, xx/self.dscale])
self.east_line_xlist.append(None)
self.east_line_ylist.extend([self.grid_north.min()/self.dscale,
self.grid_north.max()/self.dscale])
self.east_line_ylist.append(None)
self.ax1.plot(self.east_line_xlist,
self.east_line_ylist,
lw=.25,
color='k')
self.north_line_xlist = []
self.north_line_ylist = []
for yy in self.grid_north:
self.north_line_xlist.extend([self.grid_east.min()/self.dscale,
self.grid_east.max()/self.dscale])
self.north_line_xlist.append(None)
self.north_line_ylist.extend([yy/self.dscale, yy/self.dscale])
self.north_line_ylist.append(None)
self.ax1.plot(self.north_line_xlist,
self.north_line_ylist,
lw=.25,
color='k')
#plot the colorbar
# self.ax2 = mcb.make_axes(self.ax1, orientation='vertical', shrink=.35)
self.ax2 = self.fig.add_axes([.81, .45, .16, .03])
self.ax2.xaxis.set_ticks_position('top')
#seg_cmap = ws.cmap_discretize(self.cmap, len(self.res_list))
self.cb = mcb.ColorbarBase(self.ax2,cmap=self.cmap,
norm=colors.Normalize(vmin=self.cmin,
vmax=self.cmax),
orientation='horizontal')
self.cb.set_label('Resistivity ($\Omega \cdot$m)',
fontdict={'size':self.font_size})
self.cb.set_ticks(np.arange(self.cmin, self.cmax+1))
self.cb.set_ticklabels([mtplottools.labeldict[cc]
for cc in np.arange(self.cmin, self.cmax+1)])
#make a resistivity radio button
#resrb = self.fig.add_axes([.85,.1,.1,.2])
#reslabels = ['{0:.4g}'.format(res) for res in self.res_list]
#self.radio_res = widgets.RadioButtons(resrb, reslabels,
# active=self.res_dict[self.res_value])
# slider_ax_bounds = list(self.cb.ax.get_position().bounds)
# slider_ax_bounds[0] += .1
slider_ax = self.fig.add_axes([.81, .5, .16, .03])
self.slider_res = widgets.Slider(slider_ax, 'Resistivity',
self.cmin, self.cmax,
valinit=2)
#make a rectangular selector
self.rect_selector = widgets.RectangleSelector(self.ax1,
self.rect_onselect,
drawtype='box',
useblit=True)
plt.show()
#needs to go after show()
self.slider_res.on_changed(self.set_res_value)
#self.radio_res.on_clicked(self.set_res_value)
def redraw_plot(self):
"""
redraws the plot
"""
current_xlimits = self.ax1.get_xlim()
current_ylimits = self.ax1.get_ylim()
self.ax1.cla()
plot_res = np.log10(self.res_model[:,:,self.depth_index].T)
self.mesh_plot = self.ax1.pcolormesh(self.mesh_east,
self.mesh_north,
plot_res,
cmap=self.cmap,
vmin=self.cmin,
vmax=self.cmax)
#plot the stations
if self.station_east is not None:
for ee,nn in zip(self.station_east, self.station_north):
self.ax1.text(ee/self.dscale, nn/self.dscale,
'*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':self.font_size-2,
'weight':'bold'})
#set axis properties
if self.xlimits is not None:
self.ax1.set_xlim(self.xlimits)
else:
self.ax1.set_xlim(current_xlimits)
if self.ylimits is not None:
self.ax1.set_ylim(self.ylimits)
else:
self.ax1.set_ylim(current_ylimits)
self.ax1.set_ylabel('Northing ('+self.map_scale+')',
fontdict=self.fdict)
self.ax1.set_xlabel('Easting ('+self.map_scale+')',
fontdict=self.fdict)
depth_title = self.grid_z[self.depth_index]/self.dscale
self.ax1.set_title('Depth = {:.3f} '.format(depth_title)+\
'('+self.map_scale+')',
fontdict=self.fdict)
#plot finite element mesh
self.ax1.plot(self.east_line_xlist,
self.east_line_ylist,
lw=.25,
color='k')
self.ax1.plot(self.north_line_xlist,
self.north_line_ylist,
lw=.25,
color='k')
#be sure to redraw the canvas
self.fig.canvas.draw()
# def set_res_value(self, label):
# self.res_value = float(label)
# print 'set resistivity to ', label
# print self.res_value
def set_res_value(self, val):
self.res_value = 10**val
print 'set resistivity to ', self.res_value
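# Hedged sketch (not part of the original class): the slider runs over
# log10(resistivity), so the callback above maps a slider value of 2 to
# 10**2 = 100 Ohm-m. Assuming an instance named `editor` (hypothetical name):
#
# >>> editor.slider_res.set_val(3) # matplotlib Slider API
# set resistivity to  1000.0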
def _on_key_callback(self,event):
"""
respond to key presses on the map figure:
'=' step down a depth slice, '-' step up a depth slice,
'a' copy the layer above, 'b' copy the layer below,
'u' undo the last rectangle edit, 'q' quit and rewrite the model file
"""
self.event_change_depth = event
#go down a layer on push of +/= keys
if self.event_change_depth.key == '=':
self.depth_index += 1
if self.depth_index>len(self.grid_z)-1:
self.depth_index = len(self.grid_z)-1
print 'already at deepest depth'
print 'Plotting Depth {0:.3f}'.format(self.grid_z[self.depth_index]/\
self.dscale)+'('+self.map_scale+')'
self.redraw_plot()
#go up a layer on push of - key
elif self.event_change_depth.key == '-':
self.depth_index -= 1
if self.depth_index < 0:
self.depth_index = 0
print 'Plotting Depth {0:.3f} '.format(self.grid_z[self.depth_index]/\
self.dscale)+'('+self.map_scale+')'
self.redraw_plot()
#exit plot on press of q
elif self.event_change_depth.key == 'q':
self.event_change_depth.canvas.mpl_disconnect(self.cid_depth)
plt.close(self.event_change_depth.canvas.figure)
self.rewrite_model_file()
#copy the layer above
elif self.event_change_depth.key == 'a':
try:
if self.depth_index == 0:
print 'No layers above'
else:
self.res_model[:, :, self.depth_index] = \
self.res_model[:, :, self.depth_index-1]
except IndexError:
print 'No layers above'
self.redraw_plot()
#copy the layer below
elif self.event_change_depth.key == 'b':
try:
self.res_model[:, :, self.depth_index] = \
self.res_model[:, :, self.depth_index+1]
except IndexError:
print 'No more layers below'
self.redraw_plot()
#undo
elif self.event_change_depth.key == 'u':
if type(self.xchange) is int and type(self.ychange) is int:
self.res_model[self.ychange, self.xchange, self.depth_index] =\
self.res_copy[self.ychange, self.xchange, self.depth_index]
else:
for xx in self.xchange:
for yy in self.ychange:
self.res_model[yy, xx, self.depth_index] = \
self.res_copy[yy, xx, self.depth_index]
self.redraw_plot()
def change_model_res(self, xchange, ychange):
"""
change resistivity values of resistivity model
"""
if type(xchange) is int and type(ychange) is int:
self.res_model[ychange, xchange, self.depth_index] = self.res_value
else:
for xx in xchange:
for yy in ychange:
self.res_model[yy, xx, self.depth_index] = self.res_value
self.redraw_plot()
def rect_onselect(self, eclick, erelease):
"""
on selecting a rectangle change the colors to the resistivity values
"""
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
self.xchange = self._get_east_index(x1, x2)
self.ychange = self._get_north_index(y1, y2)
#reset values of resistivity
self.change_model_res(self.xchange, self.ychange)
def _get_east_index(self, x1, x2):
"""
get the index value of the points to be changed
"""
if x1 < x2:
xchange = np.where((self.grid_east/self.dscale >= x1) & \
(self.grid_east/self.dscale <= x2))[0]
if len(xchange) == 0:
xchange = np.where(self.grid_east/self.dscale >= x1)[0][0]-1
return [xchange]
if x1 > x2:
xchange = np.where((self.grid_east/self.dscale <= x1) & \
(self.grid_east/self.dscale >= x2))[0]
if len(xchange) == 0:
xchange = np.where(self.grid_east/self.dscale >= x2)[0][0]-1
return [xchange]
#check the edges to see if the selection should include the square
xchange = np.append(xchange, xchange[0]-1)
xchange.sort()
return xchange
def _get_north_index(self, y1, y2):
"""
get the index value of the points to be changed in north direction
need to flip the index because the plot is flipped
"""
if y1 < y2:
ychange = np.where((self.grid_north/self.dscale > y1) & \
(self.grid_north/self.dscale < y2))[0]
if len(ychange) == 0:
ychange = np.where(self.grid_north/self.dscale >= y1)[0][0]-1
return [ychange]
elif y1 > y2:
ychange = np.where((self.grid_north/self.dscale < y1) & \
(self.grid_north/self.dscale > y2))[0]
if len(ychange) == 0:
ychange = np.where(self.grid_north/self.dscale >= y2)[0][0]-1
return [ychange]
ychange -= 1
ychange = np.append(ychange, ychange[-1]+1)
return ychange
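# Illustrative sketch (values assumed, not from this file) of how the two
# helpers above turn a dragged rectangle into grid indices: if
# grid_east/dscale is [-10, -5, 0, 5, 10] and the drag runs from x1=-6 to
# x2=6, np.where((grid >= -6) & (grid <= 6)) gives [1, 2, 3]; appending
# xchange[0]-1 then includes index 0 so the cell whose edge lies just
# outside the selection is also painted. The north helper works the same
# way but shifts/extends the indices to account for the flipped plot axis.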
def rewrite_model_file(self, model_fn=None, save_path=None,
model_fn_basename=None):
"""
write an initial file for wsinv3d from the model created.
"""
if save_path is not None:
self.save_path = save_path
self.model_fn = model_fn
if model_fn_basename is not None:
self.model_fn_basename = model_fn_basename
self.write_model_file()
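# Hedged end-to-end usage sketch for this model-editor class (class, module
# and file names below are placeholders, not taken from this file):
#
# >>> # from mtpy.modeling import modem  # assumed import location
# >>> # editor = <ThisEditorClass>(model_fn=r"/path/to/model.rho",
# >>> #                            data_fn=r"/path/to/data.dat")
#
# Workflow: drag a rectangle to select cells, move the 'Resistivity' slider
# to pick a new log10 value, use '='/'-' to change depth slice, 'u' to undo,
# and 'q' to close the figure and write the edited model file.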
#==============================================================================
# plot response
#==============================================================================
class moved_PlotResponse(object):
"""
plot data and response
Plots the real and imaginary impedance and induction vector if present.
:Example: ::
>>> import mtpy.modeling.new_modem as modem
>>> dfn = r"/home/MT/ModEM/Inv1/DataFile.dat"
>>> rfn = r"/home/MT/ModEM/Inv1/Test_resp_000.dat"
>>> mrp = modem.PlotResponse(data_fn=dfn, resp_fn=rfn)
>>> # plot only the TE and TM modes
>>> mrp.plot_component = 2
>>> mrp.redraw_plot()
======================== ==================================================
Attributes Description
======================== ==================================================
color_mode [ 'color' | 'bw' ] color or black and white plots
cted color for data TE mode
ctem color for model response TE mode
ctmd color for data TM mode
ctmm color for model response TM mode
data_fn full path to data file
data_object WSResponse instance
e_capsize cap size of error bars in points (*default* is 2)
e_capthick cap thickness of error bars in points (*default*
is .5)
fig_dpi resolution of figure in dots-per-inch (300)
fig_list list of matplotlib.figure instances for plots
fig_size size of figure in inches (*default* is [6, 6])
font_size size of font for tick labels, axes labels are
font_size+2 (*default* is 7)
legend_border_axes_pad padding between legend box and axes
legend_border_pad padding between border of legend and symbols
legend_handle_text_pad padding between text labels and symbols of legend
legend_label_spacing padding between labels
legend_loc location of legend
legend_marker_scale scale of symbols in legend
lw line width response curves (*default* is .5)
ms size of markers (*default* is 1.5)
mted marker for data TE mode
mtem marker for model response TE mode
mtmd marker for data TM mode
mtmm marker for model response TM mode
phase_limits limits of phase
plot_component [ 2 | 4 ] 2 for TE and TM or 4 for all components
plot_style [ 1 | 2 ] 1 to plot each mode in a separate
subplot and 2 to plot xx, xy and yx, yy in the same
plots
plot_type [ '1' | list of stations ] '1' to plot all
stations in the data file, or a list of station
names to plot; alternatively a list of integers
giving the station index within the data file,
i.e. 2 for the 2nd station
plot_z [ True | False ] *default* is True to plot
impedance, False for plotting resistivity and
phase
plot_yn [ 'n' | 'y' ] to plot on instantiation
res_limits limits of resistivity in linear scale
resp_fn full path to response file
resp_object WSResponse object for resp_fn, or list of
WSResponse objects if resp_fn is a list of
response files
station_fn full path to station file written by WSStation
subplot_bottom space between axes and bottom of figure
subplot_hspace space between subplots in vertical direction
subplot_left space between axes and left of figure
subplot_right space between axes and right of figure
subplot_top space between axes and top of figure
subplot_wspace space between subplots in horizontal direction
======================== ==================================================
"""
def __init__(self, data_fn=None, resp_fn=None, **kwargs):
self.data_fn = data_fn
self.resp_fn = resp_fn
self.data_object = None
self.resp_object = []
self.color_mode = kwargs.pop('color_mode', 'color')
self.ms = kwargs.pop('ms', 1.5)
self.ms_r = kwargs.pop('ms_r', 3)
self.lw = kwargs.pop('lw', .5)
self.lw_r = kwargs.pop('lw_r', 1.0)
self.e_capthick = kwargs.pop('e_capthick', .5)
self.e_capsize = kwargs.pop('e_capsize', 2)
#color mode
if self.color_mode == 'color':
#color for data
self.cted = kwargs.pop('cted', (0, 0, 1))
self.ctmd = kwargs.pop('ctmd', (1, 0, 0))
self.mted = kwargs.pop('mted', 's')
self.mtmd = kwargs.pop('mtmd', 'o')
#color for occam2d model
self.ctem = kwargs.pop('ctem', (0, .6, .3))
self.ctmm = kwargs.pop('ctmm', (.9, 0, .8))
self.mtem = kwargs.pop('mtem', '+')
self.mtmm = kwargs.pop('mtmm', '+')
#black and white mode
elif self.color_mode == 'bw':
#color for data
self.cted = kwargs.pop('cted', (0, 0, 0))
self.ctmd = kwargs.pop('ctmd', (0, 0, 0))
self.mted = kwargs.pop('mted', 's')
self.mtmd = kwargs.pop('mtmd', 'o')
#color for occam2d model
self.ctem = kwargs.pop('ctem', (0.6, 0.6, 0.6))
self.ctmm = kwargs.pop('ctmm', (0.6, 0.6, 0.6))
self.mtem = kwargs.pop('mtem', '+')
self.mtmm = kwargs.pop('mtmm', 'x')
self.phase_limits_d = kwargs.pop('phase_limits_d', None)
self.phase_limits_od = kwargs.pop('phase_limits_od', None)
self.res_limits_d = kwargs.pop('res_limits_d', None)
self.res_limits_od = kwargs.pop('res_limits_od', None)
self.tipper_limits = kwargs.pop('tipper_limits', None)
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.subplot_wspace = kwargs.pop('subplot_wspace', .3)
self.subplot_hspace = kwargs.pop('subplot_hspace', .0)
self.subplot_right = kwargs.pop('subplot_right', .98)
self.subplot_left = kwargs.pop('subplot_left', .08)
self.subplot_top = kwargs.pop('subplot_top', .85)
self.subplot_bottom = kwargs.pop('subplot_bottom', .1)
self.legend_loc = 'upper center'
self.legend_pos = (.5, 1.18)
self.legend_marker_scale = 1
self.legend_border_axes_pad = .01
self.legend_label_spacing = 0.07
self.legend_handle_text_pad = .2
self.legend_border_pad = .15
self.font_size = kwargs.pop('font_size', 6)
self.plot_type = kwargs.pop('plot_type', '1')
self.plot_style = kwargs.pop('plot_style', 1)
self.plot_component = kwargs.pop('plot_component', 4)
self.plot_yn = kwargs.pop('plot_yn', 'y')
self.plot_z = kwargs.pop('plot_z', True)
self.ylabel_pad = kwargs.pop('ylabel_pad', 1.25)
self.fig_list = []
if self.plot_yn == 'y':
self.plot()
def plot(self):
"""
plot
"""
self.data_object = Data()
self.data_object.read_data_file(self.data_fn)
#get shape of impedance tensors
ns = len(self.data_object.mt_dict.keys())
#read in response files
if self.resp_fn != None:
self.resp_object = []
if type(self.resp_fn) is not list:
resp_obj = Data()
resp_obj.read_data_file(self.resp_fn)
self.resp_object = [resp_obj]
else:
for rfile in self.resp_fn:
resp_obj = Data()
resp_obj.read_data_file(rfile)
self.resp_object.append(resp_obj)
#get number of response files
nr = len(self.resp_object)
if type(self.plot_type) is list:
ns = len(self.plot_type)
#--> set default font size
plt.rcParams['font.size'] = self.font_size
fontdict = {'size':self.font_size+2, 'weight':'bold'}
if self.plot_z == True:
h_ratio = [1, 1, .5]
elif self.plot_z == False:
h_ratio = [1.5, 1, .5]
ax_list = []
line_list = []
label_list = []
#--> make key word dictionaries for plotting
kw_xx = {'color':self.cted,
'marker':self.mted,
'ms':self.ms,
'ls':':',
'lw':self.lw,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
kw_yy = {'color':self.ctmd,
'marker':self.mtmd,
'ms':self.ms,
'ls':':',
'lw':self.lw,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
if self.plot_type != '1':
pstation_list = []
if type(self.plot_type) is not list:
self.plot_type = [self.plot_type]
for ii, station in enumerate(self.data_object.mt_dict.keys()):
if type(station) is not int:
for pstation in self.plot_type:
if station.find(str(pstation)) >= 0:
pstation_list.append(station)
else:
for pstation in self.plot_type:
if station == int(pstation):
pstation_list.append(ii)
else:
pstation_list = self.data_object.mt_dict.keys()
for jj, station in enumerate(pstation_list):
z_obj = self.data_object.mt_dict[station].Z
t_obj = self.data_object.mt_dict[station].Tipper
period = self.data_object.period_list
print 'Plotting: {0}'.format(station)
#convert to apparent resistivity and phase
z_obj._compute_res_phase()
#find locations where points have been masked
nzxx = np.nonzero(z_obj.z[:, 0, 0])[0]
nzxy = np.nonzero(z_obj.z[:, 0, 1])[0]
nzyx = np.nonzero(z_obj.z[:, 1, 0])[0]
nzyy = np.nonzero(z_obj.z[:, 1, 1])[0]
ntx = np.nonzero(t_obj.tipper[:, 0, 0])[0]
nty = np.nonzero(t_obj.tipper[:, 0, 1])[0]
#convert to apparent resistivity and phase
if self.plot_z == True:
scaling = np.zeros_like(z_obj.z)
for ii in range(2):
for jj in range(2):
scaling[:, ii, jj] = 1./np.sqrt(z_obj.freq)
plot_res = abs(z_obj.z.real*scaling)
plot_res_err = abs(z_obj.z_err*scaling)
plot_phase = abs(z_obj.z.imag*scaling)
plot_phase_err = abs(z_obj.z_err*scaling)
h_ratio = [1, 1, .5]
elif self.plot_z == False:
plot_res = z_obj.resistivity
plot_res_err = z_obj.resistivity_err
plot_phase = z_obj.phase
plot_phase_err = z_obj.phase_err
h_ratio = [1.5, 1, .5]
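# Hedged note on the plot_z scaling above (interpretation inferred from the
# code, not stated in it): dividing Re(Z) and Im(Z) by sqrt(f) is equivalent
# to multiplying by sqrt(T). With Z in mV/km/nT, rho_a ~ 0.2*T*|Z|**2, so the
# scaled quantity |Z|*sqrt(T) varies like sqrt(rho_a/0.2), i.e. the curves
# follow the square root of apparent resistivity rather than raw impedance.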
try:
self.res_limits_d = (10**(np.floor(np.log10(min([plot_res[nzxx, 0, 0].min(),
plot_res[nzyy, 1, 1].min()])))),
10**(np.ceil(np.log10(max([plot_res[nzxx, 0, 0].max(),
plot_res[nzyy, 1, 1].max()])))))
except ValueError:
self.res_limits_d = None
try:
self.res_limits_od = (10**(np.floor(np.log10(min([plot_res[nzxy, 0, 1].min(),
plot_res[nzyx, 1, 0].min()])))),
10**(np.ceil(np.log10(max([plot_res[nzxy, 0, 1].max(),
plot_res[nzyx, 1, 0].max()])))))
except ValueError:
self.res_limits_od = None
#make figure
fig = plt.figure(station, self.fig_size, dpi=self.fig_dpi)
plt.clf()
fig.suptitle(str(station), fontdict=fontdict)
#set the grid of subplots
if np.all(t_obj.tipper == 0.0) == True:
self.plot_tipper = False
else:
self.plot_tipper = True
self.tipper_limits = (np.round(min([t_obj.tipper[ntx, 0, 0].real.min(),
t_obj.tipper[nty, 0, 1].real.min(),
t_obj.tipper[ntx, 0, 0].imag.min(),
t_obj.tipper[nty, 0, 1].imag.min()]),
1),
np.round(max([t_obj.tipper[ntx, 0, 0].real.max(),
t_obj.tipper[nty, 0, 1].real.max(),
t_obj.tipper[ntx, 0, 0].imag.max(),
t_obj.tipper[nty, 0, 1].imag.max()]),
1))
gs = gridspec.GridSpec(3, 4,
wspace=self.subplot_wspace,
left=self.subplot_left,
top=self.subplot_top,
bottom=self.subplot_bottom,
right=self.subplot_right,
hspace=self.subplot_hspace,
height_ratios=h_ratio)
axrxx = fig.add_subplot(gs[0, 0])
axrxy = fig.add_subplot(gs[0, 1], sharex=axrxx)
axryx = fig.add_subplot(gs[0, 2], sharex=axrxx, sharey=axrxy)
axryy = fig.add_subplot(gs[0, 3], sharex=axrxx, sharey=axrxx)
axpxx = fig.add_subplot(gs[1, 0])
axpxy = fig.add_subplot(gs[1, 1], sharex=axrxx)
axpyx = fig.add_subplot(gs[1, 2], sharex=axrxx)
axpyy = fig.add_subplot(gs[1, 3], sharex=axrxx)
axtxr = fig.add_subplot(gs[2, 0], sharex=axrxx)
axtxi = fig.add_subplot(gs[2, 1], sharex=axrxx, sharey=axtxr)
axtyr = fig.add_subplot(gs[2, 2], sharex=axrxx)
axtyi = fig.add_subplot(gs[2, 3], sharex=axrxx, sharey=axtyr)
self.ax_list = [axrxx, axrxy, axryx, axryy,
axpxx, axpxy, axpyx, axpyy,
axtxr, axtxi, axtyr, axtyi]
#---------plot the apparent resistivity-----------------------------------
#plot each component in its own subplot
# plot data response
erxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
plot_res[nzxx, 0, 0],
plot_res_err[nzxx, 0, 0],
**kw_xx)
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
plot_res[nzxy, 0, 1],
plot_res_err[nzxy, 0, 1],
**kw_xx)
eryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
plot_res[nzyx, 1, 0],
plot_res_err[nzyx, 1, 0],
**kw_yy)
eryy = mtplottools.plot_errorbar(axryy,
period[nzyy],
plot_res[nzyy, 1, 1],
plot_res_err[nzyy, 1, 1],
**kw_yy)
#plot phase
epxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
plot_phase[nzxx, 0, 0],
plot_phase_err[nzxx, 0, 0],
**kw_xx)
epxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
plot_phase[nzxy, 0, 1],
plot_phase_err[nzxy, 0, 1],
**kw_xx)
epyx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
plot_phase[nzyx, 1, 0],
plot_phase_err[nzyx, 1, 0],
**kw_yy)
epyy = mtplottools.plot_errorbar(axpyy,
period[nzyy],
plot_phase[nzyy, 1, 1],
plot_phase_err[nzyy, 1, 1],
**kw_yy)
#plot tipper
if self.plot_tipper == True:
ertx = mtplottools.plot_errorbar(axtxr,
period[ntx],
t_obj.tipper[ntx, 0, 0].real,
t_obj.tipper_err[ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axtyr,
period[nty],
t_obj.tipper[nty, 0, 1].real,
t_obj.tipper_err[nty, 0, 1],
**kw_yy)
eptx = mtplottools.plot_errorbar(axtxi,
period[ntx],
t_obj.tipper[ntx, 0, 0].imag,
t_obj.tipper_err[ntx, 0, 0],
**kw_xx)
epty = mtplottools.plot_errorbar(axtyi,
period[nty],
t_obj.tipper[nty, 0, 1].imag,
t_obj.tipper_err[nty, 0, 1],
**kw_yy)
#----------------------------------------------
# get error bar list for editing later
if self.plot_tipper == False:
try:
self._err_list = [[erxx[1][0], erxx[1][1], erxx[2][0]],
[erxy[1][0], erxy[1][1], erxy[2][0]],
[eryx[1][0], eryx[1][1], eryx[2][0]],
[eryy[1][0], eryy[1][1], eryy[2][0]]]
line_list = [[erxx[0]], [erxy[0]], [eryx[0]], [eryy[0]]]
except IndexError:
print 'Found no Z components for {0}'.format(station)
line_list = [[None], [None],
[None], [None]]
self._err_list = [[None, None, None],
[None, None, None],
[None, None, None],
[None, None, None]]
else:
try:
line_list = [[erxx[0]], [erxy[0]],
[eryx[0]], [eryy[0]],
[ertx[0]], [erty[0]]]
self._err_list = [[erxx[1][0], erxx[1][1], erxx[2][0]],
[erxy[1][0], erxy[1][1], erxy[2][0]],
[eryx[1][0], eryx[1][1], eryx[2][0]],
[eryy[1][0], eryy[1][1], eryy[2][0]],
[ertx[1][0], ertx[1][1], ertx[2][0]],
[erty[1][0], erty[1][1], erty[2][0]]]
except IndexError:
print 'Found no Z components for {0}'.format(station)
line_list = [[None], [None],
[None], [None],
[None], [None]]
self._err_list = [[None, None, None],
[None, None, None],
[None, None, None],
[None, None, None],
[None, None, None],
[None, None, None]]
#------------------------------------------
# make things look nice
# set titles of the Z components
label_list = [['$Z_{xx}$'], ['$Z_{xy}$'],
['$Z_{yx}$'], ['$Z_{yy}$']]
for ax, label in zip(self.ax_list[0:4], label_list):
ax.set_title(label[0],fontdict={'size':self.font_size+2,
'weight':'bold'})
# set legends for tipper components
# fake a line
l1 = plt.Line2D([0], [0], linewidth=0, color='w', linestyle='None',
marker='.')
t_label_list = ['Re{$T_x$}', 'Im{$T_x$}', 'Re{$T_y$}', 'Im{$T_y$}']
label_list += [['$T_{x}$'], ['$T_{y}$']]
for ax, label in zip(self.ax_list[-4:], t_label_list):
ax.legend([l1], [label], loc='upper left',
markerscale=.01,
borderaxespad=.05,
labelspacing=.01,
handletextpad=.05,
borderpad=.05,
prop={'size':max([self.font_size, 6])})
#set axis properties
for aa, ax in enumerate(self.ax_list):
ax.tick_params(axis='y', pad=self.ylabel_pad)
if aa < 8:
# ylabels[-1] = ''
# ylabels[0] = ''
# ax.set_yticklabels(ylabels)
# plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == True:
ax.set_yscale('log')
else:
ax.set_xlabel('Period (s)', fontdict=fontdict)
if aa < 4 and self.plot_z is False:
ax.set_yscale('log')
if aa == 0 or aa == 3:
ax.set_ylim(self.res_limits_d)
elif aa == 1 or aa == 2:
ax.set_ylim(self.res_limits_od)
if aa > 3 and aa < 8 and self.plot_z is False:
ax.yaxis.set_major_locator(MultipleLocator(10))
if self.phase_limits_d is not None:
ax.set_ylim(self.phase_limits_d)
#set axes labels
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Re[Z (mV/km nT)]',
fontdict=fontdict)
elif aa == 4:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Im[Z (mV/km nT)]',
fontdict=fontdict)
elif aa == 8:
ax.set_ylabel('Tipper',
fontdict=fontdict)
if aa > 7:
ax.yaxis.set_major_locator(MultipleLocator(.1))
if self.tipper_limits is not None:
ax.set_ylim(self.tipper_limits)
else:
pass
ax.set_xscale('log')
ax.set_xlim(xmin=10**(np.floor(np.log10(period[0])))*1.01,
xmax=10**(np.ceil(np.log10(period[-1])))*.99)
ax.grid(True, alpha=.25)
ylabels = ax.get_yticks().tolist()
if aa < 8:
ylabels[-1] = ''
ylabels[0] = ''
ax.set_yticklabels(ylabels)
plt.setp(ax.get_xticklabels(), visible=False)
##----------------------------------------------
#plot model response
if self.resp_object is not None:
for resp_obj in self.resp_object:
resp_z_obj = resp_obj.mt_dict[station].Z
resp_z_err = np.nan_to_num((z_obj.z-resp_z_obj.z)/z_obj.z_err)
resp_z_obj._compute_res_phase()
resp_t_obj = resp_obj.mt_dict[station].Tipper
resp_t_err = np.nan_to_num((t_obj.tipper-resp_t_obj.tipper)/t_obj.tipper_err)
#convert to apparent resistivity and phase
if self.plot_z == True:
scaling = np.zeros_like(resp_z_obj.z)
for ii in range(2):
for jj in range(2):
scaling[:, ii, jj] = 1./np.sqrt(resp_z_obj.freq)
r_plot_res = abs(resp_z_obj.z.real*scaling)
r_plot_phase = abs(resp_z_obj.z.imag*scaling)
elif self.plot_z == False:
r_plot_res = resp_z_obj.resistivity
r_plot_phase = resp_z_obj.phase
rms_xx = resp_z_err[:, 0, 0].std()
rms_xy = resp_z_err[:, 0, 1].std()
rms_yx = resp_z_err[:, 1, 0].std()
rms_yy = resp_z_err[:, 1, 1].std()
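# Hedged clarification: resp_z_err (computed above) is the error-normalised
# misfit (Z_data - Z_model)/Z_error per period and component, so e.g.
# rms_xy = std over periods of (Zxy_data - Zxy_model)/Zxy_error. Strictly
# this is a standard deviation of normalised residuals rather than a true
# root-mean-square, but it is what appears as 'rms=...' in the legend below.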
#--> make key word dictionaries for plotting
kw_xx = {'color':self.ctem,
'marker':self.mtem,
'ms':self.ms_r,
'ls':':',
'lw':self.lw_r,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
kw_yy = {'color':self.ctmm,
'marker':self.mtmm,
'ms':self.ms_r,
'ls':':',
'lw':self.lw_r,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
# plot data response
rerxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
r_plot_res[nzxx, 0, 0],
None,
**kw_xx)
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
r_plot_res[nzxy, 0, 1],
None,
**kw_xx)
reryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
r_plot_res[nzyx, 1, 0],
None,
**kw_yy)
reryy = mtplottools.plot_errorbar(axryy,
period[nzyy],
r_plot_res[nzyy, 1, 1],
None,
**kw_yy)
#plot phase
repxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
r_plot_phase[nzxx, 0, 0],
None,
**kw_xx)
repxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
r_plot_phase[nzxy, 0, 1],
None,
**kw_xx)
repyx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
r_plot_phase[nzyx, 1, 0],
None,
**kw_yy)
repyy = mtplottools.plot_errorbar(axpyy,
period[nzyy],
r_plot_phase[nzyy, 1, 1],
None,
**kw_yy)
#plot tipper
if self.plot_tipper == True:
rertx = mtplottools.plot_errorbar(axtxr,
period[ntx],
resp_t_obj.tipper[ntx, 0, 0].real,
None,
**kw_xx)
rerty = mtplottools.plot_errorbar(axtyr,
period[nty],
resp_t_obj.tipper[nty, 0, 1].real,
None,
**kw_yy)
reptx = mtplottools.plot_errorbar(axtxi,
period[ntx],
resp_t_obj.tipper[ntx, 0, 0].imag,
None,
**kw_xx)
repty = mtplottools.plot_errorbar(axtyi,
period[nty],
resp_t_obj.tipper[nty, 0, 1].imag,
None,
**kw_yy)
if self.plot_tipper == False:
line_list[0] += [rerxx[0]]
line_list[1] += [rerxy[0]]
line_list[2] += [reryx[0]]
line_list[3] += [reryy[0]]
label_list[0] += ['$Z^m_{xx}$ '+
'rms={0:.2f}'.format(rms_xx)]
label_list[1] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy)]
label_list[2] += ['$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
label_list[3] += ['$Z^m_{yy}$ '+
'rms={0:.2f}'.format(rms_yy)]
else:
line_list[0] += [rerxx[0]]
line_list[1] += [rerxy[0]]
line_list[2] += [reryx[0]]
line_list[3] += [reryy[0]]
line_list[4] += [rertx[0]]
line_list[5] += [rerty[0]]
label_list[0] += ['$Z^m_{xx}$ '+
'rms={0:.2f}'.format(rms_xx)]
label_list[1] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy)]
label_list[2] += ['$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
label_list[3] += ['$Z^m_{yy}$ '+
'rms={0:.2f}'.format(rms_yy)]
label_list[4] += ['$T^m_{x}$ '+
'rms={0:.2f}'.format(resp_t_err[:, 0, 0].std())]
label_list[5] += ['$T^m_{y}$'+
'rms={0:.2f}'.format(resp_t_err[:, 0, 1].std())]
legend_ax_list = self.ax_list[0:4]
# if self.plot_tipper == True:
# legend_ax_list += [self.ax_list[-4], self.ax_list[-2]]
for aa, ax in enumerate(legend_ax_list):
ax.legend(line_list[aa],
label_list[aa],
loc=self.legend_loc,
bbox_to_anchor=self.legend_pos,
markerscale=self.legend_marker_scale,
borderaxespad=self.legend_border_axes_pad,
labelspacing=self.legend_label_spacing,
handletextpad=self.legend_handle_text_pad,
borderpad=self.legend_border_pad,
prop={'size':max([self.font_size, 5])})
plt.show()
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
for fig in self.fig_list:
plt.close(fig)
self.plot()
def save_figure(self, save_fn, file_format='pdf', orientation='portrait',
fig_dpi=None, close_fig='y'):
"""
save_plot will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_PhaseTensor.file_format
* full path -> file will be saved to the given
path. If you use this option the file format
is assumed to be given by the path's extension
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
*default* is portrait
**fig_dpi** : int
The resolution in dots-per-inch at which the file
will be saved. If None, the dpi at which the
figure was made is used; setting it larger than
the figure's dpi is unlikely to help.
**close_fig** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave the plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotPseudoSection()
>>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
"""
fig = plt.gcf()
if fig_dpi == None:
fig_dpi = self.fig_dpi
if os.path.isdir(save_fn) == False:
file_format = save_fn[-3:]
fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
else:
save_fn = os.path.join(save_fn, '_L2.'+
file_format)
fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
if close_fig == 'y':
plt.clf()
plt.close(fig)
else:
pass
self.fig_fn = save_fn
print 'Saved figure to: '+self.fig_fn
def update_plot(self):
"""
update any parameters that were changed using the built-in draw from
canvas.
Use this if you change any of the .fig or axes properties
:Example: ::
>>> # to change the grid lines to only be on the major ticks
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotAllResponses()
>>> [ax.grid(True, which='major') for ax in [ps1.axrte,ps1.axtep]]
>>> ps1.update_plot()
"""
self.fig.canvas.draw()
def __str__(self):
"""
rewrite the string builtin to give a useful message
"""
return ("Plots data vs model response computed by WS3DINV")
#==============================================================================
# plot phase tensors
#==============================================================================
class moved_PlotPTMaps(mtplottools.MTEllipse):
"""
Plot phase tensor maps including residual pt if response file is input.
:Plot only data for one period: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> dfn = r"/home/MT/ws3dinv/Inv1/WSDataFile.dat"
>>> ptm = ws.PlotPTMaps(data_fn=dfn, plot_period_list=[0])
:Plot data and model response: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> dfn = r"/home/MT/ws3dinv/Inv1/WSDataFile.dat"
>>> rfn = r"/home/MT/ws3dinv/Inv1/Test_resp.00"
>>> mfn = r"/home/MT/ws3dinv/Inv1/Test_model.00"
>>> ptm = ws.PlotPTMaps(data_fn=dfn, resp_fn=rfn, model_fn=mfn,
>>> ... plot_period_list=[0])
>>> # adjust colorbar
>>> ptm.cb_res_pad = 1.25
>>> ptm.redraw_plot()
========================== ================================================
Attributes Description
========================== ================================================
cb_pt_pad percentage from top of axes to place pt
color bar. *default* is 1.2
cb_res_pad percentage from bottom of axes to place
resistivity color bar. *default* is .5
cb_residual_tick_step tick step for residual pt. *default* is 3
cb_tick_step tick step for phase tensor color bar,
*default* is 45
data np.ndarray(n_station, n_periods, 2, 2)
impedance tensors for station data
data_fn full path to data file
dscale scaling parameter depending on map_scale
ellipse_cmap color map for pt ellipses. *default* is
mt_bl2gr2rd
ellipse_colorby [ 'skew' | 'skew_seg' | 'phimin' | 'phimax'|
'phidet' | 'ellipticity' ] parameter to color
ellipses by. *default* is 'phimin'
ellipse_range (min, max, step) min and max of colormap, need
to input step if plotting skew_seg
ellipse_size relative size of ellipses in map_scale
ew_limits limits of plot in e-w direction in map_scale
units. *default* is None, scales to station
area
fig_aspect aspect of figure. *default* is 1
fig_dpi resolution in dots-per-inch. *default* is 300
fig_list list of matplotlib.figure instances for each
figure plotted.
fig_size [width, height] in inches of figure window
*default* is [6, 6]
font_size font size of ticklabels, axes labels are
font_size+2. *default* is 7
grid_east relative location of grid nodes in e-w direction
in map_scale units
grid_north relative location of grid nodes in n-s direction
in map_scale units
grid_z relative location of grid nodes in z direction
in map_scale units
map_scale [ 'km' | 'm' ] distance units of map.
*default* is km
mesh_east np.meshgrid(grid_east, grid_north, indexing='ij')
mesh_north np.meshgrid(grid_east, grid_north, indexing='ij')
model_fn full path to model file
nodes_east relative distance between nodes in e-w direction
in map_scale units
nodes_north relative distance between nodes in n-s direction
in map_scale units
nodes_z relative distance between nodes in z direction
in map_scale units
ns_limits (min, max) limits of plot in n-s direction
*default* is None, viewing area is station area
pad_east padding from extreme stations in east direction
pad_north padding from extreme stations in north direction
period_list list of periods from data
plot_grid [ 'y' | 'n' ] 'y' to plot grid lines
*default* is 'n'
plot_period_list list of period index values to plot
*default* is None
plot_yn ['y' | 'n' ] 'y' to plot on instantiation
*default* is 'y'
res_cmap colormap for resistivity values.
*default* is 'jet_r'
res_limits (min, max) resistivity limits in log scale
*default* is (0, 4)
res_model np.ndarray(n_north, n_east, n_vertical) of
model resistivity values in linear scale
residual_cmap color map for pt residuals.
*default* is 'mt_wh2or'
resp np.ndarray(n_stations, n_periods, 2, 2)
impedance tensors for model response
resp_fn full path to response file
save_path directory to save figures to
save_plots [ 'y' | 'n' ] 'y' to save plots to save_path
station_east location of stations in east direction in
map_scale units
station_fn full path to station locations file
station_names station names
station_north location of station in north direction in
map_scale units
subplot_bottom distance between axes and bottom of figure window
subplot_left distance between axes and left of figure window
subplot_right distance between axes and right of figure window
subplot_top distance between axes and top of figure window
title title of plot *default* is depth of slice
xminorticks location of xminorticks
yminorticks location of yminorticks
========================== ================================================
"""
def __init__(self, data_fn=None, resp_fn=None, model_fn=None, **kwargs):
self.model_fn = model_fn
self.data_fn = data_fn
self.resp_fn = resp_fn
self.save_path = kwargs.pop('save_path', None)
if self.model_fn is not None and self.save_path is None:
self.save_path = os.path.dirname(self.model_fn)
if self.save_path is not None:
if not os.path.exists(self.save_path):
os.mkdir(self.save_path)
self.save_plots = kwargs.pop('save_plots', 'y')
self.plot_period_list = kwargs.pop('plot_period_list', None)
self.period_dict = None
self.map_scale = kwargs.pop('map_scale', 'km')
#make map scale
if self.map_scale == 'km':
self.dscale = 1000.
elif self.map_scale == 'm':
self.dscale = 1.
self.ew_limits = kwargs.pop('ew_limits', None)
self.ns_limits = kwargs.pop('ns_limits', None)
self.pad_east = kwargs.pop('pad_east', 2000)
self.pad_north = kwargs.pop('pad_north', 2000)
self.plot_grid = kwargs.pop('plot_grid', 'n')
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.fig_aspect = kwargs.pop('fig_aspect', 1)
self.title = kwargs.pop('title', 'on')
self.fig_list = []
self.xminorticks = kwargs.pop('xminorticks', 1000)
self.yminorticks = kwargs.pop('yminorticks', 1000)
self.residual_cmap = kwargs.pop('residual_cmap', 'mt_wh2or')
self.font_size = kwargs.pop('font_size', 7)
self.cb_tick_step = kwargs.pop('cb_tick_step', 45)
self.cb_residual_tick_step = kwargs.pop('cb_residual_tick_step', 3)
self.cb_pt_pad = kwargs.pop('cb_pt_pad', 1.2)
self.cb_res_pad = kwargs.pop('cb_res_pad', .5)
self.res_limits = kwargs.pop('res_limits', (0,4))
self.res_cmap = kwargs.pop('res_cmap', 'jet_r')
#--> set the ellipse properties -------------------
self._ellipse_dict = kwargs.pop('ellipse_dict', {'size':2})
self._read_ellipse_dict()
self.subplot_right = .99
self.subplot_left = .085
self.subplot_top = .92
self.subplot_bottom = .1
self.subplot_hspace = .2
self.subplot_wspace = .05
self.data_obj = None
self.resp_obj = None
self.model_obj = None
self.period_list = None
self.pt_data_arr = None
self.pt_resp_arr = None
self.pt_resid_arr = None
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn == 'y':
self.plot()
def _read_files(self):
"""
get information from files
"""
#--> read in data file
self.data_obj = Data()
self.data_obj.read_data_file(self.data_fn)
#--> read response file
if self.resp_fn is not None:
self.resp_obj = Data()
self.resp_obj.read_data_file(self.resp_fn)
#--> read model file
if self.model_fn is not None:
self.model_obj = Model()
self.model_obj.read_model_file(self.model_fn)
self._get_plot_period_list()
self._get_pt()
def _get_plot_period_list(self):
"""
get periods to plot from input or data file
"""
#--> get period list to plot
if self.plot_period_list is None:
self.plot_period_list = self.data_obj.period_list
else:
if type(self.plot_period_list) is list:
#check if entries are index values or actual periods
if type(self.plot_period_list[0]) is int:
self.plot_period_list = [self.data_obj.period_list[ii]
for ii in self.plot_period_list]
else:
pass
elif type(self.plot_period_list) is int:
self.plot_period_list = [self.data_obj.period_list[self.plot_period_list]]
elif type(self.plot_period_list) is float:
self.plot_period_list = [self.plot_period_list]
self.period_dict = dict([(key, value) for value, key in
enumerate(self.data_obj.period_list)])
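# Illustrative example (periods assumed, not from this file): if the data
# file contains periods [0.01, 0.1, 1.0, 10.0] then
# period_dict == {0.01: 0, 0.1: 1, 1.0: 2, 10.0: 3}, i.e. it maps a period
# value from plot_period_list back to its row index in the data arrays.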
def _get_pt(self):
"""
put pt parameters into something useful for plotting
"""
ns = len(self.data_obj.mt_dict.keys())
nf = len(self.data_obj.period_list)
data_pt_arr = np.zeros((nf, ns), dtype=[('phimin', np.float),
('phimax', np.float),
('skew', np.float),
('azimuth', np.float),
('east', np.float),
('north', np.float)])
if self.resp_fn is not None:
model_pt_arr = np.zeros((nf, ns), dtype=[('phimin', np.float),
('phimax', np.float),
('skew', np.float),
('azimuth', np.float),
('east', np.float),
('north', np.float)])
res_pt_arr = np.zeros((nf, ns), dtype=[('phimin', np.float),
('phimax', np.float),
('skew', np.float),
('azimuth', np.float),
('east', np.float),
('north', np.float),
('geometric_mean', np.float)])
for ii, key in enumerate(self.data_obj.mt_dict.keys()):
east = self.data_obj.mt_dict[key].grid_east/self.dscale
north = self.data_obj.mt_dict[key].grid_north/self.dscale
dpt = self.data_obj.mt_dict[key].pt
data_pt_arr[:, ii]['east'] = east
data_pt_arr[:, ii]['north'] = north
data_pt_arr[:, ii]['phimin'] = dpt.phimin[0]
data_pt_arr[:, ii]['phimax'] = dpt.phimax[0]
data_pt_arr[:, ii]['azimuth'] = dpt.azimuth[0]
data_pt_arr[:, ii]['skew'] = dpt.beta[0]
if self.resp_fn is not None:
mpt = self.resp_obj.mt_dict[key].pt
try:
rpt = mtpt.ResidualPhaseTensor(pt_object1=dpt,
pt_object2=mpt)
rpt = rpt.residual_pt
res_pt_arr[:, ii]['east'] = east
res_pt_arr[:, ii]['north'] = north
res_pt_arr[:, ii]['phimin'] = rpt.phimin[0]
res_pt_arr[:, ii]['phimax'] = rpt.phimax[0]
res_pt_arr[:, ii]['azimuth'] = rpt.azimuth[0]
res_pt_arr[:, ii]['skew'] = rpt.beta[0]
res_pt_arr[:, ii]['geometric_mean'] = np.sqrt(abs(rpt.phimin[0]*\
rpt.phimax[0]))
except mtex.MTpyError_PT:
print key, dpt.pt.shape, mpt.pt.shape
model_pt_arr[:, ii]['east'] = east
model_pt_arr[:, ii]['north'] = north
model_pt_arr[:, ii]['phimin'] = mpt.phimin[0]
model_pt_arr[:, ii]['phimax'] = mpt.phimax[0]
model_pt_arr[:, ii]['azimuth'] = mpt.azimuth[0]
model_pt_arr[:, ii]['skew'] = mpt.beta[0]
#make these attributes
self.pt_data_arr = data_pt_arr
if self.resp_fn is not None:
self.pt_resp_arr = model_pt_arr
self.pt_resid_arr = res_pt_arr
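# Hedged usage sketch of the structured arrays filled above (index values
# are illustrative only):
#
# >>> # phimin of every station at the 3rd period of the data phase tensors
# >>> # self.pt_data_arr[2]['phimin']
# >>> # residual-PT colour value for station ii at period index ff
# >>> # self.pt_resid_arr[ff, ii]['geometric_mean']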
def plot(self):
"""
plot phase tensor maps for data and/or model response; each figure shows a
different period. If a response is input, a third column is added showing
the residual phase tensor, which highlights where the model does not fit
the data well. Maps are plotted in map_scale units (km by default).
"""
#--> read in data first
if self.data_obj is None:
self._read_files()
# set plot properties
plt.rcParams['font.size'] = self.font_size
plt.rcParams['figure.subplot.left'] = self.subplot_left
plt.rcParams['figure.subplot.right'] = self.subplot_right
plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
plt.rcParams['figure.subplot.top'] = self.subplot_top
font_dict = {'size':self.font_size+2, 'weight':'bold'}
# make a grid of subplots
gs = gridspec.GridSpec(1, 3, hspace=self.subplot_hspace,
wspace=self.subplot_wspace)
#set some parameters for the colorbar
ckmin = float(self.ellipse_range[0])
ckmax = float(self.ellipse_range[1])
try:
ckstep = float(self.ellipse_range[2])
except IndexError:
if self.ellipse_cmap == 'mt_seg_bl2wh2rd':
raise ValueError('Need to input range as (min, max, step)')
else:
ckstep = 3
bounds = np.arange(ckmin, ckmax+ckstep, ckstep)
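# Illustrative example (values assumed): with ellipse_range = (-12, 12, 3),
# a typical 'skew_seg' setting, ckmin=-12, ckmax=12, ckstep=3 and
# bounds == array([-12, -9, -6, -3, 0, 3, 6, 9, 12]), i.e. one bin edge per
# segment of the segmented colormap.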
# set plot limits to be the station area
if self.ew_limits == None:
east_min = self.data_obj.data_array['rel_east'].min()-\
self.pad_east
east_max = self.data_obj.data_array['rel_east'].max()+\
self.pad_east
self.ew_limits = (east_min/self.dscale, east_max/self.dscale)
if self.ns_limits == None:
north_min = self.data_obj.data_array['rel_north'].min()-\
self.pad_north
north_max = self.data_obj.data_array['rel_north'].max()+\
self.pad_north
self.ns_limits = (north_min/self.dscale, north_max/self.dscale)
#-------------plot phase tensors------------------------------------
#for ff, per in enumerate(self.plot_period_list):
#NOTE: only the first period in plot_period_list is plotted here;
#restore the loop above to plot every period.
for ff, per in enumerate(self.plot_period_list[:1]):
data_ii = self.period_dict[per]
print 'Plotting Period: {0:.5g}'.format(per)
fig = plt.figure('{0:.5g}'.format(per), figsize=self.fig_size,
dpi=self.fig_dpi)
fig.clf()
if self.resp_fn is not None:
axd = fig.add_subplot(gs[0, 0], aspect='equal')
axm = fig.add_subplot(gs[0, 1], aspect='equal')
axr = fig.add_subplot(gs[0, 2], aspect='equal')
ax_list = [axd, axm, axr]
else:
axd = fig.add_subplot(gs[0, :], aspect='equal')
ax_list = [axd]
#plot model below the phase tensors
if self.model_fn is not None:
approx_depth, d_index = ws.estimate_skin_depth(self.model_obj.res_model.copy(),
self.model_obj.grid_z.copy()/self.dscale,
per,
dscale=self.dscale)
#need to add an extra row and column to east and north to make sure
#all is plotted see pcolor for details.
plot_east = np.append(self.model_obj.grid_east,
self.model_obj.grid_east[-1]*1.25)/\
self.dscale
plot_north = np.append(self.model_obj.grid_north,
self.model_obj.grid_north[-1]*1.25)/\
self.dscale
#make a mesh grid for plotting
#the 'ij' makes sure the resulting grid is in east, north
self.mesh_east, self.mesh_north = np.meshgrid(plot_east,
plot_north,
indexing='ij')
for ax in ax_list:
plot_res = np.log10(self.model_obj.res_model[:, :, d_index].T)
ax.pcolormesh(self.mesh_east,
self.mesh_north,
plot_res,
cmap=self.res_cmap,
vmin=self.res_limits[0],
vmax=self.res_limits[1])
#--> plot data phase tensors
for pt in self.pt_data_arr[data_ii]:
eheight = pt['phimin']/\
self.pt_data_arr[data_ii]['phimax'].max()*\
self.ellipse_size
ewidth = pt['phimax']/\
self.pt_data_arr[data_ii]['phimax'].max()*\
self.ellipse_size
ellipse = Ellipse((pt['east'],
pt['north']),
width=ewidth,
height=eheight,
angle=90-pt['azimuth'])
#get ellipse color
if self.ellipse_cmap.find('seg')>0:
ellipse.set_facecolor(mtcl.get_plot_color(pt[self.ellipse_colorby],
self.ellipse_colorby,
self.ellipse_cmap,
ckmin,
ckmax,
bounds=bounds))
else:
ellipse.set_facecolor(mtcl.get_plot_color(pt[self.ellipse_colorby],
self.ellipse_colorby,
self.ellipse_cmap,
ckmin,
ckmax))
axd.add_artist(ellipse)
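# Hedged note on the ellipse geometry above: height and width are
# phimin/max(phimax) * ellipse_size and phimax/max(phimax) * ellipse_size,
# where max(phimax) is taken over all stations at this period, so the
# largest ellipse has width ellipse_size; the 90 - azimuth rotation appears
# to orient the major axis along the phase-tensor strike in map coordinates
# (interpretation inferred from the code).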
#-----------plot response phase tensors---------------
if self.resp_fn is not None:
rcmin = np.floor(self.pt_resid_arr['geometric_mean'].min())
rcmax = np.floor(self.pt_resid_arr['geometric_mean'].max())
for mpt, rpt in zip(self.pt_resp_arr[data_ii],
self.pt_resid_arr[data_ii]):
eheight = mpt['phimin']/\
self.pt_resp_arr[data_ii]['phimax'].max()*\
self.ellipse_size
ewidth = mpt['phimax']/\
self.pt_resp_arr[data_ii]['phimax'].max()*\
self.ellipse_size
ellipsem = Ellipse((mpt['east'],
mpt['north']),
width=ewidth,
height=eheight,
angle=90-mpt['azimuth'])
#get ellipse color
if self.ellipse_cmap.find('seg')>0:
ellipsem.set_facecolor(mtcl.get_plot_color(mpt[self.ellipse_colorby],
self.ellipse_colorby,
self.ellipse_cmap,
ckmin,
ckmax,
bounds=bounds))
else:
ellipsem.set_facecolor(mtcl.get_plot_color(mpt[self.ellipse_colorby],
self.ellipse_colorby,
self.ellipse_cmap,
ckmin,
ckmax))
axm.add_artist(ellipsem)
#-----------plot residual phase tensors---------------
eheight = rpt['phimin']/\
self.pt_resid_arr[data_ii]['phimax'].max()*\
self.ellipse_size
ewidth = rpt['phimax']/\
self.pt_resid_arr[data_ii]['phimax'].max()*\
self.ellipse_size
ellipser = Ellipse((rpt['east'],
rpt['north']),
width=ewidth,
height=eheight,
angle=rpt['azimuth'])
#get ellipse color
rpt_color = np.sqrt(abs(rpt['phimin']*rpt['phimax']))
if self.ellipse_cmap.find('seg')>0:
ellipser.set_facecolor(mtcl.get_plot_color(rpt_color,
'geometric_mean',
self.residual_cmap,
ckmin,
ckmax,
bounds=bounds))
else:
ellipser.set_facecolor(mtcl.get_plot_color(rpt_color,
'geometric_mean',
self.residual_cmap,
ckmin,
ckmax))
axr.add_artist(ellipser)
#--> set axes properties
# data
axd.set_xlim(self.ew_limits)
axd.set_ylim(self.ns_limits)
axd.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=font_dict)
axd.set_ylabel('Northing ({0})'.format(self.map_scale),
fontdict=font_dict)
#make a colorbar for phase tensors
#bb = axd.axes.get_position().bounds
bb = axd.get_position().bounds
y1 = .25*(2+(self.ns_limits[1]-self.ns_limits[0])/
(self.ew_limits[1]-self.ew_limits[0]))
cb_location = (3.35*bb[2]/5+bb[0],
y1*self.cb_pt_pad, .295*bb[2], .02)
cbaxd = fig.add_axes(cb_location)
cbd = mcb.ColorbarBase(cbaxd,
cmap=mtcl.cmapdict[self.ellipse_cmap],
norm=Normalize(vmin=ckmin,
vmax=ckmax),
orientation='horizontal')
cbd.ax.xaxis.set_label_position('top')
cbd.ax.xaxis.set_label_coords(.5, 1.75)
cbd.set_label(mtplottools.ckdict[self.ellipse_colorby])
cbd.set_ticks(np.arange(ckmin, ckmax+self.cb_tick_step,
self.cb_tick_step))
axd.text(self.ew_limits[0]*.95,
self.ns_limits[1]*.95,
'Data',
horizontalalignment='left',
verticalalignment='top',
bbox={'facecolor':'white'},
fontdict={'size':self.font_size+1})
#Model and residual
if self.resp_fn is not None:
for aa, ax in enumerate([axm, axr]):
ax.set_xlim(self.ew_limits)
ax.set_ylim(self.ns_limits)
ax.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=font_dict)
plt.setp(ax.yaxis.get_ticklabels(), visible=False)
#make a colorbar ontop of axis
bb = ax.axes.get_position().bounds
y1 = .25*(2+(self.ns_limits[1]-self.ns_limits[0])/
(self.ew_limits[1]-self.ew_limits[0]))
cb_location = (3.35*bb[2]/5+bb[0],
y1*self.cb_pt_pad, .295*bb[2], .02)
cbax = fig.add_axes(cb_location)
if aa == 0:
cb = mcb.ColorbarBase(cbax,
cmap=mtcl.cmapdict[self.ellipse_cmap],
norm=Normalize(vmin=ckmin,
vmax=ckmax),
orientation='horizontal')
cb.ax.xaxis.set_label_position('top')
cb.ax.xaxis.set_label_coords(.5, 1.75)
cb.set_label(mtplottools.ckdict[self.ellipse_colorby])
cb.set_ticks(np.arange(ckmin, ckmax+self.cb_tick_step,
self.cb_tick_step))
ax.text(self.ew_limits[0]*.95,
self.ns_limits[1]*.95,
'Model',
horizontalalignment='left',
verticalalignment='top',
bbox={'facecolor':'white'},
fontdict={'size':self.font_size+1})
else:
cb = mcb.ColorbarBase(cbax,
cmap=mtcl.cmapdict[self.residual_cmap],
norm=Normalize(vmin=rcmin,
vmax=rcmax),
orientation='horizontal')
cb.ax.xaxis.set_label_position('top')
cb.ax.xaxis.set_label_coords(.5, 1.75)
cb.set_label(r"$\sqrt{\Phi_{min} \Phi_{max}}$")
cb_ticks = [rcmin, (rcmax-rcmin)/2, rcmax]
cb.set_ticks(cb_ticks)
ax.text(self.ew_limits[0]*.95,
self.ns_limits[1]*.95,
'Residual',
horizontalalignment='left',
verticalalignment='top',
bbox={'facecolor':'white'},
fontdict={'size':self.font_size+1})
if self.model_fn is not None:
for ax in ax_list:
ax.tick_params(direction='out')
bb = ax.axes.get_position().bounds
y1 = .25*(2-(self.ns_limits[1]-self.ns_limits[0])/
(self.ew_limits[1]-self.ew_limits[0]))
cb_position = (3.0*bb[2]/5+bb[0],
y1*self.cb_res_pad, .35*bb[2], .02)
cbax = fig.add_axes(cb_position)
cb = mcb.ColorbarBase(cbax,
cmap=self.res_cmap,
norm=Normalize(vmin=self.res_limits[0],
vmax=self.res_limits[1]),
orientation='horizontal')
cb.ax.xaxis.set_label_position('top')
cb.ax.xaxis.set_label_coords(.5, 1.5)
cb.set_label('Resistivity ($\Omega \cdot$m)')
cb_ticks = np.arange(np.floor(self.res_limits[0]),
np.ceil(self.res_limits[1]+1), 1)
cb.set_ticks(cb_ticks)
cb.set_ticklabels([mtplottools.labeldict[ctk] for ctk in cb_ticks])
plt.show()
self.fig_list.append(fig)
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
for fig in self.fig_list:
plt.close(fig)
self.plot()
def save_figure(self, save_path=None, fig_dpi=None, file_format='pdf',
orientation='landscape', close_fig='y'):
"""
save_figure will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_PhaseTensor.file_format
* full path -> file will be saved to the given
path. If you use this option the file format
is assumed to be given by the path's extension
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
*default* is portrait
**fig_dpi** : int
The resolution in dots-per-inch at which the file
will be saved. If None, the dpi at which the
figure was made is used; setting it larger than
the figure's dpi is unlikely to help.
**close_fig** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave the plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotPseudoSection()
>>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
"""
if fig_dpi == None:
fig_dpi = self.fig_dpi
if os.path.isdir(save_path) == False:
try:
os.mkdir(save_path)
except OSError:
raise IOError('Need to input a correct directory path')
for fig in self.fig_list:
per = fig.canvas.get_window_title()
save_fn = os.path.join(save_path, 'PT_DepthSlice_{0}s.{1}'.format(
per, file_format))
fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
if close_fig == 'y':
plt.close(fig)
else:
pass
self.fig_fn = save_fn
print 'Saved figure to: '+self.fig_fn
#==============================================================================
# plot depth slices
#==============================================================================
class moved_PlotDepthSlice(object):
"""
Plots depth slices of resistivity model
:Example: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> mfn = r"/home/MT/ws3dinv/Inv1/Test_model.00"
>>> sfn = r"/home/MT/ws3dinv/Inv1/WSStationLocations.txt"
>>> # plot just first layer to check the formating
>>> pds = ws.PlotDepthSlice(model_fn=mfn, station_fn=sfn,
>>> ... depth_index=0, save_plots='n')
>>> #move color bar up
>>> pds.cb_location
(0.64500000000000002, 0.14999999999999997, 0.3, 0.025)
>>> pds.cb_location = (.645, .175, .3, .025)
>>> pds.redraw_plot()
>>> #looks good now plot all depth slices and save them to a folder
>>> pds.save_path = r"/home/MT/ws3dinv/Inv1/DepthSlices"
>>> pds.depth_index = None
>>> pds.save_plots = 'y'
>>> pds.redraw_plot()
======================= ===================================================
Attributes Description
======================= ===================================================
cb_location location of color bar (x, y, width, height)
*default* is None, automatically locates
cb_orientation [ 'vertical' | 'horizontal' ]
*default* is horizontal
cb_pad padding between axes and colorbar
*default* is None
cb_shrink percentage to shrink colorbar by
*default* is None
climits (min, max) of resistivity color on log scale
*default* is (0, 4)
cmap name of color map *default* is 'jet_r'
data_fn full path to data file
depth_index integer value of depth slice index, shallowest
layer is 0
dscale scaling parameter depending on map_scale
ew_limits (min, max) plot limits in e-w direction in
map_scale units. *default* is None, sets viewing
area to the station area
fig_aspect aspect ratio of plot. *default* is 1
fig_dpi resolution of figure in dots-per-inch. *default* is
300
fig_list list of matplotlib.figure instances for each
depth slice
fig_size [width, height] in inches of figure size
*default* is [6, 6]
font_size size of ticklabel font in points, labels are
font_size+2. *default* is 7
grid_east relative location of grid nodes in e-w direction
in map_scale units
grid_north relative location of grid nodes in n-s direction
in map_scale units
grid_z relative location of grid nodes in z direction
in map_scale units
initial_fn full path to initial file
map_scale [ 'km' | 'm' ] distance units of map. *default* is
km
mesh_east np.meshgrid(grid_east, grid_north, indexing='ij')
mesh_north np.meshgrid(grid_east, grid_north, indexing='ij')
model_fn full path to model file
nodes_east relative distance between nodes in e-w direction
in map_scale units
nodes_north relative distance between nodes in n-s direction
in map_scale units
nodes_z relative distance between nodes in z direction
in map_scale units
ns_limits (min, max) plot limits in n-s direction in
map_scale units. *default* is None, sets viewing
area to the station area
plot_grid [ 'y' | 'n' ] 'y' to plot mesh grid lines.
*default* is 'n'
plot_yn [ 'y' | 'n' ] 'y' to plot on instantiation
res_model np.ndarray(n_north, n_east, n_vertical) of
model resistivity values in linear scale
save_path path to save figures to
save_plots [ 'y' | 'n' ] 'y' to save depth slices to save_path
station_east location of stations in east direction in
map_scale units
station_fn full path to station locations file
station_names station names
station_north location of station in north direction in
map_scale units
subplot_bottom distance between axes and bottom of figure window
subplot_left distance between axes and left of figure window
subplot_right distance between axes and right of figure window
subplot_top distance between axes and top of figure window
title title of plot *default* is depth of slice
xminorticks location of xminorticks
yminorticks location of yminorticks
======================= ===================================================
"""
def __init__(self, model_fn=None, data_fn=None, **kwargs):
self.model_fn = model_fn
self.data_fn = data_fn
self.initial_fn = kwargs.pop('initial_fn', None)
self.save_path = kwargs.pop('save_path', None)
if self.model_fn is not None and self.save_path is None:
self.save_path = os.path.dirname(self.model_fn)
elif self.initial_fn is not None and self.save_path is None:
self.save_path = os.path.dirname(self.initial_fn)
if self.save_path is not None:
if not os.path.exists(self.save_path):
os.mkdir(self.save_path)
self.save_plots = kwargs.pop('save_plots', 'y')
self.depth_index = kwargs.pop('depth_index', None)
self.map_scale = kwargs.pop('map_scale', 'km')
#make map scale
if self.map_scale=='km':
self.dscale=1000.
elif self.map_scale=='m':
self.dscale=1.
self.ew_limits = kwargs.pop('ew_limits', None)
self.ns_limits = kwargs.pop('ns_limits', None)
self.plot_grid = kwargs.pop('plot_grid', 'n')
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.fig_aspect = kwargs.pop('fig_aspect', 1)
self.title = kwargs.pop('title', 'on')
self.fig_list = []
self.xminorticks = kwargs.pop('xminorticks', 1000)
self.yminorticks = kwargs.pop('yminorticks', 1000)
self.climits = kwargs.pop('climits', (0,4))
self.cmap = kwargs.pop('cmap', 'jet_r')
self.font_size = kwargs.pop('font_size', 8)
self.cb_shrink = kwargs.pop('cb_shrink', .8)
self.cb_pad = kwargs.pop('cb_pad', .01)
self.cb_orientation = kwargs.pop('cb_orientation', 'horizontal')
self.cb_location = kwargs.pop('cb_location', None)
self.subplot_right = .99
self.subplot_left = .085
self.subplot_top = .92
self.subplot_bottom = .1
self.res_model = None
self.grid_east = None
self.grid_north = None
self.grid_z = None
self.nodes_east = None
self.nodes_north = None
self.nodes_z = None
self.mesh_east = None
self.mesh_north = None
self.station_east = None
self.station_north = None
self.station_names = None
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn == 'y':
self.plot()
def read_files(self):
"""
read in the files to get appropriate information
"""
#--> read in model file
if self.model_fn is not None:
if os.path.isfile(self.model_fn) == True:
md_model = Model()
md_model.read_model_file(self.model_fn)
self.res_model = md_model.res_model
self.grid_east = md_model.grid_east/self.dscale
self.grid_north = md_model.grid_north/self.dscale
self.grid_z = md_model.grid_z/self.dscale
self.nodes_east = md_model.nodes_east/self.dscale
self.nodes_north = md_model.nodes_north/self.dscale
self.nodes_z = md_model.nodes_z/self.dscale
else:
raise mtex.MTpyError_file_handling(
'{0} does not exist, check path'.format(self.model_fn))
#--> read in data file to get station locations
if self.data_fn is not None:
if os.path.isfile(self.data_fn) == True:
md_data = Data()
md_data.read_data_file(self.data_fn)
self.station_east = md_data.station_locations['rel_east']/self.dscale
self.station_north = md_data.station_locations['rel_north']/self.dscale
self.station_names = md_data.station_locations['station']
else:
print 'Could not find data file {0}'.format(self.data_fn)
def plot(self):
"""
plot depth slices
"""
#--> get information from files
self.read_files()
fdict = {'size':self.font_size+2, 'weight':'bold'}
cblabeldict={-2:'$10^{-3}$',-1:'$10^{-1}$',0:'$10^{0}$',1:'$10^{1}$',
2:'$10^{2}$',3:'$10^{3}$',4:'$10^{4}$',5:'$10^{5}$',
6:'$10^{6}$',7:'$10^{7}$',8:'$10^{8}$'}
#create a list of depth slices to plot
if self.depth_index == None:
zrange = range(self.grid_z.shape[0])
elif type(self.depth_index) is int:
zrange = [self.depth_index]
elif type(self.depth_index) is list or \
type(self.depth_index) is np.ndarray:
zrange = self.depth_index
#set the limits of the plot
if self.ew_limits == None:
if self.station_east is not None:
xlimits = (np.floor(self.station_east.min()),
np.ceil(self.station_east.max()))
else:
xlimits = (self.grid_east[5], self.grid_east[-5])
else:
xlimits = self.ew_limits
if self.ns_limits == None:
if self.station_north is not None:
ylimits = (np.floor(self.station_north.min()),
np.ceil(self.station_north.max()))
else:
ylimits = (self.grid_north[5], self.grid_north[-5])
else:
ylimits = self.ns_limits
#make a mesh grid of north and east
self.mesh_east, self.mesh_north = np.meshgrid(self.grid_east,
self.grid_north,
indexing='ij')
plt.rcParams['font.size'] = self.font_size
#--> plot depths into individual figures
for ii in zrange:
depth = '{0:.3f} ({1})'.format(self.grid_z[ii],
self.map_scale)
fig = plt.figure(depth, figsize=self.fig_size, dpi=self.fig_dpi)
plt.clf()
ax1 = fig.add_subplot(1, 1, 1, aspect=self.fig_aspect)
plot_res = np.log10(self.res_model[:, :, ii].T)
mesh_plot = ax1.pcolormesh(self.mesh_east,
self.mesh_north,
plot_res,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
#plot the stations
if self.station_east is not None:
for ee, nn in zip(self.station_east, self.station_north):
ax1.text(ee, nn, '*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':5, 'weight':'bold'})
#set axis properties
ax1.set_xlim(xlimits)
ax1.set_ylim(ylimits)
ax1.xaxis.set_minor_locator(MultipleLocator(self.xminorticks/self.dscale))
ax1.yaxis.set_minor_locator(MultipleLocator(self.yminorticks/self.dscale))
ax1.set_ylabel('Northing ('+self.map_scale+')',fontdict=fdict)
ax1.set_xlabel('Easting ('+self.map_scale+')',fontdict=fdict)
ax1.set_title('Depth = {0}'.format(depth), fontdict=fdict)
#plot the grid if desired
if self.plot_grid == 'y':
east_line_xlist = []
east_line_ylist = []
for xx in self.grid_east:
east_line_xlist.extend([xx, xx])
east_line_xlist.append(None)
east_line_ylist.extend([self.grid_north.min(),
self.grid_north.max()])
east_line_ylist.append(None)
ax1.plot(east_line_xlist,
east_line_ylist,
lw=.25,
color='k')
north_line_xlist = []
north_line_ylist = []
for yy in self.grid_north:
north_line_xlist.extend([self.grid_east.min(),
self.grid_east.max()])
north_line_xlist.append(None)
north_line_ylist.extend([yy, yy])
north_line_ylist.append(None)
ax1.plot(north_line_xlist,
north_line_ylist,
lw=.25,
color='k')
#plot the colorbar
if self.cb_location is None:
if self.cb_orientation == 'horizontal':
self.cb_location = (ax1.axes.figbox.bounds[3]-.225,
ax1.axes.figbox.bounds[1]+.05,.3,.025)
elif self.cb_orientation == 'vertical':
self.cb_location = ((ax1.axes.figbox.bounds[2]-.15,
ax1.axes.figbox.bounds[3]-.21,.025,.3))
ax2 = fig.add_axes(self.cb_location)
cb = mcb.ColorbarBase(ax2,
cmap=self.cmap,
norm=Normalize(vmin=self.climits[0],
vmax=self.climits[1]),
orientation=self.cb_orientation)
if self.cb_orientation == 'horizontal':
cb.ax.xaxis.set_label_position('top')
cb.ax.xaxis.set_label_coords(.5,1.3)
elif self.cb_orientation == 'vertical':
cb.ax.yaxis.set_label_position('right')
cb.ax.yaxis.set_label_coords(1.25,.5)
cb.ax.yaxis.tick_left()
cb.ax.tick_params(axis='y',direction='in')
cb.set_label('Resistivity ($\Omega \cdot$m)',
fontdict={'size':self.font_size+1})
cb.set_ticks(np.arange(self.climits[0],self.climits[1]+1))
cb.set_ticklabels([cblabeldict[cc]
for cc in np.arange(self.climits[0],
self.climits[1]+1)])
self.fig_list.append(fig)
#--> save plots to a common folder
if self.save_plots == 'y':
fig.savefig(os.path.join(self.save_path,
"Depth_{}_{:.4f}.png".format(ii, self.grid_z[ii])),
dpi=self.fig_dpi, bbox_inches='tight')
fig.clear()
plt.close()
else:
pass
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
for fig in self.fig_list:
plt.close(fig)
self.plot()
def update_plot(self, fig):
"""
update any parameters that were changed using the built-in draw from
canvas.
Use this if you change any of the .fig or axes properties
:Example: ::
>>> # to change the grid lines to only be on the major ticks
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotAllResponses()
>>> [ax.grid(True, which='major') for ax in [ps1.axrte,ps1.axtep]]
>>> ps1.update_plot()
"""
fig.canvas.draw()
def __str__(self):
"""
rewrite the string builtin to give a useful message
"""
return ("Plots depth slices of model from WS3DINV")
#==============================================================================
# plot slices
#==============================================================================
class PlotSlices(object):
"""
plot all slices and be able to scroll through the model
:Example: ::
>>> import mtpy.modeling.modem_new as modem
>>> mfn = r"/home/modem/Inv1/Modular_NLCG_100.rho"
>>> dfn = r"/home/modem/Inv1/ModEM_data.dat"
>>> pds = modem.PlotSlices(model_fn=mfn, data_fn=dfn)
======================= ===================================================
Buttons Description
======================= ===================================================
'e' moves n-s slice east by one model block
'w' moves n-s slice west by one model block
'n' moves e-w slice north by one model block
'm' moves e-w slice south by one model block
'd' moves depth slice down by one model block
'u' moves depth slice up by one model block
======================= ===================================================
======================= ===================================================
Attributes Description
======================= ===================================================
ax_en matplotlib.axes instance for depth slice map view
ax_ez matplotlib.axes instance for e-w slice
ax_map matplotlib.axes instance for location map
ax_nz matplotlib.axes instance for n-s slice
climits (min , max) color limits on resistivity in log
scale. *default* is (0, 4)
cmap                    name of color map for resistivity.
*default* is 'jet_r'
data_fn full path to data file name
dscale scaling parameter depending on map_scale
east_line_xlist list of line nodes of east grid for faster plotting
east_line_ylist list of line nodes of east grid for faster plotting
ew_limits (min, max) limits of e-w in map_scale units
*default* is None and scales to station area
fig matplotlib.figure instance for figure
fig_aspect aspect ratio of plots. *default* is 1
fig_dpi resolution of figure in dots-per-inch
*default* is 300
fig_num figure instance number
fig_size [width, height] of figure window.
*default* is [6,6]
font_dict dictionary of font keywords, internally created
font_size               size of ticklabels in points, axes labels are
font_size+2. *default* is 7
grid_east relative location of grid nodes in e-w direction
in map_scale units
grid_north relative location of grid nodes in n-s direction
in map_scale units
grid_z relative location of grid nodes in z direction
in map_scale units
index_east index value of grid_east being plotted
index_north index value of grid_north being plotted
index_vertical index value of grid_z being plotted
initial_fn full path to initial file
key_press matplotlib.canvas.connect instance
map_scale [ 'm' | 'km' ] scale of map. *default* is km
mesh_east np.meshgrid(grid_east, grid_north)[0]
mesh_en_east np.meshgrid(grid_east, grid_north)[0]
mesh_en_north np.meshgrid(grid_east, grid_north)[1]
mesh_ez_east np.meshgrid(grid_east, grid_z)[0]
mesh_ez_vertical np.meshgrid(grid_east, grid_z)[1]
mesh_north np.meshgrid(grid_east, grid_north)[1]
mesh_nz_north np.meshgrid(grid_north, grid_z)[0]
mesh_nz_vertical np.meshgrid(grid_north, grid_z)[1]
model_fn full path to model file
ms size of station markers in points. *default* is 2
nodes_east              relative distance between nodes in e-w direction
in map_scale units
nodes_north             relative distance between nodes in n-s direction
in map_scale units
nodes_z                 relative distance between nodes in z direction
in map_scale units
north_line_xlist list of line nodes north grid for faster plotting
north_line_ylist list of line nodes north grid for faster plotting
ns_limits (min, max) limits of plots in n-s direction
*default* is None, set viewing area to station area
plot_yn [ 'y' | 'n' ] 'y' to plot on instantiation
*default* is 'y'
res_model np.ndarray(n_north, n_east, n_vertical) of
model resistivity values in linear scale
station_color color of station marker. *default* is black
station_dict_east location of stations for each east grid row
station_dict_north location of stations for each north grid row
station_east location of stations in east direction
station_fn full path to station file
station_font_color color of station label
station_font_pad padding between station marker and label
station_font_rotation angle of station label
station_font_size font size of station label
station_font_weight weight of font for station label
station_id [min, max] index values for station labels
station_marker station marker
station_names name of stations
station_north location of stations in north direction
subplot_bottom distance between axes and bottom of figure window
subplot_hspace distance between subplots in vertical direction
subplot_left distance between axes and left of figure window
subplot_right distance between axes and right of figure window
subplot_top distance between axes and top of figure window
subplot_wspace distance between subplots in horizontal direction
title title of plot
z_limits                (min, max) limits in vertical direction in map_scale units
======================= ===================================================
"""
def __init__(self, model_fn, data_fn=None, **kwargs):
self.model_fn = model_fn
self.data_fn = data_fn
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.fig_aspect = kwargs.pop('fig_aspect', 1)
self.title = kwargs.pop('title', 'on')
self.font_size = kwargs.pop('font_size', 7)
self.subplot_wspace = .20
self.subplot_hspace = .30
self.subplot_right = .98
self.subplot_left = .08
self.subplot_top = .97
self.subplot_bottom = .1
self.index_vertical = kwargs.pop('index_vertical', 0)
self.index_east = kwargs.pop('index_east', 0)
self.index_north = kwargs.pop('index_north', 0)
self.cmap = kwargs.pop('cmap', 'jet_r')
self.climits = kwargs.pop('climits', (0, 4))
self.map_scale = kwargs.pop('map_scale', 'km')
#make map scale
if self.map_scale=='km':
self.dscale=1000.
elif self.map_scale=='m':
self.dscale=1.
self.ew_limits = kwargs.pop('ew_limits', None)
self.ns_limits = kwargs.pop('ns_limits', None)
self.z_limits = kwargs.pop('z_limits', None)
self.res_model = None
self.grid_east = None
self.grid_north = None
self.grid_z = None
self.nodes_east = None
self.nodes_north = None
self.nodes_z = None
self.mesh_east = None
self.mesh_north = None
self.station_east = None
self.station_north = None
self.station_names = None
self.station_id = kwargs.pop('station_id', None)
self.station_font_size = kwargs.pop('station_font_size', 8)
self.station_font_pad = kwargs.pop('station_font_pad', 1.0)
self.station_font_weight = kwargs.pop('station_font_weight', 'bold')
self.station_font_rotation = kwargs.pop('station_font_rotation', 60)
self.station_font_color = kwargs.pop('station_font_color', 'k')
self.station_marker = kwargs.pop('station_marker',
r"$\blacktriangledown$")
self.station_color = kwargs.pop('station_color', 'k')
self.ms = kwargs.pop('ms', 10)
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn == 'y':
self.plot()
def read_files(self):
"""
read in the files to get appropriate information
"""
#--> read in model file
if self.model_fn is not None:
if os.path.isfile(self.model_fn) == True:
md_model = Model()
md_model.read_model_file(self.model_fn)
self.res_model = md_model.res_model
self.grid_east = md_model.grid_east/self.dscale
self.grid_north = md_model.grid_north/self.dscale
self.grid_z = md_model.grid_z/self.dscale
self.nodes_east = md_model.nodes_east/self.dscale
self.nodes_north = md_model.nodes_north/self.dscale
self.nodes_z = md_model.nodes_z/self.dscale
else:
raise mtex.MTpyError_file_handling(
'{0} does not exist, check path'.format(self.model_fn))
#--> read in data file to get station locations
if self.data_fn is not None:
if os.path.isfile(self.data_fn) == True:
md_data = Data()
md_data.read_data_file(self.data_fn)
self.station_east = md_data.station_locations['rel_east']/self.dscale
self.station_north = md_data.station_locations['rel_north']/self.dscale
self.station_names = md_data.station_locations['station']
else:
print 'Could not find data file {0}'.format(self.data_fn)
def plot(self):
"""
plot:
east vs. vertical,
north vs. vertical,
east vs. north
"""
self.read_files()
self.get_station_grid_locations()
print "=============== ==============================================="
print " Buttons Description "
print "=============== ==============================================="
print " 'e' moves n-s slice east by one model block"
print " 'w' moves n-s slice west by one model block"
print " 'n' moves e-w slice north by one model block"
print " 'm' moves e-w slice south by one model block"
print " 'd' moves depth slice down by one model block"
print " 'u' moves depth slice up by one model block"
print "=============== ==============================================="
self.font_dict = {'size':self.font_size+2, 'weight':'bold'}
#--> set default font size
plt.rcParams['font.size'] = self.font_size
#set the limits of the plot
if self.ew_limits == None:
if self.station_east is not None:
self.ew_limits = (np.floor(self.station_east.min()),
np.ceil(self.station_east.max()))
else:
self.ew_limits = (self.grid_east[5], self.grid_east[-5])
if self.ns_limits == None:
if self.station_north is not None:
self.ns_limits = (np.floor(self.station_north.min()),
np.ceil(self.station_north.max()))
else:
self.ns_limits = (self.grid_north[5], self.grid_north[-5])
if self.z_limits == None:
depth_limit = max([(abs(self.ew_limits[0])+abs(self.ew_limits[1])),
(abs(self.ns_limits[0])+abs(self.ns_limits[1]))])
self.z_limits = (-5000/self.dscale, depth_limit)
self.fig = plt.figure(self.fig_num, figsize=self.fig_size,
dpi=self.fig_dpi)
plt.clf()
gs = gridspec.GridSpec(2, 2,
wspace=self.subplot_wspace,
left=self.subplot_left,
top=self.subplot_top,
bottom=self.subplot_bottom,
right=self.subplot_right,
hspace=self.subplot_hspace)
#make subplots
self.ax_ez = self.fig.add_subplot(gs[0, 0], aspect=self.fig_aspect)
self.ax_nz = self.fig.add_subplot(gs[1, 1], aspect=self.fig_aspect)
self.ax_en = self.fig.add_subplot(gs[1, 0], aspect=self.fig_aspect)
self.ax_map = self.fig.add_subplot(gs[0, 1])
#make grid meshes being sure the indexing is correct
self.mesh_ez_east, self.mesh_ez_vertical = np.meshgrid(self.grid_east,
self.grid_z,
indexing='ij')
self.mesh_nz_north, self.mesh_nz_vertical = np.meshgrid(self.grid_north,
self.grid_z,
indexing='ij')
self.mesh_en_east, self.mesh_en_north = np.meshgrid(self.grid_east,
self.grid_north,
indexing='ij')
#--> plot east vs vertical
self._update_ax_ez()
#--> plot north vs vertical
self._update_ax_nz()
#--> plot east vs north
self._update_ax_en()
#--> plot the grid as a map view
self._update_map()
#plot color bar
cbx = mcb.make_axes(self.ax_map, fraction=.15, shrink=.75, pad = .15)
cb = mcb.ColorbarBase(cbx[0],
cmap=self.cmap,
norm=Normalize(vmin=self.climits[0],
vmax=self.climits[1]))
cb.ax.yaxis.set_label_position('right')
cb.ax.yaxis.set_label_coords(1.25,.5)
cb.ax.yaxis.tick_left()
cb.ax.tick_params(axis='y',direction='in')
cb.set_label('Resistivity ($\Omega \cdot$m)',
fontdict={'size':self.font_size+1})
cb.set_ticks(np.arange(np.ceil(self.climits[0]),
np.floor(self.climits[1]+1)))
cblabeldict={-2:'$10^{-3}$',-1:'$10^{-1}$',0:'$10^{0}$',1:'$10^{1}$',
2:'$10^{2}$',3:'$10^{3}$',4:'$10^{4}$',5:'$10^{5}$',
6:'$10^{6}$',7:'$10^{7}$',8:'$10^{8}$'}
cb.set_ticklabels([cblabeldict[cc]
for cc in np.arange(np.ceil(self.climits[0]),
np.floor(self.climits[1]+1))])
plt.show()
self.key_press = self.fig.canvas.mpl_connect('key_press_event',
self.on_key_press)
def on_key_press(self, event):
"""
on a key press change the slices
"""
key_press = event.key
if key_press == 'n':
if self.index_north == self.grid_north.shape[0]:
print 'Already at northern most grid cell'
else:
self.index_north += 1
if self.index_north > self.grid_north.shape[0]:
self.index_north = self.grid_north.shape[0]
self._update_ax_ez()
self._update_map()
if key_press == 'm':
if self.index_north == 0:
print 'Already at southern most grid cell'
else:
self.index_north -= 1
if self.index_north < 0:
self.index_north = 0
self._update_ax_ez()
self._update_map()
if key_press == 'e':
if self.index_east == self.grid_east.shape[0]:
print 'Already at eastern most grid cell'
else:
self.index_east += 1
if self.index_east > self.grid_east.shape[0]:
self.index_east = self.grid_east.shape[0]
self._update_ax_nz()
self._update_map()
if key_press == 'w':
if self.index_east == 0:
print 'Already at western most grid cell'
else:
self.index_east -= 1
if self.index_east < 0:
self.index_east = 0
self._update_ax_nz()
self._update_map()
if key_press == 'd':
if self.index_vertical == self.grid_z.shape[0]:
print 'Already at deepest grid cell'
else:
self.index_vertical += 1
if self.index_vertical > self.grid_z.shape[0]:
self.index_vertical = self.grid_z.shape[0]
self._update_ax_en()
print 'Depth = {0:.5g} ({1})'.format(self.grid_z[self.index_vertical],
self.map_scale)
if key_press == 'u':
if self.index_vertical == 0:
print 'Already at surface grid cell'
else:
self.index_vertical -= 1
if self.index_vertical < 0:
self.index_vertical = 0
self._update_ax_en()
print 'Depth = {0:.5g} ({1})'.format(self.grid_z[self.index_vertical],
self.map_scale)
def _update_ax_ez(self):
"""
update east vs vertical plot
"""
self.ax_ez.cla()
plot_ez = np.log10(self.res_model[self.index_north, :, :])
self.ax_ez.pcolormesh(self.mesh_ez_east,
self.mesh_ez_vertical,
plot_ez,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
#plot stations
for sx in self.station_dict_north[self.grid_north[self.index_north]]:
self.ax_ez.text(sx,
0,
self.station_marker,
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size':self.ms,
'color':self.station_color})
self.ax_ez.set_xlim(self.ew_limits)
self.ax_ez.set_ylim(self.z_limits[1], self.z_limits[0])
self.ax_ez.set_ylabel('Depth ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_ez.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.fig.canvas.draw()
self._update_map()
def _update_ax_nz(self):
"""
update north vs vertical plot
"""
self.ax_nz.cla()
plot_nz = np.log10(self.res_model[:, self.index_east, :])
self.ax_nz.pcolormesh(self.mesh_nz_north,
self.mesh_nz_vertical,
plot_nz,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
#plot stations
for sy in self.station_dict_east[self.grid_east[self.index_east]]:
self.ax_nz.text(sy,
0,
self.station_marker,
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size':self.ms,
'color':self.station_color})
self.ax_nz.set_xlim(self.ns_limits)
self.ax_nz.set_ylim(self.z_limits[1], self.z_limits[0])
self.ax_nz.set_xlabel('Northing ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_nz.set_ylabel('Depth ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.fig.canvas.draw()
self._update_map()
def _update_ax_en(self):
"""
update east vs north depth-slice plot
"""
self.ax_en.cla()
plot_en = np.log10(self.res_model[:, :, self.index_vertical].T)
self.ax_en.pcolormesh(self.mesh_en_east,
self.mesh_en_north,
plot_en,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
self.ax_en.set_xlim(self.ew_limits)
self.ax_en.set_ylim(self.ns_limits)
self.ax_en.set_ylabel('Northing ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_en.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=self.font_dict)
#--> plot the stations
if self.station_east is not None:
for ee, nn in zip(self.station_east, self.station_north):
self.ax_en.text(ee, nn, '*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':5, 'weight':'bold'})
self.fig.canvas.draw()
self._update_map()
def _update_map(self):
self.ax_map.cla()
self.east_line_xlist = []
self.east_line_ylist = []
for xx in self.grid_east:
self.east_line_xlist.extend([xx, xx])
self.east_line_xlist.append(None)
self.east_line_ylist.extend([self.grid_north.min(),
self.grid_north.max()])
self.east_line_ylist.append(None)
self.ax_map.plot(self.east_line_xlist,
self.east_line_ylist,
lw=.25,
color='k')
self.north_line_xlist = []
self.north_line_ylist = []
for yy in self.grid_north:
self.north_line_xlist.extend([self.grid_east.min(),
self.grid_east.max()])
self.north_line_xlist.append(None)
self.north_line_ylist.extend([yy, yy])
self.north_line_ylist.append(None)
self.ax_map.plot(self.north_line_xlist,
self.north_line_ylist,
lw=.25,
color='k')
#--> e-w indication line
self.ax_map.plot([self.grid_east.min(),
self.grid_east.max()],
[self.grid_north[self.index_north+1],
self.grid_north[self.index_north+1]],
lw=1,
color='g')
#--> n-s indication line
self.ax_map.plot([self.grid_east[self.index_east+1],
self.grid_east[self.index_east+1]],
[self.grid_north.min(),
self.grid_north.max()],
lw=1,
color='b')
#--> plot the stations
if self.station_east is not None:
for ee, nn in zip(self.station_east, self.station_north):
self.ax_map.text(ee, nn, '*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':5, 'weight':'bold'})
self.ax_map.set_xlim(self.ew_limits)
self.ax_map.set_ylim(self.ns_limits)
self.ax_map.set_ylabel('Northing ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_map.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=self.font_dict)
#annotate the depth of the current slice
self.ax_map.text(self.ew_limits[0]*.95, self.ns_limits[1]*.95,
'{0:.5g} ({1})'.format(self.grid_z[self.index_vertical],
self.map_scale),
horizontalalignment='left',
verticalalignment='top',
bbox={'facecolor': 'white'},
fontdict=self.font_dict)
self.fig.canvas.draw()
def get_station_grid_locations(self):
"""
get the grid line on which a station resides for plotting
"""
self.station_dict_east = dict([(gx, []) for gx in self.grid_east])
self.station_dict_north = dict([(gy, []) for gy in self.grid_north])
if self.station_east is not None:
for ss, sx in enumerate(self.station_east):
gx = np.where(self.grid_east <= sx)[0][-1]
self.station_dict_east[self.grid_east[gx]].append(self.station_north[ss])
for ss, sy in enumerate(self.station_north):
gy = np.where(self.grid_north <= sy)[0][-1]
self.station_dict_north[self.grid_north[gy]].append(self.station_east[ss])
else:
return
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
plt.close(self.fig)
self.plot()
def save_figure(self, save_fn=None, fig_dpi=None, file_format='pdf',
orientation='landscape', close_fig='y'):
"""
save_figure will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_PhaseTensor.file_format
* full path -> file will be saved to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
*default* is landscape
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the dpi will be that at
which the figure was made. I don't think that
it can be larger than dpi of the figure.
**close_fig** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotPseudoSection()
>>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
"""
if fig_dpi == None:
fig_dpi = self.fig_dpi
if os.path.isdir(save_fn) == False:
file_format = save_fn[-3:]
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
else:
save_fn = os.path.join(save_fn, '_E{0}_N{1}_Z{2}.{3}'.format(
self.index_east, self.index_north,
self.index_vertical, file_format))
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
if close_fig == 'y':
plt.clf()
plt.close(self.fig)
else:
pass
self.fig_fn = save_fn
print 'Saved figure to: '+self.fig_fn
#==============================================================================
# plot rms maps
#==============================================================================
class moved_Plot_RMS_Maps(object):
"""
plots the RMS as (data-model)/(error) in map view for all components
of the data file. Gets this information from the .res file output
by ModEM.
Arguments:
------------------
**residual_fn** : string
full path to .res file
=================== =======================================================
Attributes Description
=================== =======================================================
fig matplotlib.figure instance for a single plot
fig_dpi dots-per-inch resolution of figure *default* is 200
fig_num number of fig instance *default* is 1
fig_size size of figure in inches [width, height]
*default* is [7,6]
font_size font size of tick labels, axis labels are +2
*default* is 8
marker marker style for station rms,
see matplotlib.line for options,
*default* is 's' --> square
marker_size size of marker in points. *default* is 10
pad_x padding in map units from edge of the axis to stations
at the extremities in longitude.
*default* is 1/2 tick_locator
pad_y padding in map units from edge of the axis to stations
at the extremities in latitude.
*default* is 1/2 tick_locator
period_index index of the period you want to plot according to
self.residual.period_list. *default* is 1
plot_yn [ 'y' | 'n' ] default is 'y' to plot on instantiation
plot_z_list internal variable for plotting
residual modem.Data instance that holds all the information
from the residual_fn given
residual_fn full path to .res file
rms_cmap matplotlib.cm object for coloring the markers
rms_cmap_dict dictionary of color values for rms_cmap
rms_max maximum rms to plot. *default* is 5.0
rms_min minimum rms to plot. *default* is 1.0
save_path path to save figures to. *default* is directory of
residual_fn
subplot_bottom spacing from axis to bottom of figure canvas.
*default* is .1
subplot_hspace horizontal spacing between subplots.
*default* is .1
subplot_left spacing from axis to left of figure canvas.
*default* is .1
subplot_right spacing from axis to right of figure canvas.
*default* is .9
subplot_top spacing from axis to top of figure canvas.
*default* is .95
subplot_vspace vertical spacing between subplots.
*default* is .01
tick_locator increment for x and y major ticks. *default* is
limits/5
=================== =======================================================
=================== =======================================================
Methods Description
=================== =======================================================
plot plot rms maps for a single period
plot_loop loop over all frequencies and save figures to save_path
read_residual_fn read in residual_fn
redraw_plot after updating attributes call redraw_plot to
redraw the plot
save_figure save the figure to a file
=================== =======================================================
:Example: ::
>>> import mtpy.modeling.modem_new as modem
>>> rms_plot = Plot_RMS_Maps(r"/home/ModEM/Inv1/mb_NLCG_030.res")
>>> # change some attributes
>>> rms_plot.fig_size = [6, 4]
>>> rms_plot.rms_max = 3
>>> rms_plot.redraw_plot()
>>> # happy with the look now loop over all periods
>>> rms_plot.plot_loop()
"""
def __init__(self, residual_fn, **kwargs):
self.residual_fn = residual_fn
self.residual = None
self.save_path = kwargs.pop('save_path', os.path.dirname(self.residual_fn))
self.period_index = kwargs.pop('period_index', 0)
self.subplot_left = kwargs.pop('subplot_left', .1)
self.subplot_right = kwargs.pop('subplot_right', .9)
self.subplot_top = kwargs.pop('subplot_top', .95)
self.subplot_bottom = kwargs.pop('subplot_bottom', .1)
self.subplot_hspace = kwargs.pop('subplot_hspace', .1)
self.subplot_vspace = kwargs.pop('subplot_vspace', .01)
self.font_size = kwargs.pop('font_size', 8)
self.fig_size = kwargs.pop('fig_size', [7.75, 6.75])
self.fig_dpi = kwargs.pop('fig_dpi', 200)
self.fig_num = kwargs.pop('fig_num', 1)
self.fig = None
self.marker = kwargs.pop('marker', 's')
self.marker_size = kwargs.pop('marker_size', 10)
self.rms_max = kwargs.pop('rms_max', 5)
self.rms_min = kwargs.pop('rms_min', 0)
self.tick_locator = kwargs.pop('tick_locator', None)
self.pad_x = kwargs.pop('pad_x', None)
self.pad_y = kwargs.pop('pad_y', None)
self.plot_yn = kwargs.pop('plot_yn', 'y')
# colormap for rms, goes white to black from 0 to rms max and
# red below 1 to show where the data is being over fit
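# (Each channel entry below is a sequence of (x, y_below, y_above) anchors in
# the matplotlib.colors.LinearSegmentedColormap segmentdata format, so the
# map runs red at rms_min, white near rms ~ 1, and black at rms_max.)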
self.rms_cmap_dict = {'red':((0.0, 1.0, 1.0),
(0.2, 1.0, 1.0),
(1.0, 0.0, 0.0)),
'green':((0.0, 0.0, 0.0),
(0.2, 1.0, 1.0),
(1.0, 0.0, 0.0)),
'blue':((0.0, 0.0, 0.0),
(0.2, 1.0, 1.0),
(1.0, 0.0, 0.0))}
self.rms_cmap = colors.LinearSegmentedColormap('rms_cmap',
self.rms_cmap_dict,
256)
self.plot_z_list = [{'label':r'$Z_{xx}$', 'index':(0, 0), 'plot_num':1},
{'label':r'$Z_{xy}$', 'index':(0, 1), 'plot_num':2},
{'label':r'$Z_{yx}$', 'index':(1, 0), 'plot_num':3},
{'label':r'$Z_{yy}$', 'index':(1, 1), 'plot_num':4},
{'label':r'$T_{x}$', 'index':(0, 0), 'plot_num':5},
{'label':r'$T_{y}$', 'index':(0, 1), 'plot_num':6}]
if self.plot_yn == 'y':
self.plot()
def read_residual_fn(self):
if self.residual is None:
self.residual = Data()
self.residual.read_data_file(self.residual_fn)
else:
pass
def plot(self):
"""
plot rms in map view
"""
self.read_residual_fn()
font_dict = {'size':self.font_size+2, 'weight':'bold'}
rms_1 = 1./self.rms_max
if self.tick_locator is None:
x_locator = np.round((self.residual.data_array['lon'].max()-
self.residual.data_array['lon'].min())/5, 2)
y_locator = np.round((self.residual.data_array['lat'].max()-
self.residual.data_array['lat'].min())/5, 2)
if x_locator > y_locator:
self.tick_locator = x_locator
elif x_locator < y_locator:
self.tick_locator = y_locator
if self.pad_x is None:
self.pad_x = self.tick_locator/2
if self.pad_y is None:
self.pad_y = self.tick_locator/2
plt.rcParams['font.size'] = self.font_size
plt.rcParams['figure.subplot.left'] = self.subplot_left
plt.rcParams['figure.subplot.right'] = self.subplot_right
plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
plt.rcParams['figure.subplot.top'] = self.subplot_top
plt.rcParams['figure.subplot.wspace'] = self.subplot_hspace
plt.rcParams['figure.subplot.hspace'] = self.subplot_vspace
self.fig = plt.figure(self.fig_num, self.fig_size, dpi=self.fig_dpi)
for p_dict in self.plot_z_list:
ax = self.fig.add_subplot(3, 2, p_dict['plot_num'], aspect='equal')
ii = p_dict['index'][0]
jj = p_dict['index'][1]
for r_arr in self.residual.data_array:
# calculate the rms as residual/error
if p_dict['plot_num'] < 5:
rms = r_arr['z'][self.period_index, ii, jj].__abs__()/\
(r_arr['z_err'][self.period_index, ii, jj].real)
else:
rms = r_arr['tip'][self.period_index, ii, jj].__abs__()/\
(r_arr['tip_err'][self.period_index, ii, jj].real)
#color appropriately
if np.nan_to_num(rms) == 0.0:
marker_color = (1, 1, 1)
marker = '.'
marker_size = .1
marker_edge_color = (1, 1, 1)
if rms > self.rms_max:
marker_color = (0, 0, 0)
marker = self.marker
marker_size = self.marker_size
marker_edge_color = (0, 0, 0)
elif rms >= 1 and rms <= self.rms_max:
r_color = 1-rms/self.rms_max+rms_1
marker_color = (r_color, r_color, r_color)
marker = self.marker
marker_size = self.marker_size
marker_edge_color = (0, 0, 0)
elif rms < 1:
r_color = 1-rms/self.rms_max
marker_color = (1, r_color, r_color)
marker = self.marker
marker_size = self.marker_size
marker_edge_color = (0, 0, 0)
ax.plot(r_arr['lon'], r_arr['lat'],
marker=marker,
ms=marker_size,
mec=marker_edge_color,
mfc=marker_color,
zorder=3)
if p_dict['plot_num'] == 1 or p_dict['plot_num'] == 3:
ax.set_ylabel('Latitude (deg)', fontdict=font_dict)
plt.setp(ax.get_xticklabels(), visible=False)
elif p_dict['plot_num'] == 2 or p_dict['plot_num'] == 4:
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
elif p_dict['plot_num'] == 6:
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_xlabel('Longitude (deg)', fontdict=font_dict)
else:
ax.set_xlabel('Longitude (deg)', fontdict=font_dict)
ax.set_ylabel('Latitude (deg)', fontdict=font_dict)
ax.text(self.residual.data_array['lon'].min()+.005-self.pad_x,
self.residual.data_array['lat'].max()-.005+self.pad_y,
p_dict['label'],
verticalalignment='top',
horizontalalignment='left',
bbox={'facecolor':'white'},
zorder=3)
ax.tick_params(direction='out')
ax.grid(zorder=0, color=(.75, .75, .75))
#[line.set_zorder(3) for line in ax.lines]
ax.set_xlim(self.residual.data_array['lon'].min()-self.pad_x,
self.residual.data_array['lon'].max()+self.pad_x)
ax.set_ylim(self.residual.data_array['lat'].min()-self.pad_y,
self.residual.data_array['lat'].max()+self.pad_y)
ax.xaxis.set_major_locator(MultipleLocator(self.tick_locator))
ax.yaxis.set_major_locator(MultipleLocator(self.tick_locator))
ax.xaxis.set_major_formatter(FormatStrFormatter('%2.2f'))
ax.yaxis.set_major_formatter(FormatStrFormatter('%2.2f'))
#cb_ax = mcb.make_axes(ax, orientation='vertical', fraction=.1)
cb_ax = self.fig.add_axes([self.subplot_right+.02, .225, .02, .45])
color_bar = mcb.ColorbarBase(cb_ax,
cmap=self.rms_cmap,
norm=colors.Normalize(vmin=self.rms_min,
vmax=self.rms_max),
orientation='vertical')
color_bar.set_label('RMS', fontdict=font_dict)
self.fig.suptitle('period = {0:.5g} (s)'.format(self.residual.period_list[self.period_index]),
fontdict={'size':self.font_size+3, 'weight':'bold'})
plt.show()
def redraw_plot(self):
plt.close('all')
self.plot()
def save_figure(self, save_path=None, save_fn_basename=None,
save_fig_dpi=None, fig_format='png', fig_close=True):
"""
save figure in the desired format
"""
if save_path is not None:
self.save_path = save_path
if save_fn_basename is not None:
pass
else:
save_fn_basename = '{0:02}_RMS_{1:.5g}_s.{2}'.format(self.period_index,
self.residual.period_list[self.period_index],
fig_format)
save_fn = os.path.join(self.save_path, save_fn_basename)
if save_fig_dpi is not None:
self.fig_dpi = save_fig_dpi
self.fig.savefig(save_fn, dpi=self.fig_dpi)
print 'saved file to {0}'.format(save_fn)
if fig_close == True:
plt.close('all')
def plot_loop(self, fig_format='png'):
"""
loop over all periods and save figures accordingly
"""
self.read_residual_fn()
for f_index in range(self.residual.period_list.shape[0]):
self.period_index = f_index
self.plot()
self.save_figure(fig_format=fig_format)
#==============================================================================
# Exceptions
#==============================================================================
class ModEMError(Exception):
pass
|
MTgeophysics/mtpy
|
legacy/modem_new.py
|
Python
|
gpl-3.0
| 313,405
|
[
"ParaView",
"VTK"
] |
a032e62073307ed7210b8ab25a56a399ab4b3c4fc3cc5d9ce8f9e93a8b8b9ee3
|
#
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
import unittest as ut
import espresso.System as es
import numpy as np
from espresso.interactions import LennardJonesInteraction
class NonBondedInteractionsTests(ut.TestCase):
# def __init__(self,particleId):
# self.pid=particleId
def intersMatch(self,inType,outType,inParams,outParams):
"""Check, if the interaction type set and gotten back as well as the bond
parameters set and gotten back match. Only check keys present in
inParams.
"""
if inType!=outType:
print("Type mismatch:",inType,outType)
return False
for k in inParams.keys():
if k not in outParams:
print(k,"missing from returned parameters")
return False
if outParams[k]!=inParams[k]:
print("Mismatch in parameter ",k,inParams[k],outParams[k])
return False
return True
def generateTestForNonBondedInteraction(_partType1,_partType2,_interClass,_params,_interName):
"""Generates test cases for checking interaction parameters set and gotten back
from Es actually match. Only keys which are present in _params are checked
1st and 2nd arg: Particle type ids to check on
3rd: Class of the interaction to test, i.e., FeneBond, HarmonicBond
4th: Interaction parameters as dictionary, i.e., {"k": 1., "r_0": 0.}
5th: Name of the interaction property to set (i.e. "lennardJones")
"""
partType1=_partType1
partType2=_partType2
interClass=_interClass
params=_params
interName=_interName
def func(self):
# This code is run at the execution of the generated function.
# It will use the state of the variables in the outer function,
# which was there, when the outer function was called
# Set parameters
getattr(es.nonBondedInter[partType1,partType2],interName).setParams(**params)
# Read them out again
outInter=getattr(es.nonBondedInter[partType1,partType2],interName)
outParams=outInter.getParams()
self.assertTrue(self.intersMatch(interClass,type(outInter),params,outParams), interClass(**params).typeName()+": value set and value gotten back differ for particle types "+str(partType1)+" and "+str(partType2)+": "+params.__str__()+" vs. "+outParams.__str__())
return func
test_lj1=generateTestForNonBondedInteraction(\
0,0,LennardJonesInteraction,\
{"epsilon":1.,"sigma":2.,"cutoff":3.,"shift":4.,"offset":5.,"min":7.},\
"lennardJones")
test_lj2=generateTestForNonBondedInteraction(\
0,0,LennardJonesInteraction,\
{"epsilon":1.3,"sigma":2.2,"cutoff":3.4,"shift":4.1,"offset":5.1,"min":7.1},\
"lennardJones")
test_lj3=generateTestForNonBondedInteraction(\
0,0,LennardJonesInteraction,\
{"epsilon":1.3,"sigma":2.2,"cutoff":3.4,"shift":4.1,"offset":5.1,"min":7.1},\
"lennardJones")
def test_forcecap(self):
es.nonBondedInter.setForceCap(17.5)
self.assertEqual(es.nonBondedInter.getForceCap(),17.5)
if __name__ == "__main__":
print("Features: ",es.code_info.features())
ut.main()
|
olenz/espresso
|
testsuite/python/nonBondedInteractions.py
|
Python
|
gpl-3.0
| 3,803
|
[
"ESPResSo"
] |
4240f8e814a93a9757d2226433dfefbba8bc53125e1ddcef05d98b0c4a1b7a32
|
#!/usr/bin/env python3
"""
This is the new SONAR setup/install script. It will check for
prerequisites and configure some helper files.
Usage: setup.py
"""
import os,subprocess,sys,glob
SONAR_HOME=os.getcwd()
if len(glob.glob("%s/commonVars.py"%SONAR_HOME))==0:
SONAR_HOME = os.path.dirname(sys.argv[0])
if not os.path.isabs(SONAR_HOME):
sys.exit("Can't find full path to SONAR home directory. You may need to call setup.py from within the SONAR directory or use the full absolute path.")
if not sys.platform.startswith("linux") and not sys.platform.startswith("darwin"):
sys.exit("Error, cannot recognize OS. Expected 'linux' or 'darwin' (macos), but got '%s'"%sys.platform)
try:
from docopt import docopt
except ImportError:
sys.exit("DocOpt is a required library for SONAR. Please run `pip3 install docopt --user`")
try:
from Bio import SeqIO
except ImportError:
sys.exit("Biopython is required for SONAR. Please run `pip3 install Biopython --user`")
try:
import airr
except ImportError:
sys.exit("AIRR is a required library for SONAR. Please run `pip3 install airr --user`")
try:
from fuzzywuzzy import fuzz
except ImportError:
print("fuzzywuzzy is not installed - the master script will not work.\nYou can fix this later by running `pip3 install fuzzywuzzy --user`.\nProceeding with install...\n\n",file=sys.stderr)
try:
from ete3 import *
except ImportError:
print("ete3 is not installed - tree plotting will not work.\nYou can fix this later by running `pip3 install ete3 --user`.\nProceeding with install...\n\n",file=sys.stderr)
try:
from PyQt4.QtGui import QGraphicsSimpleTextItem, QGraphicsEllipseItem, QColor, QFont, QBrush, QPen
except ImportError:
print("PyQt4 is not installed - tree plotting will not work.\nYou can fix this later by running `sudo apt-get install python-numpy python-qt4 python-lxml python-six`.\nProceeding with install...\n\n",file=sys.stderr)
try:
import pandas
except ImportError:
print("pandas is not installed - comparison of GSSPs (5.4) will not work.\nYou can fix this later by running `pip3 install pandas --user`.\nProceeding with install...\n\n",file=sys.stderr)
check = subprocess.call(["perl", "-MBio::SeqIO", '-e', '1'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
if check == 1:
sys.exit("BioPerl is a required for SONAR. Please run `cpanm Bio::Perl`")
check = subprocess.call(["perl", "-MList::Util", '-e', '1'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
if check == 1:
sys.exit("List::Util is a required library for SONAR. Please run `cpanm List::Util`")
check = subprocess.call(["perl", "-MAlgorithm::Combinatorics", '-e', '1'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
if check == 1:
sys.exit("Algorithm::Combinatorics is a required library for SONAR. Please run `cpanm Algorithm::Combinatorics`")
check = subprocess.call(["perl", "-MPDL::LinearAlgebra::Trans", '-e', '1'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
if check == 1:
print("PDL::LinearAlgebra::Trans is not installed - ancestor inference will not work.\nYou can fix this later by running `cpanm PDL::LinearAlgebra::Trans`.\nProceeding with install...\n\n", file=sys.stderr)
#R library checks
for lib in ["docopt","ggplot2","MASS","grid"]:
s=subprocess.Popen(['R','--vanilla','--slave','-e', '"%s" %%in%% installed.packages()[,"Package"]'%lib],
stderr=subprocess.PIPE,stdout=subprocess.PIPE,universal_newlines=True)
o,e = s.communicate()
if o.strip().split(" ")[1] == "FALSE":
sys.exit("R Package %s is not installed. Please start R and run the command `install.packages('%s')`"%(lib,lib))
#cluster?
cluster_exists = ""
while cluster_exists.upper() not in ["Y", "N"]:
cluster_exists = input("Is there a cluster available to use with SONAR [y/N]? ")
if cluster_exists == "":
cluster_exists = "N"
if cluster_exists.upper() == "Y":
qsub = input("Please enter the command used to submit jobs to the cluster [qsub]: ")
if qsub == "":
qsub = "qsub"
#print out sonar, paths.py, and PPvars.pm
##################################################################
with open("%s/PPvars.pm"%SONAR_HOME, "w") as ppvars:
ppvars.write("""#!/usr/bin/env perl
package PPvars;
use strict;
use vars '@ISA', '@EXPORT', '$NAME', '$VERSION', '$DATE', '$AUTHOR';
require Exporter;
@ISA = qw(Exporter);
@EXPORT = qw(ppath);
sub ppath{
return '%s/third-party/';
}
1;
""" % SONAR_HOME)
##################################################################
print_cluster = "clusterExists = False"
if cluster_exists.upper() == "Y":
print_cluster = "clusterExists = True\nqsub = '%s'" % qsub
blast = "blastn_linux64"
clustalo = "clustalo"
clustalw = "clustalw2"
muscle = "muscle"
vsearch = "vsearch"
if sys.platform.startswith("darwin"):
blast = "blastn_macos"
clustalo = "clustalo_macos"
clustalw = "clustalw2_macos"
muscle = "muscle_macos"
vsearch = "vsearch_macos"
##################################################################
with open("%s/paths.py"%SONAR_HOME, "w") as paths:
paths.write("""
SCRIPT_FOLDER = '%s'
blast_cmd = '%s/third-party/%s'
clustalo = '%s/third-party/%s'
clustalw = '%s/third-party/%s'
muscle = '%s/third-party/%s'
vsearch = '%s/third-party/%s'
%s
""" % (SONAR_HOME, SONAR_HOME, blast, SONAR_HOME, clustalo, SONAR_HOME, clustalw, SONAR_HOME, muscle, SONAR_HOME, vsearch, print_cluster))
##################################################################
with open("%s/sonar"%SONAR_HOME, "w") as sonar:
sonar.write("""#!/usr/bin/env python3
\"\"\"
sonar
This is a master script to allow easy access to SONAR scripts without
needing to remember the exact commands or to add multiple
directories to the path.
Usage: sonar COMMAND [ARGS...]
Options:
COMMAND Name of the SONAR script to run. Partial matches will be
honored. In case of ambiguity, the program will exit
with a list of possible matches.
ARGS Arguments to be passed to the script.
Created by Chaim A Schramm 2018-11-15.
Copyright (c) 2018 Vaccine Research Center, National Institutes
of Health, USA. All rights reserved.
\"\"\"
from docopt import docopt
import glob, os, subprocess, sys
from fuzzywuzzy import fuzz,process
def main():
script_list = [fn for fn in glob.glob(\"%s/*/*.py\") if not os.path.basename(fn).startswith(\"_\")] + glob.glob(\"%s/*/*.pl\") + glob.glob(\"%s/*/*.R\")
match_script = process.extract(arguments['COMMAND'], script_list, limit=5, scorer=fuzz.partial_ratio)
if match_script[0][1] == 100 and match_script[1][1] < 100:
subprocess.call( [ match_script[0][0] ] + arguments['ARGS'] )
else:
print(\"Input program '%%s' is unclear. Did you mean one of the following?\"%%arguments['COMMAND'])
for i in match_script:
print(\"\\t\"+os.path.basename(i[0]))
sys.exit()
if __name__ == '__main__':
arguments = docopt(__doc__, options_first=True, version=\"SONAR v4.0\")
main()
""" %(SONAR_HOME, SONAR_HOME, SONAR_HOME) )
##################################################################
os.chmod("%s/sonar"%SONAR_HOME, 0o755)
|
scharch/SOAnAR
|
setup.py
|
Python
|
gpl-3.0
| 7,164
|
[
"BLAST",
"BioPerl",
"Biopython"
] |
67b682b8efcf18620c470cae2c81f1ecfd9f59df18afc80420a110164360a4ae
|
import functools
import math
import numpy
from .Exponential import *
class GaussianExponential:
"""
Implements the convolution of a Gaussian with a multiexponential.
The Gaussian is assumed to be centered at the origin, and the exponential
is assumed to be modulated by a Heaviside function (also starting at the
origin).
"""
def __init__(self, gaussian_magnitude, gaussian_sigma,
exponential_parameters):
self.mx = MultiExponential(exponential_parameters)
self.gaussian_magnitude = gaussian_magnitude
self.gaussian_sigma = gaussian_sigma
def __call__(self, tau):
# k/2 * exp(k^2*sigma^2/2 - k*tau)
#     * (1 + erf((tau - k*sigma^2)/(sigma*sqrt(2))))
return(
functools.reduce(
lambda x, y: x+y,
map(
lambda exponential: \
self.gaussian_magnitude*exponential.magnitude*\
exponential.rate/2*\
(1+numpy.array(
list(map(lambda t: math.erf(\
(t-exponential.rate*self.gaussian_sigma**2)/\
(self.gaussian_sigma*math.sqrt(2))), tau))))*\
(numpy.exp(-exponential.rate*tau+\
exponential.rate**2*self.gaussian_sigma**2/2)),
self.mx)))
|
tsbischof/photon_correlation
|
python/photon_correlation/GaussianExponential.py
|
Python
|
bsd-3-clause
| 1,391
|
[
"Gaussian"
] |
39e53d448e7867b8fca63f40d3d1352d66fade5ce99283bf11de205a01529609
|
# -*- coding: utf-8 -*-
#
# rate_neuron_dm.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
rate_neuron decision making
------------------
A binary decision is implemented in the form of two rate neurons
engaging in mutual inhibition.
Evidence for each decision is reflected by the mean of Gaussian
white noise experienced by the respective neuron.
The activity of each neuron is recorded using multimeter devices.
It can be observed how noise as well as the difference in evidence
affects which neuron exhibits larger activity and hence which
decision will be made.
'''
import nest
import pylab
import numpy
'''
First, the function build_network is defined to build the network and
return the handles of the two decision units and the multimeter.
'''
def build_network(sigma, dt):
nest.ResetKernel()
nest.SetKernelStatus({'resolution': dt, 'use_wfr': False})
Params = {'lambda': 0.1, 'std': sigma, 'tau': 1., 'rectify_output': True}
D1 = nest.Create('lin_rate_ipn', params=Params)
D2 = nest.Create('lin_rate_ipn', params=Params)
nest.Connect(D1, D2, 'all_to_all', {
'model': 'rate_connection_instantaneous', 'weight': -0.2})
nest.Connect(D2, D1, 'all_to_all', {
'model': 'rate_connection_instantaneous', 'weight': -0.2})
mm = nest.Create('multimeter')
nest.SetStatus(mm, {'interval': dt, 'record_from': ['rate']})
nest.Connect(mm, D1, syn_spec={'delay': dt})
nest.Connect(mm, D2, syn_spec={'delay': dt})
return D1, D2, mm
'''
The function build_network takes the standard deviation of Gaussian
white noise and the time resolution as arguments.
First the Kernel is reset and the use_wfr (waveform-relaxation) is set to
false while the resolution is set to the specified value dt.
Two rate neurons with linear activation functions are created and the
handle is stored in the variables D1 and D2. The output of both decision
units is rectified at zero.
The two decisions units are coupled via mutual inhibition.
Next the multimeter is created and the handle stored in mm and the option
'record_from' is set. The multimeter is then connected to the two units
in order to 'observe' them. The connect function takes the handles as input.
'''
'''
The decision making process is simulated for three different levels of noise
and three differences in evidence for a given decision. The activity of both
decision units is plotted for each scenario.
'''
fig_size = [14, 8]
fig_rows = 3
fig_cols = 3
fig_plots = fig_rows * fig_cols
face = 'white'
edge = 'white'
ax = [None] * fig_plots
fig = pylab.figure(facecolor=face, edgecolor=edge, figsize=fig_size)
dt = 1e-3
sigma = [0.0, 0.1, 0.2]
dE = [0.0, 0.004, 0.008]
T = numpy.linspace(0, 200, 200 / dt - 1)
for i in range(9):
c = i % 3
r = int(i / 3)
D1, D2, mm = build_network(sigma[r], dt)
'''
First using build_network the network is build and the handles of
the decision units and the multimeter are stored in D1, D2 and mm
'''
nest.Simulate(100.0)
nest.SetStatus(D1, {'mean': 1. + dE[c]})
nest.SetStatus(D2, {'mean': 1. - dE[c]})
nest.Simulate(100.0)
'''
The network is simulated using `Simulate`, which takes the desired
simulation time in milliseconds and advances the network state by
this amount of time. After an initial period in the absence of evidence
for either decision, evidence is given by changing the state of each
decision unit. Note that both units receive evidence.
'''
data = nest.GetStatus(mm)
senders = data[0]['events']['senders']
voltages = data[0]['events']['rate']
'''
The activity values ('voltages') are read out by the multimeter
'''
ax[i] = fig.add_subplot(fig_rows, fig_cols, i + 1)
ax[i].plot(T, voltages[numpy.where(senders == D1)],
'b', linewidth=2, label="D1")
ax[i].plot(T, voltages[numpy.where(senders == D2)],
'r', linewidth=2, label="D2")
ax[i].set_ylim([-.5, 12.])
ax[i].get_xaxis().set_ticks([])
ax[i].get_yaxis().set_ticks([])
if c == 0:
ax[i].set_ylabel("activity ($\sigma=%.1f$) " % (sigma[r]))
ax[i].get_yaxis().set_ticks([0, 3, 6, 9, 12])
if r == 0:
ax[i].set_title("$\Delta E=%.3f$ " % (dE[c]))
if c == 2:
pylab.legend(loc=0)
if r == 2:
ax[i].get_xaxis().set_ticks([0, 50, 100, 150, 200])
ax[i].set_xlabel('time (ms)')
'''
The activity of the two units is plotted in each scenario.
In the absence of noise, the network will not make a decision if evidence
for both choices is equal. With noise, this symmetry can be broken and a
decision will be taken despite identical evidence.
As evidence for D1 relative to D2 increases, it becomes more likely that
the corresponding decision will be taken. For small differences in the
evidence for the two decisions, noise can lead to the 'wrong' decision.
'''
pylab.show()
|
tobikausk/nest-simulator
|
pynest/examples/rate_neuron_dm.py
|
Python
|
gpl-2.0
| 5,588
|
[
"Gaussian",
"NEURON"
] |
b15ca8f0fb3a0d5e42b1b4a4720c6474d78e48beabcad7a3de61c09e3dc47e84
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from optparse import OptionParser
import zoo.orca.data.pandas
from zoo.orca import init_orca_context, stop_orca_context
def process_feature(df, awake_begin=6, awake_end=23):
import pandas as pd
df['datetime'] = pd.to_datetime(df['timestamp'])
df['hours'] = df['datetime'].dt.hour
df['awake'] = (((df['hours'] >= awake_begin) & (df['hours'] <= awake_end))
| (df['hours'] == 0)).astype(int)
return df
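# The following small function is not part of the original example; it is a
# self-contained sketch (with made-up timestamps) showing the columns that
# process_feature adds to a pandas DataFrame.
def _demo_process_feature():
    import pandas as pd
    sample = pd.DataFrame({'timestamp': ['2014-01-01 05:00:00',
                                         '2014-01-01 12:00:00']})
    out = process_feature(sample)
    # hour 5 lies outside the 6-23 awake window -> awake == 0,
    # hour 12 lies inside it -> awake == 1
    return out[['hours', 'awake']]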
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-f", type=str, dest="file_path",
help="The file path to be read")
(options, args) = parser.parse_args(sys.argv)
sc = init_orca_context(cores="*", memory="4g")
# read data
file_path = options.file_path
data_shard = zoo.orca.data.pandas.read_csv(file_path)
data = data_shard.collect()
# repartition
data_shard = data_shard.repartition(2)
# apply function on each element
trans_data_shard = data_shard.transform_shard(process_feature)
data2 = trans_data_shard.collect()
stop_orca_context()
| intel-analytics/analytics-zoo | pyzoo/zoo/examples/orca/data/spark_pandas.py | Python | apache-2.0 | 1,686 | ["ORCA"] | 75c03ce80f81d31a2f0f74304792de8ed8b429ac579f4e9d88bbfb0d54800087 |
from __future__ import division
import algebra
import math
def step_function(x):
return 1 if x >= 0 else 0
def perceptron_output(weights, bias, x):
"""returns 1 if the perceptron 'fires'; 0 if not"""
return step_function(algebra.dot(weights, x) + bias)
def sigmoid(t):
return 1 / (1 + math.exp(-t))
def neuron_output(weights, inputs):
return sigmoid(algebra.dot(weights, inputs))
def feed_forward(neural_network, input_vector):
"""takes a neural_network (represented as a
list of list of lists of weights) and returns the output
from forward-propagating the input"""
outputs = []
for layer in neural_network:
# add bias of [1]
input_with_bias = input_vector + [1]
output = [neuron_output(neuron, input_with_bias)
for neuron in layer]
outputs.append(output)
input_vector = output
return outputs
def backpropogate(network, input_vector, target):
hidden_outputs, outputs = feed_forward(network, input_vector)
# the output * (1 - output) is from the derivative of sigmoid
output_deltas = [output * (1 - output) * (output - target[i])
for i, output in enumerate(outputs)]
# adjust weights for output layer (network[-1])
for i, output_neuron in enumerate(network[-1]):
for j, hidden_output in enumerate(hidden_outputs + [1]):
output_neuron[j] -= output_deltas[i] * hidden_output
hidden_deltas = [hidden_output * (1 - hidden_output) *
algebra.dot(output_deltas, [n[i] for n in network[-1]])
for i, hidden_output in enumerate(hidden_outputs)]
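# adjust weights for hidden layer (network[0])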
for i, hidden_neuron in enumerate(network[0]):
for j, input in enumerate(input_vector + [1]):
hidden_neuron[j] -= hidden_deltas[i] * input
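# Usage sketch (not part of the original module). It assumes algebra.dot is a
# standard dot product; the starting weights below are made up for
# illustration.
def _train_demo(rounds=1000):
    """Train a tiny 2-2-1 network on a single example and return its output,
    which should move towards the target [1]."""
    network = [
        # hidden layer: 2 neurons, each with 2 input weights plus 1 bias weight
        [[0.5, 0.5, 0.0], [0.5, 0.5, 0.0]],
        # output layer: 1 neuron with 2 hidden weights plus 1 bias weight
        [[1.0, -1.0, 0.0]],
    ]
    for _ in range(rounds):
        backpropogate(network, [1, 0], [1])
    # feed_forward returns the outputs of every layer; the last entry is the
    # final network output.
    return feed_forward(network, [1, 0])[-1]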
| mjamesruggiero/tripp | tripp/neural_networks.py | Python | bsd-3-clause | 1,824 | ["NEURON"] | 43137e9ca6093bf1afe9760e5312b2a2a6344c0b170b3dec6685557b19a4f4e1 |
# coding: utf8
{
' Assessment Series Details': ' Assessment Series Details',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'# of International Staff': '# of International Staff',
'# of National Staff': '# of National Staff',
'# of Vehicles': '# of Vehicles',
'%(module)s not installed': '%(module)s not installed',
'%(system_name)s - Verify Email': '%(system_name)s - Verify Email',
'%.1f km': '%.1f km',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': '%s rows deleted',
'%s rows updated': '%s rows updated',
'& then click on the map below to adjust the Lat/Lon fields': '& then click on the map below to adjust the Lat/Lon fields',
"'Cancel' will indicate an asset log entry did not occur": "'Cancel' will indicate an asset log entry did not occur",
'* Required Fields': '* Required Fields',
'0-15 minutes': '0-15 minutes',
'1 Assessment': '1 Assessment',
'1 location, shorter time, can contain multiple Tasks': '1 location, shorter time, can contain multiple Tasks',
'1-3 days': '1-3 days',
'15-30 minutes': '15-30 minutes',
'2 different options are provided here currently:': '2 different options are provided here currently:',
'2x4 Car': '2x4 Car',
'30-60 minutes': '30-60 minutes',
'4-7 days': '4-7 days',
'4x4 Car': '4x4 Car',
'8-14 days': '8-14 days',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.',
'A Reference Document such as a file, URL or contact person to verify this data.': 'A Reference Document such as a file, URL or contact person to verify this data.',
'A brief description of the group (optional)': 'A brief description of the group (optional)',
'A file in GPX format taken from a GPS.': 'A file in GPX format taken from a GPS.',
'A library of digital resources, such as photos, documents and reports': 'A library of digital resources, such as photos, documents and reports',
'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': 'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.',
'A location group is a set of locations (often, a set of administrative regions representing a combined area).': 'A location group is a set of locations (often, a set of administrative regions representing a combined area).',
'A location group must have at least one member.': 'A location group must have at least one member.',
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": "A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.",
'A portal for volunteers allowing them to amend their own data & view assigned tasks.': 'A portal for volunteers allowing them to amend their own data & view assigned tasks.',
'A task is a piece of work that an individual or team can do in 1-2 days': 'A task is a piece of work that an individual or team can do in 1-2 days',
'ABOUT THIS MODULE': 'ABOUT THIS MODULE',
'ACCESS DATA': 'ACCESS DATA',
'ANY': 'ANY',
'API Key': 'API Key',
'API is documented here': 'API is documented here',
'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20 Rapid Evaluation modified for New Zealand',
'Abbreviation': 'Abbreviation',
'Ability to customize the list of details tracked at a Shelter': 'Ability to customize the list of details tracked at a Shelter',
'Ability to customize the list of human resource tracked at a Shelter': 'Ability to customize the list of human resource tracked at a Shelter',
'Ability to customize the list of important facilities needed at a Shelter': 'Ability to customize the list of important facilities needed at a Shelter',
'About': 'About',
'Accept Push': 'Accept Push',
'Accept Pushes': 'Accept Pushes',
'Access denied': 'Access denied',
'Access to Shelter': 'Access to Shelter',
'Access to education services': 'Access to education services',
'Accessibility of Affected Location': 'Accessibility of Affected Location',
'Accompanying Relative': 'Accompanying Relative',
'Account Registered - Please Check Your Email': 'Account Registered - Please Check Your Email',
'Acronym': 'Acronym',
"Acronym of the organization's name, eg. IFRC.": "Acronym of the organisation's name, eg. IFRC.",
'Actionable by all targeted recipients': 'Actionable by all targeted recipients',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>',
'Actioned?': 'Actioned?',
'Actions': 'Actions',
'Actions taken as a result of this request.': 'Actions taken as a result of this request.',
'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': 'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).',
'Active': 'Active',
'Active Problems': 'Active Problems',
'Activities': 'Activities',
'Activities matching Assessments:': 'Activities matching Assessments:',
'Activities of boys 13-17yrs before disaster': 'Activities of boys 13-17yrs before disaster',
'Activities of boys 13-17yrs now': 'Activities of boys 13-17yrs now',
'Activities of boys <12yrs before disaster': 'Activities of boys <12yrs before disaster',
'Activities of boys <12yrs now': 'Activities of boys <12yrs now',
'Activities of children': 'Activities of children',
'Activities of girls 13-17yrs before disaster': 'Activities of girls 13-17yrs before disaster',
'Activities of girls 13-17yrs now': 'Activities of girls 13-17yrs now',
'Activities of girls <12yrs before disaster': 'Activities of girls <12yrs before disaster',
'Activities of girls <12yrs now': 'Activities of girls <12yrs now',
'Activities:': 'Activities:',
'Activity': 'Activity',
'Activity Added': 'Activity Added',
'Activity Deleted': 'Activity Deleted',
'Activity Details': 'Activity Details',
'Activity Report': 'Activity Report',
'Activity Reports': 'Activity Reports',
'Activity Type': 'Activity Type',
'Activity Types': 'Activity Types',
'Activity Updated': 'Activity Updated',
'Activity added': 'Activity added',
'Activity removed': 'Activity removed',
'Activity updated': 'Activity updated',
'Add': 'Add',
'Add Activity': 'Add Activity',
'Add Activity Report': 'Add Activity Report',
'Add Activity Type': 'Add Activity Type',
'Add Address': 'Add Address',
'Add Alternative Item': 'Add Alternative Item',
'Add Assessment': 'Add Assessment',
'Add Assessment Answer': 'Add Assessment Answer',
'Add Assessment Series': 'Add Assessment Series',
'Add Assessment Summary': 'Add Assessment Summary',
'Add Assessment Template': 'Add Assessment Template',
'Add Asset': 'Add Asset',
'Add Asset Log Entry - Change Label': 'Add Asset Log Entry - Change Label',
'Add Availability': 'Add Availability',
'Add Baseline': 'Add Baseline',
'Add Baseline Type': 'Add Baseline Type',
'Add Bed Type': 'Add Bed Type',
'Add Brand': 'Add Brand',
'Add Budget': 'Add Budget',
'Add Bundle': 'Add Bundle',
'Add Camp': 'Add Camp',
'Add Camp Service': 'Add Camp Service',
'Add Camp Type': 'Add Camp Type',
'Add Catalog': 'Add Catalog',
'Add Catalog Item': 'Add Catalog Item',
'Add Certificate': 'Add Certificate',
'Add Certification': 'Add Certification',
'Add Cholera Treatment Capability Information': 'Add Cholera Treatment Capability Information',
'Add Cluster': 'Add Cluster',
'Add Cluster Subsector': 'Add Cluster Subsector',
'Add Competency Rating': 'Add Competency Rating',
'Add Contact': 'Add Contact',
'Add Contact Information': 'Add Contact Information',
'Add Course': 'Add Course',
'Add Course Certificate': 'Add Course Certificate',
'Add Credential': 'Add Credential',
'Add Credentials': 'Add Credentials',
'Add Dead Body Report': 'Add Dead Body Report',
'Add Disaster Victims': 'Add Disaster Victims',
'Add Distribution.': 'Add Distribution.',
'Add Document': 'Add Document',
'Add Donor': 'Add Donor',
'Add Facility': 'Add Facility',
'Add Feature Class': 'Add Feature Class',
'Add Feature Layer': 'Add Feature Layer',
'Add Flood Report': 'Add Flood Report',
'Add GPS data': 'Add GPS data',
'Add Group': 'Add Group',
'Add Group Member': 'Add Group Member',
'Add Home Address': 'Add Home Address',
'Add Hospital': 'Add Hospital',
'Add Human Resource': 'Add Human Resource',
'Add Identification Report': 'Add Identification Report',
'Add Identity': 'Add Identity',
'Add Image': 'Add Image',
'Add Impact': 'Add Impact',
'Add Impact Type': 'Add Impact Type',
'Add Incident': 'Add Incident',
'Add Incident Report': 'Add Incident Report',
'Add Item': 'Add Item',
'Add Item Category': 'Add Item Category',
'Add Item Pack': 'Add Item Pack',
'Add Item to Catalog': 'Add Item to Catalog',
'Add Item to Commitment': 'Add Item to Commitment',
'Add Item to Inventory': 'Add Item to Inventory',
'Add Item to Order': 'Add Item to Order',
'Add Item to Request': 'Add Item to Request',
'Add Item to Shipment': 'Add Item to Shipment',
'Add Job': 'Add Job',
'Add Job Role': 'Add Job Role',
'Add Kit': 'Add Kit',
'Add Layer': 'Add Layer',
'Add Level 1 Assessment': 'Add Level 1 Assessment',
'Add Level 2 Assessment': 'Add Level 2 Assessment',
'Add Location': 'Add Location',
'Add Log Entry': 'Add Log Entry',
'Add Map Configuration': 'Add Map Configuration',
'Add Marker': 'Add Marker',
'Add Member': 'Add Member',
'Add Membership': 'Add Membership',
'Add Message': 'Add Message',
'Add Mission': 'Add Mission',
'Add Need': 'Add Need',
'Add Need Type': 'Add Need Type',
'Add New': 'Add New',
'Add New Activity': 'Add New Activity',
'Add New Activity Type': 'Add New Activity Type',
'Add New Address': 'Add New Address',
'Add New Alternative Item': 'Add New Alternative Item',
'Add New Assessment': 'Add New Assessment',
'Add New Assessment Summary': 'Add New Assessment Summary',
'Add New Asset': 'Add New Asset',
'Add New Baseline': 'Add New Baseline',
'Add New Baseline Type': 'Add New Baseline Type',
'Add New Brand': 'Add New Brand',
'Add New Budget': 'Add New Budget',
'Add New Bundle': 'Add New Bundle',
'Add New Camp': 'Add New Camp',
'Add New Camp Service': 'Add New Camp Service',
'Add New Camp Type': 'Add New Camp Type',
'Add New Catalog': 'Add New Catalog',
'Add New Cluster': 'Add New Cluster',
'Add New Cluster Subsector': 'Add New Cluster Subsector',
'Add New Commitment Item': 'Add New Commitment Item',
'Add New Contact': 'Add New Contact',
'Add New Credential': 'Add New Credential',
'Add New Document': 'Add New Document',
'Add New Donor': 'Add New Donor',
'Add New Entry': 'Add New Entry',
'Add New Event': 'Add New Event',
'Add New Facility': 'Add New Facility',
'Add New Feature Class': 'Add New Feature Class',
'Add New Feature Layer': 'Add New Feature Layer',
'Add New Flood Report': 'Add New Flood Report',
'Add New Group': 'Add New Group',
'Add New Home': 'Add New Home',
'Add New Hospital': 'Add New Hospital',
'Add New Human Resource': 'Add New Human Resource',
'Add New Identity': 'Add New Identity',
'Add New Image': 'Add New Image',
'Add New Impact': 'Add New Impact',
'Add New Impact Type': 'Add New Impact Type',
'Add New Incident': 'Add New Incident',
'Add New Incident Report': 'Add New Incident Report',
'Add New Item': 'Add New Item',
'Add New Item Category': 'Add New Item Category',
'Add New Item Pack': 'Add New Item Pack',
'Add New Item to Kit': 'Add New Item to Kit',
'Add New Item to Order': 'Add New Item to Order',
'Add New Kit': 'Add New Kit',
'Add New Layer': 'Add New Layer',
'Add New Level 1 Assessment': 'Add New Level 1 Assessment',
'Add New Level 2 Assessment': 'Add New Level 2 Assessment',
'Add New Location': 'Add New Location',
'Add New Log Entry': 'Add New Log Entry',
'Add New Map Configuration': 'Add New Map Configuration',
'Add New Marker': 'Add New Marker',
'Add New Member': 'Add New Member',
'Add New Membership': 'Add New Membership',
'Add New Need': 'Add New Need',
'Add New Need Type': 'Add New Need Type',
'Add New Office': 'Add New Office',
'Add New Order': 'Add New Order',
'Add New Organization': 'Add New Organisation',
'Add New Organization Domain': 'Add New Organisation Domain',
'Add New Patient': 'Add New Patient',
'Add New Person to Commitment': 'Add New Person to Commitment',
'Add New Photo': 'Add New Photo',
'Add New Population Statistic': 'Add New Population Statistic',
'Add New Problem': 'Add New Problem',
'Add New Project': 'Add New Project',
'Add New Project Site': 'Add New Project Site',
'Add New Projection': 'Add New Projection',
'Add New Rapid Assessment': 'Add New Rapid Assessment',
'Add New Received Item': 'Add New Received Item',
'Add New Record': 'Add New Record',
'Add New Relative': 'Add New Relative',
'Add New Report': 'Add New Report',
'Add New Request': 'Add New Request',
'Add New Request Item': 'Add New Request Item',
'Add New Resource': 'Add New Resource',
'Add New River': 'Add New River',
'Add New Role': 'Add New Role',
'Add New Role to User': 'Add New Role to User',
'Add New Room': 'Add New Room',
'Add New Scenario': 'Add New Scenario',
'Add New Sector': 'Add New Sector',
'Add New Sent Item': 'Add New Sent Item',
'Add New Setting': 'Add New Setting',
'Add New Shelter': 'Add New Shelter',
'Add New Shelter Service': 'Add New Shelter Service',
'Add New Shelter Type': 'Add New Shelter Type',
'Add New Skill': 'Add New Skill',
'Add New Solution': 'Add New Solution',
'Add New Staff Member': 'Add New Staff Member',
'Add New Staff Type': 'Add New Staff Type',
'Add New Subsector': 'Add New Subsector',
'Add New Task': 'Add New Task',
'Add New Team': 'Add New Team',
'Add New Theme': 'Add New Theme',
'Add New Ticket': 'Add New Ticket',
'Add New User': 'Add New User',
'Add New User to Role': 'Add New User to Role',
'Add New Vehicle': 'Add New Vehicle',
'Add New Volunteer': 'Add New Volunteer',
'Add New Warehouse': 'Add New Warehouse',
'Add Office': 'Add Office',
'Add Order': 'Add Order',
'Add Organization': 'Add Organisation',
'Add Organization Domain': 'Add Organisation Domain',
'Add Organization to Project': 'Add Organisation to Project',
'Add Person': 'Add Person',
'Add Person to Commitment': 'Add Person to Commitment',
'Add Personal Effects': 'Add Personal Effects',
'Add Photo': 'Add Photo',
'Add Point': 'Add Point',
'Add Polygon': 'Add Polygon',
'Add Population Statistic': 'Add Population Statistic',
'Add Position': 'Add Position',
'Add Problem': 'Add Problem',
'Add Project': 'Add Project',
'Add Project Site': 'Add Project Site',
'Add Projection': 'Add Projection',
'Add Question Meta-Data': 'Add Question Meta-Data',
'Add Rapid Assessment': 'Add Rapid Assessment',
'Add Record': 'Add Record',
'Add Reference Document': 'Add Reference Document',
'Add Report': 'Add Report',
'Add Repository': 'Add Repository',
'Add Request': 'Add Request',
'Add Resource': 'Add Resource',
'Add River': 'Add River',
'Add Role': 'Add Role',
'Add Room': 'Add Room',
'Add Saved Search': 'Add Saved Search',
'Add Section': 'Add Section',
'Add Sector': 'Add Sector',
'Add Service Profile': 'Add Service Profile',
'Add Setting': 'Add Setting',
'Add Shelter': 'Add Shelter',
'Add Shelter Service': 'Add Shelter Service',
'Add Shelter Type': 'Add Shelter Type',
'Add Skill': 'Add Skill',
'Add Skill Equivalence': 'Add Skill Equivalence',
'Add Skill Provision': 'Add Skill Provision',
'Add Skill Type': 'Add Skill Type',
'Add Skill to Request': 'Add Skill to Request',
'Add Solution': 'Add Solution',
'Add Staff Member': 'Add Staff Member',
'Add Staff Type': 'Add Staff Type',
'Add Status': 'Add Status',
'Add Subscription': 'Add Subscription',
'Add Subsector': 'Add Subsector',
'Add Task': 'Add Task',
'Add Team': 'Add Team',
'Add Template Section': 'Add Template Section',
'Add Theme': 'Add Theme',
'Add Ticket': 'Add Ticket',
'Add Training': 'Add Training',
'Add Unit': 'Add Unit',
'Add User': 'Add User',
'Add Vehicle': 'Add Vehicle',
'Add Vehicle Detail': 'Add Vehicle Detail',
'Add Vehicle Details': 'Add Vehicle Details',
'Add Volunteer': 'Add Volunteer',
'Add Volunteer Availability': 'Add Volunteer Availability',
'Add Warehouse': 'Add Warehouse',
'Add a Person': 'Add a Person',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.',
'Add a new Assessment Answer': 'Add a new Assessment Answer',
'Add a new Assessment Question': 'Add a new Assessment Question',
'Add a new Assessment Series': 'Add a new Assessment Series',
'Add a new Assessment Template': 'Add a new Assessment Template',
'Add a new Completed Assessment': 'Add a new Completed Assessment',
'Add a new Template Section': 'Add a new Template Section',
'Add a new certificate to the catalog.': 'Add a new certificate to the catalogue.',
'Add a new competency rating to the catalog.': 'Add a new competency rating to the catalogue.',
'Add a new job role to the catalog.': 'Add a new job role to the catalogue.',
'Add a new skill provision to the catalog.': 'Add a new skill provision to the catalogue.',
'Add a new skill type to the catalog.': 'Add a new skill type to the catalogue.',
'Add an Assessment Question': 'Add an Assessment Question',
'Add new Group': 'Add new Group',
'Add new Individual': 'Add new Individual',
'Add new Patient': 'Add new Patient',
'Add new Question Meta-Data': 'Add new Question Meta-Data',
'Add new project.': 'Add new project.',
'Add staff members': 'Add staff members',
'Add to Bundle': 'Add to Bundle',
'Add to budget': 'Add to budget',
'Add volunteers': 'Add volunteers',
'Add/Edit/Remove Layers': 'Add/Edit/Remove Layers',
'Additional Beds / 24hrs': 'Additional Beds / 24hrs',
'Address': 'Address',
'Address Details': 'Address Details',
'Address Type': 'Address Type',
'Address added': 'Address added',
'Address deleted': 'Address deleted',
'Address updated': 'Address updated',
'Addresses': 'Addresses',
'Adequate': 'Adequate',
'Adequate food and water available': 'Adequate food and water available',
'Admin Email': 'Admin Email',
'Admin Name': 'Admin Name',
'Admin Tel': 'Admin Tel',
'Administration': 'Administration',
'Admissions/24hrs': 'Admissions/24hrs',
'Adolescent (12-20)': 'Adolescent (12-20)',
'Adolescent participating in coping activities': 'Adolescent participating in coping activities',
'Adult (21-50)': 'Adult (21-50)',
'Adult ICU': 'Adult ICU',
'Adult Psychiatric': 'Adult Psychiatric',
'Adult female': 'Adult female',
'Adult male': 'Adult male',
'Adults in prisons': 'Adults in prisons',
'Advanced:': 'Advanced:',
'Advisory': 'Advisory',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.',
'Age Group': 'Age Group',
'Age group': 'Age group',
'Age group does not match actual age.': 'Age group does not match actual age.',
'Aggravating factors': 'Aggravating factors',
'Agriculture': 'Agriculture',
'Air Transport Service': 'Air Transport Service',
'Air tajin': 'Air tajin',
'Aircraft Crash': 'Aircraft Crash',
'Aircraft Hijacking': 'Aircraft Hijacking',
'Airport Closure': 'Airport Closure',
'Airspace Closure': 'Airspace Closure',
'Alcohol': 'Alcohol',
'Alert': 'Alert',
'All': 'All',
'All Inbound & Outbound Messages are stored here': 'All Inbound & Outbound Messages are stored here',
'All Resources': 'All Resources',
'All data provided by the Sahana Software Foundation from this site is licensed under a Creative Commons Attribution license. However, not all data originates here. Please consult the source field of each entry.': 'All data provided by the Sahana Software Foundation from this site is licensed under a Creative Commons Attribution license. However, not all data originates here. Please consult the source field of each entry.',
'Allows a Budget to be drawn up': 'Allows a Budget to be drawn up',
'Alternative Item': 'Alternative Item',
'Alternative Item Details': 'Alternative Item Details',
'Alternative Item added': 'Alternative Item added',
'Alternative Item deleted': 'Alternative Item deleted',
'Alternative Item updated': 'Alternative Item updated',
'Alternative Items': 'Alternative Items',
'Alternative places for studying': 'Alternative places for studying',
'Ambulance Service': 'Ambulance Service',
'An item which can be used in place of another item': 'An item which can be used in place of another item',
'Analysis': 'Analysis',
'Analysis of assessments': 'Analysis of assessments',
'Animal Die Off': 'Animal Die Off',
'Animal Feed': 'Animal Feed',
'Answer': 'Answer',
'Anthropolgy': 'Anthropolgy',
'Antibiotics available': 'Antibiotics available',
'Antibiotics needed per 24h': 'Antibiotics needed per 24h',
'Apparent Age': 'Apparent Age',
'Apparent Gender': 'Apparent Gender',
'Application Deadline': 'Application Deadline',
'Approve': 'Approve',
'Approved': 'Approved',
'Approved By': 'Approved By',
'Approver': 'Approver',
'Arabic': 'Arabic',
'Arctic Outflow': 'Arctic Outflow',
'Are you sure you want to delete this record?': 'Are you sure you want to delete this record?',
'Areas inspected': 'Areas inspected',
'As of yet, no completed surveys have been added to this series.': 'As of yet, no completed surveys have been added to this series.',
'As of yet, no sections have been added to this template.': 'As of yet, no sections have been added to this template.',
'Assessment': 'Assessment',
'Assessment Answer': 'Assessment Answer',
'Assessment Answer Details': 'Assessment Answer Details',
'Assessment Answer added': 'Assessment Answer added',
'Assessment Answer deleted': 'Assessment Answer deleted',
'Assessment Answer updated': 'Assessment Answer updated',
'Assessment Details': 'Assessment Details',
'Assessment Question Details': 'Assessment Question Details',
'Assessment Question added': 'Assessment Question added',
'Assessment Question deleted': 'Assessment Question deleted',
'Assessment Question updated': 'Assessment Question updated',
'Assessment Reported': 'Assessment Reported',
'Assessment Series': 'Assessment Series',
'Assessment Series added': 'Assessment Series added',
'Assessment Series deleted': 'Assessment Series deleted',
'Assessment Series updated': 'Assessment Series updated',
'Assessment Summaries': 'Assessment Summaries',
'Assessment Summary Details': 'Assessment Summary Details',
'Assessment Summary added': 'Assessment Summary added',
'Assessment Summary deleted': 'Assessment Summary deleted',
'Assessment Summary updated': 'Assessment Summary updated',
'Assessment Template Details': 'Assessment Template Details',
'Assessment Template added': 'Assessment Template added',
'Assessment Template deleted': 'Assessment Template deleted',
'Assessment Template updated': 'Assessment Template updated',
'Assessment Templates': 'Assessment Templates',
'Assessment added': 'Assessment added',
'Assessment admin level': 'Assessment admin level',
'Assessment deleted': 'Assessment deleted',
'Assessment timeline': 'Assessment timeline',
'Assessment updated': 'Assessment updated',
'Assessments': 'Assessments',
'Assessments Needs vs. Activities': 'Assessments Needs vs. Activities',
'Assessments and Activities': 'Assessments and Activities',
'Assessments:': 'Assessments:',
'Assessor': 'Assessor',
'Asset': 'Asset',
'Asset Details': 'Asset Details',
'Asset Log': 'Asset Log',
'Asset Log Details': 'Asset Log Details',
'Asset Log Empty': 'Asset Log Empty',
'Asset Log Entry Added - Change Label': 'Asset Log Entry Added - Change Label',
'Asset Log Entry deleted': 'Asset Log Entry deleted',
'Asset Log Entry updated': 'Asset Log Entry updated',
'Asset Management': 'Asset Management',
'Asset Number': 'Asset Number',
'Asset added': 'Asset added',
'Asset deleted': 'Asset deleted',
'Asset removed': 'Asset removed',
'Asset updated': 'Asset updated',
'Assets': 'Assets',
'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Assets are resources which are not consumable but are expected back, so they need tracking.',
'Assign': 'Assign',
'Assign to Org.': 'Assign to Org.',
'Assign to Organisation': 'Assign to Organisation',
'Assign to Organization': 'Assign to Organisation',
'Assign to Person': 'Assign to Person',
'Assign to Site': 'Assign to Site',
'Assigned': 'Assigned',
'Assigned By': 'Assigned By',
'Assigned To': 'Assigned To',
'Assigned to Organisation': 'Assigned to Organisation',
'Assigned to Person': 'Assigned to Person',
'Assigned to Site': 'Assigned to Site',
'Assignment': 'Assignment',
'Assignments': 'Assignments',
'At/Visited Location (not virtual)': 'At/Visited Location (not virtual)',
'Attend to information sources as described in <instruction>': 'Attend to information sources as described in <instruction>',
'Attribution': 'Attribution',
"Authenticate system's Twitter account": "Authenticate system's Twitter account",
'Author': 'Author',
'Available Alternative Inventories': 'Available Alternative Inventories',
'Available Beds': 'Available Beds',
'Available Forms': 'Available Forms',
'Available Inventories': 'Available Inventories',
'Available Messages': 'Available Messages',
'Available Records': 'Available Records',
'Available databases and tables': 'Available databases and tables',
'Available for Location': 'Available for Location',
'Available from': 'Available from',
'Available in Viewer?': 'Available in Viewer?',
'Available until': 'Available until',
'Avalanche': 'Avalanche',
'Avoid the subject event as per the <instruction>': 'Avoid the subject event as per the <instruction>',
'Background Color': 'Background Colour',
'Background Color for Text blocks': 'Background Colour for Text blocks',
'Bahai': 'Bahai',
'Baldness': 'Baldness',
'Banana': 'Banana',
'Bank/micro finance': 'Bank/micro finance',
'Barricades are needed': 'Barricades are needed',
'Base Layer?': 'Base Layer?',
'Base Layers': 'Base Layers',
'Base Location': 'Base Location',
'Base Site Set': 'Base Site Set',
'Base URL of the remote Sahana-Eden site': 'Base URL of the remote Sahana-Eden site',
'Baseline Data': 'Baseline Data',
'Baseline Number of Beds': 'Baseline Number of Beds',
'Baseline Type': 'Baseline Type',
'Baseline Type Details': 'Baseline Type Details',
'Baseline Type added': 'Baseline Type added',
'Baseline Type deleted': 'Baseline Type deleted',
'Baseline Type updated': 'Baseline Type updated',
'Baseline Types': 'Baseline Types',
'Baseline added': 'Baseline added',
'Baseline deleted': 'Baseline deleted',
'Baseline number of beds of that type in this unit.': 'Baseline number of beds of that type in this unit.',
'Baseline updated': 'Baseline updated',
'Baselines': 'Baselines',
'Baselines Details': 'Baselines Details',
'Basic Assessment': 'Basic Assessment',
'Basic Assessment Reported': 'Basic Assessment Reported',
'Basic Details': 'Basic Details',
'Basic reports on the Shelter and drill-down by region': 'Basic reports on the Shelter and drill-down by region',
'Baud': 'Baud',
'Baud rate to use for your modem - The default is safe for most cases': 'Baud rate to use for your modem - The default is safe for most cases',
'Beam': 'Beam',
'Bed Capacity': 'Bed Capacity',
'Bed Capacity per Unit': 'Bed Capacity per Unit',
'Bed Type': 'Bed Type',
'Bed type already registered': 'Bed type already registered',
'Below ground level': 'Below ground level',
'Beneficiary Type': 'Beneficiary Type',
"Bing Layers cannot be displayed if there isn't a valid API Key": "Bing Layers cannot be displayed if there isn't a valid API Key",
'Biological Hazard': 'Biological Hazard',
'Biscuits': 'Biscuits',
'Blizzard': 'Blizzard',
'Blood Type (AB0)': 'Blood Type (AB0)',
'Blowing Snow': 'Blowing Snow',
'Boat': 'Boat',
'Bodies': 'Bodies',
'Bodies found': 'Bodies found',
'Bodies recovered': 'Bodies recovered',
'Body': 'Body',
'Body Recovery': 'Body Recovery',
'Body Recovery Request': 'Body Recovery Request',
'Body Recovery Requests': 'Body Recovery Requests',
'Bomb': 'Bomb',
'Bomb Explosion': 'Bomb Explosion',
'Bomb Threat': 'Bomb Threat',
'Border Color for Text blocks': 'Border Colour for Text blocks',
'Brand': 'Brand',
'Brand Details': 'Brand Details',
'Brand added': 'Brand added',
'Brand deleted': 'Brand deleted',
'Brand updated': 'Brand updated',
'Brands': 'Brands',
'Bricks': 'Bricks',
'Bridge Closed': 'Bridge Closed',
'Bucket': 'Bucket',
'Buddhist': 'Buddhist',
'Budget': 'Budget',
'Budget Details': 'Budget Details',
'Budget Updated': 'Budget Updated',
'Budget added': 'Budget added',
'Budget deleted': 'Budget deleted',
'Budget updated': 'Budget updated',
'Budgeting Module': 'Budgeting Module',
'Budgets': 'Budgets',
'Buffer': 'Buffer',
'Bug': 'Bug',
'Building Assessments': 'Building Assessments',
'Building Collapsed': 'Building Collapsed',
'Building Name': 'Building Name',
'Building Safety Assessments': 'Building Safety Assessments',
'Building Short Name/Business Name': 'Building Short Name/Business Name',
'Building or storey leaning': 'Building or storey leaning',
'Built using the Template agreed by a group of NGOs working together as the': 'Built using the Template agreed by a group of NGOs working together as the',
'Bulk Uploader': 'Bulk Uploader',
'Bundle': 'Bundle',
'Bundle Contents': 'Bundle Contents',
'Bundle Details': 'Bundle Details',
'Bundle Updated': 'Bundle Updated',
'Bundle added': 'Bundle added',
'Bundle deleted': 'Bundle deleted',
'Bundle updated': 'Bundle updated',
'Bundles': 'Bundles',
'Burn': 'Burn',
'Burn ICU': 'Burn ICU',
'Burned/charred': 'Burned/charred',
'By Facility': 'By Facility',
'By Inventory': 'By Inventory',
'CBA Women': 'CBA Women',
'CLOSED': 'CLOSED',
'CN': 'CN',
'CSS file %s not writable - unable to apply theme!': 'CSS file %s not writable - unable to apply theme!',
'Calculate': 'Calculate',
'Camp': 'Camp',
'Camp Coordination/Management': 'Camp Coordination/Management',
'Camp Details': 'Camp Details',
'Camp Service': 'Camp Service',
'Camp Service Details': 'Camp Service Details',
'Camp Service added': 'Camp Service added',
'Camp Service deleted': 'Camp Service deleted',
'Camp Service updated': 'Camp Service updated',
'Camp Services': 'Camp Services',
'Camp Type': 'Camp Type',
'Camp Type Details': 'Camp Type Details',
'Camp Type added': 'Camp Type added',
'Camp Type deleted': 'Camp Type deleted',
'Camp Type updated': 'Camp Type updated',
'Camp Types': 'Camp Types',
'Camp Types and Services': 'Camp Types and Services',
'Camp added': 'Camp added',
'Camp deleted': 'Camp deleted',
'Camp updated': 'Camp updated',
'Camps': 'Camps',
'Can only approve 1 record at a time!': 'Can only approve 1 record at a time!',
'Can only disable 1 record at a time!': 'Can only disable 1 record at a time!',
'Can only enable 1 record at a time!': 'Can only enable 1 record at a time!',
"Can't import tweepy": "Can't import tweepy",
'Cancel': 'Cancel',
'Cancel Log Entry': 'Cancel Log Entry',
'Cancel Shipment': 'Cancel Shipment',
'Canceled': 'Canceled',
'Candidate Matches for Body %s': 'Candidate Matches for Body %s',
'Canned Fish': 'Canned Fish',
'Cannot be empty': 'Cannot be empty',
'Cannot disable your own account!': 'Cannot disable your own account!',
'Capacity (Max Persons)': 'Capacity (Max Persons)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)',
'Capture Information on each disaster victim': 'Capture Information on each disaster victim',
'Capturing the projects each organization is providing and where': 'Capturing the projects each organisation is providing and where',
'Cardiology': 'Cardiology',
'Cassava': 'Cassava',
'Casual Labor': 'Casual Labor',
'Casualties': 'Casualties',
'Catalog': 'Catalog',
'Catalog Details': 'Catalog Details',
'Catalog Item added': 'Catalog Item added',
'Catalog Item deleted': 'Catalog Item deleted',
'Catalog Item updated': 'Catalog Item updated',
'Catalog Items': 'Catalog Items',
'Catalog added': 'Catalog added',
'Catalog deleted': 'Catalog deleted',
'Catalog updated': 'Catalog updated',
'Catalogs': 'Catalogs',
'Categories': 'Categories',
'Category': 'Category',
"Caution: doesn't respect the framework rules!": "Caution: doesn't respect the framework rules!",
'Ceilings, light fixtures': 'Ceilings, light fixtures',
'Cell Phone': 'Cell Phone',
'Central point to record details on People': 'Central point to record details on People',
'Certificate': 'Certificate',
'Certificate Catalog': 'Certificate Catalog',
'Certificate Details': 'Certificate Details',
'Certificate Status': 'Certificate Status',
'Certificate added': 'Certificate added',
'Certificate deleted': 'Certificate deleted',
'Certificate updated': 'Certificate updated',
'Certificates': 'Certificates',
'Certification': 'Certification',
'Certification Details': 'Certification Details',
'Certification added': 'Certification added',
'Certification deleted': 'Certification deleted',
'Certification updated': 'Certification updated',
'Certifications': 'Certifications',
'Certifying Organization': 'Certifying Organisation',
'Change Password': 'Change Password',
'Check': 'Check',
'Check Request': 'Check Request',
'Check for errors in the URL, maybe the address was mistyped.': 'Check for errors in the URL, maybe the address was mistyped.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Check if the URL is pointing to a directory instead of a webpage.',
'Check outbox for the message status': 'Check outbox for the message status',
'Check to delete': 'Check to delete',
'Check to delete:': 'Check to delete:',
'Check-in at Facility': 'Check-in at Facility',
'Checked': 'Checked',
'Checklist': 'Checklist',
'Checklist created': 'Checklist created',
'Checklist deleted': 'Checklist deleted',
'Checklist of Operations': 'Checklist of Operations',
'Checklist updated': 'Checklist updated',
'Chemical Hazard': 'Chemical Hazard',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack',
'Chicken': 'Chicken',
'Child': 'Child',
'Child (2-11)': 'Child (2-11)',
'Child (< 18 yrs)': 'Child (< 18 yrs)',
'Child Abduction Emergency': 'Child Abduction Emergency',
'Child headed households (<18 yrs)': 'Child headed households (<18 yrs)',
'Children (2-5 years)': 'Children (2-5 years)',
'Children (5-15 years)': 'Children (5-15 years)',
'Children (< 2 years)': 'Children (< 2 years)',
'Children in adult prisons': 'Children in adult prisons',
'Children in boarding schools': 'Children in boarding schools',
'Children in homes for disabled children': 'Children in homes for disabled children',
'Children in juvenile detention': 'Children in juvenile detention',
'Children in orphanages': 'Children in orphanages',
'Children living on their own (without adults)': 'Children living on their own (without adults)',
'Children not enrolled in new school': 'Children not enrolled in new school',
'Children orphaned by the disaster': 'Children orphaned by the disaster',
'Children separated from their parents/caregivers': 'Children separated from their parents/caregivers',
'Children that have been sent to safe places': 'Children that have been sent to safe places',
'Children who have disappeared since the disaster': 'Children who have disappeared since the disaster',
'Chinese (Simplified)': 'Chinese (Simplified)',
'Chinese (Traditional)': 'Chinese (Traditional)',
'Cholera Treatment': 'Cholera Treatment',
'Cholera Treatment Capability': 'Cholera Treatment Capability',
'Cholera Treatment Center': 'Cholera Treatment Center',
'Cholera-Treatment-Center': 'Cholera-Treatment-Center',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.',
'Christian': 'Christian',
'Church': 'Church',
'City': 'City',
'Civil Emergency': 'Civil Emergency',
'Cladding, glazing': 'Cladding, glazing',
'Click on the link': 'Click on the link',
'Client IP': 'Client IP',
'Climate': 'Climate',
'Clinical Laboratory': 'Clinical Laboratory',
'Clinical Operations': 'Clinical Operations',
'Clinical Status': 'Clinical Status',
'Close map': 'Close map',
'Closed': 'Closed',
'Clothing': 'Clothing',
'Cluster': 'Cluster',
'Cluster Details': 'Cluster Details',
'Cluster Distance': 'Cluster Distance',
'Cluster Subsector': 'Cluster Subsector',
'Cluster Subsector Details': 'Cluster Subsector Details',
'Cluster Subsector added': 'Cluster Subsector added',
'Cluster Subsector deleted': 'Cluster Subsector deleted',
'Cluster Subsector updated': 'Cluster Subsector updated',
'Cluster Subsectors': 'Cluster Subsectors',
'Cluster Threshold': 'Cluster Threshold',
'Cluster added': 'Cluster added',
'Cluster deleted': 'Cluster deleted',
'Cluster updated': 'Cluster updated',
'Cluster(s)': 'Cluster(s)',
'Clusters': 'Clusters',
'Code': 'Code',
'Cold Wave': 'Cold Wave',
'Collapse, partial collapse, off foundation': 'Collapse, partial collapse, off foundation',
'Collective center': 'Collective center',
'Color for Underline of Subheadings': 'Colour for Underline of Subheadings',
'Color of Buttons when hovering': 'Colour of Buttons when hovering',
'Color of bottom of Buttons when not pressed': 'Colour of bottom of Buttons when not pressed',
'Color of bottom of Buttons when pressed': 'Colour of bottom of Buttons when pressed',
'Color of dropdown menus': 'Colour of dropdown menus',
'Color of selected Input fields': 'Colour of selected Input fields',
'Color of selected menu items': 'Colour of selected menu items',
'Columns, pilasters, corbels': 'Columns, pilasters, corbels',
'Combined Method': 'Combined Method',
'Come back later.': 'Come back later.',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.',
'Comments': 'Comments',
'Commercial/Offices': 'Commercial/Offices',
'Commit': 'Commit',
'Commit Date': 'Commit Date',
'Commit from %s': 'Commit from %s',
'Commit. Status': 'Commit. Status',
'Commiting a changed spreadsheet to the database': 'Commiting a changed spreadsheet to the database',
'Commitment': 'Commitment',
'Commitment Added': 'Commitment Added',
'Commitment Canceled': 'Commitment Canceled',
'Commitment Details': 'Commitment Details',
'Commitment Item Details': 'Commitment Item Details',
'Commitment Item added': 'Commitment Item added',
'Commitment Item deleted': 'Commitment Item deleted',
'Commitment Item updated': 'Commitment Item updated',
'Commitment Items': 'Commitment Items',
'Commitment Status': 'Commitment Status',
'Commitment Updated': 'Commitment Updated',
'Commitments': 'Commitments',
'Committed': 'Committed',
'Committed By': 'Committed By',
'Committed People': 'Committed People',
'Committed Person Details': 'Committed Person Details',
'Committed Person updated': 'Committed Person updated',
'Committing Inventory': 'Committing Inventory',
'Committing Organization': 'Committing Organisation',
'Committing Person': 'Committing Person',
'Communication problems': 'Communication problems',
'Community Centre': 'Community Centre',
'Community Health Center': 'Community Health Center',
'Community Member': 'Community Member',
'Competency': 'Competency',
'Competency Rating Catalog': 'Competency Rating Catalog',
'Competency Rating Details': 'Competency Rating Details',
'Competency Rating added': 'Competency Rating added',
'Competency Rating deleted': 'Competency Rating deleted',
'Competency Rating updated': 'Competency Rating updated',
'Competency Ratings': 'Competency Ratings',
'Complete': 'Complete',
'Complete a new Assessment': 'Complete a new Assessment',
'Completed': 'Completed',
'Completed Assessment': 'Completed Assessment',
'Completed Assessment Details': 'Completed Assessment Details',
'Completed Assessment added': 'Completed Assessment added',
'Completed Assessment deleted': 'Completed Assessment deleted',
'Completed Assessment updated': 'Completed Assessment updated',
'Completed Assessments': 'Completed Assessments',
'Completed surveys of this Series:': 'Completed surveys of this Series:',
'Complexion': 'Complexion',
'Compose': 'Compose',
'Compromised': 'Compromised',
'Concrete frame': 'Concrete frame',
'Concrete shear wall': 'Concrete shear wall',
'Condition': 'Condition',
'Configuration': 'Configuration',
'Configurations': 'Configurations',
'Configure Run-time Settings': 'Configure Run-time Settings',
'Configure connection details and authentication': 'Configure connection details and authentication',
'Configure resources to synchronize, update methods and policies': 'Configure resources to synchronise, update methods and policies',
'Configure the default proxy server to connect to remote repositories': 'Configure the default proxy server to connect to remote repositories',
'Confirm Shipment Received': 'Confirm Shipment Received',
'Confirmed': 'Confirmed',
'Confirming Organization': 'Confirming Organisation',
'Conflict Policy': 'Conflict Policy',
'Conflict policy': 'Conflict policy',
'Conflicts': 'Conflicts',
'Consignment Note': 'Consignment Note',
'Constraints Only': 'Constraints Only',
'Consumable': 'Consumable',
'Contact': 'Contact',
'Contact Data': 'Contact Data',
'Contact Details': 'Contact Details',
'Contact Info': 'Contact Info',
'Contact Information': 'Contact Information',
'Contact Information Added': 'Contact Information Added',
'Contact Information Deleted': 'Contact Information Deleted',
'Contact Information Updated': 'Contact Information Updated',
'Contact Method': 'Contact Method',
'Contact Name': 'Contact Name',
'Contact Person': 'Contact Person',
'Contact Phone': 'Contact Phone',
'Contact information added': 'Contact information added',
'Contact information deleted': 'Contact information deleted',
'Contact information updated': 'Contact information updated',
'Contact us': 'Contact us',
'Contacts': 'Contacts',
'Contents': 'Contents',
'Contributor': 'Contributor',
'Conversion Tool': 'Conversion Tool',
'Cooking NFIs': 'Cooking NFIs',
'Cooking Oil': 'Cooking Oil',
'Coordinate Conversion': 'Coordinate Conversion',
'Coping Activities': 'Coping Activities',
'Copy': 'Copy',
'Corn': 'Corn',
'Cost Type': 'Cost Type',
'Cost per Megabyte': 'Cost per Megabyte',
'Cost per Minute': 'Cost per Minute',
'Country': 'Country',
'Country is required!': 'Country is required!',
'Country of Residence': 'Country of Residence',
'County': 'County',
'Course': 'Course',
'Course Catalog': 'Course Catalog',
'Course Certificate Details': 'Course Certificate Details',
'Course Certificate added': 'Course Certificate added',
'Course Certificate deleted': 'Course Certificate deleted',
'Course Certificate updated': 'Course Certificate updated',
'Course Certificates': 'Course Certificates',
'Course Details': 'Course Details',
'Course added': 'Course added',
'Course deleted': 'Course deleted',
'Course updated': 'Course updated',
'Courses': 'Courses',
'Create & manage Distribution groups to receive Alerts': 'Create & manage Distribution groups to receive Alerts',
'Create Checklist': 'Create Checklist',
'Create Group Entry': 'Create Group Entry',
'Create Impact Assessment': 'Create Impact Assessment',
'Create Mobile Impact Assessment': 'Create Mobile Impact Assessment',
'Create New Asset': 'Create New Asset',
'Create New Catalog': 'Create New Catalog',
'Create New Catalog Item': 'Create New Catalog Item',
'Create New Event': 'Create New Event',
'Create New Item': 'Create New Item',
'Create New Item Category': 'Create New Item Category',
'Create New Location': 'Create New Location',
'Create New Request': 'Create New Request',
'Create New Scenario': 'Create New Scenario',
'Create New Vehicle': 'Create New Vehicle',
'Create Rapid Assessment': 'Create Rapid Assessment',
'Create Request': 'Create Request',
'Create Task': 'Create Task',
'Create a group entry in the registry.': 'Create a group entry in the registry.',
'Create new Office': 'Create new Office',
'Create new Organization': 'Create new Organisation',
'Create, enter, and manage surveys.': 'Create, enter, and manage surveys.',
'Creation of assessments': 'Creation of assessments',
'Credential Details': 'Credential Details',
'Credential added': 'Credential added',
'Credential deleted': 'Credential deleted',
'Credential updated': 'Credential updated',
'Credentialling Organization': 'Credentialling Organisation',
'Credentials': 'Credentials',
'Credit Card': 'Credit Card',
'Crime': 'Crime',
'Criteria': 'Criteria',
'Currency': 'Currency',
'Current Entries': 'Current Entries',
'Current Group Members': 'Current Group Members',
'Current Identities': 'Current Identities',
'Current Location': 'Current Location',
'Current Location Country': 'Current Location Country',
'Current Location Phone Number': 'Current Location Phone Number',
'Current Location Treating Hospital': 'Current Location Treating Hospital',
'Current Log Entries': 'Current Log Entries',
'Current Memberships': 'Current Memberships',
'Current Mileage': 'Current Mileage',
'Current Records': 'Current Records',
'Current Registrations': 'Current Registrations',
'Current Status': 'Current Status',
'Current Team Members': 'Current Team Members',
'Current Twitter account': 'Current Twitter account',
'Current community priorities': 'Current community priorities',
'Current general needs': 'Current general needs',
'Current greatest needs of vulnerable groups': 'Current greatest needs of vulnerable groups',
'Current health problems': 'Current health problems',
'Current number of patients': 'Current number of patients',
'Current problems, categories': 'Current problems, categories',
'Current problems, details': 'Current problems, details',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'Currently Configured Jobs': 'Currently Configured Jobs',
'Currently Configured Repositories': 'Currently Configured Repositories',
'Currently Configured Resources': 'Currently Configured Resources',
'Currently no Certifications registered': 'Currently no Certifications registered',
'Currently no Course Certificates registered': 'Currently no Course Certificates registered',
'Currently no Credentials registered': 'Currently no Credentials registered',
'Currently no Missions registered': 'Currently no Missions registered',
'Currently no Skill Equivalences registered': 'Currently no Skill Equivalences registered',
'Currently no Skills registered': 'Currently no Skills registered',
'Currently no Trainings registered': 'Currently no Trainings registered',
'Currently no entries in the catalog': 'Currently no entries in the catalogue',
'DC': 'DC',
'DNA Profile': 'DNA Profile',
'DNA Profiling': 'DNA Profiling',
'DVI Navigator': 'DVI Navigator',
'Dam Overflow': 'Dam Overflow',
'Damage': 'Damage',
'Dangerous Person': 'Dangerous Person',
'Dashboard': 'Dashboard',
'Data': 'Data',
'Data uploaded': 'Data uploaded',
'Database': 'Database',
'Date': 'Date',
'Date & Time': 'Date & Time',
'Date Available': 'Date Available',
'Date Delivered': 'Date Delivered',
'Date Expected': 'Date Expected',
'Date Received': 'Date Received',
'Date Requested': 'Date Requested',
'Date Required': 'Date Required',
'Date Required Until': 'Date Required Until',
'Date Sent': 'Date Sent',
'Date Until': 'Date Until',
'Date and Time': 'Date and Time',
'Date and time this report relates to.': 'Date and time this report relates to.',
'Date of Birth': 'Date of Birth',
'Date of Latest Information on Beneficiaries Reached': 'Date of Latest Information on Beneficiaries Reached',
'Date of Report': 'Date of Report',
'Date of Treatment': 'Date of Treatment',
'Date/Time': 'Date/Time',
'Date/Time of Find': 'Date/Time of Find',
'Date/Time when found': 'Date/Time when found',
'Date/Time when last seen': 'Date/Time when last seen',
'De-duplicator': 'De-duplicator',
'Dead Bodies': 'Dead Bodies',
'Dead Body': 'Dead Body',
'Dead Body Details': 'Dead Body Details',
'Dead Body Reports': 'Dead Body Reports',
'Dead body report added': 'Dead body report added',
'Dead body report deleted': 'Dead body report deleted',
'Dead body report updated': 'Dead body report updated',
'Deaths in the past 24h': 'Deaths in the past 24h',
'Deaths/24hrs': 'Deaths/24hrs',
'Decimal Degrees': 'Decimal Degrees',
'Decomposed': 'Decomposed',
'Default Height of the map window.': 'Default Height of the map window.',
'Default Location': 'Default Location',
'Default Map': 'Default Map',
'Default Marker': 'Default Marker',
'Default Width of the map window.': 'Default Width of the map window.',
'Defecation area for animals': 'Defecation area for animals',
'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': 'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).',
'Defines the icon used for display of features on handheld GPS.': 'Defines the icon used for display of features on handheld GPS.',
'Defines the icon used for display of features on interactive map & KML exports.': 'Defines the icon used for display of features on interactive map & KML exports.',
'Defines the marker used for display & the attributes visible in the popup.': 'Defines the marker used for display & the attributes visible in the popup.',
'Degrees must be a number between -180 and 180': 'Degrees must be a number between -180 and 180',
'Dehydration': 'Dehydration',
'Delete': 'Delete',
'Delete Alternative Item': 'Delete Alternative Item',
'Delete Assessment': 'Delete Assessment',
'Delete Assessment Summary': 'Delete Assessment Summary',
'Delete Asset': 'Delete Asset',
'Delete Asset Log Entry': 'Delete Asset Log Entry',
'Delete Baseline': 'Delete Baseline',
'Delete Baseline Type': 'Delete Baseline Type',
'Delete Brand': 'Delete Brand',
'Delete Budget': 'Delete Budget',
'Delete Bundle': 'Delete Bundle',
'Delete Catalog': 'Delete Catalog',
'Delete Catalog Item': 'Delete Catalog Item',
'Delete Certificate': 'Delete Certificate',
'Delete Certification': 'Delete Certification',
'Delete Cluster': 'Delete Cluster',
'Delete Cluster Subsector': 'Delete Cluster Subsector',
'Delete Commitment': 'Delete Commitment',
'Delete Commitment Item': 'Delete Commitment Item',
'Delete Competency Rating': 'Delete Competency Rating',
'Delete Contact Information': 'Delete Contact Information',
'Delete Course': 'Delete Course',
'Delete Course Certificate': 'Delete Course Certificate',
'Delete Credential': 'Delete Credential',
'Delete Document': 'Delete Document',
'Delete Donor': 'Delete Donor',
'Delete Event': 'Delete Event',
'Delete Feature Class': 'Delete Feature Class',
'Delete Feature Layer': 'Delete Feature Layer',
'Delete GPS data': 'Delete GPS data',
'Delete Group': 'Delete Group',
'Delete Home': 'Delete Home',
'Delete Hospital': 'Delete Hospital',
'Delete Image': 'Delete Image',
'Delete Impact': 'Delete Impact',
'Delete Impact Type': 'Delete Impact Type',
'Delete Incident Report': 'Delete Incident Report',
'Delete Item': 'Delete Item',
'Delete Item Category': 'Delete Item Category',
'Delete Item Pack': 'Delete Item Pack',
'Delete Job Role': 'Delete Job Role',
'Delete Kit': 'Delete Kit',
'Delete Layer': 'Delete Layer',
'Delete Level 1 Assessment': 'Delete Level 1 Assessment',
'Delete Level 2 Assessment': 'Delete Level 2 Assessment',
'Delete Location': 'Delete Location',
'Delete Map Configuration': 'Delete Map Configuration',
'Delete Marker': 'Delete Marker',
'Delete Membership': 'Delete Membership',
'Delete Message': 'Delete Message',
'Delete Mission': 'Delete Mission',
'Delete Need': 'Delete Need',
'Delete Need Type': 'Delete Need Type',
'Delete Office': 'Delete Office',
'Delete Order': 'Delete Order',
'Delete Organization': 'Delete Organisation',
'Delete Organization Domain': 'Delete Organisation Domain',
'Delete Patient': 'Delete Patient',
'Delete Person': 'Delete Person',
'Delete Photo': 'Delete Photo',
'Delete Population Statistic': 'Delete Population Statistic',
'Delete Position': 'Delete Position',
'Delete Project': 'Delete Project',
'Delete Projection': 'Delete Projection',
'Delete Rapid Assessment': 'Delete Rapid Assessment',
'Delete Received Shipment': 'Delete Received Shipment',
'Delete Record': 'Delete Record',
'Delete Relative': 'Delete Relative',
'Delete Report': 'Delete Report',
'Delete Request': 'Delete Request',
'Delete Request Item': 'Delete Request Item',
'Delete Request for Donations': 'Delete Request for Donations',
'Delete Request for Volunteers': 'Delete Request for Volunteers',
'Delete Resource': 'Delete Resource',
'Delete Room': 'Delete Room',
'Delete Saved Search': 'Delete Saved Search',
'Delete Scenario': 'Delete Scenario',
'Delete Section': 'Delete Section',
'Delete Sector': 'Delete Sector',
'Delete Sent Item': 'Delete Sent Item',
'Delete Sent Shipment': 'Delete Sent Shipment',
'Delete Service Profile': 'Delete Service Profile',
'Delete Skill': 'Delete Skill',
'Delete Skill Equivalence': 'Delete Skill Equivalence',
'Delete Skill Provision': 'Delete Skill Provision',
'Delete Skill Type': 'Delete Skill Type',
'Delete Staff Type': 'Delete Staff Type',
'Delete Status': 'Delete Status',
'Delete Subscription': 'Delete Subscription',
'Delete Subsector': 'Delete Subsector',
'Delete Training': 'Delete Training',
'Delete Unit': 'Delete Unit',
'Delete User': 'Delete User',
'Delete Vehicle': 'Delete Vehicle',
'Delete Vehicle Details': 'Delete Vehicle Details',
'Delete Warehouse': 'Delete Warehouse',
'Delete from Server?': 'Delete from Server?',
'Delete this Assessment Answer': 'Delete this Assessment Answer',
'Delete this Assessment Question': 'Delete this Assessment Question',
'Delete this Assessment Series': 'Delete this Assessment Series',
'Delete this Assessment Template': 'Delete this Assessment Template',
'Delete this Completed Assessment': 'Delete this Completed Assessment',
'Delete this Question Meta-Data': 'Delete this Question Meta-Data',
'Delete this Template Section': 'Delete this Template Section',
'Deliver To': 'Deliver To',
'Delivered To': 'Delivered To',
'Delphi Decision Maker': 'Delphi Decision Maker',
'Demographic': 'Demographic',
'Demonstrations': 'Demonstrations',
'Dental Examination': 'Dental Examination',
'Dental Profile': 'Dental Profile',
'Deployment Location': 'Deployment Location',
'Describe the condition of the roads to your hospital.': 'Describe the condition of the roads to your hospital.',
'Describe the procedure which this record relates to (e.g. "medical examination")': 'Describe the procedure which this record relates to (e.g. "medical examination")',
'Description': 'Description',
'Description of Contacts': 'Description of Contacts',
'Description of defecation area': 'Description of defecation area',
'Description of drinking water source': 'Description of drinking water source',
'Description of sanitary water source': 'Description of sanitary water source',
'Description of water source before the disaster': 'Description of water source before the disaster',
'Desire to remain with family': 'Desire to remain with family',
'Destination': 'Destination',
'Destroyed': 'Destroyed',
'Details': 'Details',
'Details field is required!': 'Details field is required!',
'Dialysis': 'Dialysis',
'Diaphragms, horizontal bracing': 'Diaphragms, horizontal bracing',
'Diarrhea': 'Diarrhea',
'Dignitary Visit': 'Dignitary Visit',
'Direction': 'Direction',
'Disable': 'Disable',
'Disabled': 'Disabled',
'Disabled participating in coping activities': 'Disabled participating in coping activities',
'Disabled?': 'Disabled?',
'Disaster Victim Identification': 'Disaster Victim Identification',
'Disaster Victim Registry': 'Disaster Victim Registry',
'Disaster clean-up/repairs': 'Disaster clean-up/repairs',
'Discharge (cusecs)': 'Discharge (cusecs)',
'Discharges/24hrs': 'Discharges/24hrs',
'Discussion Forum': 'Discussion Forum',
'Discussion Forum on item': 'Discussion Forum on item',
'Disease vectors': 'Disease vectors',
'Dispensary': 'Dispensary',
'Displaced': 'Displaced',
'Displaced Populations': 'Displaced Populations',
'Display': 'Display',
'Display Polygons?': 'Display Polygons?',
'Display Routes?': 'Display Routes?',
'Display Tracks?': 'Display Tracks?',
'Display Waypoints?': 'Display Waypoints?',
'Distance between defecation area and water source': 'Distance between defecation area and water source',
'Distance from %s:': 'Distance from %s:',
'Distance(Kms)': 'Distance(Kms)',
'Distribution': 'Distribution',
'Distribution groups': 'Distribution groups',
'District': 'District',
'Do you really want to delete these records?': 'Do you really want to delete these records?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': 'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!',
'Do you want to receive this shipment?': 'Do you want to receive this shipment?',
'Do you want to send these Committed items?': 'Do you want to send these Committed items?',
'Do you want to send this shipment?': 'Do you want to send this shipment?',
'Document Details': 'Document Details',
'Document Scan': 'Document Scan',
'Document added': 'Document added',
'Document deleted': 'Document deleted',
'Document removed': 'Document removed',
'Document updated': 'Document updated',
'Documents': 'Documents',
'Documents and Photos': 'Documents and Photos',
'Does this facility provide a cholera treatment center?': 'Does this facility provide a cholera treatment center?',
'Doing nothing (no structured activity)': 'Doing nothing (no structured activity)',
'Domain': 'Domain',
'Domestic chores': 'Domestic chores',
'Donated': 'Donated',
'Donation Certificate': 'Donation Certificate',
'Donation Phone #': 'Donation Phone #',
'Donations': 'Donations',
'Donor': 'Donor',
'Donor Details': 'Donor Details',
'Donor added': 'Donor added',
'Donor deleted': 'Donor deleted',
'Donor updated': 'Donor updated',
'Donors': 'Donors',
'Donors Report': 'Donors Report',
'Door frame': 'Door frame',
'Download OCR-able PDF Form': 'Download OCR-able PDF Form',
'Download Template': 'Download Template',
'Download last build': 'Download last build',
'Draft': 'Draft',
'Draft Features': 'Draft Features',
'Drainage': 'Drainage',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'Drawing up a Budget for Staff & Equipment across various Locations.',
'Drill Down by Group': 'Drill Down by Group',
'Drill Down by Incident': 'Drill Down by Incident',
'Drill Down by Shelter': 'Drill Down by Shelter',
'Driving License': 'Driving License',
'Drought': 'Drought',
'Drugs': 'Drugs',
'Dug Well': 'Dug Well',
'Dummy': 'Dummy',
'Duplicate?': 'Duplicate?',
'Duration': 'Duration',
'Dust Storm': 'Dust Storm',
'Dwelling': 'Dwelling',
'E-mail': 'E-mail',
'EMS Reason': 'EMS Reason',
'EMS Status': 'EMS Status',
'ER Status': 'ER Status',
'ER Status Reason': 'ER Status Reason',
'EXERCISE': 'EXERCISE',
'Early Recovery': 'Early Recovery',
'Earth Enabled?': 'Earth Enabled?',
'Earthquake': 'Earthquake',
'Edit': 'Edit',
'Edit Activity': 'Edit Activity',
'Edit Address': 'Edit Address',
'Edit Alternative Item': 'Edit Alternative Item',
'Edit Application': 'Edit Application',
'Edit Assessment': 'Edit Assessment',
'Edit Assessment Answer': 'Edit Assessment Answer',
'Edit Assessment Question': 'Edit Assessment Question',
'Edit Assessment Series': 'Edit Assessment Series',
'Edit Assessment Summary': 'Edit Assessment Summary',
'Edit Assessment Template': 'Edit Assessment Template',
'Edit Asset': 'Edit Asset',
'Edit Asset Log Entry': 'Edit Asset Log Entry',
'Edit Baseline': 'Edit Baseline',
'Edit Baseline Type': 'Edit Baseline Type',
'Edit Brand': 'Edit Brand',
'Edit Budget': 'Edit Budget',
'Edit Bundle': 'Edit Bundle',
'Edit Camp': 'Edit Camp',
'Edit Camp Service': 'Edit Camp Service',
'Edit Camp Type': 'Edit Camp Type',
'Edit Catalog': 'Edit Catalog',
'Edit Catalog Item': 'Edit Catalog Item',
'Edit Certificate': 'Edit Certificate',
'Edit Certification': 'Edit Certification',
'Edit Cluster': 'Edit Cluster',
'Edit Cluster Subsector': 'Edit Cluster Subsector',
'Edit Commitment': 'Edit Commitment',
'Edit Commitment Item': 'Edit Commitment Item',
'Edit Committed Person': 'Edit Committed Person',
'Edit Competency Rating': 'Edit Competency Rating',
'Edit Completed Assessment': 'Edit Completed Assessment',
'Edit Contact': 'Edit Contact',
'Edit Contact Information': 'Edit Contact Information',
'Edit Contents': 'Edit Contents',
'Edit Course': 'Edit Course',
'Edit Course Certificate': 'Edit Course Certificate',
'Edit Credential': 'Edit Credential',
'Edit Dead Body Details': 'Edit Dead Body Details',
'Edit Description': 'Edit Description',
'Edit Details': 'Edit Details',
'Edit Disaster Victims': 'Edit Disaster Victims',
'Edit Document': 'Edit Document',
'Edit Donor': 'Edit Donor',
'Edit Email Settings': 'Edit Email Settings',
'Edit Entry': 'Edit Entry',
'Edit Event': 'Edit Event',
'Edit Facility': 'Edit Facility',
'Edit Feature Class': 'Edit Feature Class',
'Edit Feature Layer': 'Edit Feature Layer',
'Edit Flood Report': 'Edit Flood Report',
'Edit GPS data': 'Edit GPS data',
'Edit Group': 'Edit Group',
'Edit Home': 'Edit Home',
'Edit Home Address': 'Edit Home Address',
'Edit Hospital': 'Edit Hospital',
'Edit Human Resource': 'Edit Human Resource',
'Edit Identification Report': 'Edit Identification Report',
'Edit Identity': 'Edit Identity',
'Edit Image Details': 'Edit Image Details',
'Edit Impact': 'Edit Impact',
'Edit Impact Type': 'Edit Impact Type',
'Edit Import File': 'Edit Import File',
'Edit Incident': 'Edit Incident',
'Edit Incident Report': 'Edit Incident Report',
'Edit Inventory Item': 'Edit Inventory Item',
'Edit Item': 'Edit Item',
'Edit Item Category': 'Edit Item Category',
'Edit Item Pack': 'Edit Item Pack',
'Edit Job': 'Edit Job',
'Edit Job Role': 'Edit Job Role',
'Edit Kit': 'Edit Kit',
'Edit Layer': 'Edit Layer',
'Edit Level %d Locations?': 'Edit Level %d Locations?',
'Edit Level 1 Assessment': 'Edit Level 1 Assessment',
'Edit Level 2 Assessment': 'Edit Level 2 Assessment',
'Edit Location': 'Edit Location',
'Edit Location Details': 'Edit Location Details',
'Edit Log Entry': 'Edit Log Entry',
'Edit Map Configuration': 'Edit Map Configuration',
'Edit Marker': 'Edit Marker',
'Edit Membership': 'Edit Membership',
'Edit Message': 'Edit Message',
'Edit Mission': 'Edit Mission',
'Edit Modem Settings': 'Edit Modem Settings',
'Edit Need': 'Edit Need',
'Edit Need Type': 'Edit Need Type',
'Edit Office': 'Edit Office',
'Edit Options': 'Edit Options',
'Edit Order': 'Edit Order',
'Edit Order Item': 'Edit Order Item',
'Edit Organization': 'Edit Organisation',
'Edit Organization Domain': 'Edit Organisation Domain',
'Edit Parameters': 'Edit Parameters',
'Edit Patient': 'Edit Patient',
'Edit Person Details': 'Edit Person Details',
'Edit Personal Effects Details': 'Edit Personal Effects Details',
'Edit Photo': 'Edit Photo',
'Edit Population Statistic': 'Edit Population Statistic',
'Edit Position': 'Edit Position',
'Edit Problem': 'Edit Problem',
'Edit Project': 'Edit Project',
'Edit Project Organization': 'Edit Project Organisation',
'Edit Projection': 'Edit Projection',
'Edit Question Meta-Data': 'Edit Question Meta-Data',
'Edit Rapid Assessment': 'Edit Rapid Assessment',
'Edit Received Item': 'Edit Received Item',
'Edit Received Shipment': 'Edit Received Shipment',
'Edit Record': 'Edit Record',
'Edit Registration': 'Edit Registration',
'Edit Relative': 'Edit Relative',
'Edit Repository Configuration': 'Edit Repository Configuration',
'Edit Request': 'Edit Request',
'Edit Request Item': 'Edit Request Item',
'Edit Request for Donations': 'Edit Request for Donations',
'Edit Request for Volunteers': 'Edit Request for Volunteers',
'Edit Requested Skill': 'Edit Requested Skill',
'Edit Resource': 'Edit Resource',
'Edit Resource Configuration': 'Edit Resource Configuration',
'Edit River': 'Edit River',
'Edit Role': 'Edit Role',
'Edit Room': 'Edit Room',
'Edit SMS Settings': 'Edit SMS Settings',
'Edit SMTP to SMS Settings': 'Edit SMTP to SMS Settings',
'Edit Saved Search': 'Edit Saved Search',
'Edit Scenario': 'Edit Scenario',
'Edit Sector': 'Edit Sector',
'Edit Sent Item': 'Edit Sent Item',
'Edit Setting': 'Edit Setting',
'Edit Settings': 'Edit Settings',
'Edit Shelter': 'Edit Shelter',
'Edit Shelter Service': 'Edit Shelter Service',
'Edit Shelter Type': 'Edit Shelter Type',
'Edit Skill': 'Edit Skill',
'Edit Skill Equivalence': 'Edit Skill Equivalence',
'Edit Skill Provision': 'Edit Skill Provision',
'Edit Skill Type': 'Edit Skill Type',
'Edit Solution': 'Edit Solution',
'Edit Staff Type': 'Edit Staff Type',
'Edit Subscription': 'Edit Subscription',
'Edit Subsector': 'Edit Subsector',
'Edit Synchronization Settings': 'Edit Synchronisation Settings',
'Edit Task': 'Edit Task',
'Edit Team': 'Edit Team',
'Edit Template Section': 'Edit Template Section',
'Edit Theme': 'Edit Theme',
'Edit Themes': 'Edit Themes',
'Edit Ticket': 'Edit Ticket',
'Edit Training': 'Edit Training',
'Edit Tropo Settings': 'Edit Tropo Settings',
'Edit User': 'Edit User',
'Edit Vehicle': 'Edit Vehicle',
'Edit Vehicle Details': 'Edit Vehicle Details',
'Edit Volunteer Availability': 'Edit Volunteer Availability',
'Edit Warehouse': 'Edit Warehouse',
'Edit Web API Settings': 'Edit Web API Settings',
'Edit current record': 'Edit current record',
'Edit message': 'Edit message',
'Edit the OpenStreetMap data for this area': 'Edit the OpenStreetMap data for this area',
'Editable?': 'Editable?',
'Education': 'Education',
'Education materials received': 'Education materials received',
'Education materials, source': 'Education materials, source',
'Effects Inventory': 'Effects Inventory',
'Eggs': 'Eggs',
'Either a shelter or a location must be specified': 'Either a shelter or a location must be specified',
'Either file upload or document URL required.': 'Either file upload or document URL required.',
'Either file upload or image URL required.': 'Either file upload or image URL required.',
'Elderly person headed households (>60 yrs)': 'Elderly person headed households (>60 yrs)',
'Electrical': 'Electrical',
'Electrical, gas, sewerage, water, hazmats': 'Electrical, gas, sewerage, water, hazmats',
'Elevated': 'Elevated',
'Elevators': 'Elevators',
'Email': 'Email',
'Email Address to which to send SMS messages. Assumes sending to phonenumber@address': 'Email Address to which to send SMS messages. Assumes sending to phonenumber@address',
'Email Settings': 'Email Settings',
'Email and SMS': 'Email and SMS',
'Email settings updated': 'Email settings updated',
'Embalming': 'Embalming',
'Embassy': 'Embassy',
'Emergency Capacity Building project': 'Emergency Capacity Building project',
'Emergency Department': 'Emergency Department',
'Emergency Shelter': 'Emergency Shelter',
'Emergency Support Facility': 'Emergency Support Facility',
'Emergency Support Service': 'Emergency Support Service',
'Emergency Telecommunications': 'Emergency Telecommunications',
'Enable': 'Enable',
'Enable/Disable Layers': 'Enable/Disable Layers',
'Enabled': 'Enabled',
'Enabled?': 'Enabled?',
'Enabling MapMaker layers disables the StreetView functionality': 'Enabling MapMaker layers disables the StreetView functionality',
'End Date': 'End Date',
'End date': 'End date',
'End date should be after start date': 'End date should be after start date',
'English': 'English',
'Enter Coordinates:': 'Enter Coordinates:',
'Enter a GPS Coord': 'Enter a GPS Coord',
'Enter a name for the spreadsheet you are uploading.': 'Enter a name for the spreadsheet you are uploading.',
'Enter a new support request.': 'Enter a new support request.',
'Enter a unique label!': 'Enter a unique label!',
'Enter a valid date before': 'Enter a valid date before',
'Enter a valid email': 'Enter a valid email',
'Enter a valid future date': 'Enter a valid future date',
'Enter a valid past date': 'Enter a valid past date',
'Enter some characters to bring up a list of possible matches': 'Enter some characters to bring up a list of possible matches',
'Enter some characters to bring up a list of possible matches.': 'Enter some characters to bring up a list of possible matches.',
'Enter tags separated by commas.': 'Enter tags separated by commas.',
'Enter the data for an assessment': 'Enter the data for an assessment',
'Enter the same password as above': 'Enter the same password as above',
'Enter your firstname': 'Enter your firstname',
'Enter your organization': 'Enter your organisation',
'Entered': 'Entered',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.',
'Environment': 'Environment',
'Equipment': 'Equipment',
'Error encountered while applying the theme.': 'Error encountered while applying the theme.',
'Error in message': 'Error in message',
'Error logs for "%(app)s"': 'Error logs for "%(app)s"',
'Est. Delivery Date': 'Est. Delivery Date',
'Estimated # of households who are affected by the emergency': 'Estimated # of households who are affected by the emergency',
'Estimated # of people who are affected by the emergency': 'Estimated # of people who are affected by the emergency',
'Estimated Overall Building Damage': 'Estimated Overall Building Damage',
'Estimated total number of people in institutions': 'Estimated total number of people in institutions',
'Euros': 'Euros',
'Evacuating': 'Evacuating',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)',
'Event': 'Event',
'Event Details': 'Event Details',
'Event added': 'Event added',
'Event deleted': 'Event deleted',
'Event updated': 'Event updated',
'Events': 'Events',
'Example': 'Example',
'Exceeded': 'Exceeded',
'Excel': 'Excel',
'Excellent': 'Excellent',
'Exclude contents': 'Exclude contents',
'Excreta disposal': 'Excreta disposal',
'Execute a pre-planned activity identified in <instruction>': 'Execute a pre-planned activity identified in <instruction>',
'Exercise': 'Exercise',
'Exercise?': 'Exercise?',
'Exercises mean all screens have a watermark & all notifications have a prefix.': 'Exercises mean all screens have a watermark & all notifications have a prefix.',
'Existing Placard Type': 'Existing Placard Type',
'Existing Sections': 'Existing Sections',
'Existing food stocks': 'Existing food stocks',
'Existing location cannot be converted into a group.': 'Existing location cannot be converted into a group.',
'Exits': 'Exits',
'Expected Return Home': 'Expected Return Home',
'Experience': 'Experience',
'Expiry Date': 'Expiry Date',
'Explosive Hazard': 'Explosive Hazard',
'Export': 'Export',
'Export Data': 'Export Data',
'Export Database as CSV': 'Export Database as CSV',
'Export in GPX format': 'Export in GPX format',
'Export in KML format': 'Export in KML format',
'Export in OSM format': 'Export in OSM format',
'Export in PDF format': 'Export in PDF format',
'Export in RSS format': 'Export in RSS format',
'Export in XLS format': 'Export in XLS format',
'Exterior Only': 'Exterior Only',
'Exterior and Interior': 'Exterior and Interior',
'Eye Color': 'Eye Colour',
'Facial hair, color': 'Facial hair, colour',
'Facial hair, type': 'Facial hair, type',
'Facial hear, length': 'Facial hair, length',
'Facilities': 'Facilities',
'Facility': 'Facility',
'Facility Details': 'Facility Details',
'Facility Operations': 'Facility Operations',
'Facility Status': 'Facility Status',
'Facility Type': 'Facility Type',
'Facility added': 'Facility added',
'Facility or Location': 'Facility or Location',
'Facility removed': 'Facility removed',
'Facility updated': 'Facility updated',
'Fail': 'Fail',
'Failed!': 'Failed!',
'Fair': 'Fair',
'Falling Object Hazard': 'Falling Object Hazard',
'Families/HH': 'Families/HH',
'Family': 'Family',
'Family tarpaulins received': 'Family tarpaulins received',
'Family tarpaulins, source': 'Family tarpaulins, source',
'Family/friends': 'Family/friends',
'Farmland/fishing material assistance, Rank': 'Farmland/fishing material assistance, Rank',
'Fatalities': 'Fatalities',
'Fax': 'Fax',
'Feature Class': 'Feature Class',
'Feature Class Details': 'Feature Class Details',
'Feature Class added': 'Feature Class added',
'Feature Class deleted': 'Feature Class deleted',
'Feature Class updated': 'Feature Class updated',
'Feature Classes': 'Feature Classes',
'Feature Classes are collections of Locations (Features) of the same type': 'Feature Classes are collections of Locations (Features) of the same type',
'Feature Layer Details': 'Feature Layer Details',
'Feature Layer added': 'Feature Layer added',
'Feature Layer deleted': 'Feature Layer deleted',
'Feature Layer updated': 'Feature Layer updated',
'Feature Layers': 'Feature Layers',
'Feature Namespace': 'Feature Namespace',
'Feature Request': 'Feature Request',
'Feature Type': 'Feature Type',
'Features Include': 'Features Include',
'Female': 'Female',
'Female headed households': 'Female headed households',
'Few': 'Few',
'Field': 'Field',
'Field Hospital': 'Field Hospital',
'File': 'File',
'File Imported': 'File Imported',
'File Importer': 'File Importer',
'File name': 'File name',
'Fill in Latitude': 'Fill in Latitude',
'Fill in Longitude': 'Fill in Longitude',
'Filter': 'Filter',
'Filter Field': 'Filter Field',
'Filter Value': 'Filter Value',
'Find': 'Find',
'Find Dead Body Report': 'Find Dead Body Report',
'Find Hospital': 'Find Hospital',
'Find Person Record': 'Find Person Record',
'Find a Person Record': 'Find a Person Record',
'Finder': 'Finder',
'Fingerprint': 'Fingerprint',
'Fingerprinting': 'Fingerprinting',
'Fingerprints': 'Fingerprints',
'Fire': 'Fire',
'Fire suppression and rescue': 'Fire suppression and rescue',
'First Name': 'First Name',
'First name': 'First name',
'Fishing': 'Fishing',
'Flash Flood': 'Flash Flood',
'Flash Freeze': 'Flash Freeze',
'Flexible Impact Assessments': 'Flexible Impact Assessments',
'Flood': 'Flood',
'Flood Alerts': 'Flood Alerts',
'Flood Alerts show water levels in various parts of the country': 'Flood Alerts show water levels in various parts of the country',
'Flood Report': 'Flood Report',
'Flood Report Details': 'Flood Report Details',
'Flood Report added': 'Flood Report added',
'Flood Report deleted': 'Flood Report deleted',
'Flood Report updated': 'Flood Report updated',
'Flood Reports': 'Flood Reports',
'Flow Status': 'Flow Status',
'Fog': 'Fog',
'Food': 'Food',
'Food Supply': 'Food Supply',
'Food assistance': 'Food assistance',
'Footer': 'Footer',
'Footer file %s missing!': 'Footer file %s missing!',
'For': 'For',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.',
'For messages that support alert network internal functions': 'For messages that support alert network internal functions',
'Forest Fire': 'Forest Fire',
'Formal camp': 'Formal camp',
'Format': 'Format',
"Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}",
'Forms': 'Forms',
'Found': 'Found',
'Foundations': 'Foundations',
'Freezing Drizzle': 'Freezing Drizzle',
'Freezing Rain': 'Freezing Rain',
'Freezing Spray': 'Freezing Spray',
'French': 'French',
'Friday': 'Friday',
'From': 'From',
'From Facility': 'From Facility',
'From Inventory': 'From Inventory',
'From Location': 'From Location',
'From Organization': 'From Organisation',
'Frost': 'Frost',
'Fulfil. Status': 'Fulfil. Status',
'Fulfillment Status': 'Fulfillment Status',
'Full': 'Full',
'Full beard': 'Full beard',
'Fullscreen Map': 'Fullscreen Map',
'Functions available': 'Functions available',
'Funds Contributed by this Organization': 'Funds Contributed by this Organisation',
'Funding Organization': 'Funding Organisation',
'Funeral': 'Funeral',
'Further Action Recommended': 'Further Action Recommended',
'GIS Reports of Shelter': 'GIS Reports of Shelter',
'GIS integration to view location details of the Shelter': 'GIS integration to view location details of the Shelter',
'GPS': 'GPS',
'GPS Data': 'GPS Data',
'GPS ID': 'GPS ID',
'GPS Marker': 'GPS Marker',
'GPS Track': 'GPS Track',
'GPS Track File': 'GPS Track File',
'GPS data': 'GPS data',
'GPS data added': 'GPS data added',
'GPS data deleted': 'GPS data deleted',
'GPS data updated': 'GPS data updated',
'GRN': 'GRN',
'GRN Status': 'GRN Status',
'Gale Wind': 'Gale Wind',
'Gap Analysis': 'Gap Analysis',
'Gap Analysis Map': 'Gap Analysis Map',
'Gap Analysis Report': 'Gap Analysis Report',
'Gender': 'Gender',
'General Comment': 'General Comment',
'General Medical/Surgical': 'General Medical/Surgical',
'General emergency and public safety': 'General emergency and public safety',
'General information on demographics': 'General information on demographics',
'Generate portable application': 'Generate portable application',
'Generator': 'Generator',
'Geocode': 'Geocode',
'Geocoder Selection': 'Geocoder Selection',
'Geometry Name': 'Geometry Name',
'Geonames.org search requires Internet connectivity!': 'Geonames.org search requires Internet connectivity!',
'Geophysical (inc. landslide)': 'Geophysical (inc. landslide)',
'Geotechnical': 'Geotechnical',
'Geotechnical Hazards': 'Geotechnical Hazards',
'German': 'German',
'Get incoming recovery requests as RSS feed': 'Get incoming recovery requests as RSS feed',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Give a brief description of the image, e.g. what can be seen where on the picture (optional).',
'Give information about where and when you have seen them': 'Give information about where and when you have seen them',
'Go to Request': 'Go to Request',
'Goatee': 'Goatee',
'Good': 'Good',
'Good Condition': 'Good Condition',
'Goods Received Note': 'Goods Received Note',
"Google Layers cannot be displayed if there isn't a valid API Key": "Google Layers cannot be displayed if there isn't a valid API Key",
'Government': 'Government',
'Government UID': 'Government UID',
'Government building': 'Government building',
'Grade': 'Grade',
'Great British Pounds': 'Great British Pounds',
'Greater than 10 matches. Please refine search further': 'Greater than 10 matches. Please refine search further',
'Greek': 'Greek',
'Green': 'Green',
'Ground movement, fissures': 'Ground movement, fissures',
'Ground movement, settlement, slips': 'Ground movement, settlement, slips',
'Group': 'Group',
'Group Description': 'Group Description',
'Group Details': 'Group Details',
'Group ID': 'Group ID',
'Group Member added': 'Group Member added',
'Group Members': 'Group Members',
'Group Memberships': 'Group Memberships',
'Group Name': 'Group Name',
'Group Title': 'Group Title',
'Group Type': 'Group Type',
'Group added': 'Group added',
'Group deleted': 'Group deleted',
'Group description': 'Group description',
'Group updated': 'Group updated',
'Groups': 'Groups',
'Groups removed': 'Groups removed',
'Guest': 'Guest',
'HFA Priorities': 'HFA Priorities',
'Hail': 'Hail',
'Hair Color': 'Hair Colour',
'Hair Length': 'Hair Length',
'Hair Style': 'Hair Style',
'Has data from this Reference Document been entered into Sahana?': 'Has data from this Reference Document been entered into Sahana?',
'Has the Certificate for receipt of the shipment been given to the sender?': 'Has the Certificate for receipt of the shipment been given to the sender?',
'Has the GRN (Goods Received Note) been completed?': 'Has the GRN (Goods Received Note) been completed?',
'Hazard Pay': 'Hazard Pay',
'Hazardous Material': 'Hazardous Material',
'Hazardous Road Conditions': 'Hazardous Road Conditions',
'Hazards': 'Hazards',
'Header Background': 'Header Background',
'Header background file %s missing!': 'Header background file %s missing!',
'Headquarters': 'Headquarters',
'Health': 'Health',
'Health care assistance, Rank': 'Health care assistance, Rank',
'Health center': 'Health center',
'Health center with beds': 'Health center with beds',
'Health center without beds': 'Health center without beds',
'Health services status': 'Health services status',
'Healthcare Worker': 'Healthcare Worker',
'Heat Wave': 'Heat Wave',
'Heat and Humidity': 'Heat and Humidity',
'Height': 'Height',
'Height (cm)': 'Height (cm)',
'Height (m)': 'Height (m)',
'Help': 'Help',
'Helps to monitor status of hospitals': 'Helps to monitor status of hospitals',
'Helps to report and search for missing persons': 'Helps to report and search for missing persons',
'Here are the solution items related to the problem.': 'Here are the solution items related to the problem.',
'Heritage Listed': 'Heritage Listed',
'Hierarchy Level 0 Name (i.e. Country)': 'Hierarchy Level 0 Name (i.e. Country)',
'Hierarchy Level 1 Name (e.g. State or Province)': 'Hierarchy Level 1 Name (e.g. State or Province)',
'Hierarchy Level 2 Name (e.g. District or County)': 'Hierarchy Level 2 Name (e.g. District or County)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Hierarchy Level 3 Name (e.g. City / Town / Village)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Hierarchy Level 4 Name (e.g. Neighbourhood)',
'Hierarchy Level 5 Name': 'Hierarchy Level 5 Name',
'High': 'High',
'High Water': 'High Water',
'Hindu': 'Hindu',
'Hit the back button on your browser to try again.': 'Hit the back button on your browser to try again.',
'Holiday Address': 'Holiday Address',
'Home': 'Home',
'Home Address': 'Home Address',
'Home City': 'Home City',
'Home Country': 'Home Country',
'Home Crime': 'Home Crime',
'Home Details': 'Home Details',
'Home Phone Number': 'Home Phone Number',
'Home Relative': 'Home Relative',
'Home added': 'Home added',
'Home deleted': 'Home deleted',
'Home updated': 'Home updated',
'Homes': 'Homes',
'Hospital': 'Hospital',
'Hospital Details': 'Hospital Details',
'Hospital Status Report': 'Hospital Status Report',
'Hospital information added': 'Hospital information added',
'Hospital information deleted': 'Hospital information deleted',
'Hospital information updated': 'Hospital information updated',
'Hospital status assessment.': 'Hospital status assessment.',
'Hospitals': 'Hospitals',
'Host National Society': 'Host National Society',
'Hot Spot': 'Hot Spot',
'Hour': 'Hour',
'Hours': 'Hours',
'Household kits received': 'Household kits received',
'Household kits, source': 'Household kits, source',
'How data shall be transferred': 'How data shall be transferred',
'How is this person affected by the disaster? (Select all that apply)': 'How is this person affected by the disaster? (Select all that apply)',
'How local records shall be updated': 'How local records shall be updated',
'How long will the food last?': 'How long will the food last?',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'How many Boys (0-17 yrs) are Dead due to the crisis',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'How many Boys (0-17 yrs) are Injured due to the crisis',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'How many Boys (0-17 yrs) are Missing due to the crisis',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'How many Girls (0-17 yrs) are Dead due to the crisis',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'How many Girls (0-17 yrs) are Injured due to the crisis',
'How many Girls (0-17 yrs) are Missing due to the crisis': 'How many Girls (0-17 yrs) are Missing due to the crisis',
'How many Men (18 yrs+) are Dead due to the crisis': 'How many Men (18 yrs+) are Dead due to the crisis',
'How many Men (18 yrs+) are Injured due to the crisis': 'How many Men (18 yrs+) are Injured due to the crisis',
'How many Men (18 yrs+) are Missing due to the crisis': 'How many Men (18 yrs+) are Missing due to the crisis',
'How many Women (18 yrs+) are Dead due to the crisis': 'How many Women (18 yrs+) are Dead due to the crisis',
'How many Women (18 yrs+) are Injured due to the crisis': 'How many Women (18 yrs+) are Injured due to the crisis',
'How many Women (18 yrs+) are Missing due to the crisis': 'How many Women (18 yrs+) are Missing due to the crisis',
'How many days will the supplies last?': 'How many days will the supplies last?',
'How many new cases have been admitted to this facility in the past 24h?': 'How many new cases have been admitted to this facility in the past 24h?',
'How many of the patients with the disease died in the past 24h at this facility?': 'How many of the patients with the disease died in the past 24h at this facility?',
'How many patients with the disease are currently hospitalized at this facility?': 'How many patients with the disease are currently hospitalized at this facility?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'How much detail is seen. A high Zoom level means a lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.',
'Human Resource': 'Human Resource',
'Human Resource Details': 'Human Resource Details',
'Human Resource Management': 'Human Resource Management',
'Human Resource added': 'Human Resource added',
'Human Resource removed': 'Human Resource removed',
'Human Resource updated': 'Human Resource updated',
'Human Resources': 'Human Resources',
'Human Resources Management': 'Human Resources Management',
'Humanitarian NGO': 'Humanitarian NGO',
'Hurricane': 'Hurricane',
'Hurricane Force Wind': 'Hurricane Force Wind',
'Hybrid Layer': 'Hybrid Layer',
'Hygiene': 'Hygiene',
'Hygiene NFIs': 'Hygiene NFIs',
'Hygiene kits received': 'Hygiene kits received',
'Hygiene kits, source': 'Hygiene kits, source',
'Hygiene practice': 'Hygiene practice',
'Hygiene problems': 'Hygiene problems',
'I accept. Create my account.': 'I accept. Create my account.',
'ID Tag': 'ID Tag',
'ID Tag Number': 'ID Tag Number',
'ID type': 'ID type',
'Ice Pressure': 'Ice Pressure',
'Iceberg': 'Iceberg',
'Identification': 'Identification',
'Identification Report': 'Identification Report',
'Identification Reports': 'Identification Reports',
'Identification Status': 'Identification Status',
'Identified as': 'Identified as',
'Identified by': 'Identified by',
'Identifier which the repository identifies itself with when sending synchronization requests': 'Identifier which the repository identifies itself with when sending synchronisation requests',
'Identity': 'Identity',
'Identity Details': 'Identity Details',
'Identity added': 'Identity added',
'Identity deleted': 'Identity deleted',
'Identity updated': 'Identity updated',
'If a ticket was issued then please provide the Ticket ID.': 'If a ticket was issued then please provide the Ticket ID.',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.',
'If it is a URL leading to HTML, then this will downloaded.': 'If it is a URL leading to HTML, then this will be downloaded.',
'If neither are defined, then the Default Marker is used.': 'If neither are defined, then the Default Marker is used.',
'If no marker defined then the system default marker is used': 'If no marker defined then the system default marker is used',
'If no, specify why': 'If no, specify why',
'If none are selected, then all are searched.': 'If none are selected, then all are searched.',
'If not found, you can have a new location created.': 'If not found, you can have a new location created.',
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": "If selected, then this Asset's Location will be updated whenever the Person's Location is updated.",
'If the location is a geographic area, then state at what level here.': 'If the location is a geographic area, then state at what level here.',
'If the request is for %s, please enter the details on the next screen.': 'If the request is for %s, please enter the details on the next screen.',
'If the request type is "Other", please enter request details here.': 'If the request type is "Other", please enter request details here.',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": "If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.",
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": "If this field is populated then a user who specifies this Organisation when signing up will be assigned as a Staff of this Organisation unless their domain doesn't match the domain field.",
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organisation',
'If this is set to True then mails will be deleted from the server after downloading.': 'If this is set to True then mails will be deleted from the server after downloading.',
'If this record should be restricted then select which role is required to access the record here.': 'If this record should be restricted then select which role is required to access the record here.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'If this record should be restricted then select which role(s) are permitted to access the record here.',
'If yes, specify what and by whom': 'If yes, specify what and by whom',
'If yes, which and how': 'If yes, which and how',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Add Hospital'.": "If you don't see the Hospital in the list, you can add a new one by clicking link 'Add Hospital'.",
"If you don't see the Office in the list, you can add a new one by clicking link 'Add Office'.": "If you don't see the Office in the list, you can add a new one by clicking link 'Add Office'.",
"If you don't see the Organization in the list, you can add a new one by clicking link 'Add Organization'.": "If you don't see the Organisation in the list, you can add a new one by clicking link 'Add Organisation'.",
"If you don't see the site in the list, you can add a new one by clicking link 'Add Project Site'.": "If you don't see the site in the list, you can add a new one by clicking link 'Add Project Site'.",
'If you have any questions or need support, please see': 'If you have any questions or need support, please see',
'If you know what the Geonames ID of this location is then you can enter it here.': 'If you know what the Geonames ID of this location is then you can enter it here.',
'If you know what the OSM ID of this location is then you can enter it here.': 'If you know what the OSM ID of this location is then you can enter it here.',
'If you need to add a new document then you can click here to attach one.': 'If you need to add a new document then you can click here to attach one.',
'If you want several values, then separate with': 'If you want several values, then separate with',
'If you would like to help, then please': 'If you would like to help, then please',
'Illegal Immigrant': 'Illegal Immigrant',
'Image': 'Image',
'Image Details': 'Image Details',
'Image File(s), one image per page': 'Image File(s), one image per page',
'Image Tags': 'Image Tags',
'Image Type': 'Image Type',
'Image Upload': 'Image Upload',
'Image added': 'Image added',
'Image deleted': 'Image deleted',
'Image updated': 'Image updated',
'Imagery': 'Imagery',
'Images': 'Images',
'Impact Assessments': 'Impact Assessments',
'Impact Details': 'Impact Details',
'Impact Type': 'Impact Type',
'Impact Type Details': 'Impact Type Details',
'Impact Type added': 'Impact Type added',
'Impact Type deleted': 'Impact Type deleted',
'Impact Type updated': 'Impact Type updated',
'Impact Types': 'Impact Types',
'Impact added': 'Impact added',
'Impact deleted': 'Impact deleted',
'Impact updated': 'Impact updated',
'Impacts': 'Impacts',
'Import': 'Import',
'Import Completed Responses': 'Import Completed Responses',
'Import Data': 'Import Data',
'Import File': 'Import File',
'Import File Details': 'Import File Details',
'Import File deleted': 'Import File deleted',
'Import Files': 'Import Files',
'Import Job Count': 'Import Job Count',
'Import Jobs': 'Import Jobs',
'Import New File': 'Import New File',
'Import Offices': 'Import Offices',
'Import Organizations': 'Import Organisations',
'Import Questions': 'Import Questions',
'Import Staff & Volunteers': 'Import Staff & Volunteers',
'Import Templates': 'Import Templates',
'Import from Ushahidi Instance': 'Import from Ushahidi Instance',
'Import multiple tables as CSV': 'Import multiple tables as CSV',
'Import/Export': 'Import/Export',
'Importantly where there are no aid services being provided': 'Importantly where there are no aid services being provided',
'Imported': 'Imported',
'Importing data from spreadsheets': 'Importing data from spreadsheets',
'Improper decontamination': 'Improper decontamination',
'Improper handling of dead bodies': 'Improper handling of dead bodies',
'In Catalogs': 'In Catalogs',
'In Inventories': 'In Inventories',
'In Process': 'In Process',
'In Progress': 'In Progress',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'In Window layout the map maximises to fill the window, so no need to set a large value here.',
'Inbound Mail Settings': 'Inbound Mail Settings',
'Incident': 'Incident',
'Incident Categories': 'Incident Categories',
'Incident Details': 'Incident Details',
'Incident Report': 'Incident Report',
'Incident Report Details': 'Incident Report Details',
'Incident Report added': 'Incident Report added',
'Incident Report deleted': 'Incident Report deleted',
'Incident Report updated': 'Incident Report updated',
'Incident Reporting': 'Incident Reporting',
'Incident Reporting System': 'Incident Reporting System',
'Incident Reports': 'Incident Reports',
'Incident added': 'Incident added',
'Incident removed': 'Incident removed',
'Incident updated': 'Incident updated',
'Incidents': 'Incidents',
'Include any special requirements such as equipment which they need to bring.': 'Include any special requirements such as equipment which they need to bring.',
'Incoming': 'Incoming',
'Incoming Shipment canceled': 'Incoming Shipment canceled',
'Incoming Shipment updated': 'Incoming Shipment updated',
'Incomplete': 'Incomplete',
'Individuals': 'Individuals',
'Industrial': 'Industrial',
'Industrial Crime': 'Industrial Crime',
'Industry Fire': 'Industry Fire',
'Infant (0-1)': 'Infant (0-1)',
'Infectious Disease': 'Infectious Disease',
'Infectious Disease (Hazardous Material)': 'Infectious Disease (Hazardous Material)',
'Infectious Diseases': 'Infectious Diseases',
'Infestation': 'Infestation',
'Informal Leader': 'Informal Leader',
'Informal camp': 'Informal camp',
'Information gaps': 'Information gaps',
'Infusion catheters available': 'Infusion catheters available',
'Infusion catheters need per 24h': 'Infusion catheters needed per 24h',
'Infusion catheters needed per 24h': 'Infusion catheters needed per 24h',
'Infusions available': 'Infusions available',
'Infusions needed per 24h': 'Infusions needed per 24h',
'Inspected': 'Inspected',
'Inspection Date': 'Inspection Date',
'Inspection date and time': 'Inspection date and time',
'Inspection time': 'Inspection time',
'Inspector ID': 'Inspector ID',
'Instant Porridge': 'Instant Porridge',
'Institution': 'Institution',
'Insufficient': 'Insufficient',
'Insufficient privileges': 'Insufficient privileges',
'Insufficient vars: Need module, resource, jresource, instance': 'Insufficient vars: Need module, resource, jresource, instance',
'Insurance Renewal Due': 'Insurance Renewal Due',
'Intergovernmental Organization': 'Intergovernmental Organisation',
'Interior walls, partitions': 'Interior walls, partitions',
'Internal State': 'Internal State',
'International NGO': 'International NGO',
'International Organization': 'International Organisation',
'Interview taking place at': 'Interview taking place at',
'Invalid': 'Invalid',
'Invalid Query': 'Invalid Query',
'Invalid email': 'Invalid email',
'Invalid phone number': 'Invalid phone number',
'Invalid phone number!': 'Invalid phone number!',
'Invalid request!': 'Invalid request!',
'Invalid ticket': 'Invalid ticket',
'Inventories': 'Inventories',
'Inventory': 'Inventory',
'Inventory Item': 'Inventory Item',
'Inventory Item Details': 'Inventory Item Details',
'Inventory Item updated': 'Inventory Item updated',
'Inventory Items': 'Inventory Items',
'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.',
'Inventory Management': 'Inventory Management',
'Inventory Stock Position': 'Inventory Stock Position',
'Inventory functionality is available for': 'Inventory functionality is available for',
'Inventory of Effects': 'Inventory of Effects',
'Is editing level L%d locations allowed?': 'Is editing level L%d locations allowed?',
'Is it safe to collect water?': 'Is it safe to collect water?',
'Is this a strict hierarchy?': 'Is this a strict hierarchy?',
'Issuing Authority': 'Issuing Authority',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.',
'Italian': 'Italian',
'Item': 'Item',
'Item Added to Shipment': 'Item Added to Shipment',
'Item Catalog Details': 'Item Catalog Details',
'Item Categories': 'Item Categories',
'Item Category': 'Item Category',
'Item Category Details': 'Item Category Details',
'Item Category added': 'Item Category added',
'Item Category deleted': 'Item Category deleted',
'Item Category updated': 'Item Category updated',
'Item Details': 'Item Details',
'Item Pack Details': 'Item Pack Details',
'Item Pack added': 'Item Pack added',
'Item Pack deleted': 'Item Pack deleted',
'Item Pack updated': 'Item Pack updated',
'Item Packs': 'Item Packs',
'Item added': 'Item added',
'Item added to Inventory': 'Item added to Inventory',
'Item added to order': 'Item added to order',
'Item added to shipment': 'Item added to shipment',
'Item already in Bundle!': 'Item already in Bundle!',
'Item already in Kit!': 'Item already in Kit!',
'Item already in budget!': 'Item already in budget!',
'Item deleted': 'Item deleted',
'Item removed from Inventory': 'Item removed from Inventory',
'Item removed from order': 'Item removed from order',
'Item removed from shipment': 'Item removed from shipment',
'Item updated': 'Item updated',
'Items': 'Items',
'Items in Category can be Assets': 'Items in Category can be Assets',
'Japanese': 'Japanese',
'Jerry can': 'Jerry can',
'Jew': 'Jew',
'Job Role': 'Job Role',
'Job Role Catalog': 'Job Role Catalog',
'Job Role Details': 'Job Role Details',
'Job Role added': 'Job Role added',
'Job Role deleted': 'Job Role deleted',
'Job Role updated': 'Job Role updated',
'Job Roles': 'Job Roles',
'Job Title': 'Job Title',
'Job added': 'Job added',
'Job deleted': 'Job deleted',
'Job updated updated': 'Job updated',
'Journal': 'Journal',
'Journal Entry Details': 'Journal Entry Details',
'Journal entry added': 'Journal entry added',
'Journal entry deleted': 'Journal entry deleted',
'Journal entry updated': 'Journal entry updated',
'Kit': 'Kit',
'Kit Contents': 'Kit Contents',
'Kit Details': 'Kit Details',
'Kit Updated': 'Kit Updated',
'Kit added': 'Kit added',
'Kit deleted': 'Kit deleted',
'Kit updated': 'Kit updated',
'Kits': 'Kits',
'Known Identities': 'Known Identities',
'Known incidents of violence against women/girls': 'Known incidents of violence against women/girls',
'Known incidents of violence since disaster': 'Known incidents of violence since disaster',
'Korean': 'Korean',
'LICENSE': 'LICENSE',
'Label Question:': 'Label Question:',
'Lack of material': 'Lack of material',
'Lack of school uniform': 'Lack of school uniform',
'Lack of supplies at school': 'Lack of supplies at school',
'Lack of transport to school': 'Lack of transport to school',
'Lactating women': 'Lactating women',
'Lahar': 'Lahar',
'Landslide': 'Landslide',
'Language': 'Language',
'Last Name': 'Last Name',
'Last Synchronization': 'Last Synchronisation',
'Last known location': 'Last known location',
'Last name': 'Last name',
'Last status': 'Last status',
'Last synchronized on': 'Last synchronised on',
'Last updated ': 'Last updated ',
'Last updated by': 'Last updated by',
'Last updated on': 'Last updated on',
'Latitude': 'Latitude',
'Latitude & Longitude': 'Latitude & Longitude',
'Latitude is North-South (Up-Down).': 'Latitude is North-South (Up-Down).',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.',
'Latitude of Map Center': 'Latitude of Map Center',
'Latitude of far northern end of the region of interest.': 'Latitude of far northern end of the region of interest.',
'Latitude of far southern end of the region of interest.': 'Latitude of far southern end of the region of interest.',
'Latitude should be between': 'Latitude should be between',
'Latrines': 'Latrines',
'Law enforcement, military, homeland and local/private security': 'Law enforcement, military, homeland and local/private security',
'Layer Details': 'Layer Details',
'Layer ID': 'Layer ID',
'Layer Name': 'Layer Name',
'Layer Type': 'Layer Type',
'Layer added': 'Layer added',
'Layer deleted': 'Layer deleted',
'Layer has been Disabled': 'Layer has been Disabled',
'Layer has been Enabled': 'Layer has been Enabled',
'Layer updated': 'Layer updated',
'Layers': 'Layers',
'Layers updated': 'Layers updated',
'Leader': 'Leader',
'Leave blank to request an unskilled person': 'Leave blank to request an unskilled person',
'Legend Format': 'Legend Format',
'Length (m)': 'Length (m)',
'Level': 'Level',
'Level 1': 'Level 1',
'Level 1 Assessment Details': 'Level 1 Assessment Details',
'Level 1 Assessment added': 'Level 1 Assessment added',
'Level 1 Assessment deleted': 'Level 1 Assessment deleted',
'Level 1 Assessment updated': 'Level 1 Assessment updated',
'Level 1 Assessments': 'Level 1 Assessments',
'Level 2': 'Level 2',
'Level 2 Assessment Details': 'Level 2 Assessment Details',
'Level 2 Assessment added': 'Level 2 Assessment added',
'Level 2 Assessment deleted': 'Level 2 Assessment deleted',
'Level 2 Assessment updated': 'Level 2 Assessment updated',
'Level 2 Assessments': 'Level 2 Assessments',
'Level 2 or detailed engineering evaluation recommended': 'Level 2 or detailed engineering evaluation recommended',
"Level is higher than parent's": "Level is higher than parent's",
'Library support not available for OpenID': 'Library support not available for OpenID',
'License Number': 'License Number',
'License Plate': 'License Plate',
'LineString': 'LineString',
'List': 'List',
'List / Add Baseline Types': 'List / Add Baseline Types',
'List / Add Impact Types': 'List / Add Impact Types',
'List / Add Services': 'List / Add Services',
'List / Add Types': 'List / Add Types',
'List Activities': 'List Activities',
'List All': 'List All',
'List All Activity Types': 'List All Activity Types',
'List All Assets': 'List All Assets',
'List All Catalog Items': 'List All Catalog Items',
'List All Catalogs & Add Items to Catalogs': 'List All Catalogs & Add Items to Catalogs',
'List All Commitments': 'List All Commitments',
'List All Entries': 'List All Entries',
'List All Item Categories': 'List All Item Categories',
'List All Items': 'List All Items',
'List All Memberships': 'List All Memberships',
'List All Orders': 'List All Orders',
'List All Project Sites': 'List All Project Sites',
'List All Projects': 'List All Projects',
'List All Received Shipments': 'List All Received Shipments',
'List All Records': 'List All Records',
'List All Requested Items': 'List All Requested Items',
'List All Requested Skills': 'List All Requested Skills',
'List All Requests': 'List All Requests',
'List All Sent Shipments': 'List All Sent Shipments',
'List All Vehicles': 'List All Vehicles',
'List Alternative Items': 'List Alternative Items',
'List Assessment Summaries': 'List Assessment Summaries',
'List Assessments': 'List Assessments',
'List Assets': 'List Assets',
'List Availability': 'List Availability',
'List Baseline Types': 'List Baseline Types',
'List Baselines': 'List Baselines',
'List Brands': 'List Brands',
'List Budgets': 'List Budgets',
'List Bundles': 'List Bundles',
'List Camp Services': 'List Camp Services',
'List Camp Types': 'List Camp Types',
'List Camps': 'List Camps',
'List Catalog Items': 'List Catalog Items',
'List Catalogs': 'List Catalogs',
'List Certificates': 'List Certificates',
'List Certifications': 'List Certifications',
'List Checklists': 'List Checklists',
'List Cluster Subsectors': 'List Cluster Subsectors',
'List Clusters': 'List Clusters',
'List Commitment Items': 'List Commitment Items',
'List Commitments': 'List Commitments',
'List Committed People': 'List Committed People',
'List Competency Ratings': 'List Competency Ratings',
'List Contact Information': 'List Contact Information',
'List Contacts': 'List Contacts',
'List Course Certificates': 'List Course Certificates',
'List Courses': 'List Courses',
'List Credentials': 'List Credentials',
'List Current': 'List Current',
'List Documents': 'List Documents',
'List Donors': 'List Donors',
'List Events': 'List Events',
'List Facilities': 'List Facilities',
'List Feature Classes': 'List Feature Classes',
'List Feature Layers': 'List Feature Layers',
'List Flood Reports': 'List Flood Reports',
'List GPS data': 'List GPS data',
'List Groups': 'List Groups',
'List Groups/View Members': 'List Groups/View Members',
'List Homes': 'List Homes',
'List Hospitals': 'List Hospitals',
'List Human Resources': 'List Human Resources',
'List Identities': 'List Identities',
'List Images': 'List Images',
'List Impact Assessments': 'List Impact Assessments',
'List Impact Types': 'List Impact Types',
'List Impacts': 'List Impacts',
'List Import Files': 'List Import Files',
'List Incident Reports': 'List Incident Reports',
'List Incidents': 'List Incidents',
'List Item Categories': 'List Item Categories',
'List Item Packs': 'List Item Packs',
'List Items': 'List Items',
'List Items in Inventory': 'List Items in Inventory',
'List Job Roles': 'List Job Roles',
'List Jobs': 'List Jobs',
'List Kits': 'List Kits',
'List Layers': 'List Layers',
'List Level 1 Assessments': 'List Level 1 Assessments',
'List Level 1 assessments': 'List Level 1 assessments',
'List Level 2 Assessments': 'List Level 2 Assessments',
'List Level 2 assessments': 'List Level 2 assessments',
'List Locations': 'List Locations',
'List Log Entries': 'List Log Entries',
'List Map Configurations': 'List Map Configurations',
'List Markers': 'List Markers',
'List Members': 'List Members',
'List Memberships': 'List Memberships',
'List Messages': 'List Messages',
'List Missing Persons': 'List Missing Persons',
'List Missions': 'List Missions',
'List Need Types': 'List Need Types',
'List Needs': 'List Needs',
'List Offices': 'List Offices',
'List Order Items': 'List Order Items',
'List Orders': 'List Orders',
'List Organization Domains': 'List Organisation Domains',
'List Organizations': 'List Organisations',
'List Patients': 'List Patients',
'List Personal Effects': 'List Personal Effects',
'List Persons': 'List Persons',
'List Photos': 'List Photos',
'List Population Statistics': 'List Population Statistics',
'List Positions': 'List Positions',
'List Problems': 'List Problems',
'List Project Organizations': 'List Project Organisations',
'List Project Sites': 'List Project Sites',
'List Projections': 'List Projections',
'List Projects': 'List Projects',
'List Rapid Assessments': 'List Rapid Assessments',
'List Received Items': 'List Received Items',
'List Received Shipments': 'List Received Shipments',
'List Records': 'List Records',
'List Registrations': 'List Registrations',
'List Relatives': 'List Relatives',
'List Reports': 'List Reports',
'List Repositories': 'List Repositories',
'List Request Items': 'List Request Items',
'List Requested Skills': 'List Requested Skills',
'List Requests': 'List Requests',
'List Requests for Donations': 'List Requests for Donations',
'List Requests for Volunteers': 'List Requests for Volunteers',
'List Resources': 'List Resources',
'List Rivers': 'List Rivers',
'List Roles': 'List Roles',
'List Rooms': 'List Rooms',
'List Saved Searches': 'List Saved Searches',
'List Scenarios': 'List Scenarios',
'List Sections': 'List Sections',
'List Sectors': 'List Sectors',
'List Sent Items': 'List Sent Items',
'List Sent Shipments': 'List Sent Shipments',
'List Service Profiles': 'List Service Profiles',
'List Settings': 'List Settings',
'List Shelter Services': 'List Shelter Services',
'List Shelter Types': 'List Shelter Types',
'List Shelters': 'List Shelters',
'List Skill Equivalences': 'List Skill Equivalences',
'List Skill Provisions': 'List Skill Provisions',
'List Skill Types': 'List Skill Types',
'List Skills': 'List Skills',
'List Solutions': 'List Solutions',
'List Staff Types': 'List Staff Types',
'List Status': 'List Status',
'List Subscriptions': 'List Subscriptions',
'List Subsectors': 'List Subsectors',
'List Support Requests': 'List Support Requests',
'List Tasks': 'List Tasks',
'List Teams': 'List Teams',
'List Themes': 'List Themes',
'List Tickets': 'List Tickets',
'List Trainings': 'List Trainings',
'List Units': 'List Units',
'List Users': 'List Users',
'List Vehicle Details': 'List Vehicle Details',
'List Vehicles': 'List Vehicles',
'List Warehouses': 'List Warehouses',
'List all': 'List all',
'List all Assessment Answer': 'List all Assessment Answer',
'List all Assessment Questions': 'List all Assessment Questions',
'List all Assessment Series': 'List all Assessment Series',
'List all Assessment Templates': 'List all Assessment Templates',
'List all Completed Assessment': 'List all Completed Assessment',
'List all Question Meta-Data': 'List all Question Meta-Data',
'List all Template Sections': 'List all Template Sections',
'List available Scenarios': 'List available Scenarios',
'List of Assessment Answers': 'List of Assessment Answers',
'List of Assessment Questions': 'List of Assessment Questions',
'List of Assessment Series': 'List of Assessment Series',
'List of Assessment Templates': 'List of Assessment Templates',
'List of CSV files': 'List of CSV files',
'List of CSV files uploaded': 'List of CSV files uploaded',
'List of Completed Assessments': 'List of Completed Assessments',
'List of Items': 'List of Items',
'List of Missing Persons': 'List of Missing Persons',
'List of Question Meta-Data': 'List of Question Meta-Data',
'List of Reports': 'List of Reports',
'List of Requests': 'List of Requests',
'List of Selected Answers': 'List of Selected Answers',
'List of Spreadsheets': 'List of Spreadsheets',
'List of Spreadsheets uploaded': 'List of Spreadsheets uploaded',
'List of Template Sections': 'List of Template Sections',
'List of addresses': 'List of addresses',
'List unidentified': 'List unidentified',
'List/Add': 'List/Add',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
'Live Help': 'Live Help',
'Livelihood': 'Livelihood',
'Load Cleaned Data into Database': 'Load Cleaned Data into Database',
'Load Raw File into Grid': 'Load Raw File into Grid',
'Load Search': 'Load Search',
'Loading': 'Loading',
'Local Name': 'Local Name',
'Local Names': 'Local Names',
'Location': 'Location',
'Location 1': 'Location 1',
'Location 2': 'Location 2',
'Location Details': 'Location Details',
'Location Hierarchy Level 0 Name': 'Location Hierarchy Level 0 Name',
'Location Hierarchy Level 1 Name': 'Location Hierarchy Level 1 Name',
'Location Hierarchy Level 2 Name': 'Location Hierarchy Level 2 Name',
'Location Hierarchy Level 3 Name': 'Location Hierarchy Level 3 Name',
'Location Hierarchy Level 4 Name': 'Location Hierarchy Level 4 Name',
'Location Hierarchy Level 5 Name': 'Location Hierarchy Level 5 Name',
'Location added': 'Location added',
'Location deleted': 'Location deleted',
'Location group cannot be a parent.': 'Location group cannot be a parent.',
'Location group cannot have a parent.': 'Location group cannot have a parent.',
'Location groups can be used in the Regions menu.': 'Location groups can be used in the Regions menu.',
'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.',
'Location updated': 'Location updated',
'Location: ': 'Location: ',
'Locations': 'Locations',
'Locations of this level need to have a parent of level': 'Locations of this level need to have a parent of level',
'Lockdown': 'Lockdown',
'Log': 'Log',
'Log Entry': 'Log Entry',
'Log Entry Deleted': 'Log Entry Deleted',
'Log Entry Details': 'Log Entry Details',
'Log entry added': 'Log entry added',
'Log entry deleted': 'Log entry deleted',
'Log entry updated': 'Log entry updated',
'Login': 'Login',
'Logistics': 'Logistics',
'Logo': 'Logo',
'Logo file %s missing!': 'Logo file %s missing!',
'Logout': 'Logout',
'Longitude': 'Longitude',
'Longitude is West - East (sideways).': 'Longitude is West - East (sideways).',
'Longitude is West-East (sideways).': 'Longitude is West-East (sideways).',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.',
'Longitude of Map Center': 'Longitude of Map Center',
'Longitude of far eastern end of the region of interest.': 'Longitude of far eastern end of the region of interest.',
'Longitude of far western end of the region of interest.': 'Longitude of far western end of the region of interest.',
'Longitude should be between': 'Longitude should be between',
'Looting': 'Looting',
'Lost': 'Lost',
'Lost Password': 'Lost Password',
'Low': 'Low',
'Magnetic Storm': 'Magnetic Storm',
'Major Damage': 'Major Damage',
'Major expenses': 'Major expenses',
'Major outward damage': 'Major outward damage',
'Make Commitment': 'Make Commitment',
'Make New Commitment': 'Make New Commitment',
'Make Request': 'Make Request',
'Make a Request for Donations': 'Make a Request for Donations',
'Make a Request for Volunteers': 'Make a Request for Volunteers',
'Make preparations per the <instruction>': 'Make preparations per the <instruction>',
'Male': 'Male',
'Manage Events': 'Manage Events',
'Manage Users & Roles': 'Manage Users & Roles',
'Manage Vehicles': 'Manage Vehicles',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.',
'Manage requests of hospitals for assistance.': 'Manage requests of hospitals for assistance.',
'Manager': 'Manager',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).',
'Mandatory. The URL to access the service.': 'Mandatory. The URL to access the service.',
'Manual Synchronization': 'Manual Synchronisation',
'Many': 'Many',
'Map': 'Map',
'Map Center Latitude': 'Map Center Latitude',
'Map Center Longitude': 'Map Center Longitude',
'Map Configuration': 'Map Configuration',
'Map Configuration Details': 'Map Configuration Details',
'Map Configuration added': 'Map Configuration added',
'Map Configuration deleted': 'Map Configuration deleted',
'Map Configuration removed': 'Map Configuration removed',
'Map Configuration updated': 'Map Configuration updated',
'Map Configurations': 'Map Configurations',
'Map Height': 'Map Height',
'Map Service Catalogue': 'Map Service Catalogue',
'Map Settings': 'Map Settings',
'Map Viewing Client': 'Map Viewing Client',
'Map Width': 'Map Width',
'Map Zoom': 'Map Zoom',
'Map of Hospitals': 'Map of Hospitals',
'MapMaker Hybrid Layer': 'MapMaker Hybrid Layer',
'MapMaker Layer': 'MapMaker Layer',
'Maps': 'Maps',
'Marine Security': 'Marine Security',
'Marital Status': 'Marital Status',
'Marker': 'Marker',
'Marker Details': 'Marker Details',
'Marker added': 'Marker added',
'Marker deleted': 'Marker deleted',
'Marker updated': 'Marker updated',
'Markers': 'Markers',
'Master': 'Master',
'Master Message Log': 'Master Message Log',
'Master Message Log to process incoming reports & requests': 'Master Message Log to process incoming reports & requests',
'Match Percentage': 'Match Percentage',
'Match Requests': 'Match Requests',
'Match percentage indicates the % match between these two records': 'Match percentage indicates the % match between these two records',
'Match?': 'Match?',
'Matching Catalog Items': 'Matching Catalog Items',
'Matching Items': 'Matching Items',
'Matching Records': 'Matching Records',
'Maximum Location Latitude': 'Maximum Location Latitude',
'Maximum Location Longitude': 'Maximum Location Longitude',
'Measure Area: Click the points around the polygon & end with a double-click': 'Measure Area: Click the points around the polygon & end with a double-click',
'Measure Length: Click the points along the path & end with a double-click': 'Measure Length: Click the points along the path & end with a double-click',
'Medical and public health': 'Medical and public health',
'Medium': 'Medium',
'Megabytes per Month': 'Megabytes per Month',
'Members': 'Members',
'Membership': 'Membership',
'Membership Details': 'Membership Details',
'Membership added': 'Membership added',
'Membership deleted': 'Membership deleted',
'Membership updated': 'Membership updated',
'Memberships': 'Memberships',
'Message': 'Message',
'Message Details': 'Message Details',
'Message Variable': 'Message Variable',
'Message added': 'Message added',
'Message deleted': 'Message deleted',
'Message updated': 'Message updated',
'Message variable': 'Message variable',
'Messages': 'Messages',
'Messaging': 'Messaging',
'Meteorite': 'Meteorite',
'Meteorological (inc. flood)': 'Meteorological (inc. flood)',
'Method used': 'Method used',
'Middle Name': 'Middle Name',
'Migrants or ethnic minorities': 'Migrants or ethnic minorities',
'Mileage': 'Mileage',
'Military': 'Military',
'Minimum Location Latitude': 'Minimum Location Latitude',
'Minimum Location Longitude': 'Minimum Location Longitude',
'Minimum shift time is 6 hours': 'Minimum shift time is 6 hours',
'Minor Damage': 'Minor Damage',
'Minor/None': 'Minor/None',
'Minorities participating in coping activities': 'Minorities participating in coping activities',
'Minute': 'Minute',
'Minutes must be a number between 0 and 60': 'Minutes must be a number between 0 and 60',
'Minutes per Month': 'Minutes per Month',
'Minutes should be a number greater than 0 and less than 60': 'Minutes should be a number greater than 0 and less than 60',
'Miscellaneous': 'Miscellaneous',
'Missing': 'Missing',
'Missing Person': 'Missing Person',
'Missing Person Details': 'Missing Person Details',
'Missing Person Registry': 'Missing Person Registry',
'Missing Persons': 'Missing Persons',
'Missing Persons Registry': 'Missing Persons Registry',
'Missing Persons Report': 'Missing Persons Report',
'Missing Report': 'Missing Report',
'Missing Senior Citizen': 'Missing Senior Citizen',
'Missing Vulnerable Person': 'Missing Vulnerable Person',
'Mission Details': 'Mission Details',
'Mission Record': 'Mission Record',
'Mission added': 'Mission added',
'Mission deleted': 'Mission deleted',
'Mission updated': 'Mission updated',
'Missions': 'Missions',
'Mobile': 'Mobile',
'Mobile Basic Assessment': 'Mobile Basic Assessment',
'Mobile Phone': 'Mobile Phone',
'Mode': 'Mode',
'Model/Type': 'Model/Type',
'Modem settings updated': 'Modem settings updated',
'Moderate': 'Moderate',
'Moderator': 'Moderator',
'Modify Information on groups and individuals': 'Modify Information on groups and individuals',
'Modifying data in spreadsheet before importing it to the database': 'Modifying data in spreadsheet before importing it to the database',
'Module': 'Module',
'Module provides access to information on current Flood Levels.': 'Module provides access to information on current Flood Levels.',
'Monday': 'Monday',
'Monthly Cost': 'Monthly Cost',
'Monthly Salary': 'Monthly Salary',
'Months': 'Months',
'Morgue': 'Morgue',
'Morgue Details': 'Morgue Details',
'Morgue Status': 'Morgue Status',
'Morgue Units Available': 'Morgue Units Available',
'Morgues': 'Morgues',
'Mosque': 'Mosque',
'Motorcycle': 'Motorcycle',
'Moustache': 'Moustache',
'MultiPolygon': 'MultiPolygon',
'Multiple': 'Multiple',
'Multiple Matches': 'Multiple Matches',
'Muslim': 'Muslim',
'Must a location have a parent location?': 'Must a location have a parent location?',
'My Details': 'My Details',
'My Tasks': 'My Tasks',
'N/A': 'N/A',
'NO': 'NO',
'NZSEE Level 1': 'NZSEE Level 1',
'NZSEE Level 2': 'NZSEE Level 2',
'Name': 'Name',
'Name and/or ID': 'Name and/or ID',
'Name field is required!': 'Name field is required!',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Name of the file (& optional sub-path) located in static which should be used for the background of the header.',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Name of the file (& optional sub-path) located in static which should be used for the top-left image.',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Name of the file (& optional sub-path) located in views which should be used for footer.',
'Name of the person in local language and script (optional).': 'Name of the person in local language and script (optional).',
'Name of the repository (for you own reference)': 'Name of the repository (for you own reference)',
'Name, Org and/or ID': 'Name, Org and/or ID',
'Names can be added in multiple languages': 'Names can be added in multiple languages',
'National': 'National',
'National ID Card': 'National ID Card',
'National NGO': 'National NGO',
'Nationality': 'Nationality',
'Nationality of the person.': 'Nationality of the person.',
'Nautical Accident': 'Nautical Accident',
'Nautical Hijacking': 'Nautical Hijacking',
'Need Type': 'Need Type',
'Need Type Details': 'Need Type Details',
'Need Type added': 'Need Type added',
'Need Type deleted': 'Need Type deleted',
'Need Type updated': 'Need Type updated',
'Need Types': 'Need Types',
"Need a 'url' argument!": "Need a 'url' argument!",
'Need added': 'Need added',
'Need deleted': 'Need deleted',
'Need to be logged-in to be able to submit assessments': 'Need to be logged-in to be able to submit assessments',
'Need to configure Twitter Authentication': 'Need to configure Twitter Authentication',
'Need to specify a Budget!': 'Need to specify a Budget!',
'Need to specify a Kit!': 'Need to specify a Kit!',
'Need to specify a Resource!': 'Need to specify a Resource!',
'Need to specify a bundle!': 'Need to specify a bundle!',
'Need to specify a group!': 'Need to specify a group!',
'Need to specify a location to search for.': 'Need to specify a location to search for.',
'Need to specify a role!': 'Need to specify a role!',
'Need to specify a table!': 'Need to specify a table!',
'Need to specify a user!': 'Need to specify a user!',
'Need updated': 'Need updated',
'Needs': 'Needs',
'Needs Details': 'Needs Details',
'Needs Maintenance': 'Needs Maintenance',
'Needs to reduce vulnerability to violence': 'Needs to reduce vulnerability to violence',
'Negative Flow Isolation': 'Negative Flow Isolation',
'Neighborhood': 'Neighborhood',
'Neighbourhood': 'Neighbourhood',
'Neighbouring building hazard': 'Neighbouring building hazard',
'Neonatal ICU': 'Neonatal ICU',
'Neonatology': 'Neonatology',
'Network': 'Network',
'Neurology': 'Neurology',
'New': 'New',
'New Assessment': 'New Assessment',
'New Assessment reported from': 'New Assessment reported from',
'New Certificate': 'New Certificate',
'New Checklist': 'New Checklist',
'New Entry': 'New Entry',
'New Event': 'New Event',
'New Home': 'New Home',
'New Item Category': 'New Item Category',
'New Job Role': 'New Job Role',
'New Location': 'New Location',
'New Location Group': 'New Location Group',
'New Patient': 'New Patient',
'New Record': 'New Record',
'New Relative': 'New Relative',
'New Request': 'New Request',
'New Scenario': 'New Scenario',
'New Skill': 'New Skill',
'New Solution Choice': 'New Solution Choice',
'New Staff Member': 'New Staff Member',
'New Support Request': 'New Support Request',
'New Team': 'New Team',
'New Ticket': 'New Ticket',
'New Training Course': 'New Training Course',
'New Volunteer': 'New Volunteer',
'New cases in the past 24h': 'New cases in the past 24h',
'Next': 'Next',
'Next View': 'Next View',
'No': 'No',
'No Activities Found': 'No Activities Found',
'No Activities currently registered in this event': 'No Activities currently registered in this event',
'No Alternative Items currently registered': 'No Alternative Items currently registered',
'No Assessment Answers currently registered': 'No Assessment Answers currently registered',
'No Assessment Question currently registered': 'No Assessment Question currently registered',
'No Assessment Series currently registered': 'No Assessment Series currently registered',
'No Assessment Summaries currently registered': 'No Assessment Summaries currently registered',
'No Assessment Template currently registered': 'No Assessment Template currently registered',
'No Assessments currently registered': 'No Assessments currently registered',
'No Assets currently registered': 'No Assets currently registered',
'No Assets currently registered in this event': 'No Assets currently registered in this event',
'No Assets currently registered in this scenario': 'No Assets currently registered in this scenario',
'No Baseline Types currently registered': 'No Baseline Types currently registered',
'No Baselines currently registered': 'No Baselines currently registered',
'No Brands currently registered': 'No Brands currently registered',
'No Budgets currently registered': 'No Budgets currently registered',
'No Bundles currently registered': 'No Bundles currently registered',
'No Camp Services currently registered': 'No Camp Services currently registered',
'No Camp Types currently registered': 'No Camp Types currently registered',
'No Camps currently registered': 'No Camps currently registered',
'No Catalog Items currently registered': 'No Catalog Items currently registered',
'No Catalogs currently registered': 'No Catalogs currently registered',
'No Checklist available': 'No Checklist available',
'No Cluster Subsectors currently registered': 'No Cluster Subsectors currently registered',
'No Clusters currently registered': 'No Clusters currently registered',
'No Commitment Items currently registered': 'No Commitment Items currently registered',
'No Commitments': 'No Commitments',
'No Completed Assessments currently registered': 'No Completed Assessments currently registered',
'No Credentials currently set': 'No Credentials currently set',
'No Details currently registered': 'No Details currently registered',
'No Documents currently attached to this request': 'No Documents currently attached to this request',
'No Documents found': 'No Documents found',
'No Donors currently registered': 'No Donors currently registered',
'No Events currently registered': 'No Events currently registered',
'No Facilities currently registered in this event': 'No Facilities currently registered in this event',
'No Facilities currently registered in this scenario': 'No Facilities currently registered in this scenario',
'No Feature Classes currently defined': 'No Feature Classes currently defined',
'No Feature Layers currently defined': 'No Feature Layers currently defined',
'No Flood Reports currently registered': 'No Flood Reports currently registered',
'No GPS data currently registered': 'No GPS data currently registered',
'No Groups currently defined': 'No Groups currently defined',
'No Groups currently registered': 'No Groups currently registered',
'No Homes currently registered': 'No Homes currently registered',
'No Hospitals currently registered': 'No Hospitals currently registered',
'No Human Resources currently registered in this event': 'No Human Resources currently registered in this event',
'No Human Resources currently registered in this scenario': 'No Human Resources currently registered in this scenario',
'No Identification Report Available': 'No Identification Report Available',
'No Identities currently registered': 'No Identities currently registered',
'No Image': 'No Image',
'No Images currently registered': 'No Images currently registered',
'No Impact Types currently registered': 'No Impact Types currently registered',
'No Impacts currently registered': 'No Impacts currently registered',
'No Import Files currently uploaded': 'No Import Files currently uploaded',
'No Incident Reports currently registered': 'No Incident Reports currently registered',
'No Incidents currently registered in this event': 'No Incidents currently registered in this event',
'No Incoming Shipments': 'No Incoming Shipments',
'No Inventories currently have suitable alternative items in stock': 'No Inventories currently have suitable alternative items in stock',
'No Inventories currently have this item in stock': 'No Inventories currently have this item in stock',
'No Item Categories currently registered': 'No Item Categories currently registered',
'No Item Packs currently registered': 'No Item Packs currently registered',
'No Items currently registered': 'No Items currently registered',
'No Items currently registered in this Inventory': 'No Items currently registered in this Inventory',
'No Kits currently registered': 'No Kits currently registered',
'No Level 1 Assessments currently registered': 'No Level 1 Assessments currently registered',
'No Level 2 Assessments currently registered': 'No Level 2 Assessments currently registered',
'No Locations currently available': 'No Locations currently available',
'No Locations currently registered': 'No Locations currently registered',
'No Map Configurations currently defined': 'No Map Configurations currently defined',
'No Map Configurations currently registered in this event': 'No Map Configurations currently registered in this event',
'No Map Configurations currently registered in this scenario': 'No Map Configurations currently registered in this scenario',
'No Markers currently available': 'No Markers currently available',
'No Match': 'No Match',
'No Matching Catalog Items': 'No Matching Catalog Items',
'No Matching Items': 'No Matching Items',
'No Matching Records': 'No Matching Records',
'No Members currently registered': 'No Members currently registered',
'No Memberships currently defined': 'No Memberships currently defined',
'No Memberships currently registered': 'No Memberships currently registered',
'No Messages currently in Outbox': 'No Messages currently in Outbox',
'No Need Types currently registered': 'No Need Types currently registered',
'No Needs currently registered': 'No Needs currently registered',
'No Offices currently registered': 'No Offices currently registered',
'No Order Items currently registered': 'No Order Items currently registered',
'No Orders registered': 'No Orders registered',
'No Organization Domains currently registered': 'No Organisation Domains currently registered',
'No Organizations currently registered': 'No Organisations currently registered',
'No Packs for Item': 'No Packs for Item',
'No Patients currently registered': 'No Patients currently registered',
'No People currently committed': 'No People currently committed',
'No People currently registered in this camp': 'No People currently registered in this camp',
'No People currently registered in this shelter': 'No People currently registered in this shelter',
'No Persons currently registered': 'No Persons currently registered',
'No Persons currently reported missing': 'No Persons currently reported missing',
'No Persons found': 'No Persons found',
'No Photos found': 'No Photos found',
'No Picture': 'No Picture',
'No Population Statistics currently registered': 'No Population Statistics currently registered',
'No Presence Log Entries currently registered': 'No Presence Log Entries currently registered',
'No Problems currently defined': 'No Problems currently defined',
'No Projections currently defined': 'No Projections currently defined',
'No Projects currently registered': 'No Projects currently registered',
'No Question Meta-Data currently registered': 'No Question Meta-Data currently registered',
'No Rapid Assessments currently registered': 'No Rapid Assessments currently registered',
'No Ratings for Skill Type': 'No Ratings for Skill Type',
'No Received Items currently registered': 'No Received Items currently registered',
'No Received Shipments': 'No Received Shipments',
'No Records currently available': 'No Records currently available',
'No Relatives currently registered': 'No Relatives currently registered',
'No Request Items currently registered': 'No Request Items currently registered',
'No Requests': 'No Requests',
'No Requests for Donations': 'No Requests for Donations',
'No Requests for Volunteers': 'No Requests for Volunteers',
'No Rivers currently registered': 'No Rivers currently registered',
'No Roles currently defined': 'No Roles currently defined',
'No Rooms currently registered': 'No Rooms currently registered',
'No Scenarios currently registered': 'No Scenarios currently registered',
'No Search saved': 'No Search saved',
'No Sections currently registered': 'No Sections currently registered',
'No Sectors currently registered': 'No Sectors currently registered',
'No Sent Items currently registered': 'No Sent Items currently registered',
'No Sent Shipments': 'No Sent Shipments',
'No Settings currently defined': 'No Settings currently defined',
'No Shelter Services currently registered': 'No Shelter Services currently registered',
'No Shelter Types currently registered': 'No Shelter Types currently registered',
'No Shelters currently registered': 'No Shelters currently registered',
'No Skills currently requested': 'No Skills currently requested',
'No Solutions currently defined': 'No Solutions currently defined',
'No Staff Types currently registered': 'No Staff Types currently registered',
'No Subscription available': 'No Subscription available',
'No Subsectors currently registered': 'No Subsectors currently registered',
'No Support Requests currently registered': 'No Support Requests currently registered',
'No Tasks currently registered in this event': 'No Tasks currently registered in this event',
'No Tasks currently registered in this scenario': 'No Tasks currently registered in this scenario',
'No Teams currently registered': 'No Teams currently registered',
'No Template Section currently registered': 'No Template Section currently registered',
'No Themes currently defined': 'No Themes currently defined',
'No Tickets currently registered': 'No Tickets currently registered',
'No Users currently registered': 'No Users currently registered',
'No Vehicle Details currently defined': 'No Vehicle Details currently defined',
'No Vehicles currently registered': 'No Vehicles currently registered',
'No Warehouses currently registered': 'No Warehouses currently registered',
'No access at all': 'No access at all',
'No access to this record!': 'No access to this record!',
'No action recommended': 'No action recommended',
'No contact information available': 'No contact information available',
'No contact method found': 'No contact method found',
'No contacts currently registered': 'No contacts currently registered',
'No data in this table - cannot create PDF!': 'No data in this table - cannot create PDF!',
'No databases in this application': 'No databases in this application',
'No dead body reports available': 'No dead body reports available',
'No entries found': 'No entries found',
'No entry available': 'No entry available',
'No forms to the corresponding resource have been downloaded yet.': 'No forms for the corresponding resource have been downloaded yet.',
'No jobs configured': 'No jobs configured',
'No jobs configured yet': 'No jobs configured yet',
'No match': 'No match',
'No matching records found': 'No matching records found',
'No messages in the system': 'No messages in the system',
'No person record found for current user.': 'No person record found for current user.',
'No problem group defined yet': 'No problem group defined yet',
'No reports available.': 'No reports available.',
'No reports currently available': 'No reports currently available',
'No repositories configured': 'No repositories configured',
'No requests found': 'No requests found',
'No resources configured yet': 'No resources configured yet',
'No resources currently reported': 'No resources currently reported',
'No service profile available': 'No service profile available',
'No skills currently set': 'No skills currently set',
'No staff or volunteers currently registered': 'No staff or volunteers currently registered',
'No status information available': 'No status information available',
'No tasks currently assigned': 'No tasks currently assigned',
'No tasks currently registered': 'No tasks currently registered',
'No units currently registered': 'No units currently registered',
'No volunteer availability registered': 'No volunteer availability registered',
'Non-structural Hazards': 'Non-structural Hazards',
'None': 'None',
'None (no such record)': 'None (no such record)',
'Noodles': 'Noodles',
'Normal': 'Normal',
'Not Applicable': 'Not Applicable',
'Not Authorised!': 'Not Authorised!',
'Not Possible': 'Not Possible',
'Not authorised!': 'Not authorised!',
'Not installed or incorrectly configured.': 'Not installed or incorrectly configured.',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead',
'Notes': 'Notes',
'Notice to Airmen': 'Notice to Airmen',
'Number of Patients': 'Number of Patients',
'Number of People Required': 'Number of People Required',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.',
'Number of alternative places for studying': 'Number of alternative places for studying',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Number of available/vacant beds of that type in this unit at the time of reporting.',
'Number of bodies found': 'Number of bodies found',
'Number of deaths during the past 24 hours.': 'Number of deaths during the past 24 hours.',
'Number of discharged patients during the past 24 hours.': 'Number of discharged patients during the past 24 hours.',
'Number of doctors': 'Number of doctors',
'Number of in-patients at the time of reporting.': 'Number of in-patients at the time of reporting.',
'Number of newly admitted patients during the past 24 hours.': 'Number of newly admitted patients during the past 24 hours.',
'Number of non-medical staff': 'Number of non-medical staff',
'Number of nurses': 'Number of nurses',
'Number of private schools': 'Number of private schools',
'Number of public schools': 'Number of public schools',
'Number of religious schools': 'Number of religious schools',
'Number of residential units': 'Number of residential units',
'Number of residential units not habitable': 'Number of residential units not habitable',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Number of vacant/available beds in this hospital. Automatically updated from daily reports.',
'Number of vacant/available units to which victims can be transported immediately.': 'Number of vacant/available units to which victims can be transported immediately.',
'Number or Label on the identification tag this person is wearing (if any).': 'Number or Label on the identification tag this person is wearing (if any).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Number/Percentage of affected population that is Female & Aged 0-5',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Number/Percentage of affected population that is Female & Aged 13-17',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Number/Percentage of affected population that is Female & Aged 18-25',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Number/Percentage of affected population that is Female & Aged 26-60',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Number/Percentage of affected population that is Female & Aged 6-12',
'Number/Percentage of affected population that is Female & Aged 61+': 'Number/Percentage of affected population that is Female & Aged 61+',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Number/Percentage of affected population that is Male & Aged 0-5',
'Number/Percentage of affected population that is Male & Aged 13-17': 'Number/Percentage of affected population that is Male & Aged 13-17',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Number/Percentage of affected population that is Male & Aged 18-25',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Number/Percentage of affected population that is Male & Aged 26-60',
'Number/Percentage of affected population that is Male & Aged 6-12': 'Number/Percentage of affected population that is Male & Aged 6-12',
'Number/Percentage of affected population that is Male & Aged 61+': 'Number/Percentage of affected population that is Male & Aged 61+',
'Numeric Question:': 'Numeric Question:',
'Nursery Beds': 'Nursery Beds',
'Nutrition': 'Nutrition',
'Nutrition problems': 'Nutrition problems',
'OCR Form Review': 'OCR Form Review',
'OK': 'OK',
'OR Reason': 'OR Reason',
'OR Status': 'OR Status',
'OR Status Reason': 'OR Status Reason',
'Objectives': 'Objectives',
'Observer': 'Observer',
'Obsolete': 'Obsolete',
'Obstetrics/Gynecology': 'Obstetrics/Gynecology',
'Office': 'Office',
'Office Address': 'Office Address',
'Office Details': 'Office Details',
'Office Phone': 'Office Phone',
'Office added': 'Office added',
'Office deleted': 'Office deleted',
'Office updated': 'Office updated',
'Offices': 'Offices',
'Older people as primary caregivers of children': 'Older people as primary caregivers of children',
'Older people in care homes': 'Older people in care homes',
'Older people participating in coping activities': 'Older people participating in coping activities',
'Older person (>60 yrs)': 'Older person (>60 yrs)',
'On by default?': 'On by default?',
'On by default? (only applicable to Overlays)': 'On by default? (only applicable to Overlays)',
'One Time Cost': 'One Time Cost',
'One time cost': 'One time cost',
'One-time': 'One-time',
'One-time costs': 'One-time costs',
'Oops! Something went wrong...': 'Oops! Something went wrong...',
'Oops! something went wrong on our side.': 'Oops! Something went wrong on our side.',
'Opacity (1 for opaque, 0 for fully-transparent)': 'Opacity (1 for opaque, 0 for fully-transparent)',
'Open': 'Open',
'Open area': 'Open area',
'Open recent': 'Open recent',
'Operating Rooms': 'Operating Rooms',
'Optical Character Recognition': 'Optical Character Recognition',
'Optical Character Recognition for reading the scanned handwritten paper forms.': 'Optical Character Recognition for reading the scanned handwritten paper forms.',
'Optional': 'Optional',
'Optional Subject to put into Email - can be used as a Security Password by the service provider': 'Optional Subject to put into Email - can be used as a Security Password by the service provider',
'Optional link to an Incident which this Assessment was triggered by.': 'Optional link to an Incident which this Assessment was triggered by.',
'Optional selection of a MapServer map.': 'Optional selection of a MapServer map.',
'Optional selection of a background color.': 'Optional selection of a background colour.',
'Optional selection of an alternate style.': 'Optional selection of an alternate style.',
'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.',
'Optional. The name of an element whose contents should be put into Popups.': 'Optional. The name of an element whose contents should be put into Popups.',
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.",
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.',
'Options': 'Options',
'Order': 'Order',
'Order Created': 'Order Created',
'Order Details': 'Order Details',
'Order Item Details': 'Order Item Details',
'Order Item updated': 'Order Item updated',
'Order Items': 'Order Items',
'Order canceled': 'Order canceled',
'Order updated': 'Order updated',
'Orders': 'Orders',
'Organization': 'Organisation',
'Organization Details': 'Organisation Details',
'Organization Domain Details': 'Organisation Domain Details',
'Organization Domain added': 'Organisation Domain added',
'Organization Domain deleted': 'Organisation Domain deleted',
'Organization Domain updated': 'Organisation Domain updated',
'Organization Domains': 'Organisation Domains',
'Organization Registry': 'Organisation Registry',
'Organization added': 'Organisation added',
'Organization added to Project': 'Organisation added to Project',
'Organization deleted': 'Organisation deleted',
'Organization removed from Project': 'Organisation removed from Project',
'Organization updated': 'Organisation updated',
'Organizations': 'Organisations',
'Origin': 'Origin',
'Origin of the separated children': 'Origin of the separated children',
'Other': 'Other',
'Other (describe)': 'Other (describe)',
'Other (specify)': 'Other (specify)',
'Other Evidence': 'Other Evidence',
'Other Faucet/Piped Water': 'Other Faucet/Piped Water',
'Other Isolation': 'Other Isolation',
'Other Name': 'Other Name',
'Other activities of boys 13-17yrs': 'Other activities of boys 13-17yrs',
'Other activities of boys 13-17yrs before disaster': 'Other activities of boys 13-17yrs before disaster',
'Other activities of boys <12yrs': 'Other activities of boys <12yrs',
'Other activities of boys <12yrs before disaster': 'Other activities of boys <12yrs before disaster',
'Other activities of girls 13-17yrs': 'Other activities of girls 13-17yrs',
'Other activities of girls 13-17yrs before disaster': 'Other activities of girls 13-17yrs before disaster',
'Other activities of girls<12yrs': 'Other activities of girls<12yrs',
'Other activities of girls<12yrs before disaster': 'Other activities of girls<12yrs before disaster',
'Other alternative infant nutrition in use': 'Other alternative infant nutrition in use',
'Other alternative places for study': 'Other alternative places for study',
'Other assistance needed': 'Other assistance needed',
'Other assistance, Rank': 'Other assistance, Rank',
'Other current health problems, adults': 'Other current health problems, adults',
'Other current health problems, children': 'Other current health problems, children',
'Other events': 'Other events',
'Other factors affecting school attendance': 'Other factors affecting school attendance',
'Other major expenses': 'Other major expenses',
'Other non-food items': 'Other non-food items',
'Other recommendations': 'Other recommendations',
'Other residential': 'Other residential',
'Other school assistance received': 'Other school assistance received',
'Other school assistance, details': 'Other school assistance, details',
'Other school assistance, source': 'Other school assistance, source',
'Other settings can only be set by editing a file on the server': 'Other settings can only be set by editing a file on the server',
'Other side dishes in stock': 'Other side dishes in stock',
'Other types of water storage containers': 'Other types of water storage containers',
'Other ways to obtain food': 'Other ways to obtain food',
'Outbound Mail settings are configured in models/000_config.py.': 'Outbound Mail settings are configured in models/000_config.py.',
'Outbox': 'Outbox',
'Outgoing SMS Handler': 'Outgoing SMS Handler',
'Outgoing SMS handler': 'Outgoing SMS handler',
'Overall Hazards': 'Overall Hazards',
'Overhead falling hazard': 'Overhead falling hazard',
'Overland Flow Flood': 'Overland Flow Flood',
'Overlays': 'Overlays',
'Owned Resources': 'Owned Resources',
'PAHO UID': 'PAHO UID',
'PDAM': 'PDAM',
'PDF File': 'PDF File',
'PIN': 'PIN',
'PIN number ': 'PIN number ',
'PL Women': 'PL Women',
'Pack': 'Pack',
'Packs': 'Packs',
'Page': 'Page',
'Pan Map: keep the left mouse button pressed and drag the map': 'Pan Map: keep the left mouse button pressed and drag the map',
'Parameters': 'Parameters',
'Parapets, ornamentation': 'Parapets, ornamentation',
'Parent': 'Parent',
'Parent Office': 'Parent Office',
"Parent level should be higher than this record's level. Parent level is": "Parent level should be higher than this record's level. Parent level is",
'Parent needs to be of the correct level': 'Parent needs to be of the correct level',
'Parent needs to be set': 'Parent needs to be set',
'Parent needs to be set for locations of level': 'Parent needs to be set for locations of level',
'Parents/Caregivers missing children': 'Parents/Caregivers missing children',
'Parking Area': 'Parking Area',
'Partial': 'Partial',
'Participant': 'Participant',
'Partner National Society': 'Partner National Society',
'Pass': 'Pass',
'Passport': 'Passport',
'Password': 'Password',
"Password fields don't match": "Password fields don't match",
'Password to use for authentication at the remote site': 'Password to use for authentication at the remote site',
'Path': 'Path',
'Pathology': 'Pathology',
'Patient': 'Patient',
'Patient Details': 'Patient Details',
'Patient Tracking': 'Patient Tracking',
'Patient added': 'Patient added',
'Patient deleted': 'Patient deleted',
'Patient updated': 'Patient updated',
'Patients': 'Patients',
'Pediatric ICU': 'Pediatric ICU',
'Pediatric Psychiatric': 'Pediatric Psychiatric',
'Pediatrics': 'Pediatrics',
'Pending': 'Pending',
'People': 'People',
'People Needing Food': 'People Needing Food',
'People Needing Shelter': 'People Needing Shelter',
'People Needing Water': 'People Needing Water',
'People Trapped': 'People Trapped',
'Performance Rating': 'Performance Rating',
'Person': 'Person',
'Person 1': 'Person 1',
'Person 1, Person 2 are the potentially duplicate records': 'Person 1, Person 2 are the potentially duplicate records',
'Person 2': 'Person 2',
'Person De-duplicator': 'Person De-duplicator',
'Person Details': 'Person Details',
'Person Name': 'Person Name',
'Person Registry': 'Person Registry',
'Person added': 'Person added',
'Person added to Commitment': 'Person added to Commitment',
'Person deleted': 'Person deleted',
'Person details updated': 'Person details updated',
'Person interviewed': 'Person interviewed',
'Person must be specified!': 'Person must be specified!',
'Person removed from Commitment': 'Person removed from Commitment',
'Person who has actually seen the person/group.': 'Person who has actually seen the person/group.',
'Person/Group': 'Person/Group',
'Personal Data': 'Personal Data',
'Personal Effects': 'Personal Effects',
'Personal Effects Details': 'Personal Effects Details',
'Personal Map': 'Personal Map',
'Personal Profile': 'Personal Profile',
'Personal impact of disaster': 'Personal impact of disaster',
'Persons': 'Persons',
'Persons in institutions': 'Persons in institutions',
'Persons with disability (mental)': 'Persons with disability (mental)',
'Persons with disability (physical)': 'Persons with disability (physical)',
'Phone': 'Phone',
'Phone 1': 'Phone 1',
'Phone 2': 'Phone 2',
'Phone number is required': 'Phone number is required',
"Phone number to donate to this organization's relief efforts.": "Phone number to donate to this organization's relief efforts.",
'Phone/Business': 'Phone/Business',
'Phone/Emergency': 'Phone/Emergency',
'Phone/Exchange (Switchboard)': 'Phone/Exchange (Switchboard)',
'Photo': 'Photo',
'Photo Details': 'Photo Details',
'Photo Taken?': 'Photo Taken?',
'Photo added': 'Photo added',
'Photo deleted': 'Photo deleted',
'Photo updated': 'Photo updated',
'Photograph': 'Photograph',
'Photos': 'Photos',
'Physical Description': 'Physical Description',
'Physical Safety': 'Physical Safety',
'Picture': 'Picture',
'Picture upload and finger print upload facility': 'Picture upload and finger print upload facility',
'Place': 'Place',
'Place of Recovery': 'Place of Recovery',
'Place on Map': 'Place on Map',
'Places for defecation': 'Places for defecation',
'Places the children have been sent to': 'Places the children have been sent to',
'Playing': 'Playing',
"Please come back after sometime if that doesn't help.": "Please come back after sometime if that doesn't help.",
'Please enter a first name': 'Please enter a first name',
'Please enter a number only': 'Please enter a number only',
'Please enter a site OR a location': 'Please enter a site OR a location',
'Please enter a valid email address': 'Please enter a valid email address',
'Please enter the first few letters of the Person/Group for the autocomplete.': 'Please enter the first few letters of the Person/Group for the autocomplete.',
'Please enter the recipient': 'Please enter the recipient',
'Please fill this!': 'Please fill this!',
'Please give an estimated figure about how many bodies have been found.': 'Please give an estimated figure about how many bodies have been found.',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.',
'Please report here where you are:': 'Please report here where you are:',
'Please select': 'Please select',
'Please select another level': 'Please select another level',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions on how the situation could be improved.',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Please use this field to record any additional information, including a history of the record if it is updated.',
'Please use this field to record any additional information, including any Special Needs.': 'Please use this field to record any additional information, including any Special Needs.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.',
'Pledge Support': 'Pledge Support',
'Point': 'Point',
'Poisoning': 'Poisoning',
'Poisonous Gas': 'Poisonous Gas',
'Police': 'Police',
'Pollution and other environmental': 'Pollution and other environmental',
'Polygon': 'Polygon',
'Polygon reference of the rating unit': 'Polygon reference of the rating unit',
'Poor': 'Poor',
'Population': 'Population',
'Population Statistic Details': 'Population Statistic Details',
'Population Statistic added': 'Population Statistic added',
'Population Statistic deleted': 'Population Statistic deleted',
'Population Statistic updated': 'Population Statistic updated',
'Population Statistics': 'Population Statistics',
'Population and number of households': 'Population and number of households',
'Popup Fields': 'Popup Fields',
'Popup Label': 'Popup Label',
'Porridge': 'Porridge',
'Port': 'Port',
'Port Closure': 'Port Closure',
'Portable App': 'Portable App',
'Portal at': 'Portal at',
'Portuguese': 'Portuguese',
'Portuguese (Brazil)': 'Portuguese (Brazil)',
'Position': 'Position',
'Position Catalog': 'Position Catalog',
'Position Details': 'Position Details',
'Position added': 'Position added',
'Position deleted': 'Position deleted',
'Position updated': 'Position updated',
'Positions': 'Positions',
'Postcode': 'Postcode',
'Poultry': 'Poultry',
'Poultry restocking, Rank': 'Poultry restocking, Rank',
'Power Failure': 'Power Failure',
'Powered by Sahana Eden': 'Powered by Sahana Eden',
'Pre-cast connections': 'Pre-cast connections',
'Preferred Name': 'Preferred Name',
'Pregnant women': 'Pregnant women',
'Preliminary': 'Preliminary',
'Presence': 'Presence',
'Presence Condition': 'Presence Condition',
'Presence Log': 'Presence Log',
'Previous View': 'Previous View',
'Primary Occupancy': 'Primary Occupancy',
'Priority': 'Priority',
'Priority from 1 to 9. 1 is most preferred.': 'Priority from 1 to 9. 1 is most preferred.',
'Privacy': 'Privacy',
'Private': 'Private',
'Problem': 'Problem',
'Problem Administration': 'Problem Administration',
'Problem Details': 'Problem Details',
'Problem Group': 'Problem Group',
'Problem Title': 'Problem Title',
'Problem added': 'Problem added',
'Problem connecting to twitter.com - please refresh': 'Problem connecting to twitter.com - please refresh',
'Problem deleted': 'Problem deleted',
'Problem updated': 'Problem updated',
'Problems': 'Problems',
'Procedure': 'Procedure',
'Process Received Shipment': 'Process Received Shipment',
'Process Shipment to Send': 'Process Shipment to Send',
'Profile': 'Profile',
'Project': 'Project',
'Project Details': 'Project Details',
'Project Details including organizations': 'Project Details including organisations',
'Project Organization updated': 'Project Organisation updated',
'Project Organizations': 'Project Organisations',
'Project Site': 'Project Site',
'Project Sites': 'Project Sites',
'Project Status': 'Project Status',
'Project Tracking': 'Project Tracking',
'Project added': 'Project added',
'Project deleted': 'Project deleted',
'Project updated': 'Project updated',
'Projection': 'Projection',
'Projection Details': 'Projection Details',
'Projection added': 'Projection added',
'Projection deleted': 'Projection deleted',
'Projection updated': 'Projection updated',
'Projections': 'Projections',
'Projects': 'Projects',
'Property reference in the council system': 'Property reference in the council system',
'Protection': 'Protection',
'Provide Metadata for your media files': 'Provide Metadata for your media files',
'Provide a password': 'Provide a password',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': 'Provide an optional sketch of the entire building or damage points. Indicate damage points.',
'Proxy Server URL': 'Proxy Server URL',
'Psychiatrics/Adult': 'Psychiatrics/Adult',
'Psychiatrics/Pediatric': 'Psychiatrics/Pediatric',
'Public': 'Public',
'Public Event': 'Public Event',
'Public and private transportation': 'Public and private transportation',
'Public assembly': 'Public assembly',
'Pull tickets from external feed': 'Pull tickets from external feed',
'Purchase Date': 'Purchase Date',
'Purpose': 'Purpose',
'Push tickets to external system': 'Push tickets to external system',
'Pyroclastic Flow': 'Pyroclastic Flow',
'Pyroclastic Surge': 'Pyroclastic Surge',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python Serial module not available within the running Python - this needs installing to activate the Modem',
'Quantity': 'Quantity',
'Quantity Committed': 'Quantity Committed',
'Quantity Fulfilled': 'Quantity Fulfilled',
"Quantity in %s's Inventory": "Quantity in %s's Inventory",
'Quantity in Transit': 'Quantity in Transit',
'Quarantine': 'Quarantine',
'Queries': 'Queries',
'Query': 'Query',
'Queryable?': 'Queryable?',
'Question': 'Question',
'Question Details': 'Question Details',
'Question Meta-Data': 'Question Meta-Data',
'Question Meta-Data Details': 'Question Meta-Data Details',
'Question Meta-Data added': 'Question Meta-Data added',
'Question Meta-Data deleted': 'Question Meta-Data deleted',
'Question Meta-Data updated': 'Question Meta-Data updated',
'Question Summary': 'Question Summary',
'RC frame with masonry infill': 'RC frame with masonry infill',
'RECORD A': 'RECORD A',
'RECORD B': 'RECORD B',
'RMS': 'RMS',
'RMS Team': 'RMS Team',
'Race': 'Race',
'Radio': 'Radio',
'Radio Details': 'Radio Details',
'Radiological Hazard': 'Radiological Hazard',
'Radiology': 'Radiology',
'Railway Accident': 'Railway Accident',
'Railway Hijacking': 'Railway Hijacking',
'Rain Fall': 'Rain Fall',
'Rapid Assessment': 'Rapid Assessment',
'Rapid Assessment Details': 'Rapid Assessment Details',
'Rapid Assessment added': 'Rapid Assessment added',
'Rapid Assessment deleted': 'Rapid Assessment deleted',
'Rapid Assessment updated': 'Rapid Assessment updated',
'Rapid Assessments': 'Rapid Assessments',
'Rapid Assessments & Flexible Impact Assessments': 'Rapid Assessments & Flexible Impact Assessments',
'Rapid Close Lead': 'Rapid Close Lead',
'Rapid Data Entry': 'Rapid Data Entry',
'Raw Database access': 'Raw Database access',
'Receive': 'Receive',
'Receive New Shipment': 'Receive New Shipment',
'Receive Shipment': 'Receive Shipment',
'Receive this shipment?': 'Receive this shipment?',
'Received': 'Received',
'Received By': 'Received By',
'Received By Person': 'Received By Person',
'Received Item Details': 'Received Item Details',
'Received Item updated': 'Received Item updated',
'Received Shipment Details': 'Received Shipment Details',
'Received Shipment canceled': 'Received Shipment canceled',
'Received Shipment canceled and items removed from Inventory': 'Received Shipment canceled and items removed from Inventory',
'Received Shipment updated': 'Received Shipment updated',
'Received Shipments': 'Received Shipments',
'Receiving and Sending Items': 'Receiving and Sending Items',
'Recipient': 'Recipient',
'Recipients': 'Recipients',
'Recommendations for Repair and Reconstruction or Demolition': 'Recommendations for Repair and Reconstruction or Demolition',
'Record': 'Record',
'Record Details': 'Record Details',
'Record Saved': 'Record Saved',
'Record added': 'Record added',
'Record any restriction on use or entry': 'Record any restriction on use or entry',
'Record created': 'Record created',
'Record deleted': 'Record deleted',
'Record last updated': 'Record last updated',
'Record not found': 'Record not found',
'Record not found!': 'Record not found!',
'Record updated': 'Record updated',
'Recording and Assigning Assets': 'Recording and Assigning Assets',
'Recovery': 'Recovery',
'Recovery Request': 'Recovery Request',
'Recovery Request added': 'Recovery Request added',
'Recovery Request deleted': 'Recovery Request deleted',
'Recovery Request updated': 'Recovery Request updated',
'Recurring': 'Recurring',
'Recurring Cost': 'Recurring Cost',
'Recurring cost': 'Recurring cost',
'Recurring costs': 'Recurring costs',
'Red': 'Red',
'Red Cross / Red Crescent': 'Red Cross / Red Crescent',
'Reference Document': 'Reference Document',
'Refresh Rate (seconds)': 'Refresh Rate (seconds)',
'Region': 'Region',
'Region Location': 'Region Location',
'Regional': 'Regional',
'Register': 'Register',
'Register Person': 'Register Person',
'Register Person into this Camp': 'Register Person into this Camp',
'Register Person into this Shelter': 'Register Person into this Shelter',
'Register them as a volunteer': 'Register them as a volunteer',
'Registered People': 'Registered People',
'Registered users can': 'Registered users can',
'Registration': 'Registration',
'Registration Details': 'Registration Details',
'Registration added': 'Registration added',
'Registration entry deleted': 'Registration entry deleted',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': 'Registration is still pending approval from Approver (%s) - please wait until confirmation is received.',
'Registration key': 'Registration key',
'Registration updated': 'Registration updated',
'Rehabilitation/Long Term Care': 'Rehabilitation/Long Term Care',
'Reinforced masonry': 'Reinforced masonry',
'Rejected': 'Rejected',
'Relative Details': 'Relative Details',
'Relative added': 'Relative added',
'Relative deleted': 'Relative deleted',
'Relative updated': 'Relative updated',
'Relatives': 'Relatives',
'Relief': 'Relief',
'Relief Team': 'Relief Team',
'Religion': 'Religion',
'Religious': 'Religious',
'Religious Leader': 'Religious Leader',
'Relocate as instructed in the <instruction>': 'Relocate as instructed in the <instruction>',
'Remote Error': 'Remote Error',
'Remove': 'Remove',
'Remove Activity from this event': 'Remove Activity from this event',
'Remove Asset from this event': 'Remove Asset from this event',
'Remove Asset from this scenario': 'Remove Asset from this scenario',
'Remove Document from this request': 'Remove Document from this request',
'Remove Facility from this event': 'Remove Facility from this event',
'Remove Facility from this scenario': 'Remove Facility from this scenario',
'Remove Human Resource from this event': 'Remove Human Resource from this event',
'Remove Human Resource from this scenario': 'Remove Human Resource from this scenario',
'Remove Incident from this event': 'Remove Incident from this event',
'Remove Item from Inventory': 'Remove Item from Inventory',
'Remove Item from Order': 'Remove Item from Order',
'Remove Item from Shipment': 'Remove Item from Shipment',
'Remove Map Configuration from this event': 'Remove Map Configuration from this event',
'Remove Map Configuration from this scenario': 'Remove Map Configuration from this scenario',
'Remove Organization from Project': 'Remove Organisation from Project',
'Remove Person from Commitment': 'Remove Person from Commitment',
'Remove Skill': 'Remove Skill',
'Remove Skill from Request': 'Remove Skill from Request',
'Remove Task from this event': 'Remove Task from this event',
'Remove Task from this scenario': 'Remove Task from this scenario',
'Remove this asset from this event': 'Remove this asset from this event',
'Remove this asset from this scenario': 'Remove this asset from this scenario',
'Remove this facility from this event': 'Remove this facility from this event',
'Remove this facility from this scenario': 'Remove this facility from this scenario',
'Remove this human resource from this event': 'Remove this human resource from this event',
'Remove this human resource from this scenario': 'Remove this human resource from this scenario',
'Remove this task from this event': 'Remove this task from this event',
'Remove this task from this scenario': 'Remove this task from this scenario',
'Repair': 'Repair',
'Repaired': 'Repaired',
'Repeat your password': 'Repeat your password',
'Report': 'Report',
'Report Another Assessment...': 'Report Another Assessment...',
'Report Details': 'Report Details',
'Report Resource': 'Report Resource',
'Report To': 'Report To',
'Report Types Include': 'Report Types Include',
'Report added': 'Report added',
'Report deleted': 'Report deleted',
'Report my location': 'Report my location',
'Report the contributing factors for the current EMS status.': 'Report the contributing factors for the current EMS status.',
'Report the contributing factors for the current OR status.': 'Report the contributing factors for the current OR status.',
'Report them as found': 'Report them as found',
'Report them missing': 'Report them missing',
'Report updated': 'Report updated',
'ReportLab module not available within the running Python - this needs installing for PDF output!': 'ReportLab module not available within the running Python - this needs installing for PDF output!',
'Reported To': 'Reported To',
'Reporter': 'Reporter',
'Reporter Name': 'Reporter Name',
'Reporting on the projects in the region': 'Reporting on the projects in the region',
'Reports': 'Reports',
'Repositories': 'Repositories',
'Repository': 'Repository',
'Repository Base URL': 'Repository Base URL',
'Repository Configuration': 'Repository Configuration',
'Repository Name': 'Repository Name',
'Repository UUID': 'Repository UUID',
'Repository configuration deleted': 'Repository configuration deleted',
'Repository configuration updated': 'Repository configuration updated',
'Repository configured': 'Repository configured',
'Request': 'Request',
'Request Added': 'Request Added',
'Request Canceled': 'Request Canceled',
'Request Details': 'Request Details',
'Request From': 'Request From',
'Request Item': 'Request Item',
'Request Item Details': 'Request Item Details',
'Request Item added': 'Request Item added',
'Request Item deleted': 'Request Item deleted',
'Request Item from Available Inventory': 'Request Item from Available Inventory',
'Request Item updated': 'Request Item updated',
'Request Items': 'Request Items',
'Request New People': 'Request New People',
'Request Number': 'Request Number',
'Request Status': 'Request Status',
'Request Type': 'Request Type',
'Request Updated': 'Request Updated',
'Request added': 'Request added',
'Request deleted': 'Request deleted',
'Request for Account': 'Request for Account',
'Request for Donations Added': 'Request for Donations Added',
'Request for Donations Canceled': 'Request for Donations Canceled',
'Request for Donations Details': 'Request for Donations Details',
'Request for Donations Updated': 'Request for Donations Updated',
'Request for Role Upgrade': 'Request for Role Upgrade',
'Request for Volunteers Added': 'Request for Volunteers Added',
'Request for Volunteers Canceled': 'Request for Volunteers Canceled',
'Request for Volunteers Details': 'Request for Volunteers Details',
'Request for Volunteers Updated': 'Request for Volunteers Updated',
'Request updated': 'Request updated',
'Request, Response & Session': 'Request, Response & Session',
'Requested': 'Requested',
'Requested By': 'Requested By',
'Requested By Facility': 'Requested By Facility',
'Requested For': 'Requested For',
'Requested For Facility': 'Requested For Facility',
'Requested From': 'Requested From',
'Requested Items': 'Requested Items',
'Requested Skill Details': 'Requested Skill Details',
'Requested Skill updated': 'Requested Skill updated',
'Requested Skills': 'Requested Skills',
'Requester': 'Requester',
'Requests': 'Requests',
'Requests Management': 'Requests Management',
'Requests for Donations': 'Requests for Donations',
'Requests for Volunteers': 'Requests for Volunteers',
'Required Skills': 'Required Skills',
'Requires Login': 'Requires Login',
'Requires Login!': 'Requires Login!',
'Rescue and recovery': 'Rescue and recovery',
'Reset': 'Reset',
'Reset Password': 'Reset Password',
'Resolve': 'Resolve',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.',
'Resource': 'Resource',
'Resource Configuration': 'Resource Configuration',
'Resource Details': 'Resource Details',
'Resource Mapping System': 'Resource Mapping System',
'Resource Mapping System account has been activated': 'Resource Mapping System account has been activated',
'Resource Name': 'Resource Name',
'Resource added': 'Resource added',
'Resource configuration deleted': 'Resource configuration deleted',
'Resource configuration updated': 'Resource configuration updated',
'Resource configured': 'Resource configured',
'Resource deleted': 'Resource deleted',
'Resource updated': 'Resource updated',
'Resources': 'Resources',
'Respiratory Infections': 'Respiratory Infections',
'Response': 'Response',
'Restricted Access': 'Restricted Access',
'Restricted Use': 'Restricted Use',
'Results': 'Results',
'Retail Crime': 'Retail Crime',
'Retrieve Password': 'Retrieve Password',
'Return': 'Return',
'Return to Request': 'Return to Request',
'Returned': 'Returned',
'Returned From': 'Returned From',
'Review Incoming Shipment to Receive': 'Review Incoming Shipment to Receive',
'Rice': 'Rice',
'Riot': 'Riot',
'River': 'River',
'River Details': 'River Details',
'River added': 'River added',
'River deleted': 'River deleted',
'River updated': 'River updated',
'Rivers': 'Rivers',
'Road Accident': 'Road Accident',
'Road Closed': 'Road Closed',
'Road Conditions': 'Road Conditions',
'Road Delay': 'Road Delay',
'Road Hijacking': 'Road Hijacking',
'Road Usage Condition': 'Road Usage Condition',
'Roads Layer': 'Roads Layer',
'Role': 'Role',
'Role Details': 'Role Details',
'Role Required': 'Role Required',
'Role Updated': 'Role Updated',
'Role added': 'Role added',
'Role deleted': 'Role deleted',
'Role updated': 'Role updated',
'Roles': 'Roles',
'Roles Permitted': 'Roles Permitted',
'Roof tile': 'Roof tile',
'Roofs, floors (vertical load)': 'Roofs, floors (vertical load)',
'Room': 'Room',
'Room Details': 'Room Details',
'Room added': 'Room added',
'Room deleted': 'Room deleted',
'Room updated': 'Room updated',
'Rooms': 'Rooms',
'Rows in table': 'Rows in table',
'Rows selected': 'Rows selected',
'Running Cost': 'Running Cost',
'Russian': 'Russian',
'SMS': 'SMS',
'SMS Modems (Inbound & Outbound)': 'SMS Modems (Inbound & Outbound)',
'SMS Outbound': 'SMS Outbound',
'SMS Settings': 'SMS Settings',
'SMS settings updated': 'SMS settings updated',
'SMTP to SMS settings updated': 'SMTP to SMS settings updated',
'Safe environment for vulnerable groups': 'Safe environment for vulnerable groups',
'Safety Assessment Form': 'Safety Assessment Form',
'Safety of children and women affected by disaster?': 'Safety of children and women affected by disaster?',
'Sahana Eden': 'Sahana Eden',
'Sahana Eden Humanitarian Management Platform': 'Sahana Eden Humanitarian Management Platform',
'Sahana Eden portable application generator': 'Sahana Eden portable application generator',
'Salted Fish': 'Salted Fish',
'Sanitation problems': 'Sanitation problems',
'Satellite': 'Satellite',
'Satellite Layer': 'Satellite Layer',
'Saturday': 'Saturday',
'Save': 'Save',
'Save Search': 'Save Search',
'Save: Default Lat, Lon & Zoom for the Viewport': 'Save: Default Lat, Lon & Zoom for the Viewport',
'Saved Search Details': 'Saved Search Details',
'Saved Search added': 'Saved Search added',
'Saved Search deleted': 'Saved Search deleted',
'Saved Search updated': 'Saved Search updated',
'Saved Searches': 'Saved Searches',
'Saved.': 'Saved.',
'Saving...': 'Saving...',
'Scale of Results': 'Scale of Results',
'Scanned Copy': 'Scanned Copy',
'Scanned Forms Upload': 'Scanned Forms Upload',
'Scenario': 'Scenario',
'Scenario Details': 'Scenario Details',
'Scenario added': 'Scenario added',
'Scenario deleted': 'Scenario deleted',
'Scenario updated': 'Scenario updated',
'Scenarios': 'Scenarios',
'Schedule': 'Schedule',
'Schedule synchronization jobs': 'Schedule synchronisation jobs',
'Schema': 'Schema',
'School': 'School',
'School Closure': 'School Closure',
'School Lockdown': 'School Lockdown',
'School Teacher': 'School Teacher',
'School activities': 'School activities',
'School assistance': 'School assistance',
'School attendance': 'School attendance',
'School destroyed': 'School destroyed',
'School heavily damaged': 'School heavily damaged',
'School tents received': 'School tents received',
'School tents, source': 'School tents, source',
'School used for other purpose': 'School used for other purpose',
'School/studying': 'School/studying',
'Search': 'Search',
'Search Activities': 'Search Activities',
'Search Activity Report': 'Search Activity Report',
'Search Addresses': 'Search Addresses',
'Search Alternative Items': 'Search Alternative Items',
'Search Assessment Summaries': 'Search Assessment Summaries',
'Search Assessments': 'Search Assessments',
'Search Asset Log': 'Search Asset Log',
'Search Assets': 'Search Assets',
'Search Baseline Type': 'Search Baseline Type',
'Search Baselines': 'Search Baselines',
'Search Brands': 'Search Brands',
'Search Budgets': 'Search Budgets',
'Search Bundles': 'Search Bundles',
'Search Camp Services': 'Search Camp Services',
'Search Camp Types': 'Search Camp Types',
'Search Camps': 'Search Camps',
'Search Catalog Items': 'Search Catalog Items',
'Search Catalogs': 'Search Catalogs',
'Search Certificates': 'Search Certificates',
'Search Certifications': 'Search Certifications',
'Search Checklists': 'Search Checklists',
'Search Cluster Subsectors': 'Search Cluster Subsectors',
'Search Clusters': 'Search Clusters',
'Search Commitment Items': 'Search Commitment Items',
'Search Commitments': 'Search Commitments',
'Search Committed People': 'Search Committed People',
'Search Competency Ratings': 'Search Competency Ratings',
'Search Contact Information': 'Search Contact Information',
'Search Contacts': 'Search Contacts',
'Search Course Certificates': 'Search Course Certificates',
'Search Courses': 'Search Courses',
'Search Credentials': 'Search Credentials',
'Search Criteria': 'Search Criteria',
'Search Documents': 'Search Documents',
'Search Donors': 'Search Donors',
'Search Entries': 'Search Entries',
'Search Events': 'Search Events',
'Search Facilities': 'Search Facilities',
'Search Feature Class': 'Search Feature Class',
'Search Feature Layers': 'Search Feature Layers',
'Search Flood Reports': 'Search Flood Reports',
'Search GPS data': 'Search GPS data',
'Search Geonames': 'Search Geonames',
'Search Groups': 'Search Groups',
'Search Homes': 'Search Homes',
'Search Human Resources': 'Search Human Resources',
'Search Identity': 'Search Identity',
'Search Images': 'Search Images',
'Search Impact Type': 'Search Impact Type',
'Search Impacts': 'Search Impacts',
'Search Import Files': 'Search Import Files',
'Search Incident Reports': 'Search Incident Reports',
'Search Incidents': 'Search Incidents',
'Search Inventory Items': 'Search Inventory Items',
'Search Inventory items': 'Search Inventory items',
'Search Item Categories': 'Search Item Categories',
'Search Item Packs': 'Search Item Packs',
'Search Items': 'Search Items',
'Search Job Roles': 'Search Job Roles',
'Search Kits': 'Search Kits',
'Search Layers': 'Search Layers',
'Search Level': 'Search Level',
'Search Level 1 Assessments': 'Search Level 1 Assessments',
'Search Level 2 Assessments': 'Search Level 2 Assessments',
'Search Locations': 'Search Locations',
'Search Log Entry': 'Search Log Entry',
'Search Map Configurations': 'Search Map Configurations',
'Search Markers': 'Search Markers',
'Search Member': 'Search Member',
'Search Membership': 'Search Membership',
'Search Memberships': 'Search Memberships',
'Search Missions': 'Search Missions',
'Search Need Type': 'Search Need Type',
'Search Needs': 'Search Needs',
'Search Offices': 'Search Offices',
'Search Order Items': 'Search Order Items',
'Search Orders': 'Search Orders',
'Search Organization Domains': 'Search Organisation Domains',
'Search Organizations': 'Search Organisations',
'Search Patients': 'Search Patients',
'Search Personal Effects': 'Search Personal Effects',
'Search Persons': 'Search Persons',
'Search Photos': 'Search Photos',
'Search Population Statistics': 'Search Population Statistics',
'Search Positions': 'Search Positions',
'Search Problems': 'Search Problems',
'Search Projections': 'Search Projections',
'Search Projects': 'Search Projects',
'Search Rapid Assessments': 'Search Rapid Assessments',
'Search Received Items': 'Search Received Items',
'Search Received Shipments': 'Search Received Shipments',
'Search Records': 'Search Records',
'Search Registations': 'Search Registrations',
'Search Relatives': 'Search Relatives',
'Search Report': 'Search Report',
'Search Request': 'Search Request',
'Search Request Items': 'Search Request Items',
'Search Requested Items': 'Search Requested Items',
'Search Requested Skills': 'Search Requested Skills',
'Search Requests': 'Search Requests',
'Search Requests for Donations': 'Search Requests for Donations',
'Search Requests for Volunteers': 'Search Requests for Volunteers',
'Search Resources': 'Search Resources',
'Search Rivers': 'Search Rivers',
'Search Roles': 'Search Roles',
'Search Rooms': 'Search Rooms',
'Search Saved Searches': 'Search Saved Searches',
'Search Scenarios': 'Search Scenarios',
'Search Sections': 'Search Sections',
'Search Sectors': 'Search Sectors',
'Search Sent Items': 'Search Sent Items',
'Search Sent Shipments': 'Search Sent Shipments',
'Search Service Profiles': 'Search Service Profiles',
'Search Settings': 'Search Settings',
'Search Shelter Services': 'Search Shelter Services',
'Search Shelter Types': 'Search Shelter Types',
'Search Shelters': 'Search Shelters',
'Search Skill Equivalences': 'Search Skill Equivalences',
'Search Skill Provisions': 'Search Skill Provisions',
'Search Skill Types': 'Search Skill Types',
'Search Skills': 'Search Skills',
'Search Solutions': 'Search Solutions',
'Search Staff Types': 'Search Staff Types',
'Search Staff or Volunteer': 'Search Staff or Volunteer',
'Search Status': 'Search Status',
'Search Subscriptions': 'Search Subscriptions',
'Search Subsectors': 'Search Subsectors',
'Search Support Requests': 'Search Support Requests',
'Search Tasks': 'Search Tasks',
'Search Teams': 'Search Teams',
'Search Themes': 'Search Themes',
'Search Tickets': 'Search Tickets',
'Search Trainings': 'Search Trainings',
'Search Twitter Tags': 'Search Twitter Tags',
'Search Units': 'Search Units',
'Search Users': 'Search Users',
'Search Vehicle Details': 'Search Vehicle Details',
'Search Vehicles': 'Search Vehicles',
'Search Volunteer Availability': 'Search Volunteer Availability',
'Search Warehouses': 'Search Warehouses',
'Search and Edit Group': 'Search and Edit Group',
'Search and Edit Individual': 'Search and Edit Individual',
'Search by organization.': 'Search by organisation.',
'Search for Job': 'Search for Job',
'Search for Repository': 'Search for Repository',
'Search for Resource': 'Search for Resource',
'Search for Staff or Volunteers': 'Search for Staff or Volunteers',
'Search for a Location by name, including local names.': 'Search for a Location by name, including local names.',
'Search for a Person': 'Search for a Person',
'Search for a Project': 'Search for a Project',
'Search for a shipment by looking for text in any field.': 'Search for a shipment by looking for text in any field.',
'Search for a shipment received between these dates': 'Search for a shipment received between these dates',
'Search for a vehicle by text.': 'Search for a vehicle by text.',
'Search for an Organization by name or acronym': 'Search for an Organisation by name or acronym',
'Search for an Organization by name or acronym.': 'Search for an Organisation by name or acronym.',
'Search for an asset by text.': 'Search for an asset by text.',
'Search for an item by Year of Manufacture.': 'Search for an item by Year of Manufacture.',
'Search for an item by brand.': 'Search for an item by brand.',
'Search for an item by catalog.': 'Search for an item by catalogue.',
'Search for an item by category.': 'Search for an item by category.',
'Search for an item by its code, name, model and/or comment.': 'Search for an item by its code, name, model and/or comment.',
'Search for an item by text.': 'Search for an item by text.',
'Search for an order by looking for text in any field.': 'Search for an order by looking for text in any field.',
'Search for an order expected between these dates': 'Search for an order expected between these dates',
'Search for asset by location.': 'Search for asset by location.',
'Search for office by location.': 'Search for office by location.',
'Search for office by organization.': 'Search for office by organisation.',
'Search for office by text.': 'Search for office by text.',
'Search for vehicle by location.': 'Search for vehicle by location.',
'Search for warehouse by location.': 'Search for warehouse by location.',
'Search for warehouse by organization.': 'Search for warehouse by organisation.',
'Search for warehouse by text.': 'Search for warehouse by text.',
'Search here for a person record in order to:': 'Search here for a person record in order to:',
'Search messages': 'Search messages',
'Searching for different groups and individuals': 'Searching for different groups and individuals',
'Secondary Server (Optional)': 'Secondary Server (Optional)',
'Seconds must be a number between 0 and 60': 'Seconds must be a number between 0 and 60',
'Section': 'Section',
'Section Details': 'Section Details',
'Section deleted': 'Section deleted',
'Section updated': 'Section updated',
'Sections': 'Sections',
'Sections that are part of this template': 'Sections that are part of this template',
'Sections that can be selected': 'Sections that can be selected',
'Sector': 'Sector',
'Sector Details': 'Sector Details',
'Sector added': 'Sector added',
'Sector deleted': 'Sector deleted',
'Sector updated': 'Sector updated',
'Sector(s)': 'Sector(s)',
'Sectors': 'Sectors',
'Security Required': 'Security Required',
'Security Status': 'Security Status',
'Security problems': 'Security problems',
'See All Entries': 'See All Entries',
'See a detailed description of the module on the Sahana Eden wiki': 'See a detailed description of the module on the Sahana Eden wiki',
'See all': 'See all',
'See the universally unique identifier (UUID) of this repository': 'See the universally unique identifier (UUID) of this repository',
'See unassigned recovery requests': 'See unassigned recovery requests',
'Select Existing Location': 'Select Existing Location',
'Select Items from the Request': 'Select Items from the Request',
'Select Items from this Inventory': 'Select Items from this Inventory',
'Select This Location': 'Select This Location',
"Select a Room from the list or click 'Add Room'": "Select a Room from the list or click 'Add Room'",
'Select a location': 'Select a location',
"Select a manager for status 'assigned'": "Select a manager for status 'assigned'",
'Select a range for the number of total beds': 'Select a range for the number of total beds',
'Select all that apply': 'Select all that apply',
'Select an Organization to see a list of offices': 'Select an Organisation to see a list of offices',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Select the overlays for Assessments and Activities relating to each Need to identify the gap.',
'Select the person assigned to this role for this project.': 'Select the person assigned to this role for this project.',
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.",
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": "Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.",
'Select to show this configuration in the menu.': 'Select to show this configuration in the menu.',
'Selected Answers': 'Selected Answers',
'Selected Jobs': 'Selected Jobs',
'Selects what type of gateway to use for outbound SMS': 'Selects what type of gateway to use for outbound SMS',
'Send': 'Send',
'Send Alerts using Email &/or SMS': 'Send Alerts using Email &/or SMS',
'Send Commitment as Shipment': 'Send Commitment as Shipment',
'Send New Shipment': 'Send New Shipment',
'Send Notification': 'Send Notification',
'Send Shipment': 'Send Shipment',
'Send a message to this person': 'Send a message to this person',
'Send from %s': 'Send from %s',
'Send message': 'Send message',
'Send new message': 'Send new message',
'Sends & Receives Alerts via Email & SMS': 'Sends & Receives Alerts via Email & SMS',
'Senior (50+)': 'Senior (50+)',
'Sent': 'Sent',
'Sent By': 'Sent By',
'Sent By Person': 'Sent By Person',
'Sent Item Details': 'Sent Item Details',
'Sent Item deleted': 'Sent Item deleted',
'Sent Item updated': 'Sent Item updated',
'Sent Shipment Details': 'Sent Shipment Details',
'Sent Shipment canceled': 'Sent Shipment canceled',
'Sent Shipment canceled and items returned to Inventory': 'Sent Shipment canceled and items returned to Inventory',
'Sent Shipment updated': 'Sent Shipment updated',
'Sent Shipments': 'Sent Shipments',
'Separated children, caregiving arrangements': 'Separated children, caregiving arrangements',
'Serial Number': 'Serial Number',
'Series': 'Series',
'Series Analysis': 'Series Analysis',
'Series Details': 'Series Details',
'Series Map': 'Series Map',
'Series Summary': 'Series Summary',
'Server': 'Server',
'Service Catalogue': 'Service Catalogue',
'Service Due': 'Service Due',
'Service or Facility': 'Service or Facility',
'Service profile added': 'Service profile added',
'Service profile deleted': 'Service profile deleted',
'Service profile updated': 'Service profile updated',
'Services': 'Services',
'Services Available': 'Services Available',
'Set Base Site': 'Set Base Site',
'Set By': 'Set By',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.',
'Setting Details': 'Setting Details',
'Setting added': 'Setting added',
'Setting deleted': 'Setting deleted',
'Setting updated': 'Setting updated',
'Settings': 'Settings',
'Settings updated': 'Settings updated',
'Settings were reset because authenticating with Twitter failed': 'Settings were reset because authenticating with Twitter failed',
'Settings which can be configured through the web interface are available here.': 'Settings which can be configured through the web interface are available here.',
'Severe': 'Severe',
'Severity': 'Severity',
'Share a common Marker (unless over-ridden at the Feature level)': 'Share a common Marker (unless over-ridden at the Feature level)',
'Shelter': 'Shelter',
'Shelter & Essential NFIs': 'Shelter & Essential NFIs',
'Shelter Details': 'Shelter Details',
'Shelter Name': 'Shelter Name',
'Shelter Registry': 'Shelter Registry',
'Shelter Service': 'Shelter Service',
'Shelter Service Details': 'Shelter Service Details',
'Shelter Service added': 'Shelter Service added',
'Shelter Service deleted': 'Shelter Service deleted',
'Shelter Service updated': 'Shelter Service updated',
'Shelter Services': 'Shelter Services',
'Shelter Type': 'Shelter Type',
'Shelter Type Details': 'Shelter Type Details',
'Shelter Type added': 'Shelter Type added',
'Shelter Type deleted': 'Shelter Type deleted',
'Shelter Type updated': 'Shelter Type updated',
'Shelter Types': 'Shelter Types',
'Shelter Types and Services': 'Shelter Types and Services',
'Shelter added': 'Shelter added',
'Shelter deleted': 'Shelter deleted',
'Shelter updated': 'Shelter updated',
'Shelter/NFI Assistance': 'Shelter/NFI Assistance',
'Shelters': 'Shelters',
'Shipment Created': 'Shipment Created',
'Shipment Items': 'Shipment Items',
'Shipment Items received by Inventory': 'Shipment Items received by Inventory',
'Shipment Items sent from Inventory': 'Shipment Items sent from Inventory',
'Shipment to Send': 'Shipment to Send',
'Shipments': 'Shipments',
'Shipments To': 'Shipments To',
'Shooting': 'Shooting',
'Short Assessment': 'Short Assessment',
'Short Description': 'Short Description',
'Show Checklist': 'Show Checklist',
'Show Map': 'Show Map',
'Show in Menu?': 'Show in Menu?',
'Show on Map': 'Show on Map',
'Show on map': 'Show on map',
'Show selected answers': 'Show selected answers',
'Showing latest entries first': 'Showing latest entries first',
'Sign-up for Account': 'Sign-up for Account',
'Single PDF File': 'Single PDF File',
'Site': 'Site',
'Site Administration': 'Site Administration',
'Sites': 'Sites',
'Situation Awareness & Geospatial Analysis': 'Situation Awareness & Geospatial Analysis',
'Sketch': 'Sketch',
'Skill': 'Skill',
'Skill Catalog': 'Skill Catalog',
'Skill Details': 'Skill Details',
'Skill Equivalence': 'Skill Equivalence',
'Skill Equivalence Details': 'Skill Equivalence Details',
'Skill Equivalence added': 'Skill Equivalence added',
'Skill Equivalence deleted': 'Skill Equivalence deleted',
'Skill Equivalence updated': 'Skill Equivalence updated',
'Skill Equivalences': 'Skill Equivalences',
'Skill Provision': 'Skill Provision',
'Skill Provision Catalog': 'Skill Provision Catalog',
'Skill Provision Details': 'Skill Provision Details',
'Skill Provision added': 'Skill Provision added',
'Skill Provision deleted': 'Skill Provision deleted',
'Skill Provision updated': 'Skill Provision updated',
'Skill Provisions': 'Skill Provisions',
'Skill Type': 'Skill Type',
'Skill Type Catalog': 'Skill Type Catalog',
'Skill Type added': 'Skill Type added',
'Skill Type deleted': 'Skill Type deleted',
'Skill Type updated': 'Skill Type updated',
'Skill Types': 'Skill Types',
'Skill added': 'Skill added',
'Skill added to Request': 'Skill added to Request',
'Skill deleted': 'Skill deleted',
'Skill removed': 'Skill removed',
'Skill removed from Request': 'Skill removed from Request',
'Skill updated': 'Skill updated',
'Skills': 'Skills',
'Skills Catalog': 'Skills Catalog',
'Skills Management': 'Skills Management',
'Skype ID': 'Skype ID',
'Slope failure, debris': 'Slope failure, debris',
'Small Trade': 'Small Trade',
'Smoke': 'Smoke',
'Snapshot': 'Snapshot',
'Snapshot Report': 'Snapshot Report',
'Snow Fall': 'Snow Fall',
'Snow Squall': 'Snow Squall',
'Soil bulging, liquefaction': 'Soil bulging, liquefaction',
'Solid waste': 'Solid waste',
'Solution': 'Solution',
'Solution Details': 'Solution Details',
'Solution Item': 'Solution Item',
'Solution added': 'Solution added',
'Solution deleted': 'Solution deleted',
'Solution updated': 'Solution updated',
'Solutions': 'Solutions',
'Some': 'Some',
'Sorry - the server has a problem, please try again later.': 'Sorry - the server has a problem, please try again later.',
'Sorry that location appears to be outside the area of the Parent.': 'Sorry, that location appears to be outside the area of the Parent.',
'Sorry that location appears to be outside the area supported by this deployment.': 'Sorry, that location appears to be outside the area supported by this deployment.',
'Sorry, I could not understand your request': 'Sorry, I could not understand your request',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'Sorry, only users with the MapAdmin role are allowed to create location groups.',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Sorry, only users with the MapAdmin role are allowed to edit these locations',
'Sorry, something went wrong.': 'Sorry, something went wrong.',
'Sorry, that page is forbidden for some reason.': 'Sorry, that page is forbidden for some reason.',
'Sorry, that service is temporary unavailable.': 'Sorry, that service is temporarily unavailable.',
'Sorry, there are no addresses to display': 'Sorry, there are no addresses to display',
"Sorry, things didn't get done on time.": "Sorry, things didn't get done on time.",
"Sorry, we couldn't find that page.": "Sorry, we couldn't find that page.",
'Source': 'Source',
'Source ID': 'Source ID',
'Source Time': 'Source Time',
'Sources of income': 'Sources of income',
'Space Debris': 'Space Debris',
'Spanish': 'Spanish',
'Special Ice': 'Special Ice',
'Special Marine': 'Special Marine',
'Specialized Hospital': 'Specialized Hospital',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.',
'Specific locations need to have a parent of level': 'Specific locations need to have a parent of level',
'Specify a descriptive title for the image.': 'Specify a descriptive title for the image.',
'Specify the bed type of this unit.': 'Specify the bed type of this unit.',
'Specify the number of available sets': 'Specify the number of available sets',
'Specify the number of available units (adult doses)': 'Specify the number of available units (adult doses)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions',
'Specify the number of sets needed per 24h': 'Specify the number of sets needed per 24h',
'Specify the number of units (adult doses) needed per 24h': 'Specify the number of units (adult doses) needed per 24h',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h',
'Speed': 'Speed',
'Spherical Mercator?': 'Spherical Mercator?',
'Spreadsheet Importer': 'Spreadsheet Importer',
'Spreadsheet uploaded': 'Spreadsheet uploaded',
'Spring': 'Spring',
'Squall': 'Squall',
'Staff': 'Staff',
'Staff & Volunteers': 'Staff & Volunteers',
'Staff ID': 'Staff ID',
'Staff Member Details': 'Staff Member Details',
'Staff Members': 'Staff Members',
'Staff Record': 'Staff Record',
'Staff Type Details': 'Staff Type Details',
'Staff Type added': 'Staff Type added',
'Staff Type deleted': 'Staff Type deleted',
'Staff Type updated': 'Staff Type updated',
'Staff Types': 'Staff Types',
'Staff and Volunteers': 'Staff and Volunteers',
'Staff and volunteers': 'Staff and volunteers',
'Staff member added': 'Staff member added',
'Staff present and caring for residents': 'Staff present and caring for residents',
'Staff2': 'Staff2',
'Staffing': 'Staffing',
'Stairs': 'Stairs',
'Start Date': 'Start Date',
'Start date': 'Start date',
'Start date and end date should have valid date values': 'Start date and end date should have valid date values',
'State': 'State',
'Stationery': 'Stationery',
'Status': 'Status',
'Status Report': 'Status Report',
'Status Updated': 'Status Updated',
'Status added': 'Status added',
'Status deleted': 'Status deleted',
'Status of clinical operation of the facility.': 'Status of clinical operation of the facility.',
'Status of general operation of the facility.': 'Status of general operation of the facility.',
'Status of morgue capacity.': 'Status of morgue capacity.',
'Status of operations of the emergency department of this hospital.': 'Status of operations of the emergency department of this hospital.',
'Status of security procedures/access restrictions in the hospital.': 'Status of security procedures/access restrictions in the hospital.',
'Status of the operating rooms of this hospital.': 'Status of the operating rooms of this hospital.',
'Status updated': 'Status updated',
'Steel frame': 'Steel frame',
'Stolen': 'Stolen',
'Store spreadsheets in the Eden database': 'Store spreadsheets in the Eden database',
'Storeys at and above ground level': 'Storeys at and above ground level',
'Storm Force Wind': 'Storm Force Wind',
'Storm Surge': 'Storm Surge',
'Stowaway': 'Stowaway',
'Strategy': 'Strategy',
'Street Address': 'Street Address',
'Streetview Enabled?': 'Streetview Enabled?',
'Strong Wind': 'Strong Wind',
'Structural': 'Structural',
'Structural Hazards': 'Structural Hazards',
'Style': 'Style',
'Style Field': 'Style Field',
'Style Values': 'Style Values',
'Subject': 'Subject',
'Submission successful - please wait': 'Submission successful - please wait',
'Submit': 'Submit',
'Submit New': 'Submit New',
'Submit New (full form)': 'Submit New (full form)',
'Submit New (triage)': 'Submit New (triage)',
'Submit a request for recovery': 'Submit a request for recovery',
'Submit new Level 1 assessment (full form)': 'Submit new Level 1 assessment (full form)',
'Submit new Level 1 assessment (triage)': 'Submit new Level 1 assessment (triage)',
'Submit new Level 2 assessment': 'Submit new Level 2 assessment',
'Subscribe': 'Subscribe',
'Subscription Details': 'Subscription Details',
'Subscription added': 'Subscription added',
'Subscription deleted': 'Subscription deleted',
'Subscription updated': 'Subscription updated',
'Subscriptions': 'Subscriptions',
'Subsector': 'Subsector',
'Subsector Details': 'Subsector Details',
'Subsector added': 'Subsector added',
'Subsector deleted': 'Subsector deleted',
'Subsector updated': 'Subsector updated',
'Subsectors': 'Subsectors',
'Subsistence Cost': 'Subsistence Cost',
'Suburb': 'Suburb',
'Suggest not changing this field unless you know what you are doing.': 'Suggest not changing this field unless you know what you are doing.',
'Summary': 'Summary',
'Summary by Administration Level': 'Summary by Administration Level',
'Summary by Question Type': 'Summary by Question Type',
'Summary of Responses within Series': 'Summary of Responses within Series',
'Sunday': 'Sunday',
'Supply Chain Management': 'Supply Chain Management',
'Supply Item Categories': 'Supply Item Categories',
'Support Request': 'Support Request',
'Support Requests': 'Support Requests',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Supports the decision making of large groups of Crisis Management Experts by helping the groups create a ranked list.',
'Surgery': 'Surgery',
'Survey Module': 'Survey Module',
'Surveys': 'Surveys',
'Symbology': 'Symbology',
'Synchronization': 'Synchronisation',
'Synchronization Job': 'Synchronisation Job',
'Synchronization Log': 'Synchronisation Log',
'Synchronization Schedule': 'Synchronisation Schedule',
'Synchronization Settings': 'Synchronisation Settings',
'Synchronization mode': 'Synchronisation mode',
'Synchronization settings updated': 'Synchronisation settings updated',
'Synchronize now': 'Synchronise now',
"System's Twitter account updated": "System's Twitter account updated",
'Table name of the resource to synchronize': 'Table name of the resource to synchronise',
'Tags': 'Tags',
'Take shelter in place or per <instruction>': 'Take shelter in place or per <instruction>',
'Task': 'Task',
'Task Details': 'Task Details',
'Task List': 'Task List',
'Task Status': 'Task Status',
'Task added': 'Task added',
'Task deleted': 'Task deleted',
'Task removed': 'Task removed',
'Task updated': 'Task updated',
'Tasks': 'Tasks',
'Team Description': 'Team Description',
'Team Details': 'Team Details',
'Team ID': 'Team ID',
'Team Leader': 'Team Leader',
'Team Member added': 'Team Member added',
'Team Members': 'Team Members',
'Team Name': 'Team Name',
'Team Type': 'Team Type',
'Team added': 'Team added',
'Team deleted': 'Team deleted',
'Team updated': 'Team updated',
'Teams': 'Teams',
'Technical testing only, all recipients disregard': 'Technical testing only, all recipients disregard',
'Telecommunications': 'Telecommunications',
'Telephone': 'Telephone',
'Telephone Details': 'Telephone Details',
'Telephony': 'Telephony',
'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.': 'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.',
'Temp folder %s not writable - unable to apply theme!': 'Temp folder %s not writable - unable to apply theme!',
'Template': 'Template',
'Template Name': 'Template Name',
'Template Section Details': 'Template Section Details',
'Template Section added': 'Template Section added',
'Template Section deleted': 'Template Section deleted',
'Template Section updated': 'Template Section updated',
'Template Sections': 'Template Sections',
'Template Summary': 'Template Summary',
'Template file %s not readable - unable to apply theme!': 'Template file %s not readable - unable to apply theme!',
'Templates': 'Templates',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).',
'Term for the primary within-country administrative division (e.g. State or Province).': 'Term for the primary within-country administrative division (e.g. State or Province).',
'Term for the secondary within-country administrative division (e.g. District or County).': 'Term for the secondary within-country administrative division (e.g. District or County).',
'Term for the third-level within-country administrative division (e.g. City or Town).': 'Term for the third-level within-country administrative division (e.g. City or Town).',
'Term for the top-level administrative division (i.e. Country).': 'Term for the top-level administrative division (i.e. Country).',
'Terms of Service\n\nYou have to be eighteen or over to register as a volunteer.': 'Terms of Service\n\nYou have to be eighteen or over to register as a volunteer.',
'Terms of Service:': 'Terms of Service:',
'Territorial Authority': 'Territorial Authority',
'Terrorism': 'Terrorism',
'Tertiary Server (Optional)': 'Tertiary Server (Optional)',
'Text': 'Text',
'Text Color for Text blocks': 'Text Colour for Text blocks',
'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Thank you for validating your email. Your user account is still pending approval by the system administrator (%s). You will get a notification by email when your account is activated.',
'Thanks for your assistance': 'Thanks for your assistance',
'The': 'The',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.',
'The Area which this Site is located within.': 'The Area which this Site is located within.',
'The Assessments module allows field workers to send in assessments.': 'The Assessments module allows field workers to send in assessments.',
'The Author of this Document (optional)': 'The Author of this Document (optional)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'The Building Assessments module allows building safety to be assessed, e.g. after an Earthquake.',
'The Camp this Request is from': 'The Camp this Request is from',
'The Camp this person is checking into.': 'The Camp this person is checking into.',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.",
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Media Library provides a catalog of digital media.': 'The Media Library provides a catalogue of digital media.',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.',
'The Organization Registry keeps track of all the relief organizations working in the area.': 'The Organisation Registry keeps track of all the relief organisations working in the area.',
'The Patient Tracking system keeps track of all the evacuated patients & their relatives.': 'The Patient Tracking system keeps track of all the evacuated patients & their relatives.',
"The Project Tool can be used to record project Information and generate Who's Doing What Where Reports.": "The Project Tool can be used to record project Information and generate Who's Doing What Where Reports.",
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.',
'The Role this person plays within this hospital.': 'The Role this person plays within this hospital.',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.',
'The Shelter this Request is from': 'The Shelter this Request is from',
'The Shelter this person is checking into.': 'The Shelter this person is checking into.',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": "The URL of the image file. If you don't upload an image file, then you must specify its location here.",
'The URL of your web gateway without the post parameters': 'The URL of your web gateway without the post parameters',
'The URL to access the service.': 'The URL to access the service.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': 'The Unique Identifier (UUID) as assigned to this facility by the government.',
'The area is': 'The area is',
'The asset must be assigned to a site OR location.': 'The asset must be assigned to a site OR location.',
'The attribute which is used for the title of popups.': 'The attribute which is used for the title of popups.',
'The attribute within the KML which is used for the title of popups.': 'The attribute within the KML which is used for the title of popups.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)',
'The body height (crown to heel) in cm.': 'The body height (crown to heel) in cm.',
'The country the person usually lives in.': 'The country the person usually lives in.',
'The default Facility for which this person is acting.': 'The default Facility for which this person is acting.',
'The default Facility for which you are acting.': 'The default Facility for which you are acting.',
'The default Organization for whom this person is acting.': 'The default Organisation for whom this person is acting.',
'The default Organization for whom you are acting.': 'The default Organisation for whom you are acting.',
'The duplicate record will be deleted': 'The duplicate record will be deleted',
'The first or only name of the person (mandatory).': 'The first or only name of the person (mandatory).',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.',
'The language you wish the site to be displayed in.': 'The language you wish the site to be displayed in.',
'The length is': 'The length is',
'The level at which Searches are filtered.': 'The level at which Searches are filtered.',
'The list of Brands are maintained by the Administrators.': 'The list of Brands is maintained by the Administrators.',
'The list of Catalogs are maintained by the Administrators.': 'The list of Catalogues is maintained by the Administrators.',
'The map will be displayed initially with this latitude at the center.': 'The map will be displayed initially with this latitude at the centre.',
'The map will be displayed initially with this longitude at the center.': 'The map will be displayed initially with this longitude at the centre.',
'The minimum number of characters is ': 'The minimum number of characters is ',
'The minimum number of features to form a cluster.': 'The minimum number of features to form a cluster.',
'The name to be used when calling for or directly addressing the person (optional).': 'The name to be used when calling for or directly addressing the person (optional).',
'The next screen will allow you to detail the number of people here & their needs.': 'The next screen will allow you to detail the number of people here & their needs.',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item',
'The number of pixels apart that features need to be before they are clustered.': 'The number of pixels apart that features need to be before they are clustered.',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.',
'The person at the location who is reporting this incident (optional)': 'The person at the location who is reporting this incident (optional)',
'The post variable containing the phone number': 'The post variable containing the phone number',
'The post variable on the URL used for sending messages': 'The post variable on the URL used for sending messages',
'The post variables other than the ones containing the message and the phone number': 'The post variables other than the ones containing the message and the phone number',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on Linux and com1, com2, etc on Windows',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.',
'The site where this position is based.': 'The site where this position is based.',
'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'The staff responsible for Facilities can make Requests for assistance. Commitments can be made against these Requests, however the requests remain open until the requestor confirms that the request is complete.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>',
'The synchronization module allows the synchronization of data resources between Sahana Eden instances.': 'The synchronisation module allows the synchronisation of data resources between Sahana Eden instances.',
'The time at which the Event started.': 'The time at which the Event started.',
'The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones.': 'The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones.',
'The token associated with this application on': 'The token associated with this application on',
'The way in which an item is normally distributed': 'The way in which an item is normally distributed',
'The weight in kg.': 'The weight in kg.',
'Theme': 'Theme',
'Theme Details': 'Theme Details',
'Theme added': 'Theme added',
'Theme deleted': 'Theme deleted',
'Theme updated': 'Theme updated',
'Themes': 'Themes',
'There are errors': 'There are errors',
'There are insufficient items in the Inventory to send this shipment': 'There are insufficient items in the Inventory to send this shipment',
'There are multiple records at this location': 'There are multiple records at this location',
'There is no address for this person yet. Add new address.': 'There is no address for this person yet. Add new address.',
'There was a problem, sorry, please try again later.': 'There was a problem, sorry, please try again later.',
'These are settings for Inbound Mail.': 'These are settings for Inbound Mail.',
'These are the Incident Categories visible to normal End-Users': 'These are the Incident Categories visible to normal End-Users',
'These need to be added in Decimal Degrees.': 'These need to be added in Decimal Degrees.',
'They': 'They',
'This appears to be a duplicate of ': 'This appears to be a duplicate of ',
'This email address is already in use': 'This email address is already in use',
'This file already exists on the server as': 'This file already exists on the server as',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.',
'This is the way to transfer data between machines as it maintains referential integrity.': 'This is the way to transfer data between machines as it maintains referential integrity.',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!',
'This level is not open for editing.': 'This level is not open for editing.',
'This might be due to a temporary overloading or maintenance of the server.': 'This might be due to a temporary overloading or maintenance of the server.',
'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': 'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.',
'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.',
'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.',
'This resource is already configured for this repository': 'This resource is already configured for this repository',
'This screen allows you to upload a collection of photos to the server.': 'This screen allows you to upload a collection of photos to the server.',
'This setting can only be controlled by the Administrator.': 'This setting can only be controlled by the Administrator.',
'This shipment has already been received.': 'This shipment has already been received.',
'This shipment has already been sent.': 'This shipment has already been sent.',
'This shipment has not been received - it has NOT been canceled because can still be edited.': 'This shipment has not been received - it has NOT been canceled because it can still be edited.',
'This shipment has not been sent - it has NOT been canceled because can still be edited.': 'This shipment has not been sent - it has NOT been canceled because it can still be edited.',
'This shipment will be confirmed as received.': 'This shipment will be confirmed as received.',
'Thunderstorm': 'Thunderstorm',
'Thursday': 'Thursday',
'Ticket': 'Ticket',
'Ticket Details': 'Ticket Details',
'Ticket ID': 'Ticket ID',
'Ticket added': 'Ticket added',
'Ticket deleted': 'Ticket deleted',
'Ticket updated': 'Ticket updated',
'Ticketing Module': 'Ticketing Module',
'Tickets': 'Tickets',
'Tiled': 'Tiled',
'Tilt-up concrete': 'Tilt-up concrete',
'Timber frame': 'Timber frame',
'Timeline': 'Timeline',
'Timeline Report': 'Timeline Report',
'Timestamp': 'Timestamp',
'Timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Timestamps can be correlated with the timestamps on the photos to locate them on the map.',
'Title': 'Title',
'Title to show for the Web Map Service panel in the Tools panel.': 'Title to show for the Web Map Service panel in the Tools panel.',
'To': 'To',
'To Location': 'To Location',
'To Person': 'To Person',
'To create a personal map configuration, click ': 'To create a personal map configuration, click ',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py',
'To search by job title, enter any portion of the title. You may use % as wildcard.': 'To search by job title, enter any portion of the title. You may use % as wildcard.',
"To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.",
"To search for a hospital, enter any of the names or IDs of the hospital, or the organisation name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "To search for a hospital, enter any of the names or IDs of the hospital, or the organisation name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.",
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.",
"To search for a patient, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all patients.": "To search for a patient, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all patients.",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.",
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.",
'To variable': 'To variable',
'Tools': 'Tools',
'Tornado': 'Tornado',
'Total': 'Total',
'Total # of Target Beneficiaries': 'Total # of Target Beneficiaries',
'Total # of households of site visited': 'Total # of households of site visited',
'Total Beds': 'Total Beds',
'Total Beneficiaries': 'Total Beneficiaries',
'Total Cost per Megabyte': 'Total Cost per Megabyte',
'Total Cost per Minute': 'Total Cost per Minute',
'Total Monthly': 'Total Monthly',
'Total Monthly Cost': 'Total Monthly Cost',
'Total Monthly Cost: ': 'Total Monthly Cost: ',
'Total One-time Costs': 'Total One-time Costs',
'Total Persons': 'Total Persons',
'Total Recurring Costs': 'Total Recurring Costs',
'Total Unit Cost': 'Total Unit Cost',
'Total Unit Cost: ': 'Total Unit Cost: ',
'Total Units': 'Total Units',
'Total gross floor area (square meters)': 'Total gross floor area (square meters)',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'Total number of beds in this hospital. Automatically updated from daily reports.',
'Total number of houses in the area': 'Total number of houses in the area',
'Total number of schools in affected area': 'Total number of schools in affected area',
'Total population of site visited': 'Total population of site visited',
'Totals for Budget:': 'Totals for Budget:',
'Totals for Bundle:': 'Totals for Bundle:',
'Totals for Kit:': 'Totals for Kit:',
'Tourist Group': 'Tourist Group',
'Town': 'Town',
'Traces internally displaced people (IDPs) and their needs': 'Traces internally displaced people (IDPs) and their needs',
'Track with this Person?': 'Track with this Person?',
'Tracking of Patients': 'Tracking of Patients',
'Tracking of Projects, Activities and Tasks': 'Tracking of Projects, Activities and Tasks',
'Tracking of basic information on the location, facilities and size of the Shelters': 'Tracking of basic information on the location, facilities and size of the Shelters',
'Tracks the location, capacity and breakdown of victims in Shelters': 'Tracks the location, capacity and breakdown of victims in Shelters',
'Traffic Report': 'Traffic Report',
'Training': 'Training',
'Training Course Catalog': 'Training Course Catalog',
'Training Details': 'Training Details',
'Training added': 'Training added',
'Training deleted': 'Training deleted',
'Training updated': 'Training updated',
'Trainings': 'Trainings',
'Transit': 'Transit',
'Transit Status': 'Transit Status',
'Transition Effect': 'Transition Effect',
'Transparent?': 'Transparent?',
'Transportation Required': 'Transportation Required',
'Transportation assistance, Rank': 'Transportation assistance, Rank',
'Trauma Center': 'Trauma Center',
'Travel Cost': 'Travel Cost',
'Tropical Storm': 'Tropical Storm',
'Tropo Messaging Token': 'Tropo Messaging Token',
'Tropo Voice Token': 'Tropo Voice Token',
'Tropo settings updated': 'Tropo settings updated',
'Truck': 'Truck',
'Try checking the URL for errors, maybe it was mistyped.': 'Try checking the URL for errors, maybe it was mistyped.',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Try hitting refresh/reload button or trying the URL from the address bar again.',
'Try refreshing the page or hitting the back button on your browser.': 'Try refreshing the page or hitting the back button on your browser.',
'Tsunami': 'Tsunami',
'Tuesday': 'Tuesday',
'Twitter': 'Twitter',
'Twitter ID or #hashtag': 'Twitter ID or #hashtag',
'Twitter Settings': 'Twitter Settings',
'Type': 'Type',
'Type of Construction': 'Type of Construction',
'Type of water source before the disaster': 'Type of water source before the disaster',
"Type the first few characters of one of the Person's names.": "Type the first few characters of one of the Person's names.",
'UN': 'UN',
'URL': 'URL',
'URL of the default proxy server to connect to remote repositories (if required). If only some of the repositories require the use of a proxy server, you can configure this in the respective repository configuration.': 'URL of the default proxy server to connect to remote repositories (if required). If only some of the repositories require the use of a proxy server, you can configure this in the respective repository configuration.',
'URL of the proxy server to connect to this repository (leave empty for default proxy)': 'URL of the proxy server to connect to this repository (leave empty for default proxy)',
'UTC Offset': 'UTC Offset',
'UUID': 'UUID',
'Un-Repairable': 'Un-Repairable',
'Unable to parse CSV file!': 'Unable to parse CSV file!',
'Under which condition a local record shall be updated if it also has been modified locally since the last synchronization': 'Under which condition a local record shall be updated if it also has been modified locally since the last synchronisation',
'Under which conditions local records shall be updated': 'Under which conditions local records shall be updated',
'Understaffed': 'Understaffed',
'Unidentified': 'Unidentified',
'Unit Cost': 'Unit Cost',
'Unit added': 'Unit added',
'Unit deleted': 'Unit deleted',
'Unit of Measure': 'Unit of Measure',
'Unit updated': 'Unit updated',
'United States Dollars': 'United States Dollars',
'Units': 'Units',
'Universally unique identifier for the local repository, needed to register the local repository at remote instances to allow push-synchronization.': 'Universally unique identifier for the local repository, needed to register the local repository at remote instances to allow push-synchronisation.',
'Unknown': 'Unknown',
'Unknown type of facility': 'Unknown type of facility',
'Unreinforced masonry': 'Unreinforced masonry',
'Unsafe': 'Unsafe',
'Unselect to disable the modem': 'Unselect to disable the modem',
'Unselect to disable this API service': 'Unselect to disable this API service',
'Unselect to disable this SMTP service': 'Unselect to disable this SMTP service',
'Unsent': 'Unsent',
'Unsubscribe': 'Unsubscribe',
'Unsupported data format!': 'Unsupported data format!',
'Unsupported method!': 'Unsupported method!',
'Update': 'Update',
'Update Activity Report': 'Update Activity Report',
'Update Cholera Treatment Capability Information': 'Update Cholera Treatment Capability Information',
'Update Method': 'Update Method',
'Update Policy': 'Update Policy',
'Update Request': 'Update Request',
'Update Service Profile': 'Update Service Profile',
'Update Status': 'Update Status',
'Update Task Status': 'Update Task Status',
'Update Unit': 'Update Unit',
'Update your current ordered list': 'Update your current ordered list',
'Updated By': 'Updated By',
'Upload Comma Separated Value File': 'Upload Comma Separated Value File',
'Upload Format': 'Upload Format',
'Upload Photos': 'Upload Photos',
'Upload Scanned OCR Form': 'Upload Scanned OCR Form',
'Upload Spreadsheet': 'Upload Spreadsheet',
'Upload Web2py portable build as a zip file': 'Upload Web2py portable build as a zip file',
'Upload a Assessment Template import file': 'Upload an Assessment Template import file',
'Upload a CSV file': 'Upload a CSV file',
'Upload a CSV file formatted according to the Template.': 'Upload a CSV file formatted according to the Template.',
'Upload a Question List import file': 'Upload a Question List import file',
'Upload a Spreadsheet': 'Upload a Spreadsheet',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!',
'Upload an image file here.': 'Upload an image file here.',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.",
'Upload an image, such as a photo': 'Upload an image, such as a photo',
'Upload the Completed Assessments import file': 'Upload the Completed Assessments import file',
'Uploaded': 'Uploaded',
'Urban Fire': 'Urban Fire',
'Urban area': 'Urban area',
'Urdu': 'Urdu',
'Urgent': 'Urgent',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'Use Geocoder for address lookups?': 'Use Geocoder for address lookups?',
'Use default': 'Use default',
'Use these links to download data that is currently in the database.': 'Use these links to download data that is currently in the database.',
'Use this to set the starting location for the Location Selector.': 'Use this to set the starting location for the Location Selector.',
'Used by IRS & Assess': 'Used by IRS & Assess',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Used in onHover Tooltip & Cluster Popups to differentiate between types.',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.',
'Used to import data from spreadsheets into the database': 'Used to import data from spreadsheets into the database',
'Used within Inventory Management, Request Management and Asset Management': 'Used within Inventory Management, Request Management and Asset Management',
'User': 'User',
'User %(id)s Logged-in': 'User %(id)s Logged-in',
'User %(id)s Registered': 'User %(id)s Registered',
'User Account has been Approved': 'User Account has been Approved',
'User Account has been Disabled': 'User Account has been Disabled',
'User Details': 'User Details',
'User Guidelines Synchronization': 'User Guidelines Synchronisation',
'User ID': 'User ID',
'User Management': 'User Management',
'User Profile': 'User Profile',
'User Requests': 'User Requests',
'User Updated': 'User Updated',
'User added': 'User added',
'User already has this role': 'User already has this role',
'User deleted': 'User deleted',
'User updated': 'User updated',
'Username': 'Username',
'Username to use for authentication at the remote site': 'Username to use for authentication at the remote site',
'Users': 'Users',
'Users removed': 'Users removed',
'Uses the REST Query Format defined in': 'Uses the REST Query Format defined in',
'Utilities': 'Utilities',
'Utility, telecommunication, other non-transport infrastructure': 'Utility, telecommunication, other non-transport infrastructure',
'Value': 'Value',
'Value per Pack': 'Value per Pack',
'Various Reporting functionalities': 'Various Reporting functionalities',
'Vehicle': 'Vehicle',
'Vehicle Crime': 'Vehicle Crime',
'Vehicle Details': 'Vehicle Details',
'Vehicle Details added': 'Vehicle Details added',
'Vehicle Details deleted': 'Vehicle Details deleted',
'Vehicle Details updated': 'Vehicle Details updated',
'Vehicle Management': 'Vehicle Management',
'Vehicle Types': 'Vehicle Types',
'Vehicle added': 'Vehicle added',
'Vehicle deleted': 'Vehicle deleted',
'Vehicle updated': 'Vehicle updated',
'Vehicles': 'Vehicles',
'Vehicles are assets with some extra details.': 'Vehicles are assets with some extra details.',
'Verification Status': 'Verification Status',
'Verified?': 'Verified?',
'Verify Password': 'Verify Password',
'Verify password': 'Verify password',
'Version': 'Version',
'Very Good': 'Very Good',
'Very High': 'Very High',
'Vietnamese': 'Vietnamese',
'View Alerts received using either Email or SMS': 'View Alerts received using either Email or SMS',
'View All': 'View All',
'View All Tickets': 'View All Tickets',
'View Error Tickets': 'View Error Tickets',
'View Fullscreen Map': 'View Fullscreen Map',
'View Image': 'View Image',
'View Items': 'View Items',
'View Location Details': 'View Location Details',
'View Outbox': 'View Outbox',
'View Picture': 'View Picture',
'View Results of completed and/or partially completed assessments': 'View Results of completed and/or partially completed assessments',
'View Settings': 'View Settings',
'View Tickets': 'View Tickets',
'View all log entries': 'View all log entries',
'View and/or update their details': 'View and/or update their details',
'View log entries per repository': 'View log entries per repository',
'View on Map': 'View on Map',
'View or update the status of a hospital.': 'View or update the status of a hospital.',
'View pending requests and pledge support.': 'View pending requests and pledge support.',
'View the hospitals on a map.': 'View the hospitals on a map.',
'View/Edit the Database directly': 'View/Edit the Database directly',
'Village': 'Village',
'Village Leader': 'Village Leader',
'Visual Recognition': 'Visual Recognition',
'Volcanic Ash Cloud': 'Volcanic Ash Cloud',
'Volcanic Event': 'Volcanic Event',
'Volume (m3)': 'Volume (m3)',
'Volunteer Availability': 'Volunteer Availability',
'Volunteer Details': 'Volunteer Details',
'Volunteer Information': 'Volunteer Information',
'Volunteer Management': 'Volunteer Management',
'Volunteer Project': 'Volunteer Project',
'Volunteer Record': 'Volunteer Record',
'Volunteer Request': 'Volunteer Request',
'Volunteer added': 'Volunteer added',
'Volunteer availability added': 'Volunteer availability added',
'Volunteer availability deleted': 'Volunteer availability deleted',
'Volunteer availability updated': 'Volunteer availability updated',
'Volunteers': 'Volunteers',
'Vote': 'Vote',
'Votes': 'Votes',
'WASH': 'WASH',
'Walking Only': 'Walking Only',
'Wall or other structural damage': 'Wall or other structural damage',
'Warehouse': 'Warehouse',
'Warehouse Details': 'Warehouse Details',
'Warehouse added': 'Warehouse added',
'Warehouse deleted': 'Warehouse deleted',
'Warehouse updated': 'Warehouse updated',
'Warehouses': 'Warehouses',
'WatSan': 'WatSan',
'Water Sanitation Hygiene': 'Water Sanitation Hygiene',
'Water collection': 'Water collection',
'Water gallon': 'Water gallon',
'Water storage containers in households': 'Water storage containers in households',
'Water supply': 'Water supply',
'Waterspout': 'Waterspout',
'We have tried': 'We have tried',
'Web API settings updated': 'Web API settings updated',
'Web Map Service Browser Name': 'Web Map Service Browser Name',
'Web Map Service Browser URL': 'Web Map Service Browser URL',
'Web2py executable zip file found - Upload to replace the existing file': 'Web2py executable zip file found - Upload to replace the existing file',
'Web2py executable zip file needs to be uploaded to use this function.': 'Web2py executable zip file needs to be uploaded to use this function.',
'Website': 'Website',
'Wednesday': 'Wednesday',
'Weight': 'Weight',
'Weight (kg)': 'Weight (kg)',
'Welcome to the': 'Welcome to the',
'Well-Known Text': 'Well-Known Text',
'What order to be contacted in.': 'What order to be contacted in.',
'What the Items will be used for': 'What the Items will be used for',
'Wheat': 'Wheat',
'When reports were entered': 'When reports were entered',
'Where Project is implemented, including activities and beneficiaries': 'Where Project is implemented, including activities and beneficiaries',
'Whether to accept unsolicited data transmissions from the repository': 'Whether to accept unsolicited data transmissions from the repository',
'Which methods to apply when importing data to the local repository': 'Which methods to apply when importing data to the local repository',
'Whiskers': 'Whiskers',
'Who is doing what and where': 'Who is doing what and where',
'Who usually collects water for the family?': 'Who usually collects water for the family?',
'Width (m)': 'Width (m)',
'Wild Fire': 'Wild Fire',
'Wind Chill': 'Wind Chill',
'Window frame': 'Window frame',
'Winter Storm': 'Winter Storm',
'With best regards': 'With best regards',
'Women of Child Bearing Age': 'Women of Child Bearing Age',
'Women participating in coping activities': 'Women participating in coping activities',
'Women who are Pregnant or in Labour': 'Women who are Pregnant or in Labour',
'Womens Focus Groups': 'Womens Focus Groups',
'Wooden plank': 'Wooden plank',
'Wooden poles': 'Wooden poles',
'Working hours end': 'Working hours end',
'Working hours start': 'Working hours start',
'Working or other to provide money/food': 'Working or other to provide money/food',
'X-Ray': 'X-Ray',
'YES': 'YES',
"Yahoo Layers cannot be displayed if there isn't a valid API Key": "Yahoo Layers cannot be displayed if there isn't a valid API Key",
'Year': 'Year',
'Year built': 'Year built',
'Year of Manufacture': 'Year of Manufacture',
'Yellow': 'Yellow',
'Yes': 'Yes',
'You are a recovery team?': 'You are a recovery team?',
'You are attempting to delete your own account - are you sure you want to proceed?': 'You are attempting to delete your own account - are you sure you want to proceed?',
'You are currently reported missing!': 'You are currently reported missing!',
'You can click on the map below to select the Lat/Lon fields': 'You can click on the map below to select the Lat/Lon fields',
'You can select the Draw tool': 'You can select the Draw tool',
'You can set the modem settings for SMS here.': 'You can set the modem settings for SMS here.',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.',
'You do not have permission for any facility to add an order.': 'You do not have permission for any facility to add an order.',
'You do not have permission for any facility to make a commitment.': 'You do not have permission for any facility to make a commitment.',
'You do not have permission for any facility to make a request.': 'You do not have permission for any facility to make a request.',
'You do not have permission for any facility to receive a shipment.': 'You do not have permission for any facility to receive a shipment.',
'You do not have permission for any facility to send a shipment.': 'You do not have permission for any facility to send a shipment.',
'You do not have permission for any site to add an inventory item.': 'You do not have permission for any site to add an inventory item.',
'You do not have permission to cancel this received shipment.': 'You do not have permission to cancel this received shipment.',
'You do not have permission to cancel this sent shipment.': 'You do not have permission to cancel this sent shipment.',
'You do not have permission to make this commitment.': 'You do not have permission to make this commitment.',
'You do not have permission to receive this shipment.': 'You do not have permission to receive this shipment.',
'You do not have permission to send a shipment from this site.': 'You do not have permission to send a shipment from this site.',
'You do not have permission to send this shipment.': 'You do not have permission to send this shipment.',
'You have a personal map configuration. To change your personal configuration, click ': 'You have a personal map configuration. To change your personal configuration, click ',
'You have found a dead body?': 'You have found a dead body?',
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.",
"You haven't made any calculations": "You haven't made any calculations",
'You must be logged in to register volunteers.': 'You must be logged in to register volunteers.',
'You must be logged in to report persons missing or found.': 'You must be logged in to report persons missing or found.',
'You should edit Twitter settings in models/000_config.py': 'You should edit Twitter settings in models/000_config.py',
'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Your current ordered list of solution items is shown below. You can change it by voting again.',
'Your post was added successfully.': 'Your post was added successfully.',
'Your request for Red Cross and Red Crescent Resource Mapping System (RMS) has been approved and you can now access the system at': 'Your request for Red Cross and Red Crescent Resource Mapping System (RMS) has been approved and you can now access the system at',
'ZIP Code': 'ZIP Code',
'Zero Hour': 'Zero Hour',
'Zinc roof': 'Zinc roof',
'Zoom': 'Zoom',
'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'Zoom In: click in the map or use the left mouse button and drag to create a rectangle',
'Zoom Levels': 'Zoom Levels',
'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle',
'Zoom to Current Location': 'Zoom to Current Location',
'Zoom to maximum map extent': 'Zoom to maximum map extent',
'access granted': 'access granted',
'active': 'active',
'added': 'added',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'allows a budget to be developed based on staff & equipment costs, including any admin overheads.',
'allows for creation and management of assessments.': 'allows for creation and management of assessments.',
'always update': 'always update',
'an individual/team to do in 1-2 days': 'an individual/team to do in 1-2 days',
'assigned': 'assigned',
'average': 'average',
'black': 'black',
'blond': 'blond',
'blue': 'blue',
'brown': 'brown',
'business_damaged': 'business_damaged',
'by': 'by',
'by %(person)s': 'by %(person)s',
'c/o Name': 'c/o Name',
'can be used to extract data from spreadsheets and put them into database tables.': 'can be used to extract data from spreadsheets and put them into database tables.',
'cancelled': 'cancelled',
'caucasoid': 'caucasoid',
'check all': 'check all',
'click for more details': 'click for more details',
'click here': 'click here',
'completed': 'completed',
'consider': 'consider',
'curly': 'curly',
'currently registered': 'currently registered',
'dark': 'dark',
'data uploaded': 'data uploaded',
'database': 'database',
'database %s select': 'database %s select',
'days': 'days',
'db': 'db',
'deceased': 'deceased',
'delete all checked': 'delete all checked',
'deleted': 'deleted',
'design': 'design',
'diseased': 'diseased',
'displaced': 'displaced',
'divorced': 'divorced',
'done!': 'done!',
'duplicate': 'duplicate',
'edit': 'edit',
'eg. gas, electricity, water': 'eg. gas, electricity, water',
'enclosed area': 'enclosed area',
'enter a number between %(min)g and %(max)g': 'enter a number between %(min)g and %(max)g',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'export as csv file': 'export as csv file',
'fat': 'fat',
'feedback': 'feedback',
'female': 'female',
'flush latrine with septic tank': 'flush latrine with septic tank',
'food_sources': 'food_sources',
'forehead': 'forehead',
'form data': 'form data',
'found': 'found',
'from Twitter': 'from Twitter',
'getting': 'getting',
'green': 'green',
'grey': 'grey',
'here': 'here',
'hours': 'hours',
'households': 'households',
'identified': 'identified',
'ignore': 'ignore',
'in Deg Min Sec format': 'in Deg Min Sec format',
'in GPS format': 'in GPS format',
'in Inv.': 'in Inv.',
'inactive': 'inactive',
'injured': 'injured',
'insert new': 'insert new',
'insert new %s': 'insert new %s',
'invalid': 'invalid',
'invalid request': 'invalid request',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.',
'latrines': 'latrines',
'leave empty to detach account': 'leave empty to detach account',
'legend URL': 'legend URL',
'light': 'light',
'login': 'login',
'long': 'long',
'long>12cm': 'long>12cm',
'male': 'male',
'married': 'married',
'maxExtent': 'maxExtent',
'maxResolution': 'maxResolution',
'medium': 'medium',
'medium<12cm': 'medium<12cm',
'meters': 'meters',
'minutes': 'minutes',
'missing': 'missing',
'module allows the site administrator to configure various options.': 'module allows the site administrator to configure various options.',
'module helps monitoring the status of hospitals.': 'module helps monitoring the status of hospitals.',
'mongoloid': 'mongoloid',
'more': 'more',
'negroid': 'negroid',
'never': 'never',
'never update': 'never update',
'new': 'new',
'new record inserted': 'new record inserted',
'next 100 rows': 'next 100 rows',
'no': 'no',
'none': 'none',
'not specified': 'not specified',
'obsolete': 'obsolete',
'on': 'on',
'on %(date)s': 'on %(date)s',
'open defecation': 'open defecation',
'optional': 'optional',
'or import from csv file': 'or import from csv file',
'other': 'other',
'over one hour': 'over one hour',
'people': 'people',
'piece': 'piece',
'pit': 'pit',
'pit latrine': 'pit latrine',
'postponed': 'postponed',
'preliminary template or draft, not actionable in its current form': 'preliminary template or draft, not actionable in its current form',
'previous 100 rows': 'previous 100 rows',
'pull': 'pull',
'pull and push': 'pull and push',
'push': 'push',
'record does not exist': 'record does not exist',
'record id': 'record id',
'red': 'red',
'replace': 'replace',
'reports successfully imported.': 'reports successfully imported.',
'representation of the Polygon/Line.': 'representation of the Polygon/Line.',
'retired': 'retired',
'retry': 'retry',
'river': 'river',
'see comment': 'see comment',
'selected': 'selected',
'separated': 'separated',
'separated from family': 'separated from family',
'shaved': 'shaved',
'short': 'short',
'short<6cm': 'short<6cm',
'sides': 'sides',
'sign-up now': 'sign-up now',
'single': 'single',
'slim': 'slim',
'specify': 'specify',
'staff': 'staff',
'staff members': 'staff members',
'state': 'state',
'state location': 'state location',
'straight': 'straight',
'suffered financial losses': 'suffered financial losses',
'table': 'table',
'tall': 'tall',
'times and it is still not working. We give in. Sorry.': 'times and it is still not working. We give in. Sorry.',
'to access the system': 'to access the system',
'to download a OCR Form.': 'to download an OCR Form.',
'to reset your password': 'to reset your password',
'to verify your email': 'to verify your email',
'tonsure': 'tonsure',
'total': 'total',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!',
'unable to parse csv file': 'unable to parse csv file',
'uncheck all': 'uncheck all',
'unidentified': 'unidentified',
'unknown': 'unknown',
'unspecified': 'unspecified',
'unverified': 'unverified',
'update': 'update',
'update if master': 'update if master',
'update if newer': 'update if newer',
'updated': 'updated',
'verified': 'verified',
'volunteer': 'volunteer',
'volunteers': 'volunteers',
'wavy': 'wavy',
'weeks': 'weeks',
'white': 'white',
'wider area, longer term, usually contain multiple Activities': 'wider area, longer term, usually contain multiple Activities',
'widowed': 'widowed',
'within human habitat': 'within human habitat',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt module not available within the running Python - this needs installing for XLS output!',
'yes': 'yes',
}
|
flavour/porto
|
languages/en-gb.py
|
Python
|
mit
| 259,796
|
[
"VisIt"
] |
523521af21a80d0a7a492f5e228b5f32e032afa8e92c9db86db0e5d9dcc5345e
|
""" The Cloud Director is a simple agent performing VM instantiations
"""
import random
import socket
import hashlib
from collections import defaultdict
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals, Registry, Resources
from DIRAC.WorkloadManagementSystem.Client.MatcherClient import MatcherClient
from DIRAC.Core.Utilities.List import fromChar
from DIRAC.WorkloadManagementSystem.Client.ServerUtils import pilotAgentsDB
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
from DIRAC.Resources.Cloud.EndpointFactory import EndpointFactory
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import (
findGenericCloudCredentials,
getVMTypes,
getPilotBootstrapParameters,
)
from DIRAC.WorkloadManagementSystem.Client.ServerUtils import virtualMachineDB
from DIRAC.WorkloadManagementSystem.Utilities.Utils import getProxyFileForCloud
class CloudDirector(AgentModule):
"""The CloudDirector works like a SiteDirector for cloud sites:
It looks at the queued jobs in the task queues and attempts to
start VM instances to meet the current demand.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.vmTypeDict = {}
self.vmTypeCECache = {}
self.vmTypeSlots = {}
self.failedVMTypes = defaultdict(int)
self.firstPass = True
self.vo = ""
self.group = ""
# self.voGroups contains all the eligible user groups for VMs submitted by this CloudDirector
self.voGroups = []
self.cloudDN = ""
self.cloudGroup = ""
self.platforms = []
self.sites = []
self.siteClient = None
self.proxy = None
self.updateStatus = True
self.getOutput = False
self.sendAccounting = True
def initialize(self):
self.siteClient = SiteStatus()
return S_OK()
def beginExecution(self):
# The Director is for a particular user community
self.vo = self.am_getOption("VO", "")
if not self.vo:
self.vo = CSGlobals.getVO()
# The CloudDirector is for a particular user group
self.group = self.am_getOption("Group", "")
# Choose the group for which clouds will be submitted. This is a hack until
# we will be able to match clouds to VOs.
if not self.group:
if self.vo:
result = Registry.getGroupsForVO(self.vo)
if not result["OK"]:
return result
self.voGroups = []
for group in result["Value"]:
if "NormalUser" in Registry.getPropertiesForGroup(group):
self.voGroups.append(group)
else:
self.voGroups = [self.group]
result = findGenericCloudCredentials(vo=self.vo)
if not result["OK"]:
return result
self.cloudDN, self.cloudGroup = result["Value"]
self.maxVMsToSubmit = self.am_getOption("MaxVMsToSubmit", 1)
self.runningPod = self.am_getOption("RunningPod", self.vo)
# Get the site description dictionary
siteNames = None
if not self.am_getOption("Site", "Any").lower() == "any":
siteNames = self.am_getOption("Site", [])
if not siteNames:
siteNames = None
ces = None
if not self.am_getOption("CEs", "Any").lower() == "any":
ces = self.am_getOption("CEs", [])
if not ces:
ces = None
result = getVMTypes(vo=self.vo, siteList=siteNames)
if not result["OK"]:
return result
resourceDict = result["Value"]
result = self.getEndpoints(resourceDict)
if not result["OK"]:
return result
# if not siteNames:
# siteName = gConfig.getValue( '/DIRAC/Site', 'Unknown' )
# if siteName == 'Unknown':
# return S_OK( 'No site specified for the SiteDirector' )
# else:
# siteNames = [siteName]
# self.siteNames = siteNames
self.log.always("Sites:", siteNames)
self.log.always("CEs:", ces)
self.log.always("CloudDN:", self.cloudDN)
self.log.always("CloudGroup:", self.cloudGroup)
self.localhost = socket.getfqdn()
self.proxy = ""
if self.firstPass:
if self.vmTypeDict:
self.log.always("Agent will serve VM types:")
for vmType in self.vmTypeDict:
self.log.always(
"Site: %s, CE: %s, VMType: %s"
% (self.vmTypeDict[vmType]["Site"], self.vmTypeDict[vmType]["CEName"], vmType)
)
self.firstPass = False
return S_OK()
def __generateVMTypeHash(self, vmTypeDict):
"""Generate a hash of the queue description"""
myMD5 = hashlib.md5()
myMD5.update(str(sorted(vmTypeDict.items())).encode())
hexstring = myMD5.hexdigest()
return hexstring
def getEndpoints(self, resourceDict):
"""Get the list of relevant CEs and their descriptions"""
self.vmTypeDict = {}
ceFactory = EndpointFactory()
result = getPilotBootstrapParameters(vo=self.vo, runningPod=self.runningPod)
if not result["OK"]:
return result
opParameters = result["Value"]
for site in resourceDict:
for ce in resourceDict[site]:
ceDict = resourceDict[site][ce]
ceTags = ceDict.get("Tag", [])
if isinstance(ceTags, str):
ceTags = fromChar(ceTags)
ceMaxRAM = ceDict.get("MaxRAM", None)
qDict = ceDict.pop("VMTypes")
for vmType in qDict:
vmTypeName = "%s_%s" % (ce, vmType)
self.vmTypeDict[vmTypeName] = {}
self.vmTypeDict[vmTypeName]["ParametersDict"] = qDict[vmType]
self.vmTypeDict[vmTypeName]["ParametersDict"]["VMType"] = vmType
self.vmTypeDict[vmTypeName]["ParametersDict"]["Site"] = site
self.vmTypeDict[vmTypeName]["ParametersDict"]["Setup"] = gConfig.getValue("/DIRAC/Setup", "unknown")
self.vmTypeDict[vmTypeName]["ParametersDict"]["CPUTime"] = 99999999
vmTypeTags = self.vmTypeDict[vmTypeName]["ParametersDict"].get("Tag")
if vmTypeTags and isinstance(vmTypeTags, str):
vmTypeTags = fromChar(vmTypeTags)
self.vmTypeDict[vmTypeName]["ParametersDict"]["Tag"] = vmTypeTags
if ceTags:
if vmTypeTags:
allTags = list(set(ceTags + vmTypeTags))
self.vmTypeDict[vmTypeName]["ParametersDict"]["Tag"] = allTags
else:
self.vmTypeDict[vmTypeName]["ParametersDict"]["Tag"] = ceTags
maxRAM = self.vmTypeDict[vmTypeName]["ParametersDict"].get("MaxRAM")
maxRAM = ceMaxRAM if not maxRAM else maxRAM
if maxRAM:
self.vmTypeDict[vmTypeName]["ParametersDict"]["MaxRAM"] = maxRAM
ceWholeNode = ceDict.get("WholeNode", "true")
wholeNode = self.vmTypeDict[vmTypeName]["ParametersDict"].get("WholeNode", ceWholeNode)
if wholeNode.lower() in ("yes", "true"):
self.vmTypeDict[vmTypeName]["ParametersDict"].setdefault("Tag", [])
self.vmTypeDict[vmTypeName]["ParametersDict"]["Tag"].append("WholeNode")
platform = ""
if "Platform" in self.vmTypeDict[vmTypeName]["ParametersDict"]:
platform = self.vmTypeDict[vmTypeName]["ParametersDict"]["Platform"]
elif "Platform" in ceDict:
platform = ceDict["Platform"]
if platform and platform not in self.platforms:
self.platforms.append(platform)
if "Platform" not in self.vmTypeDict[vmTypeName]["ParametersDict"] and platform:
result = Resources.getDIRACPlatform(platform)
if result["OK"]:
self.vmTypeDict[vmTypeName]["ParametersDict"]["Platform"] = result["Value"][0]
ceVMTypeDict = dict(ceDict)
ceVMTypeDict["CEName"] = ce
ceVMTypeDict["VO"] = self.vo
ceVMTypeDict["VMType"] = vmType
ceVMTypeDict["RunningPod"] = self.runningPod
ceVMTypeDict["CSServers"] = gConfig.getValue("/DIRAC/Configuration/Servers", [])
ceVMTypeDict.update(self.vmTypeDict[vmTypeName]["ParametersDict"])
# Allow a resource-specific CAPath to be set (as some clouds have their own CAs)
# Otherwise fall back to the system-wide default(s)
if "CAPath" not in ceVMTypeDict:
ceVMTypeDict["CAPath"] = gConfig.getValue(
"/DIRAC/Security/CAPath", "/opt/dirac/etc/grid-security/certificates/cas.pem"
)
# Generate the CE object for the vmType or pick the already existing one
# if the vmType definition did not change
vmTypeHash = self.__generateVMTypeHash(ceVMTypeDict)
if vmTypeName in self.vmTypeCECache and self.vmTypeCECache[vmTypeName]["Hash"] == vmTypeHash:
vmTypeCE = self.vmTypeCECache[vmTypeName]["CE"]
else:
result = ceFactory.getCEObject(parameters=ceVMTypeDict)
if not result["OK"]:
return result
self.vmTypeCECache.setdefault(vmTypeName, {})
self.vmTypeCECache[vmTypeName]["Hash"] = vmTypeHash
self.vmTypeCECache[vmTypeName]["CE"] = result["Value"]
vmTypeCE = self.vmTypeCECache[vmTypeName]["CE"]
vmTypeCE.setBootstrapParameters(opParameters)
self.vmTypeDict[vmTypeName]["CE"] = vmTypeCE
self.vmTypeDict[vmTypeName]["CEName"] = ce
self.vmTypeDict[vmTypeName]["CEType"] = ceDict["CEType"]
self.vmTypeDict[vmTypeName]["Site"] = site
self.vmTypeDict[vmTypeName]["VMType"] = vmType
self.vmTypeDict[vmTypeName]["Platform"] = platform
self.vmTypeDict[vmTypeName]["MaxInstances"] = ceDict["MaxInstances"]
if not self.vmTypeDict[vmTypeName]["CE"].isValid():
self.log.error("Failed to instantiate CloudEndpoint for %s" % vmTypeName)
continue
if site not in self.sites:
self.sites.append(site)
return S_OK()
def execute(self):
"""Main execution method"""
if not self.vmTypeDict:
self.log.warn("No site defined, exiting the cycle")
return S_OK()
result = self.createVMs()
if not result["OK"]:
self.log.error("Errors in the job submission: ", result["Message"])
# cyclesDone = self.am_getModuleParam( 'cyclesDone' )
# if self.updateStatus and cyclesDone % self.cloudStatusUpdateCycleFactor == 0:
# result = self.updatePilotStatus()
# if not result['OK']:
# self.log.error( 'Errors in updating cloud status: ', result['Message'] )
return S_OK()
def createVMs(self):
"""Go through defined computing elements and submit jobs if necessary"""
vmTypeList = list(self.vmTypeDict.keys())
# Check that there is some work at all
setup = CSGlobals.getSetup()
tqDict = {"Setup": setup, "CPUTime": 9999999}
if self.vo:
tqDict["VO"] = self.vo
if self.voGroups:
tqDict["OwnerGroup"] = self.voGroups
result = Resources.getCompatiblePlatforms(self.platforms)
if not result["OK"]:
return result
tqDict["Platform"] = result["Value"]
tqDict["Site"] = self.sites
tags = []
for vmType in vmTypeList:
if "Tag" in self.vmTypeDict[vmType]["ParametersDict"]:
tags += self.vmTypeDict[vmType]["ParametersDict"]["Tag"]
tqDict["Tag"] = list(set(tags))
self.log.verbose("Checking overall TQ availability with requirements")
self.log.verbose(tqDict)
matcherClient = MatcherClient()
result = matcherClient.getMatchingTaskQueues(tqDict)
if not result["OK"]:
return result
if not result["Value"]:
self.log.verbose("No Waiting jobs suitable for the director")
return S_OK()
jobSites = set()
anySite = False
testSites = set()
totalWaitingJobs = 0
for tqID in result["Value"]:
if "Sites" in result["Value"][tqID]:
for site in result["Value"][tqID]["Sites"]:
if site.lower() != "any":
jobSites.add(site)
else:
anySite = True
else:
anySite = True
if "JobTypes" in result["Value"][tqID]:
if "Sites" in result["Value"][tqID]:
for site in result["Value"][tqID]["Sites"]:
if site.lower() != "any":
testSites.add(site)
totalWaitingJobs += result["Value"][tqID]["Jobs"]
tqIDList = list(result["Value"].keys())
result = virtualMachineDB.getInstanceCounters("Status", {})
totalVMs = 0
if result["OK"]:
for status in result["Value"]:
if status in ["New", "Submitted", "Running"]:
totalVMs += result["Value"][status]
self.log.info("Total %d jobs in %d task queues with %d VMs" % (totalWaitingJobs, len(tqIDList), totalVMs))
# Check if the site is allowed in the mask
result = self.siteClient.getUsableSites()
if not result["OK"]:
return S_ERROR("Can not get the site mask")
siteMaskList = result.get("Value", [])
vmTypeList = list(self.vmTypeDict.keys())
random.shuffle(vmTypeList)
totalSubmittedPilots = 0
matchedQueues = 0
for vmType in vmTypeList:
ce = self.vmTypeDict[vmType]["CE"]
ceName = self.vmTypeDict[vmType]["CEName"]
vmTypeName = self.vmTypeDict[vmType]["VMType"]
siteName = self.vmTypeDict[vmType]["Site"]
platform = self.vmTypeDict[vmType]["Platform"]
vmTypeTags = self.vmTypeDict[vmType]["ParametersDict"].get("Tag", [])
siteMask = siteName in siteMaskList
endpoint = "%s::%s" % (siteName, ceName)
maxInstances = int(self.vmTypeDict[vmType]["MaxInstances"])
processorTags = []
# vms support WholeNode naturally
processorTags.append("WholeNode")
if not anySite and siteName not in jobSites:
self.log.verbose("Skipping queue %s at %s: no workload expected" % (vmTypeName, siteName))
continue
if not siteMask and siteName not in testSites:
self.log.verbose("Skipping queue %s: site %s not in the mask" % (vmTypeName, siteName))
continue
if "CPUTime" in self.vmTypeDict[vmType]["ParametersDict"]:
vmTypeCPUTime = int(self.vmTypeDict[vmType]["ParametersDict"]["CPUTime"])
else:
self.log.warn("CPU time limit is not specified for queue %s, skipping..." % vmType)
continue
# Prepare the queue description to look for eligible jobs
ceDict = ce.getParameterDict()
if not siteMask:
ceDict["JobType"] = "Test"
if self.vo:
ceDict["VO"] = self.vo
if self.voGroups:
ceDict["OwnerGroup"] = self.voGroups
result = Resources.getCompatiblePlatforms(platform)
if not result["OK"]:
continue
ceDict["Platform"] = result["Value"]
ceDict["Tag"] = list(set(processorTags + vmTypeTags))
# Get the number of eligible jobs for the target site/queue
result = matcherClient.getMatchingTaskQueues(ceDict)
if not result["OK"]:
self.log.error("Could not retrieve TaskQueues from TaskQueueDB", result["Message"])
return result
taskQueueDict = result["Value"]
if not taskQueueDict:
self.log.verbose("No matching TQs found for %s" % vmType)
continue
matchedQueues += 1
totalTQJobs = 0
tqIDList = list(taskQueueDict.keys())
for tq in taskQueueDict:
totalTQJobs += taskQueueDict[tq]["Jobs"]
self.log.verbose(
"%d job(s) from %d task queue(s) are eligible for %s queue" % (totalTQJobs, len(tqIDList), vmType)
)
# Get the number of already instantiated VMs for these task queues
totalWaitingVMs = 0
result = virtualMachineDB.getInstanceCounters("Status", {"Endpoint": endpoint})
if result["OK"]:
for status in result["Value"]:
if status in ["New", "Submitted"]:
totalWaitingVMs += result["Value"][status]
if totalWaitingVMs >= totalTQJobs:
self.log.verbose("%d VMs already for all the available jobs" % totalWaitingVMs)
self.log.verbose("%d VMs for the total of %d eligible jobs for %s" % (totalWaitingVMs, totalTQJobs, vmType))
# Get proxy to be used to connect to the cloud endpoint
authType = ce.parameters.get("Auth")
if authType and authType.lower() in ["x509", "voms"]:
self.log.verbose("Getting cloud proxy for %s/%s" % (siteName, ceName))
result = getProxyFileForCloud(ce)
if not result["OK"]:
continue
ce.setProxy(result["Value"])
# Get the number of available slots on the target site/endpoint
totalSlots = self.getVMInstances(endpoint, maxInstances)
if totalSlots == 0:
self.log.debug("%s: No slots available" % vmType)
continue
vmsToSubmit = max(0, min(totalSlots, totalTQJobs - totalWaitingVMs))
self.log.info(
"%s: Slots=%d, TQ jobs=%d, VMs: %d, to submit=%d"
% (vmType, totalSlots, totalTQJobs, totalWaitingVMs, vmsToSubmit)
)
# Limit the number of VM instances to create to vmsToSubmit
vmsToSubmit = min(self.maxVMsToSubmit, vmsToSubmit)
if vmsToSubmit == 0:
continue
self.log.info("Going to submit %d VMs to %s queue" % (vmsToSubmit, vmType))
result = ce.createInstances(vmsToSubmit)
# result = S_OK()
if not result["OK"]:
self.log.error("Failed submission to queue %s:\n" % vmType, result["Message"])
self.failedVMTypes.setdefault(vmType, 0)
self.failedVMTypes[vmType] += 1
continue
# Add VMs to the VirtualMachineDB
vmDict = result["Value"]
totalSubmittedPilots += len(vmDict)
self.log.info("Submitted %d VMs to %s@%s" % (len(vmDict), vmTypeName, ceName))
pilotList = []
for uuID in vmDict:
diracUUID = vmDict[uuID]["InstanceID"]
endpoint = "%s::%s" % (self.vmTypeDict[vmType]["Site"], ceName)
result = virtualMachineDB.insertInstance(uuID, vmTypeName, diracUUID, endpoint, self.vo)
if not result["OK"]:
continue
pRef = "vm://" + ceName + "/" + diracUUID + ":00"
pilotList.append(pRef)
stampDict = {}
tqPriorityList = []
sumPriority = 0.0
for tq in taskQueueDict:
sumPriority += taskQueueDict[tq]["Priority"]
tqPriorityList.append((tq, sumPriority))
tqDict = {}
for pilotID in pilotList:
rndm = random.random() * sumPriority
for tq, prio in tqPriorityList:
if rndm < prio:
tqID = tq
break
if tqID not in tqDict:
tqDict[tqID] = []
tqDict[tqID].append(pilotID)
for tqID, pilotList in tqDict.items():
result = pilotAgentsDB.addPilotTQReference(pilotList, tqID, "", "", self.localhost, "Cloud", stampDict)
if not result["OK"]:
self.log.error("Failed to insert pilots into the PilotAgentsDB: %s" % result["Message"])
self.log.info(
"%d VMs submitted in total in this cycle, %d matched queues" % (totalSubmittedPilots, matchedQueues)
)
return S_OK()
def getVMInstances(self, endpoint, maxInstances):
result = virtualMachineDB.getInstanceCounters("Status", {"Endpoint": endpoint})
if not result["OK"]:
return result
count = 0
for status in result["Value"]:
if status in ["New", "Submitted", "Running"]:
count += int(result["Value"][status])
return max(0, maxInstances - count)
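# [Editor's note: illustrative sketch appended by the editor, not part of the
# DIRAC CloudDirector.] createVMs() above distributes the newly created VM
# pilot references across task queues by sampling proportionally to each
# queue's Priority, walking a cumulative sum until a random threshold is
# crossed. A minimal standalone version of that selection, assuming a
# taskQueueDict shaped like {tqID: {"Priority": float}}, could look like the
# function below; the name pick_task_queue is invented for this sketch.
import random  # already imported at the top of this module; repeated so the sketch stands alone
def pick_task_queue(taskQueueDict):
    cumulative = []
    total = 0.0
    for tqID, tqInfo in taskQueueDict.items():
        # build the cumulative priority ladder
        total += tqInfo["Priority"]
        cumulative.append((tqID, total))
    threshold = random.random() * total
    for tqID, upper in cumulative:
        if threshold < upper:
            return tqID
    return cumulative[-1][0]  # guard against floating point edge cases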
|
DIRACGrid/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Agent/CloudDirector.py
|
Python
|
gpl-3.0
| 22,033
|
[
"DIRAC"
] |
3a90042a96019739242b8d5bfef2cd16989a03a4236ffa9be14a28c0e8a86b0f
|
# -*- coding: utf-8 -*-
"""
Copyright 2015, Institute for Systems Biology.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: William Poole
Email: william.poole@systemsbiology.org / tknijnen@systemsbiology.org
Created: June 2015
"""
import numpy as np
from EmpiricalBrownsMethod import *
from scipy.stats import pearsonr
# ARTIFICIAL DATASET
#RandomData.tsv contains Gaussian random data.
#--Independent Var [line 1] is 25 samples from a unit normal distribution.
#--Dependent Vars 1-10 [lines 2-11] are each 25 samples drawn from a 10-dimensional normal distribution centered at the origin with off-diagonal terms a=0.25.
#--The P values from a Pearson correlation between the independent var and each dependent var are combined
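# [Editor's illustrative sketch; not part of the original workflow.] For readers
# without the bundled RandomData.tsv, data with the structure described above
# could be simulated directly with numpy: an independent unit-normal variable
# plus 25 samples of a 10-dimensional normal whose covariance is 1.0 on the
# diagonal and 0.25 off the diagonal. The names sim_cov, sim_dependent and
# sim_independent are invented for this sketch and are not used below.
sim_cov = np.full((10, 10), 0.25)
np.fill_diagonal(sim_cov, 1.0)
sim_dependent = np.random.multivariate_normal(np.zeros(10), sim_cov, size=25).T  # shape (10, 25)
sim_independent = np.random.randn(25)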
raw_data = open("../Data/RandomData.tsv")
data = []
for line in raw_data:
L = line.replace("\n", "").split("\t")
if "Independent Var" in L[0]:
indV = np.array([float(l) for l in L[1:]])
else:
data.append([float(l) for l in L[1:]])
raw_data.close()
data = np.array(data)
pvals = [pearsonr(indV, data[i])[1] for i in range(data.shape[0])]
transformed_data1 = TransformData(data[0, :])
print "\n\nRandom Data, EMB"
print EmpiricalBrownsMethod(data, pvals, extra_info = True)
print "\nRandom Data, Kost's"
print KostsMethod(data, pvals, extra_info = True)
#Should give:
#(0.72288173732954353, 0.86138425703434118, 2.4580096358564503, 8.1366646038518677)
#(Pbrown,Pfisher,Scale_Factor C,DFbrown)
# TCGA dataset
#Pathways.tsv contains a list of 45 genes that belong to 3 pathways:
#--'FOXA1 TRANSCRIPTION FACTOR NETWORK', 'SUMOYLATION BY RANBP2 REGULATES TRANSCRIPTIONAL REPRESSION', 'GLYPICAN 3 NETWORK'
#CDH4_Pvalues.tsv contains P values from the spearman correlation between CHD4 and each of the 45 genes from TCGA GBM [data from feb 12th 2013].
#The P-values for each set of genes in each pathway are combined using our method or fishers method
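# [Editor's sketch, assuming the textbook Fisher formulation; not part of the
# original script.] Fisher's method combines k independent p-values through the
# statistic X = -2 * sum(ln p_i), which is chi-square distributed with 2k
# degrees of freedom. Brown's method (and Kost's approximation of it) rescales
# X and the degrees of freedom using the empirical covariance between the
# tests, which is why the combined p-values reported below can differ sharply
# from a naive Fisher combination for correlated genes. The helper name
# fisher_combine is invented for this sketch.
from scipy.stats import chi2
def fisher_combine(p_values):
    # Naive Fisher combination; only valid when the individual tests are independent.
    statistic = -2.0 * np.sum(np.log(p_values))
    return chi2.sf(statistic, 2 * len(p_values))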
pathways = ['FOXA1 TRANSCRIPTION FACTOR NETWORK', 'SUMOYLATION BY RANBP2 REGULATES TRANSCRIPTIONAL REPRESSION', 'GLYPICAN 3 NETWORK']
PathwayGeneDict = {p:[] for p in pathways}
#load pathways:
f = open("../Data/pathways.tsv")
f.readline()
gene_list = []
for line in f:
L = line.replace("\n", "").replace("\r", "").split("\t")
PathwayGeneDict[L[0]].append(L[1])
gene_list.append(L[1])
f.close()
gene_list = list(set(gene_list))
PValueDict = {}
f = open("../Data/CDH4_Pvalues.tsv")
f.readline()
for line in f:
L = line.replace("\n", "").replace("\r", "").split("\t")
PValueDict[L[0]] = float(L[1])
f.close()
GeneData = {}
FM = open("../Data/ReducedFeatureMatrix.tsv")
for line in FM:
L = line.replace("\n", "").replace("\r", "").split("\t")
GeneData[L[0]] = [float(l) for l in L[1:]]
FM.close()
for p in pathways:
print "\n\npathway:", p
DataMatrix = np.array([GeneData[g] for g in PathwayGeneDict[p] if g in GeneData])
Pvalues = np.array([PValueDict[g] for g in PathwayGeneDict[p] if g in PValueDict])
print "\nEBM"
print EmpiricalBrownsMethod(DataMatrix, Pvalues, extra_info = True)
print "\nKosts"
print KostsMethod(DataMatrix, Pvalues, extra_info = True)
#Should give:
#pathway: FOXA1 TRANSCRIPTION FACTOR NETWORK
#(7.7778969794178595e-53, 4.043406925735029e-139, 2.7193665607584965, 21.328496436251836)
#pathway: SUMOYLATION BY RANBP2 REGULATES TRANSCRIPTIONAL REPRESSION
#(1.6980563950404756e-41, 6.4438388244313223e-45, 1.0877310573657077, 18.386897997043924)
#pathway: GLYPICAN 3 NETWORK
#(4.8216794064099692e-07, 1.4387321406058163e-08, 1.2976927497874169, 10.788378067376447)
#(Pbrown,Pfisher,Scale_Factor C,DFbrown)
|
IlyaLab/CombiningDependentPvaluesUsingEBM
|
Python/WorkFlow.py
|
Python
|
apache-2.0
| 4,079
|
[
"Gaussian"
] |
63b8f8dff63cf12cac5929ff974da695a5c8398b6238faeed4a892082be4872c
|
#!/usr/bin/env python
"""
Author: Ryan Golhar <ryan.golhar@bms.com>
Date: 12/23/14
This script checks for a library in Galaxy and creates it if it does not already exist.
Usage: create_library.py <API_KEY> <API_URL> <library_name>
Algorithm:
"""
import argparse
from string import split
from common import display, submit
import sys
api_url = ''
api_key = ''
library_to_create = ''
_debug = 0
def main():
print 'Galaxy API URL: %s' % api_url
print 'Galaxy API Key: %s' % api_key
print 'Library to create: %s' % library_to_create
print ''
libs = display(api_key, api_url + '/api/libraries', return_formatted=False)
for library in libs:
if library['name'] == library_to_create:
print 'Library already exists.'
sys.exit(1)
data = {}
data['name'] = library_to_create
result = submit(api_key, api_url + "/api/libraries", data, return_formatted = False)
    if result['id'] != 0:
print 'Library created.'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("api_key", help="API KEY")
parser.add_argument('api_url', help='API URL')
parser.add_argument('library', help="Library")
args = parser.parse_args()
api_key = args.api_key
api_url = args.api_url
library_to_create = args.library
main()
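# Example invocation (the key and URL are illustrative placeholders):
#   python create_library.py 1234abcd http://localhost:8080 MyLibrary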
|
golharam/rgtools
|
scripts/galaxy/api/create_folder.py
|
Python
|
lgpl-3.0
| 1,330
|
[
"Galaxy"
] |
f0dffd1bd478d42e33e06194979ab5db2ba21c076d37ceaf11e1ae55e3ad4d81
|
#
# Bugwarrior documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 16 15:09:22 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Bugwarrior'
copyright = '2014-2016, Ralph Bean and contributors'
docs_authors = [
'Adam Coddington',
'Ben Boeckel',
'Boris Churzin',
'Brian (bex) Exelbierd',
'Dustin J. Mitchell',
'Francesco de Virgilio',
'Grégoire Détrez',
'Iain R. Learmonth',
'Ivan Čukić',
'Jakub Wilk',
'Jens Ohlig',
'Mark Mulligan',
'Matthew Avant',
'Nick Douma',
'Ralph Bean',
'Ryan S. Brown',
'Ryne Everett',
'Sayan Chowdhury',
]
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.8.0'
# The full version, including alpha/beta/rc tags.
release = '0.8.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The default language to highlight source code in.
highlight_language = 'ini'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bugwarriordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Bugwarrior.tex', 'Bugwarrior Documentation',
'Ralph Bean', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bugwarrior', 'Bugwarrior Documentation',
docs_authors, 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Bugwarrior', 'Bugwarrior Documentation',
'Ralph Bean', 'Bugwarrior', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
ralphbean/bugwarrior
|
bugwarrior/docs/conf.py
|
Python
|
gpl-3.0
| 8,914
|
[
"Brian"
] |
ca1b8cfb56d51b8aadb9602b02c424d55046a5ccef677156c50d9bd98e49fab6
|
"""Manage IPython.parallel clusters in the notebook.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
from tornado import web
from zmq.eventloop import ioloop
from IPython.config.configurable import LoggingConfigurable
from IPython.utils.traitlets import Dict, Instance, CFloat
from IPython.parallel.apps.ipclusterapp import IPClusterStart
from IPython.core.profileapp import list_profiles_in
from IPython.core.profiledir import ProfileDir
from IPython.utils.path import get_ipython_dir
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class DummyIPClusterStart(IPClusterStart):
"""Dummy subclass to skip init steps that conflict with global app.
Instantiating and initializing this class should result in fully configured
launchers, but no other side effects or state.
"""
def init_signal(self):
pass
def reinit_logging(self):
pass
class ClusterManager(LoggingConfigurable):
profiles = Dict()
delay = CFloat(1., config=True,
help="delay (in s) between starting the controller and the engines")
loop = Instance('zmq.eventloop.ioloop.IOLoop')
def _loop_default(self):
from zmq.eventloop.ioloop import IOLoop
return IOLoop.instance()
def build_launchers(self, profile_dir):
starter = DummyIPClusterStart(log=self.log)
starter.initialize(['--profile-dir', profile_dir])
cl = starter.controller_launcher
esl = starter.engine_launcher
n = starter.n
return cl, esl, n
def get_profile_dir(self, name, path):
p = ProfileDir.find_profile_dir_by_name(path,name=name)
return p.location
def update_profiles(self):
"""List all profiles in the ipython_dir and cwd.
"""
for path in [get_ipython_dir(), os.getcwdu()]:
for profile in list_profiles_in(path):
pd = self.get_profile_dir(profile, path)
if profile not in self.profiles:
self.log.debug("Adding cluster profile '%s'" % profile)
self.profiles[profile] = {
'profile': profile,
'profile_dir': pd,
'status': 'stopped'
}
def list_profiles(self):
self.update_profiles()
# sorted list, but ensure that 'default' always comes first
default_first = lambda name: name if name != 'default' else ''
result = [self.profile_info(p) for p in sorted(self.profiles, key=default_first)]
return result
def check_profile(self, profile):
if profile not in self.profiles:
raise web.HTTPError(404, u'profile not found')
def profile_info(self, profile):
self.check_profile(profile)
result = {}
data = self.profiles.get(profile)
result['profile'] = profile
result['profile_dir'] = data['profile_dir']
result['status'] = data['status']
if 'n' in data:
result['n'] = data['n']
return result
def start_cluster(self, profile, n=None):
"""Start a cluster for a given profile."""
self.check_profile(profile)
data = self.profiles[profile]
if data['status'] == 'running':
raise web.HTTPError(409, u'cluster already running')
cl, esl, default_n = self.build_launchers(data['profile_dir'])
n = n if n is not None else default_n
def clean_data():
data.pop('controller_launcher',None)
data.pop('engine_set_launcher',None)
data.pop('n',None)
data['status'] = 'stopped'
def engines_stopped(r):
self.log.debug('Engines stopped')
if cl.running:
cl.stop()
clean_data()
esl.on_stop(engines_stopped)
def controller_stopped(r):
self.log.debug('Controller stopped')
if esl.running:
esl.stop()
clean_data()
cl.on_stop(controller_stopped)
dc = ioloop.DelayedCallback(lambda: cl.start(), 0, self.loop)
dc.start()
dc = ioloop.DelayedCallback(lambda: esl.start(n), 1000*self.delay, self.loop)
dc.start()
self.log.debug('Cluster started')
data['controller_launcher'] = cl
data['engine_set_launcher'] = esl
data['n'] = n
data['status'] = 'running'
return self.profile_info(profile)
def stop_cluster(self, profile):
"""Stop a cluster for a given profile."""
self.check_profile(profile)
data = self.profiles[profile]
if data['status'] == 'stopped':
raise web.HTTPError(409, u'cluster not running')
data = self.profiles[profile]
cl = data['controller_launcher']
esl = data['engine_set_launcher']
if cl.running:
cl.stop()
if esl.running:
esl.stop()
# Return a temp info dict, the real one is updated in the on_stop
# logic above.
result = {
'profile': data['profile'],
'profile_dir': data['profile_dir'],
'status': 'stopped'
}
return result
def stop_all_clusters(self):
for p in self.profiles.keys():
self.stop_cluster(p)
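# Minimal usage sketch (assumes a running tornado IOLoop, as in the notebook
# server; the profile name and engine count are illustrative):
#   cm = ClusterManager()
#   cm.list_profiles()                # discover profiles, 'default' first
#   cm.start_cluster('default', n=4)  # start controller, then 4 engines
#   cm.stop_cluster('default')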
|
noslenfa/tdjangorest
|
uw/lib/python2.7/site-packages/IPython/html/services/clusters/clustermanager.py
|
Python
|
apache-2.0
| 5,921
|
[
"Brian"
] |
fb08645a4fb6b7fa5bd7819ffc34fe654786e17bfe63547a593f9b961918702f
|
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit as u
import pdbfixer
padding = 1.0 * u.nanometers
cutoff = 0.95 * u.nanometers
ff = app.ForceField('amber99sbnmr.xml', 'tip3p-fb.xml')
temperature = 293.
pressure = 1.0 * u.atmospheres
fixer = pdbfixer.PDBFixer("./1am7.pdb")
fixer.findMissingResidues()
fixer.findNonstandardResidues()
fixer.replaceNonstandardResidues()
fixer.findMissingAtoms()
fixer.addMissingAtoms()
fixer.removeHeterogens(True)
fixer.addMissingHydrogens()
fixer.removeChains([1, 2, 3, 4, 5])
app.PDBFile.writeFile(fixer.topology, fixer.positions, open("1am7_fixed.pdb", 'w'))
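# Note: padding, cutoff, ff, temperature and pressure above are defined but
# never used here; presumably they feed a later system-building step (e.g.
# ff.createSystem on a solvated app.Modeller topology), which this script omits.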
|
choderalab/open-forcefield-group
|
nmr/code/build_T4.py
|
Python
|
gpl-2.0
| 631
|
[
"OpenMM"
] |
7fa140ff8edbb8d7cd3a4ab8e85f0095e923ff1d47e2907bc1d60a898e1535c0
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Moab(AutotoolsPackage):
"""MOAB is a component for representing and evaluating mesh
data. MOAB can store structured and unstructured mesh, consisting
of elements in the finite element 'zoo.' The functional interface
to MOAB is simple yet powerful, allowing the representation of
many types of metadata commonly found on the mesh. MOAB is
optimized for efficiency in space and time, based on access to
mesh in chunks rather than through individual entities, while also
versatile enough to support individual entity access."""
homepage = "https://bitbucket.org/fathomteam/moab"
url = "http://ftp.mcs.anl.gov/pub/fathom/moab-5.0.0.tar.gz"
version('5.0.0', '1840ca02366f4d3237d44af63e239e3b')
version('4.9.2', '540931a604c180bbd3c1bb3ee8c51dd0')
version('4.9.1', '19cc2189fa266181ad9109b18d0b2ab8')
version('4.9.0', '40695d0a159040683cfa05586ad4a7c2')
version('4.8.2', '1dddd10f162fce3cfffaedc48f6f467d')
variant('mpi', default=True, description='enable mpi support')
variant('hdf5', default=True,
description='Required to enable the hdf5 (default I/O) format')
variant('netcdf', default=False,
description='Required to enable the ExodusII reader/writer.')
variant('pnetcdf', default=False,
description='Enable pnetcdf (AKA parallel-netcdf) support')
variant('zoltan', default=False, description='Enable zoltan support')
variant('cgm', default=False, description='Enable common geometric module')
variant('metis', default=True, description='Enable metis link')
variant('parmetis', default=True, description='Enable parmetis link')
variant('irel', default=False, description='Enable irel interface')
variant('fbigeom', default=False, description='Enable fbigeom interface')
variant('coupler', default=True, description='Enable mbcoupler tool')
variant("debug", default=False, description='enable debug symbols')
variant('shared', default=False,
description='Enables the build of shared libraries')
variant('fortran', default=True, description='Enable Fortran support')
conflicts('+irel', when='~cgm')
conflicts('+pnetcdf', when='~mpi')
conflicts('+parmetis', when='~mpi')
conflicts('+coupler', when='~mpi')
# There are many possible variants for MOAB. Here are examples for
# two of them:
#
# variant('vtk', default=False, description='Enable VTK support')
# variant('cgns', default=False, description='Enable CGNS support')
# depends_on('cgns', when='+cgns')
# depends_on('vtk', when='+vtk')
depends_on('blas')
depends_on('lapack')
depends_on('mpi', when='+mpi')
depends_on('hdf5', when='+hdf5')
depends_on('hdf5+mpi', when='+hdf5+mpi')
depends_on('netcdf', when='+netcdf')
depends_on('parallel-netcdf', when='+pnetcdf')
depends_on('cgm', when='+cgm')
depends_on('metis', when='+metis')
depends_on('parmetis', when='+parmetis')
# FIXME it seems that zoltan needs to be built without fortran
depends_on('zoltan~fortran', when='+zoltan')
def configure_args(self):
spec = self.spec
options = [
'--enable-optimize',
'--disable-vtkMOABReader',
'--disable-mbtagprop',
'--disable-mbmem',
'--disable-spheredecomp',
'--disable-mbsurfplot',
'--disable-gsets',
'--disable-mcnpmit',
'--disable-refiner',
'--disable-h5mtools',
'--disable-mbcslam',
'--with-pic',
'--without-vtk'
]
if '+mpi' in spec:
options.extend([
'--with-mpi=%s' % spec['mpi'].prefix,
'CXX=%s' % spec['mpi'].mpicxx,
'CC=%s' % spec['mpi'].mpicc,
'FC=%s' % spec['mpi'].mpifc
])
# FIXME: --without-mpi does not configure right
# else:
# options.append('--without-mpi')
options.append('--with-blas=%s' % spec['blas'].libs.ld_flags)
options.append('--with-lapack=%s' % spec['lapack'].libs.ld_flags)
if '+hdf5' in spec:
options.append('--with-hdf5=%s' % spec['hdf5'].prefix)
else:
options.append('--without-hdf5')
if '+netcdf' in spec:
options.append('--with-netcdf=%s' % spec['netcdf'].prefix)
else:
options.append('--without-netcdf')
if '+pnetcdf' in spec:
options.append('--with-pnetcdf=%s'
% spec['parallel-netcdf'].prefix)
else:
options.append('--without-pnetcdf')
if '+cgm' in spec:
options.append('--with-cgm=%s' % spec['cgm'].prefix)
if '+irel' in spec:
options.append('--enable-irel')
else:
options.append('--disable-irel')
else:
options.append('--without-cgm')
if '+fbigeom' in spec:
options.append('--enable-fbigeom')
else:
options.append('--disable-fbigeom')
if '+coupler' in spec:
options.append('--enable-mbcoupler')
else:
options.append('--disable-mbcoupler')
if '+metis' in spec:
options.append('--with-metis=%s' % spec['metis'].prefix)
else:
options.append('--without-metis')
if '+parmetis' in spec:
options.append('--with-parmetis=%s' % spec['parmetis'].prefix)
else:
options.append('--without-parmetis')
if '+zoltan' in spec:
options.append('--with-zoltan=%s' % spec['zoltan'].prefix)
else:
options.append('--without-zoltan')
if '+debug' in spec:
options.append('--enable-debug')
else:
options.append('--disable-debug')
# FIXME it seems that with cgm and shared, we have a link
# issue in tools/geometry
if '+shared' in spec:
options.append('--enable-shared')
else:
options.append('--disable-shared')
if '~fortran' in spec:
options.append('--disable-fortran')
else:
options.append('--enable-fortran')
return options
# FIXME Run the install phase with -j 1. There seems to be a problem with
# parallel installations of examples
def install(self, spec, prefix):
make('install', parallel=False)
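# Example spec (illustrative; any MPI/HDF5 providers satisfying the
# dependencies above would do):
#   spack install moab@5.0.0 +mpi+hdf5 ^openmpi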
|
mfherbst/spack
|
var/spack/repos/builtin/packages/moab/package.py
|
Python
|
lgpl-2.1
| 8,028
|
[
"NetCDF",
"VTK"
] |
09d8268aa57fd99e6f8d0d89bda5ff1ac854d59cd80fe853a11fa80dd6816ff9
|
"""
BSD 3-Clause License
Copyright (c) 2017, Mairie de Paris
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import cv2
import imutils
import logging
import numpy as np
from skimage.filters import threshold_local
from imutils.perspective import four_point_transform
from franceocr.cni.exceptions import InvalidChecksumException, InvalidMRZException
from franceocr.exceptions import ImageProcessingException
from franceocr.extraction import find_significant_contours
from franceocr.ocr import ocr_cni_mrz, ocr_read_text, ocr_read_number
from franceocr.utils import DEBUG_display_image, INFO_display_image
def checksum_mrz(string):
"""Compute the checksum of a substring of the MRZ.
Source: https://fr.wikipedia.org/wiki/Carte_nationale_d%27identit%C3%A9_en_France#Codage_Bande_MRZ_.28lecture_optique.29
"""
factors = [7, 3, 1]
result = 0
for index, c in enumerate(string):
if c == '<':
val = 0
elif '0' <= c <= '9':
val = int(c)
elif 'A' <= c <= 'Z':
val = ord(c) - 55
else:
raise ValueError
result += val * factors[index % 3]
return result % 10
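# Hand-checked example (computed from the factors above, not taken from an
# official document): checksum_mrz("12") == 3, since 1*7 + 2*3 = 13 -> 3.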
def cni_mrz_extract(image, improved):
"""
Find and extract the MRZ region from a CNI image.
"""
# resize the image, and convert it to grayscale
image = imutils.resize(image, width=900)
if len(image.shape) == 3 and image.shape[2] == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# smooth the image using a 3x3 Gaussian, then apply the blackhat
# morphological operator to find dark regions on a light background
image = cv2.GaussianBlur(image, (3, 3), 0)
blackhatKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (27, 12))
blackhat = cv2.morphologyEx(image, cv2.MORPH_BLACKHAT, blackhatKernel)
DEBUG_display_image(blackhat, "Blackhat")
# compute the Scharr gradient of the blackhat image and scale the
# result into the range [0, 255]
gradX = cv2.Sobel(blackhat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradX = np.absolute(gradX)
(minVal, maxVal) = (np.min(gradX), np.max(gradX))
gradX = (255 * ((gradX - minVal) / (maxVal - minVal))).astype("uint8")
    # disregard gradients in the top 400 pixel rows; the MRZ sits near the
    # bottom of the card
gradX[:400] = 0
DEBUG_display_image(gradX, "GradX")
# apply a closing operation using the rectangular kernel to close
# gaps in between letters -- then apply Otsu's thresholding method
closingKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (27, 12))
thresh = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, closingKernel)
thresh = cv2.threshold(thresh, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
DEBUG_display_image(thresh, "Before")
    # perform an opening operation with a rectangular kernel to remove small
    # artifacts, then filter out patches too small to belong to the MRZ;
    # a large closing and a series of erosions follow below
openingKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (27, 12))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, openingKernel)
DEBUG_display_image(thresh, "After1")
contours = cv2.findContours(
thresh,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE
)[-2]
def patch_data(contour):
(x, y, w, h) = cv2.boundingRect(contour)
ar = w / h
crWidth = w / image.shape[1]
return ar, crWidth
def is_small_patch(contour):
ar, crWidth = patch_data(contour)
return ar < 5 or crWidth < 0.5
small_patches = filter(is_small_patch, contours)
cv2.fillPoly(thresh, list(small_patches), 0)
DEBUG_display_image(thresh, "After1bis")
mrzClosingKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (80, 40))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, mrzClosingKernel)
DEBUG_display_image(thresh, "After2")
thresh = cv2.erode(thresh, None, iterations=3)
DEBUG_display_image(thresh, "After3")
# during thresholding, it's possible that border pixels were
# included in the thresholding, so let's set 5% of the left and
# right borders to zero
p = int(image.shape[1] * 0.05)
thresh[:, 0:p] = 0
thresh[:, image.shape[1] - p:] = 0
contours = find_significant_contours(thresh)
# loop over the contours
for contour in contours:
(cx, cy), (w, h), angle = cv2.minAreaRect(cv2.convexHull(contour))
if angle < -10:
angle += 90
w, h = h, w
if angle > 10:
angle -= 90
w, h = h, w
ar = w / h
crWidth = w / image.shape[1]
logging.debug("Aspect Ratio %f Width Ratio %f Angle %f", ar, crWidth, angle)
# check to see if the aspect ratio and coverage width are within
# acceptable criteria
# expected_aspect_ratio = 93.3 / (17.9 - 7.25)
if 7 <= ar and crWidth > 0.7:
# pad the bounding box since we applied erosions and now need
# to re-grow it
bbox = cv2.boxPoints(
((cx, cy), (1.12 * w, 1.65 * h), angle)
)
# extract the ROI from the image and draw a bounding box
# surrounding the MRZ
# mrz_image = image[y:y + h, x:x + w].copy()
mrz_image = four_point_transform(image, bbox.reshape(4, 2))
            break
    else:
        # no candidate region matched the MRZ criteria: fail explicitly
        # instead of hitting a NameError on mrz_image below
        raise ImageProcessingException("MRZ_NOT_FOUND", "No MRZ-like region found")
    INFO_display_image(mrz_image, "MRZ")
# Further improve MRZ image quality
thresh = threshold_local(mrz_image, 35, offset=13)
mrz_image = mrz_image > thresh
mrz_image = mrz_image.astype("uint8") * 255
INFO_display_image(mrz_image, "MRZ Improved", resize=False)
return mrz_image
def cni_mrz_read(image):
"""Read the extracted MRZ image to a list of two 36-chars strings."""
mrz_data = ocr_cni_mrz(image)
mrz_data = mrz_data.replace(' ', '')
mrz_data = mrz_data.split('\n')
    # Filter out small strings
mrz_data = list(filter(lambda x: len(x) >= 30, mrz_data))
logging.debug("MRZ data: %s", mrz_data)
return mrz_data
def mrz_read_last_name(text):
return text.rstrip('<').replace('<', '-')
def mrz_read_first_name(text):
return " ".join([first_name.replace('<', '-') for first_name in text.rstrip('<').split('<<')])
def mrz_read_sex(text):
sex = text
if sex not in ('M', 'F'):
raise InvalidMRZException(
"INVALID_MRZ_SEX",
"Expected sex M/F lines, got {}".format(sex)
)
return sex
def cni_mrz_to_dict(mrz_data):
"""Extract human-readable data from the MRZ strings."""
if len(mrz_data) != 2:
raise InvalidMRZException(
"INVALID_MRZ_LINES_COUNT",
"Expected 2 lines, got {}".format(len(mrz_data))
)
if len(mrz_data[0]) > 36 and mrz_data[0][29] == '<' and mrz_data[0][30] != '<':
mrz_data[0] = mrz_data[0][:36]
if len(mrz_data[0]) > 36 and mrz_data[0][-34:-31] == "FRA":
mrz_data[0] = mrz_data[0][-36:]
if len(mrz_data[0]) != 36:
raise InvalidMRZException(
"INVALID_LINE0_LENGTH",
"Expected line 0 to be 36-chars long, is {} ({})".format(
len(mrz_data[0]),
mrz_data[0],
)
)
if len(mrz_data[1]) > 36 and mrz_data[1][34] in ('M', 'F', 'H'):
mrz_data[1] = mrz_data[1][:36]
if len(mrz_data[1]) > 36 and mrz_data[1][-2] in ('M', 'F', 'H'):
mrz_data[1] = mrz_data[1][-36:]
if len(mrz_data[1]) != 36:
raise InvalidMRZException(
"INVALID_LINE1_LENGTH",
"Expected line 1 to be 36-chars long, is {} ({})".format(
len(mrz_data[1]),
mrz_data[1],
)
)
IS_NUMBER = [
(0, 30, 36),
(1, 0, 4),
(1, 7, 13),
(1, 27, 34),
(1, 35, 36),
]
for line, start, end in IS_NUMBER:
mrz_data[line] = mrz_data[line][:start] + ocr_read_number(mrz_data[line][start:end]) + mrz_data[line][end:]
IS_TEXT = [
(0, 0, 30),
(1, 13, 27),
]
for line, start, end in IS_TEXT:
mrz_data[line] = mrz_data[line][:start] + ocr_read_text(mrz_data[line][start:end]) + mrz_data[line][end:]
if mrz_data[1][34] == 'H':
mrz_data[1] = mrz_data[1][:34] + 'M' + mrz_data[1][35]
logging.debug("Clean MRZ data: %s", mrz_data)
line1, line2 = mrz_data
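    # Field layout implied by the slices below (0-based positions):
    #   line1: [0:2] document id 'ID', [2:5] country, [5:30] last name,
    #          [30:36] administrative code
    #   line2: [0:4] emission date YYMM, [4:7] second adm. code,
    #          [7:12] emission code, [12] its checksum, [13:27] first names,
    #          [27:33] birth date YYMMDD, [33] its checksum, [34] sex,
    #          [35] global checksum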
values = {
"id": line1[0:2],
"country": line1[2:5],
"last_name": mrz_read_last_name(line1[5:30]),
"adm_code": line1[30:36],
"emission_year": int(line2[0:2]),
"emission_month": int(line2[2:4]),
"adm_code2": line2[4:7],
"emission_code": int(line2[7:12]),
"checksum_emission": int(line2[12]),
"first_name": mrz_read_first_name(line2[13:27]),
"birth_year": int(line2[27:29]),
"birth_month": int(line2[29:31]),
"birth_day": int(line2[31:33]),
"checksum_birth": int(line2[33]),
"sex": mrz_read_sex(line2[34]),
"checksum": int(line2[35]),
}
if values["id"] != "ID":
raise InvalidMRZException(
"INVALID_MRZ_ID",
"Expected id to be ID, got {}".format(values["id"])
)
# assert(values["adm_code2"] == values["adm_code"][0:3])
if checksum_mrz(line2[0:12]) != values["checksum_emission"]:
raise InvalidChecksumException(
"INVALID_EMIT_CHECKSUM",
"Invalid emit checksum"
)
if checksum_mrz(line2[27:33]) != values["checksum_birth"]:
raise InvalidChecksumException(
"INVALID_BIRTHDATE_CHECKSUM",
"Invalid birth_date checksum"
)
if checksum_mrz(line1 + line2[:-1]) != values["checksum"]:
raise InvalidChecksumException(
"INVALID_GLOBAL_CHECKSUM",
"Invalid global checksum"
)
return values
def process_cni_mrz(image, improved):
try:
mrz_image = cni_mrz_extract(image, improved)
except Exception as ex:
logging.exception("MRZ extraction failed")
raise ImageProcessingException("MRZ_EXTRACTION_FAILED", "MRZ extraction failed") from ex
mrz_data = cni_mrz_read(mrz_image)
return cni_mrz_to_dict(mrz_data)
|
LouisTrezzini/projet-mairie
|
api/franceocr/cni/mrz.py
|
Python
|
bsd-3-clause
| 11,613
|
[
"Gaussian"
] |
8104960311c099c1e83f787e73c15e93b7a575e9832c4e67595540a032d148cc
|
"""
Classes for point set registration
Author: Jeff Mahler
"""
from abc import ABCMeta, abstractmethod
import copy
import IPython
import logging
import numpy as np
import scipy.spatial.distance as ssd
import scipy.optimize as opt
try:
import mayavi.mlab as mlab
except:
logging.warning('Failed to import mayavi')
from alan.core import RigidTransform, PointCloud, NormalCloud
from alan.rgbd import PointToPlaneFeatureMatcher
class RegistrationResult(object):
def __init__(self, T_source_target, cost):
self.T_source_target = T_source_target
self.cost = cost
def skew(xi):
S = np.array([[0, -xi[2,0], xi[1,0]],
[xi[2,0], 0, -xi[0,0]],
[-xi[1,0], xi[0,0], 0]])
return S
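# Note: skew(x).dot(y) is the cross product x × y for (3, 1) column vectors;
# the solvers below use this to linearize small rotations as R ≈ I + skew(v).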
class IterativeRegistrationSolver:
__metaclass__ = ABCMeta
@abstractmethod
def register(self, source, target, matcher, num_iterations=1):
""" Iteratively register objects to one another """
pass
class PointToPlaneICPSolver(IterativeRegistrationSolver):
def __init__(self, sample_size=100, cost_sample_size=100, gamma=100.0, mu=1e-2):
self.sample_size_ = sample_size
self.cost_sample_size_ = cost_sample_size
self.gamma_ = gamma
self.mu_ = mu
IterativeRegistrationSolver.__init__(self)
def register(self, source_point_cloud, target_point_cloud,
source_normal_cloud, target_normal_cloud, matcher,
num_iterations=1, compute_total_cost=True, vis=False):
"""
Iteratively register objects to one another using a modified version of point to plane ICP.
The cost func is actually PointToPlane_COST + gamma * PointToPoint_COST
Params:
source_point_cloud: (PointCloud object) source object points
target_point_cloud: (PointCloud object) target object points
source_normal_cloud: (NormalCloud object) source object outward-pointing normals
target_normal_cloud: (NormalCloud object) target object outward-pointing normals
matcher: (PointToPlaneFeatureMatcher object) object to match the point sets
num_iterations: (int) the number of iterations to run
Returns:
RegistrationResult object containing the source to target transformation
"""
# check valid data
if not isinstance(source_point_cloud, PointCloud) or not isinstance(target_point_cloud, PointCloud):
raise ValueError('Source and target point clouds must be PointCloud objects')
if not isinstance(source_normal_cloud, NormalCloud) or not isinstance(target_normal_cloud, NormalCloud):
raise ValueError('Source and target normal clouds must be NormalCloud objects')
if not isinstance(matcher, PointToPlaneFeatureMatcher):
raise ValueError('Feature matcher must be a PointToPlaneFeatureMatcher object')
if source_point_cloud.num_points != source_normal_cloud.num_points or target_point_cloud.num_points != target_normal_cloud.num_points:
raise ValueError('Input point clouds must have the same number of points as corresponding normal cloud')
# extract source and target point and normal data arrays
orig_source_points = source_point_cloud.data.T
orig_target_points = target_point_cloud.data.T
orig_source_normals = source_normal_cloud.data.T
orig_target_normals = target_normal_cloud.data.T
# setup the problem
        normal_norms = np.linalg.norm(orig_target_normals, axis=1)
valid_inds = np.nonzero(normal_norms)
orig_target_points = orig_target_points[valid_inds[0],:]
orig_target_normals = orig_target_normals[valid_inds[0],:]
normal_norms = np.linalg.norm(orig_source_normals, axis=1)
valid_inds = np.nonzero(normal_norms)
orig_source_points = orig_source_points[valid_inds[0],:]
orig_source_normals = orig_source_normals[valid_inds[0],:]
# alloc buffers for solutions
source_mean_point = np.mean(orig_source_points, axis=0)
target_mean_point = np.mean(orig_target_points, axis=0)
R_sol = np.eye(3)
t_sol = np.zeros([3, 1]) #init with diff between means
t_sol[:,0] = target_mean_point - source_mean_point
# iterate through
for i in range(num_iterations):
logging.info('Point to plane ICP iteration %d' %(i))
# subsample points
source_subsample_inds = np.random.choice(orig_source_points.shape[0], size=self.sample_size_)
source_points = orig_source_points[source_subsample_inds,:]
source_normals = orig_source_normals[source_subsample_inds,:]
target_subsample_inds = np.random.choice(orig_target_points.shape[0], size=self.sample_size_)
target_points = orig_target_points[target_subsample_inds,:]
target_normals = orig_target_normals[target_subsample_inds,:]
# transform source points
source_points = (R_sol.dot(source_points.T) + np.tile(t_sol, [1, source_points.shape[0]])).T
source_normals = (R_sol.dot(source_normals.T)).T
# closest points
corrs = matcher.match(source_points, target_points, source_normals, target_normals)
# solve optimal rotation + translation
valid_corrs = np.where(corrs.index_map != -1)[0]
source_corr_points = corrs.source_points[valid_corrs,:]
target_corr_points = corrs.target_points[corrs.index_map[valid_corrs], :]
target_corr_normals = corrs.target_normals[corrs.index_map[valid_corrs], :]
num_corrs = valid_corrs.shape[0]
if num_corrs == 0:
break
# create A and b matrices for Gauss-Newton step on joint cost function
A = np.zeros([6,6])
b = np.zeros([6,1])
Ap = np.zeros([6,6])
bp = np.zeros([6,1])
G = np.zeros([3,6])
G[:,3:] = np.eye(3)
for i in range(num_corrs):
s = source_corr_points[i:i+1,:].T
t = target_corr_points[i:i+1,:].T
n = target_corr_normals[i:i+1,:].T
G[:,:3] = skew(s).T
A += G.T.dot(n).dot(n.T).dot(G)
b += G.T.dot(n).dot(n.T).dot(t - s)
Ap += G.T.dot(G)
bp += G.T.dot(t - s)
v = np.linalg.solve(A + self.gamma_*Ap + self.mu_*np.eye(6),
b + self.gamma_*bp)
# create pose values from the solution
R = np.eye(3)
R = R + skew(v[:3])
U, S, V = np.linalg.svd(R)
R = U.dot(V)
t = v[3:]
# incrementally update the final transform
R_sol = R.dot(R_sol)
t_sol = R.dot(t_sol) + t
T_source_target = RigidTransform(R_sol, t_sol, from_frame=source_point_cloud.frame, to_frame=target_point_cloud.frame)
total_cost = 0
source_points = (R_sol.dot(orig_source_points.T) + np.tile(t_sol, [1, orig_source_points.shape[0]])).T
source_normals = (R_sol.dot(orig_source_normals.T)).T
if compute_total_cost:
# rematch all points to get the final cost
corrs = matcher.match(source_points, orig_target_points, source_normals, orig_target_normals)
valid_corrs = np.where(corrs.index_map != -1)[0]
num_corrs = valid_corrs.shape[0]
if num_corrs == 0:
return RegistrationResult(T_source_target, np.inf)
# get the corresponding points
source_corr_points = corrs.source_points[valid_corrs,:]
target_corr_points = corrs.target_points[corrs.index_map[valid_corrs], :]
target_corr_normals = corrs.target_normals[corrs.index_map[valid_corrs], :]
# determine total cost
source_target_alignment = np.diag((source_corr_points - target_corr_points).dot(target_corr_normals.T))
point_plane_cost = (1.0 / num_corrs) * np.sum(source_target_alignment * source_target_alignment)
point_dist_cost = (1.0 / num_corrs) * np.sum(np.linalg.norm(source_corr_points - target_corr_points, axis=1)**2)
total_cost = point_plane_cost + self.gamma_ * point_dist_cost
return RegistrationResult(T_source_target, total_cost)
def register_2d(self, source_point_cloud, target_point_cloud,
source_normal_cloud, target_normal_cloud, matcher,
num_iterations=1, compute_total_cost=True, vis=False):
"""
Iteratively register objects to one another using a modified version of point to plane ICP
which only solves for tx and ty (translation in the plane) and theta (rotation about the z axis).
The cost func is actually PointToPlane_COST + gamma * PointToPoint_COST
Points should be specified in the basis of the planar worksurface
Params:
source_point_cloud: (PointCloud object) source object points
target_point_cloud: (PointCloud object) target object points
source_normal_cloud: (NormalCloud object) source object outward-pointing normals
target_normal_cloud: (NormalCloud object) target object outward-pointing normals
matcher: (PointToPlaneFeatureMatcher) object to match the point sets
num_iterations: (int) the number of iterations to run
Returns:
RegistrationResult object containing the source to target transformation
"""
if not isinstance(source_point_cloud, PointCloud) or not isinstance(target_point_cloud, PointCloud):
raise ValueError('Source and target point clouds must be PointCloud objects')
if not isinstance(source_normal_cloud, NormalCloud) or not isinstance(target_normal_cloud, NormalCloud):
raise ValueError('Source and target normal clouds must be NormalCloud objects')
if not isinstance(matcher, PointToPlaneFeatureMatcher):
raise ValueError('Feature matcher must be a PointToPlaneFeatureMatcher object')
if source_point_cloud.num_points != source_normal_cloud.num_points or target_point_cloud.num_points != target_normal_cloud.num_points:
raise ValueError('Input point clouds must have the same number of points as corresponding normal cloud')
# extract source and target point and normal data arrays
orig_source_points = source_point_cloud.data.T
orig_target_points = target_point_cloud.data.T
orig_source_normals = source_normal_cloud.data.T
orig_target_normals = target_normal_cloud.data.T
# setup the problem
logging.info('Setting up problem')
normal_norms = np.linalg.norm(orig_target_normals, axis=1)
valid_inds = np.nonzero(normal_norms)
orig_target_points = orig_target_points[valid_inds[0],:]
orig_target_normals = orig_target_normals[valid_inds[0],:]
normal_norms = np.linalg.norm(orig_source_normals, axis=1)
valid_inds = np.nonzero(normal_norms)
orig_source_points = orig_source_points[valid_inds[0],:]
orig_source_normals = orig_source_normals[valid_inds[0],:]
# alloc buffers for solutions
source_mean_point = np.mean(orig_source_points, axis=0)
target_mean_point = np.mean(orig_target_points, axis=0)
R_sol = np.eye(3)
t_sol = np.zeros([3, 1])
# iterate through
for i in range(num_iterations):
logging.info('Point to plane ICP iteration %d' %(i))
# subsample points
source_subsample_inds = np.random.choice(orig_source_points.shape[0], size=self.sample_size_)
source_points = orig_source_points[source_subsample_inds,:]
source_normals = orig_source_normals[source_subsample_inds,:]
target_subsample_inds = np.random.choice(orig_target_points.shape[0], size=self.sample_size_)
target_points = orig_target_points[target_subsample_inds,:]
target_normals = orig_target_normals[target_subsample_inds,:]
# transform source points
source_points = (R_sol.dot(source_points.T) + np.tile(t_sol, [1, source_points.shape[0]])).T
source_normals = (R_sol.dot(source_normals.T)).T
# closest points
corrs = matcher.match(source_points, target_points, source_normals, target_normals)
# solve optimal rotation + translation
valid_corrs = np.where(corrs.index_map != -1)[0]
source_corr_points = corrs.source_points[valid_corrs,:]
target_corr_points = corrs.target_points[corrs.index_map[valid_corrs], :]
target_corr_normals = corrs.target_normals[corrs.index_map[valid_corrs], :]
num_corrs = valid_corrs.shape[0]
if num_corrs == 0:
break
# create A and b matrices for Gauss-Newton step on joint cost function
A = np.zeros([3,3]) # A and b for point to plane cost
b = np.zeros([3,1])
Ap = np.zeros([3,3]) # A and b for point to point cost
bp = np.zeros([3,1])
G = np.zeros([3,3])
G[:2,1:] = np.eye(2)
for i in range(num_corrs):
s = source_corr_points[i:i+1,:].T
t = target_corr_points[i:i+1,:].T
n = target_corr_normals[i:i+1,:].T
G[0,0] = -s[1]
G[1,0] = s[0]
A += G.T.dot(n).dot(n.T).dot(G)
b += G.T.dot(n).dot(n.T).dot(t - s)
Ap += G.T.dot(G)
bp += G.T.dot(t - s)
v = np.linalg.solve(A + self.gamma_*Ap + self.mu_*np.eye(3),
b + self.gamma_*bp)
# create pose values from the solution
R = np.eye(3)
R = R + skew(np.array([[0],[0],[v[0,0]]]))
U, S, V = np.linalg.svd(R)
R = U.dot(V)
t = np.array([[v[1,0]], [v[2,0]], [0]])
# incrementally update the final transform
R_sol = R.dot(R_sol)
t_sol = R.dot(t_sol) + t
# compute solution transform
T_source_target = RigidTransform(R_sol, t_sol, from_frame=source_point_cloud.frame, to_frame=target_point_cloud.frame)
total_cost = 0
if compute_total_cost:
# subsample points
source_subsample_inds = np.random.choice(orig_source_points.shape[0], size=self.cost_sample_size_)
source_points = orig_source_points[source_subsample_inds,:]
source_normals = orig_source_normals[source_subsample_inds,:]
target_subsample_inds = np.random.choice(orig_target_points.shape[0], size=self.cost_sample_size_)
target_points = orig_target_points[target_subsample_inds,:]
target_normals = orig_target_normals[target_subsample_inds,:]
# transform source points
source_points = (R_sol.dot(source_points.T) + np.tile(t_sol, [1, source_points.shape[0]])).T
source_normals = (R_sol.dot(source_normals.T)).T
# rematch to get the total cost
corrs = matcher.match(source_points, target_points, source_normals, target_normals)
valid_corrs = np.where(corrs.index_map != -1)[0]
num_corrs = valid_corrs.shape[0]
if num_corrs == 0:
return RegistrationResult(T_source_target, np.inf)
# get the corresponding points
source_corr_points = corrs.source_points[valid_corrs,:]
target_corr_points = corrs.target_points[corrs.index_map[valid_corrs], :]
target_corr_normals = corrs.target_normals[corrs.index_map[valid_corrs], :]
# determine total cost
source_target_alignment = np.diag((source_corr_points - target_corr_points).dot(target_corr_normals.T))
point_plane_cost = (1.0 / num_corrs) * np.sum(source_target_alignment * source_target_alignment)
point_dist_cost = (1.0 / num_corrs) * np.sum(np.linalg.norm(source_corr_points - target_corr_points, axis=1)**2)
total_cost = point_plane_cost + self.gamma_ * point_dist_cost
return RegistrationResult(T_source_target, total_cost)
"""
BELOW ARE DEPRECATED, BUT SHOULD BE UPDATED WHEN THE TIME COMES
"""
class RegistrationFunc:
__metaclass__ = ABCMeta
def __init__(self):
pass
@abstractmethod
def register(self, correspondences):
""" Register objects to one another """
pass
class RigidRegistrationSolver(RegistrationFunc):
def __init__(self):
        pass
def register(self, correspondences, weights=None):
""" Register objects to one another """
# setup the problem
self.source_points = correspondences.source_points
self.target_points = correspondences.target_points
N = correspondences.num_matches
if weights is None:
weights = np.ones([correspondences.num_matches, 1])
if weights.shape[1] == 1:
weights = np.tile(weights, (1, 3)) # tile to get to 3d space
# calculate centroids (using weights)
source_centroid = np.sum(weights * self.source_points, axis=0) / np.sum(weights, axis=0)
target_centroid = np.sum(weights * self.target_points, axis=0) / np.sum(weights, axis=0)
# center the datasets
source_centered_points = self.source_points - np.tile(source_centroid, (N,1))
target_centered_points = self.target_points - np.tile(target_centroid, (N,1))
# find the covariance matrix and finding the SVD
H = np.dot((weights * source_centered_points).T, weights * target_centered_points)
U, S, V = np.linalg.svd(H) # this decomposes H = USV, so V is "V.T"
# calculate the rotation
R = np.dot(V.T, U.T)
# special case (reflection)
if np.linalg.det(R) < 0:
V[2,:] *= -1
R = np.dot(V.T, U.T)
# calculate the translation + concatenate the rotation and translation
t = np.matrix(np.dot(-R, source_centroid) + target_centroid)
tf_source_target = np.hstack([R, t.T])
self.R_=R
self.t_=t
self.source_centroid=source_centroid
self.target_centroid=target_centroid
def transform(self,x):
return self.R_.dot(x.T)+self.t_.T
|
mdlaskey/DeepLfD
|
src/deep_lfd/rgbd/registration.py
|
Python
|
gpl-3.0
| 18,495
|
[
"Mayavi"
] |
b66d6cfd12161e3026a7257f6208b2b421943f5532de04e313b008d926d50dc5
|
########################################################################
# $HeadURL$
# File : JobPathAgent.py
# Author : Stuart Paterson
########################################################################
"""
The Job Path Agent determines the chain of Optimizing Agents that must
work on the job prior to the scheduling decision.
Initially this takes jobs in the received state and starts the jobs on the
optimizer chain. The next development will be to explicitly specify the
path through the optimizers.
"""
__RCSID__ = "$Id$"
from DIRAC.WorkloadManagementSystem.Agent.OptimizerModule import OptimizerModule
from DIRAC.Core.Utilities.ModuleFactory import ModuleFactory
from DIRAC.Core.Utilities import List
from DIRAC.WorkloadManagementSystem.Client.JobDescription import JobDescription
from DIRAC import S_OK, S_ERROR
OPTIMIZER_NAME = 'JobPath'
class JobPathAgent( OptimizerModule ):
"""
The specific Optimizer must provide the following methods:
- checkJob() - the main method called for each job
and it can provide:
- initializeOptimizer() before each execution cycle
"""
#############################################################################
def initializeOptimizer( self ):
"""Initialize specific parameters for JobPathAgent.
"""
self.startingMajorStatus = "Received"
self.startingMinorStatus = False
#self.requiredJobInfo = "jdlOriginal"
return S_OK()
def beginExecution( self ):
"""Called before each Agent execution cycle
"""
self.basePath = self.am_getOption( 'BasePath', ['JobPath', 'JobSanity'] )
self.inputData = self.am_getOption( 'InputData', ['InputData'] )
self.endPath = self.am_getOption( 'EndPath', ['JobScheduling', 'TaskQueue'] )
self.voPlugin = self.am_getOption( 'VOPlugin', '' )
return S_OK()
  def __syncJobDesc( self, jobId, jobDesc, classAdJob ):
    """ Sync a modified job description back into the ClassAd and the JobDB
    """
if not jobDesc.isDirty():
return
for op in jobDesc.getOptions():
classAdJob.insertAttributeString( op, jobDesc.getVar( op ) )
self.jobDB.setJobJDL( jobId, jobDesc.dumpDescriptionAsJDL() )
#############################################################################
def checkJob( self, job, classAdJob ):
"""This method controls the checking of the job.
"""
jobDesc = JobDescription()
result = jobDesc.loadDescription( classAdJob.asJDL() )
if not result[ 'OK' ]:
self.setFailedJob( job, result['Message'], classAdJob )
return result
self.__syncJobDesc( job, jobDesc, classAdJob )
#Check if job defines a path itself
    # FIXME: only some groups should be able to overwrite the jobPath
jobPath = classAdJob.get_expression( 'JobPath' ).replace( '"', '' ).replace( 'Unknown', '' )
#jobPath = jobDesc.getVarWithDefault( 'JobPath' ).replace( 'Unknown', '' )
if jobPath:
# HACK: Remove the { and } to ensure we have a simple string
jobPath = jobPath.replace( "{", "" ).replace( "}", "" )
self.log.info( 'Job %s defines its own optimizer chain %s' % ( job, jobPath ) )
return self.processJob( job, List.fromChar( jobPath ) )
#If no path, construct based on JDL and VO path module if present
path = list( self.basePath )
if self.voPlugin:
argumentsDict = {'JobID':job, 'ClassAd':classAdJob, 'ConfigPath':self.am_getModuleParam( "section" )}
moduleFactory = ModuleFactory()
moduleInstance = moduleFactory.getModule( self.voPlugin, argumentsDict )
if not moduleInstance['OK']:
self.log.error( 'Could not instantiate module:', '%s' % ( self.voPlugin ) )
self.setFailedJob( job, 'Could not instantiate module: %s' % ( self.voPlugin ), classAdJob )
return S_ERROR( 'Holding pending jobs' )
module = moduleInstance['Value']
result = module.execute()
if not result['OK']:
self.log.warn( 'Execution of %s failed' % ( self.voPlugin ) )
return result
extraPath = List.fromChar( result['Value'] )
if extraPath:
path.extend( extraPath )
self.log.verbose( 'Adding extra VO specific optimizers to path: %s' % ( extraPath ) )
else:
self.log.verbose( 'No VO specific plugin module specified' )
#Should only rely on an input data setting in absence of VO plugin
result = self.jobDB.getInputData( job )
if not result['OK']:
self.log.error( 'Failed to get input data from JobDB', job )
self.log.warn( result['Message'] )
return result
if result['Value']:
# if the returned tuple is not empty it will evaluate true
self.log.info( 'Job %s has an input data requirement' % ( job ) )
path.extend( self.inputData )
else:
self.log.info( 'Job %s has no input data requirement' % ( job ) )
path.extend( self.endPath )
self.log.info( 'Constructed path for job %s is: %s' % ( job, path ) )
return self.processJob( job, path )
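  # Example: with the default options above, a job with an input data
  # requirement is assigned the chain
  #   JobPath, JobSanity, InputData, JobScheduling, TaskQueue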
#############################################################################
def processJob( self, job, chain ):
"""Set job path and send to next optimizer
"""
result = self.setOptimizerChain( job, chain )
if not result['OK']:
self.log.warn( result['Message'] )
result = self.setJobParam( job, 'JobPath', ','.join( chain ) )
if not result['OK']:
self.log.warn( result['Message'] )
return self.setNextOptimizer( job )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
Sbalbp/DIRAC
|
WorkloadManagementSystem/Agent/JobPathAgent.py
|
Python
|
gpl-3.0
| 5,598
|
[
"DIRAC"
] |
b971374bd92b3bfb84ef6db98206f0ad8e12b7e03b2b7278aa7b76582b334775
|
from tapiriik.settings import WEB_ROOT, HTTP_SOURCE_ADDR, GARMIN_CONNECT_USER_WATCH_ACCOUNTS
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.service_record import ServiceRecord
from tapiriik.services.interchange import UploadedActivity, ActivityType, ActivityStatistic, ActivityStatisticUnit, Waypoint, Location, Lap
from tapiriik.services.api import APIException, APIWarning, APIExcludeActivity, UserException, UserExceptionType
from tapiriik.services.statistic_calculator import ActivityStatisticCalculator
from tapiriik.services.tcx import TCXIO
from tapiriik.services.gpx import GPXIO
from tapiriik.services.fit import FITIO
from tapiriik.services.sessioncache import SessionCache
from tapiriik.services.devices import DeviceIdentifier, DeviceIdentifierType, Device
from tapiriik.database import cachedb, db
from django.core.urlresolvers import reverse
import pytz
from datetime import datetime, timedelta
import requests
import os
import math
import logging
import time
import json
import re
import random
import tempfile
from urllib.parse import urlencode
logger = logging.getLogger(__name__)
class GarminConnectService(ServiceBase):
ID = "garminconnect"
DisplayName = "Garmin Connect"
DisplayAbbreviation = "GC"
AuthenticationType = ServiceAuthenticationType.UsernamePassword
RequiresExtendedAuthorizationDetails = True
PartialSyncRequiresTrigger = len(GARMIN_CONNECT_USER_WATCH_ACCOUNTS) > 0
PartialSyncTriggerPollInterval = timedelta(minutes=20)
PartialSyncTriggerPollMultiple = len(GARMIN_CONNECT_USER_WATCH_ACCOUNTS.keys())
ConfigurationDefaults = {
"WatchUserKey": None,
"WatchUserLastID": 0
}
_activityMappings = {
"running": ActivityType.Running,
"cycling": ActivityType.Cycling,
"mountain_biking": ActivityType.MountainBiking,
"walking": ActivityType.Walking,
"hiking": ActivityType.Hiking,
"resort_skiing_snowboarding": ActivityType.DownhillSkiing,
"cross_country_skiing": ActivityType.CrossCountrySkiing,
"skate_skiing": ActivityType.CrossCountrySkiing, # Well, it ain't downhill?
"backcountry_skiing_snowboarding": ActivityType.CrossCountrySkiing, # ish
"skating": ActivityType.Skating,
"swimming": ActivityType.Swimming,
"rowing": ActivityType.Rowing,
"elliptical": ActivityType.Elliptical,
"fitness_equipment": ActivityType.Gym,
"mountaineering": ActivityType.Climbing,
"all": ActivityType.Other, # everything will eventually resolve to this
"multi_sport": ActivityType.Other # Most useless type? You decide!
}
_reverseActivityMappings = { # Removes ambiguities when mapping back to their activity types
"running": ActivityType.Running,
"cycling": ActivityType.Cycling,
"mountain_biking": ActivityType.MountainBiking,
"walking": ActivityType.Walking,
"hiking": ActivityType.Hiking,
"resort_skiing_snowboarding": ActivityType.DownhillSkiing,
"cross_country_skiing": ActivityType.CrossCountrySkiing,
"skating": ActivityType.Skating,
"swimming": ActivityType.Swimming,
"rowing": ActivityType.Rowing,
"elliptical": ActivityType.Elliptical,
"fitness_equipment": ActivityType.Gym,
"mountaineering": ActivityType.Climbing,
"other": ActivityType.Other # I guess? (vs. "all" that is)
}
SupportedActivities = list(_activityMappings.values())
SupportsHR = SupportsCadence = True
SupportsActivityDeletion = True
_sessionCache = SessionCache("garminconnect", lifetime=timedelta(minutes=120), freshen_on_get=True)
_unitMap = {
"mph": ActivityStatisticUnit.MilesPerHour,
"kph": ActivityStatisticUnit.KilometersPerHour,
"hmph": ActivityStatisticUnit.HectometersPerHour,
"hydph": ActivityStatisticUnit.HundredYardsPerHour,
"celcius": ActivityStatisticUnit.DegreesCelcius,
"fahrenheit": ActivityStatisticUnit.DegreesFahrenheit,
"mile": ActivityStatisticUnit.Miles,
"kilometer": ActivityStatisticUnit.Kilometers,
"foot": ActivityStatisticUnit.Feet,
"meter": ActivityStatisticUnit.Meters,
"yard": ActivityStatisticUnit.Yards,
"kilocalorie": ActivityStatisticUnit.Kilocalories,
"bpm": ActivityStatisticUnit.BeatsPerMinute,
"stepsPerMinute": ActivityStatisticUnit.DoubledStepsPerMinute,
"rpm": ActivityStatisticUnit.RevolutionsPerMinute,
"watt": ActivityStatisticUnit.Watts,
"second": ActivityStatisticUnit.Seconds,
"ms": ActivityStatisticUnit.Milliseconds
}
_obligatory_headers = {
"Referer": "https://sync.tapiriik.com"
}
def __init__(self):
cachedHierarchy = cachedb.gc_type_hierarchy.find_one()
if not cachedHierarchy:
rawHierarchy = requests.get("https://connect.garmin.com/proxy/activity-service-1.2/json/activity_types", headers=self._obligatory_headers).text
self._activityHierarchy = json.loads(rawHierarchy)["dictionary"]
cachedb.gc_type_hierarchy.insert({"Hierarchy": rawHierarchy})
else:
self._activityHierarchy = json.loads(cachedHierarchy["Hierarchy"])["dictionary"]
rate_lock_path = tempfile.gettempdir() + "/gc_rate.%s.lock" % HTTP_SOURCE_ADDR
# Ensure the rate lock file exists (...the easy way)
open(rate_lock_path, "a").close()
self._rate_lock = open(rate_lock_path, "r+")
def _rate_limit(self):
import fcntl, struct, time
		min_period = 1 # I appear to have been banned from Garmin Connect while determining this.
fcntl.flock(self._rate_lock,fcntl.LOCK_EX)
try:
self._rate_lock.seek(0)
last_req_start = self._rate_lock.read()
if not last_req_start:
last_req_start = 0
else:
last_req_start = float(last_req_start)
wait_time = max(0, min_period - (time.time() - last_req_start))
time.sleep(wait_time)
self._rate_lock.seek(0)
self._rate_lock.write(str(time.time()))
self._rate_lock.flush()
finally:
fcntl.flock(self._rate_lock,fcntl.LOCK_UN)
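	# A minimal illustration (not part of the original code) of how this throttle behaves,
	# assuming min_period stays at 1 second and `svc` is an already-constructed instance:
	#
	#   svc._rate_limit()                  # finds an empty/stale timestamp, returns immediately
	#   t0 = time.time()
	#   svc._rate_limit()                  # sleeps until ~min_period has passed since the write above
	#   elapsed = time.time() - t0         # roughly >= 1 second
	#
	# Because the timestamp lives in a file guarded by fcntl.flock, the spacing is enforced
	# across every worker process on the host, not just within one interpreter.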
def _get_session(self, record=None, email=None, password=None, skip_cache=False):
from tapiriik.auth.credential_storage import CredentialStore
cached = self._sessionCache.Get(record.ExternalID if record else email)
if cached and not skip_cache:
logger.debug("Using cached credential")
return cached
if record:
# longing for C style overloads...
password = CredentialStore.Decrypt(record.ExtendedAuthorization["Password"])
email = CredentialStore.Decrypt(record.ExtendedAuthorization["Email"])
session = requests.Session()
# JSIG CAS, cool I guess.
# Not quite OAuth though, so I'll continue to collect raw credentials.
# Commented stuff left in case this ever breaks because of missing parameters...
data = {
"username": email,
"password": password,
"_eventId": "submit",
"embed": "true",
# "displayNameRequired": "false"
}
params = {
"service": "https://connect.garmin.com/post-auth/login",
# "redirectAfterAccountLoginUrl": "http://connect.garmin.com/post-auth/login",
# "redirectAfterAccountCreationUrl": "http://connect.garmin.com/post-auth/login",
# "webhost": "olaxpw-connect00.garmin.com",
"clientId": "GarminConnect",
# "gauthHost": "https://sso.garmin.com/sso",
# "rememberMeShown": "true",
# "rememberMeChecked": "false",
"consumeServiceTicket": "false",
# "id": "gauth-widget",
# "embedWidget": "false",
# "cssUrl": "https://static.garmincdn.com/com.garmin.connect/ui/src-css/gauth-custom.css",
# "source": "http://connect.garmin.com/en-US/signin",
# "createAccountShown": "true",
# "openCreateAccount": "false",
# "usernameShown": "true",
# "displayNameShown": "false",
# "initialFocus": "true",
# "locale": "en"
}
# I may never understand what motivates people to mangle a perfectly good protocol like HTTP in the ways they do...
preResp = session.get("https://sso.garmin.com/sso/login", params=params)
if preResp.status_code != 200:
raise APIException("SSO prestart error %s %s" % (preResp.status_code, preResp.text))
data["lt"] = re.search("name=\"lt\"\s+value=\"([^\"]+)\"", preResp.text).groups(1)[0]
ssoResp = session.post("https://sso.garmin.com/sso/login", params=params, data=data, allow_redirects=False)
if ssoResp.status_code != 200 or "temporarily unavailable" in ssoResp.text:
raise APIException("SSO error %s %s" % (ssoResp.status_code, ssoResp.text))
ticket_match = re.search("ticket=([^']+)'", ssoResp.text)
if not ticket_match:
raise APIException("Invalid login", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
ticket = ticket_match.groups(1)[0]
# ...AND WE'RE NOT DONE YET!
self._rate_limit()
gcRedeemResp = session.get("https://connect.garmin.com/post-auth/login", params={"ticket": ticket}, allow_redirects=False)
if gcRedeemResp.status_code != 302:
raise APIException("GC redeem-start error %s %s" % (gcRedeemResp.status_code, gcRedeemResp.text))
# There are 6 redirects that need to be followed to get the correct cookie
# ... :(
expected_redirect_count = 6
current_redirect_count = 1
while True:
self._rate_limit()
gcRedeemResp = session.get(gcRedeemResp.headers["location"], allow_redirects=False)
if current_redirect_count >= expected_redirect_count and gcRedeemResp.status_code != 200:
raise APIException("GC redeem %d/%d error %s %s" % (current_redirect_count, expected_redirect_count, gcRedeemResp.status_code, gcRedeemResp.text))
if gcRedeemResp.status_code == 200 or gcRedeemResp.status_code == 404:
break
current_redirect_count += 1
if current_redirect_count > expected_redirect_count:
break
self._sessionCache.Set(record.ExternalID if record else email, session)
session.headers.update(self._obligatory_headers)
return session
def WebInit(self):
self.UserAuthorizationURL = WEB_ROOT + reverse("auth_simple", kwargs={"service": self.ID})
def Authorize(self, email, password):
from tapiriik.auth.credential_storage import CredentialStore
session = self._get_session(email=email, password=password, skip_cache=True)
# TODO: http://connect.garmin.com/proxy/userprofile-service/socialProfile/ has the proper immutable user ID, not that anyone ever changes this one...
self._rate_limit()
username = session.get("http://connect.garmin.com/user/username").json()["username"]
if not len(username):
raise APIException("Unable to retrieve username", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
return (username, {}, {"Email": CredentialStore.Encrypt(email), "Password": CredentialStore.Encrypt(password)})
def UserUploadedActivityURL(self, uploadId):
return "https://connect.garmin.com/modern/activity/%d" % uploadId
def _resolveActivityType(self, act_type):
# Mostly there are two levels of a hierarchy, so we don't really need this as the parent is included in the listing.
# But maybe they'll change that some day?
while act_type not in self._activityMappings:
try:
act_type = [x["parent"]["key"] for x in self._activityHierarchy if x["key"] == act_type][0]
except IndexError:
raise ValueError("Activity type not found in activity hierarchy")
return self._activityMappings[act_type]
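	# Illustration (hypothetical keys, for explanation only): a leaf type such as
	# "street_running" would be absent from _activityMappings, so the loop above replaces it
	# with its parent key ("running"), which does map, yielding ActivityType.Running. A key
	# that never appears in the downloaded hierarchy at all raises ValueError instead.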
def DownloadActivityList(self, serviceRecord, exhaustive=False):
#http://connect.garmin.com/proxy/activity-search-service-1.0/json/activities?&start=0&limit=50
session = self._get_session(record=serviceRecord)
page = 1
pageSz = 100
activities = []
exclusions = []
while True:
logger.debug("Req with " + str({"start": (page - 1) * pageSz, "limit": pageSz}))
self._rate_limit()
retried_auth = False
while True:
res = session.get("https://connect.garmin.com/modern/proxy/activity-search-service-1.0/json/activities", params={"start": (page - 1) * pageSz, "limit": pageSz})
# It's 10 PM and I have no clue why it's throwing these errors, maybe we just need to log in again?
if res.status_code in [500, 403] and not retried_auth:
logger.debug("Retrying auth w/o cache")
retried_auth = True
session = self._get_session(serviceRecord, skip_cache=True)
else:
break
try:
res = res.json()["results"]
except ValueError:
				res_txt = res.text # So the response text can be captured in the log message
				raise APIException("Parse failure in GC list resp: %s - %s" % (res.status_code, res_txt))
if "activities" not in res:
break # No activities on this page - empty account.
for act in res["activities"]:
act = act["activity"]
activity = UploadedActivity()
# Don't really know why sumSampleCountTimestamp doesn't appear in swim activities - they're definitely timestamped...
activity.Stationary = "sumSampleCountSpeed" not in act and "sumSampleCountTimestamp" not in act
activity.GPS = "endLatitude" in act
activity.Private = act["privacy"]["key"] == "private"
try:
activity.TZ = pytz.timezone(act["activityTimeZone"]["key"])
except pytz.exceptions.UnknownTimeZoneError:
activity.TZ = pytz.FixedOffset(float(act["activityTimeZone"]["offset"]) * 60)
logger.debug("Name " + act["activityName"]["value"] + ":")
if len(act["activityName"]["value"].strip()) and act["activityName"]["value"] != "Untitled": # This doesn't work for internationalized accounts, oh well.
activity.Name = act["activityName"]["value"]
if len(act["activityDescription"]["value"].strip()):
activity.Notes = act["activityDescription"]["value"]
# beginTimestamp/endTimestamp is in UTC
activity.StartTime = pytz.utc.localize(datetime.utcfromtimestamp(float(act["beginTimestamp"]["millis"])/1000))
if "sumElapsedDuration" in act:
activity.EndTime = activity.StartTime + timedelta(0, round(float(act["sumElapsedDuration"]["value"])))
elif "sumDuration" in act:
activity.EndTime = activity.StartTime + timedelta(minutes=float(act["sumDuration"]["minutesSeconds"].split(":")[0]), seconds=float(act["sumDuration"]["minutesSeconds"].split(":")[1]))
else:
activity.EndTime = pytz.utc.localize(datetime.utcfromtimestamp(float(act["endTimestamp"]["millis"])/1000))
logger.debug("Activity s/t " + str(activity.StartTime) + " on page " + str(page))
activity.AdjustTZ()
if "sumDistance" in act and float(act["sumDistance"]["value"]) != 0:
activity.Stats.Distance = ActivityStatistic(self._unitMap[act["sumDistance"]["uom"]], value=float(act["sumDistance"]["value"]))
if "device" in act and act["device"]["key"] != "unknown":
devId = DeviceIdentifier.FindMatchingIdentifierOfType(DeviceIdentifierType.GC, {"Key": act["device"]["key"]})
ver_split = act["device"]["key"].split(".")
ver_maj = None
ver_min = None
if len(ver_split) == 4:
# 2.90.0.0
ver_maj = int(ver_split[0])
ver_min = int(ver_split[1])
activity.Device = Device(devId, verMaj=ver_maj, verMin=ver_min)
activity.Type = self._resolveActivityType(act["activityType"]["key"])
activity.CalculateUID()
activity.ServiceData = {"ActivityID": int(act["activityId"])}
activities.append(activity)
logger.debug("Finished page " + str(page) + " of " + str(res["search"]["totalPages"]))
if not exhaustive or int(res["search"]["totalPages"]) == page:
break
else:
page += 1
return activities, exclusions
def _downloadActivitySummary(self, serviceRecord, activity):
activityID = activity.ServiceData["ActivityID"]
session = self._get_session(record=serviceRecord)
self._rate_limit()
res = session.get("https://connect.garmin.com/modern/proxy/activity-service-1.3/json/activity/" + str(activityID))
try:
raw_data = res.json()
except ValueError:
raise APIException("Failure downloading activity summary %s:%s" % (res.status_code, res.text))
stat_map = {}
def mapStat(gcKey, statKey, type):
stat_map[gcKey] = {
"key": statKey,
"attr": type
}
def applyStats(gc_dict, stats_obj):
for gc_key, stat in stat_map.items():
if gc_key in gc_dict:
value = float(gc_dict[gc_key]["value"])
units = self._unitMap[gc_dict[gc_key]["uom"]]
if math.isinf(value):
						continue # GC sometimes returns the minimum speed as "-Infinity" instead of 0 :S
getattr(stats_obj, stat["key"]).update(ActivityStatistic(units, **({stat["attr"]: value})))
mapStat("SumMovingDuration", "MovingTime", "value")
mapStat("SumDuration", "TimerTime", "value")
mapStat("SumDistance", "Distance", "value")
mapStat("MinSpeed", "Speed", "min")
mapStat("MaxSpeed", "Speed", "max")
mapStat("WeightedMeanSpeed", "Speed", "avg")
mapStat("MinAirTemperature", "Temperature", "min")
mapStat("MaxAirTemperature", "Temperature", "max")
mapStat("WeightedMeanAirTemperature", "Temperature", "avg")
mapStat("SumEnergy", "Energy", "value")
mapStat("MaxHeartRate", "HR", "max")
mapStat("WeightedMeanHeartRate", "HR", "avg")
mapStat("MaxDoubleCadence", "RunCadence", "max")
mapStat("WeightedMeanDoubleCadence", "RunCadence", "avg")
mapStat("MaxBikeCadence", "Cadence", "max")
mapStat("WeightedMeanBikeCadence", "Cadence", "avg")
mapStat("MinPower", "Power", "min")
mapStat("MaxPower", "Power", "max")
mapStat("WeightedMeanPower", "Power", "avg")
mapStat("MinElevation", "Elevation", "min")
mapStat("MaxElevation", "Elevation", "max")
mapStat("GainElevation", "Elevation", "gain")
mapStat("LossElevation", "Elevation", "loss")
applyStats(raw_data["activity"]["activitySummary"], activity.Stats)
for lap_data in raw_data["activity"]["totalLaps"]["lapSummaryList"]:
lap = Lap()
if "BeginTimestamp" in lap_data:
lap.StartTime = pytz.utc.localize(datetime.utcfromtimestamp(float(lap_data["BeginTimestamp"]["value"]) / 1000))
if "EndTimestamp" in lap_data:
lap.EndTime = pytz.utc.localize(datetime.utcfromtimestamp(float(lap_data["EndTimestamp"]["value"]) / 1000))
elapsed_duration = None
if "SumElapsedDuration" in lap_data:
elapsed_duration = timedelta(seconds=round(float(lap_data["SumElapsedDuration"]["value"])))
elif "SumDuration" in lap_data:
elapsed_duration = timedelta(seconds=round(float(lap_data["SumDuration"]["value"])))
if lap.StartTime and elapsed_duration:
# Always recalculate end time based on duration, if we have the start time
lap.EndTime = lap.StartTime + elapsed_duration
if not lap.StartTime and lap.EndTime and elapsed_duration:
# Sometimes calculate start time based on duration
lap.StartTime = lap.EndTime - elapsed_duration
if not lap.StartTime or not lap.EndTime:
# Garmin Connect is weird.
raise APIExcludeActivity("Activity lap has no BeginTimestamp or EndTimestamp", user_exception=UserException(UserExceptionType.Corrupt))
applyStats(lap_data, lap.Stats)
activity.Laps.append(lap)
# In Garmin Land, max can be smaller than min for this field :S
if activity.Stats.Power.Max is not None and activity.Stats.Power.Min is not None and activity.Stats.Power.Min > activity.Stats.Power.Max:
activity.Stats.Power.Min = None
def DownloadActivity(self, serviceRecord, activity):
# First, download the summary stats and lap stats
self._downloadActivitySummary(serviceRecord, activity)
if len(activity.Laps) == 1:
activity.Stats = activity.Laps[0].Stats # They must be identical to pass the verification
if activity.Stationary:
# Nothing else to download
return activity
# https://connect.garmin.com/proxy/activity-service-1.3/json/activityDetails/####
activityID = activity.ServiceData["ActivityID"]
session = self._get_session(record=serviceRecord)
self._rate_limit()
res = session.get("https://connect.garmin.com/modern/proxy/activity-service-1.3/json/activityDetails/" + str(activityID) + "?maxSize=999999999")
try:
raw_data = res.json()["com.garmin.activity.details.json.ActivityDetails"]
except ValueError:
raise APIException("Activity data parse error for %s: %s" % (res.status_code, res.text))
if "measurements" not in raw_data:
activity.Stationary = True # We were wrong, oh well
return activity
attrs_map = {}
def _map_attr(gc_key, wp_key, units, in_location=False, is_timestamp=False):
attrs_map[gc_key] = {
"key": wp_key,
"to_units": units,
"in_location": in_location, # Blegh
"is_timestamp": is_timestamp # See above
}
_map_attr("directSpeed", "Speed", ActivityStatisticUnit.MetersPerSecond)
_map_attr("sumDistance", "Distance", ActivityStatisticUnit.Meters)
_map_attr("directHeartRate", "HR", ActivityStatisticUnit.BeatsPerMinute)
_map_attr("directBikeCadence", "Cadence", ActivityStatisticUnit.RevolutionsPerMinute)
_map_attr("directDoubleCadence", "RunCadence", ActivityStatisticUnit.StepsPerMinute) # 2*x mystery solved
_map_attr("directAirTemperature", "Temp", ActivityStatisticUnit.DegreesCelcius)
_map_attr("directPower", "Power", ActivityStatisticUnit.Watts)
_map_attr("directElevation", "Altitude", ActivityStatisticUnit.Meters, in_location=True)
_map_attr("directLatitude", "Latitude", None, in_location=True)
_map_attr("directLongitude", "Longitude", None, in_location=True)
_map_attr("directTimestamp", "Timestamp", None, is_timestamp=True)
# Figure out which metrics we'll be seeing in this activity
attrs_indexed = {}
attr_count = len(raw_data["measurements"])
for measurement in raw_data["measurements"]:
key = measurement["key"]
if key in attrs_map:
if attrs_map[key]["to_units"]:
attrs_map[key]["from_units"] = self._unitMap[measurement["unit"]]
if attrs_map[key]["to_units"] == attrs_map[key]["from_units"]:
attrs_map[key]["to_units"] = attrs_map[key]["from_units"] = None
attrs_indexed[measurement["metricsIndex"]] = attrs_map[key]
# Process the data frames
frame_idx = 0
active_lap_idx = 0
for frame in raw_data["metrics"]:
wp = Waypoint()
for idx, attr in attrs_indexed.items():
value = frame["metrics"][idx]
target_obj = wp
if attr["in_location"]:
if not wp.Location:
wp.Location = Location()
target_obj = wp.Location
# Handle units
if attr["is_timestamp"]:
value = pytz.utc.localize(datetime.utcfromtimestamp(value / 1000))
elif attr["to_units"]:
value = ActivityStatistic.convertValue(value, attr["from_units"], attr["to_units"])
# Write the value (can't use __dict__ because __slots__)
setattr(target_obj, attr["key"], value)
# Fix up lat/lng being zero (which appear to represent missing coords)
if wp.Location and wp.Location.Latitude == 0 and wp.Location.Longitude == 0:
wp.Location.Latitude = None
wp.Location.Longitude = None
# Please visit a physician before complaining about this
if wp.HR == 0:
wp.HR = None
# Bump the active lap if required
while (active_lap_idx < len(activity.Laps) - 1 and # Not the last lap
activity.Laps[active_lap_idx + 1].StartTime <= wp.Timestamp):
active_lap_idx += 1
activity.Laps[active_lap_idx].Waypoints.append(wp)
frame_idx += 1
return activity
def UploadActivity(self, serviceRecord, activity):
#/proxy/upload-service-1.1/json/upload/.fit
fit_file = FITIO.Dump(activity)
files = {"data": ("tap-sync-" + str(os.getpid()) + "-" + activity.UID + ".fit", fit_file)}
session = self._get_session(record=serviceRecord)
self._rate_limit()
res = session.post("https://connect.garmin.com/proxy/upload-service-1.1/json/upload/.fit", files=files)
res = res.json()["detailedImportResult"]
if len(res["successes"]) == 0:
if len(res["failures"]) and len(res["failures"][0]["messages"]) and res["failures"][0]["messages"][0]["content"] == "Duplicate activity":
logger.debug("Duplicate")
return # ...cool?
raise APIException("Unable to upload activity %s" % res)
if len(res["successes"]) > 1:
raise APIException("Uploaded succeeded, resulting in too many activities")
actid = res["successes"][0]["internalId"]
name = activity.Name # Capture in logs
notes = activity.Notes
encoding_headers = {"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"} # GC really, really needs this part, otherwise it throws obscure errors like "Invalid signature for signature method HMAC-SHA1"
warnings = []
try:
if activity.Name and activity.Name.strip():
self._rate_limit()
res = session.post("https://connect.garmin.com/proxy/activity-service-1.2/json/name/" + str(actid), data=urlencode({"value": activity.Name}).encode("UTF-8"), headers=encoding_headers)
try:
res = res.json()
except:
raise APIWarning("Activity name request failed - %s" % res.text)
if "display" not in res or res["display"]["value"] != activity.Name:
raise APIWarning("Unable to set activity name")
except APIWarning as e:
warnings.append(e)
try:
if activity.Notes and activity.Notes.strip():
self._rate_limit()
res = session.post("https://connect.garmin.com/proxy/activity-service-1.2/json/description/" + str(actid), data=urlencode({"value": activity.Notes}).encode("UTF-8"), headers=encoding_headers)
try:
res = res.json()
except:
raise APIWarning("Activity notes request failed - %s" % res.text)
if "display" not in res or res["display"]["value"] != activity.Notes:
raise APIWarning("Unable to set activity notes")
except APIWarning as e:
warnings.append(e)
try:
if activity.Type not in [ActivityType.Running, ActivityType.Cycling, ActivityType.Other]:
# Set the legit activity type - whatever it is, it's not supported by the TCX schema
acttype = [k for k, v in self._reverseActivityMappings.items() if v == activity.Type]
if len(acttype) == 0:
raise APIWarning("GarminConnect does not support activity type " + activity.Type)
else:
acttype = acttype[0]
self._rate_limit()
res = session.post("https://connect.garmin.com/proxy/activity-service-1.2/json/type/" + str(actid), data={"value": acttype})
res = res.json()
if "activityType" not in res or res["activityType"]["key"] != acttype:
raise APIWarning("Unable to set activity type")
except APIWarning as e:
warnings.append(e)
try:
if activity.Private:
self._rate_limit()
res = session.post("https://connect.garmin.com/proxy/activity-service-1.2/json/privacy/" + str(actid), data={"value": "private"})
res = res.json()
if "definition" not in res or res["definition"]["key"] != "private":
raise APIWarning("Unable to set activity privacy")
except APIWarning as e:
warnings.append(e)
if len(warnings):
raise APIWarning(str(warnings)) # Meh
return actid
def _user_watch_user(self, serviceRecord):
if not serviceRecord.GetConfiguration()["WatchUserKey"]:
user_key = random.choice(list(GARMIN_CONNECT_USER_WATCH_ACCOUNTS.keys()))
logger.info("Assigning %s a new watch user %s" % (serviceRecord.ExternalID, user_key))
serviceRecord.SetConfiguration({"WatchUserKey": user_key})
return GARMIN_CONNECT_USER_WATCH_ACCOUNTS[user_key]
else:
return GARMIN_CONNECT_USER_WATCH_ACCOUNTS[serviceRecord.GetConfiguration()["WatchUserKey"]]
def SubscribeToPartialSyncTrigger(self, serviceRecord):
# PUT http://connect.garmin.com/proxy/userprofile-service/connection/request/cpfair
# (the poll worker finishes the connection)
user_name = self._user_watch_user(serviceRecord)["Name"]
logger.info("Requesting connection to %s from %s" % (user_name, serviceRecord.ExternalID))
self._rate_limit()
resp = self._get_session(record=serviceRecord, skip_cache=True).put("https://connect.garmin.com/proxy/userprofile-service/connection/request/%s" % user_name)
try:
assert resp.status_code == 200
assert resp.json()["requestStatus"] == "Created"
except:
raise APIException("Connection request failed with user watch account %s: %s %s" % (user_name, resp.status_code, resp.text))
else:
serviceRecord.SetConfiguration({"WatchConnectionID": resp.json()["id"]})
serviceRecord.SetPartialSyncTriggerSubscriptionState(True)
def UnsubscribeFromPartialSyncTrigger(self, serviceRecord):
# GET http://connect.garmin.com/proxy/userprofile-service/socialProfile/connections to get the ID
# {"fullName":null,"userConnections":[{"userId":5754439,"displayName":"TapiirikAPITEST","fullName":null,"location":null,"profileImageUrlMedium":null,"profileImageUrlSmall":null,"connectionRequestId":1566024,"userConnectionStatus":2,"userRoles":["ROLE_CONNECTUSER","ROLE_FITNESS_USER"],"userPro":false}]}
# PUT http://connect.garmin.com/proxy/userprofile-service/connection/end/1904201
# Unfortunately there's no way to delete a pending request - the poll worker will do this from the other end
active_watch_user = self._user_watch_user(serviceRecord)
session = self._get_session(email=active_watch_user["Username"], password=active_watch_user["Password"], skip_cache=True)
if "WatchConnectionID" in serviceRecord.GetConfiguration():
self._rate_limit()
dc_resp = session.put("https://connect.garmin.com/modern/proxy/userprofile-service/connection/end/%s" % serviceRecord.GetConfiguration()["WatchConnectionID"])
if dc_resp.status_code != 200:
raise APIException("Error disconnecting user watch accunt %s from %s: %s %s" % (active_watch_user, serviceRecord.ExternalID, dc_resp.status_code, dc_resp.text))
serviceRecord.SetConfiguration({"WatchUserKey": None, "WatchConnectionID": None})
serviceRecord.SetPartialSyncTriggerSubscriptionState(False)
else:
# I broke Garmin Connect by having too many connections per account, so I can no longer query the connection list
# All the connection request emails are sitting unopened in an email inbox, though, so I'll be backfilling the IDs from those
raise APIException("Did not store connection ID")
def ShouldForcePartialSyncTrigger(self, serviceRecord):
# The poll worker can't see private activities.
return serviceRecord.GetConfiguration()["sync_private"]
def PollPartialSyncTrigger(self, multiple_index):
# TODO: ensure the appropriate users are connected
# GET http://connect.garmin.com/modern/proxy/userprofile-service/connection/pending to get ID
# [{"userId":6244126,"displayName":"tapiriik-sync-ulukhaktok","fullName":"tapiriik sync ulukhaktok","profileImageUrlSmall":null,"connectionRequestId":1904086,"requestViewed":true,"userRoles":["ROLE_CONNECTUSER"],"userPro":false}]
# PUT http://connect.garmin.com/proxy/userprofile-service/connection/accept/1904086
# ...later...
# GET http://connect.garmin.com/proxy/activitylist-service/activities/comments/subscriptionFeed?start=1&limit=10
# First, accept any pending connections
watch_user_key = sorted(list(GARMIN_CONNECT_USER_WATCH_ACCOUNTS.keys()))[multiple_index]
watch_user = GARMIN_CONNECT_USER_WATCH_ACCOUNTS[watch_user_key]
session = self._get_session(email=watch_user["Username"], password=watch_user["Password"], skip_cache=True)
# Then, check for users with new activities
self._rate_limit()
watch_activities_resp = session.get("https://connect.garmin.com/modern/proxy/activitylist-service/activities/subscriptionFeed?limit=1000")
try:
watch_activities = watch_activities_resp.json()
except ValueError:
raise Exception("Could not parse new activities list: %s %s" % (watch_activities_resp.status_code, watch_activities_resp.text))
active_user_pairs = [(x["ownerDisplayName"], x["activityId"]) for x in watch_activities["activityList"]]
active_user_pairs.sort(key=lambda x: x[1]) # Highest IDs last (so they make it into the dict, supplanting lower IDs where appropriate)
active_users = dict(active_user_pairs)
active_user_recs = [ServiceRecord(x) for x in db.connections.find({"ExternalID": {"$in": list(active_users.keys())}, "Service": "garminconnect"}, {"Config": 1, "ExternalID": 1, "Service": 1})]
if len(active_user_recs) != len(active_users.keys()):
logger.warning("Mismatch %d records found for %d active users" % (len(active_user_recs), len(active_users.keys())))
to_sync_ids = []
for active_user_rec in active_user_recs:
last_active_id = active_user_rec.GetConfiguration()["WatchUserLastID"]
this_active_id = active_users[active_user_rec.ExternalID]
if this_active_id > last_active_id:
to_sync_ids.append(active_user_rec.ExternalID)
active_user_rec.SetConfiguration({"WatchUserLastID": this_active_id, "WatchUserKey": watch_user_key})
self._rate_limit()
pending_connections_resp = session.get("https://connect.garmin.com/modern/proxy/userprofile-service/connection/pending")
try:
pending_connections = pending_connections_resp.json()
except ValueError:
logger.error("Could not parse pending connection requests: %s %s" % (pending_connections_resp.status_code, pending_connections_resp.text))
else:
valid_pending_connections_external_ids = [x["ExternalID"] for x in db.connections.find({"Service": "garminconnect", "ExternalID": {"$in": [x["displayName"] for x in pending_connections]}}, {"ExternalID": 1})]
logger.info("Accepting %d, denying %d connection requests for %s" % (len(valid_pending_connections_external_ids), len(pending_connections) - len(valid_pending_connections_external_ids), watch_user_key))
for pending_connect in pending_connections:
if pending_connect["displayName"] in valid_pending_connections_external_ids:
self._rate_limit()
connect_resp = session.put("https://connect.garmin.com/modern/proxy/userprofile-service/connection/accept/%s" % pending_connect["connectionRequestId"])
if connect_resp.status_code != 200:
logger.error("Error accepting request on watch account %s: %s %s" % (watch_user["Name"], connect_resp.status_code, connect_resp.text))
else:
self._rate_limit()
ignore_resp = session.put("https://connect.garmin.com/modern/proxy/userprofile-service/connection/decline/%s" % pending_connect["connectionRequestId"])
return to_sync_ids
def RevokeAuthorization(self, serviceRecord):
# nothing to do here...
pass
def DeleteCachedData(self, serviceRecord):
# nothing cached...
pass
def DeleteActivity(self, serviceRecord, uploadId):
session = self._get_session(record=serviceRecord)
self._rate_limit()
del_res = session.delete("https://connect.garmin.com/modern/proxy/activity-service/activity/%d" % uploadId)
del_res.raise_for_status()
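# A minimal usage sketch (illustrative only - in practice tapiriik drives this class through
# its Service/sync machinery rather than calling it directly):
#
#   svc = GarminConnectService()
#   external_id, auth, extended_auth = svc.Authorize("user@example.com", "password")
#   # persist extended_auth on a ServiceRecord, then list activities for that record:
#   # activities, exclusions = svc.DownloadActivityList(service_record, exhaustive=False)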
| dlenski/tapiriik | tapiriik/services/GarminConnect/garminconnect.py | Python | apache-2.0 | 40,482 | ["VisIt"] | 412728bcc2e1ceaeb67d6aa5f94608b7371b203a98cfdeaf455f51ac9bc36855 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions that call the four main :py:mod:`driver`
functions: :py:mod:`driver.energy`, :py:mod:`driver.optimize`,
:py:mod:`driver.response`, and :py:mod:`driver.frequency`.
"""
from __future__ import absolute_import
import re
import os
import math
import warnings
import pickle
import copy
import collections
from psi4.driver import p4const
from psi4.driver.driver import *
# never import aliases into this file
#########################
## Start of Database ##
#########################
DB_RGT = {}
DB_RXN = {}
def database(name, db_name, **kwargs):
r"""Function to access the molecule objects and reference energies of
popular chemical databases.
:aliases: db()
:returns: (*float*) Mean absolute deviation of the database in kcal/mol
:PSI variables:
.. hlist::
:columns: 1
* :psivar:`db_name DATABASE MEAN SIGNED DEVIATION <db_nameDATABASEMEANSIGNEDDEVIATION>`
* :psivar:`db_name DATABASE MEAN ABSOLUTE DEVIATION <db_nameDATABASEMEANABSOLUTEDEVIATION>`
* :psivar:`db_name DATABASE ROOT-MEAN-SQUARE DEVIATION <db_nameDATABASEROOT-MEAN-SQUARESIGNEDDEVIATION>`
* Python dictionaries of results accessible as ``DB_RGT`` and ``DB_RXN``.
.. note:: It is very easy to make a database from a collection of xyz files
using the script :source:`share/scripts/ixyz2database.py`.
See :ref:`sec:createDatabase` for details.
.. caution:: Some features are not yet implemented. Buy a developer some coffee.
- In sow/reap mode, use only global options (e.g., the local option set by ``set scf scf_type df`` will not be respected).
.. note:: To access a database that is not embedded in a |PSIfour|
distribution, add the path to the directory containing the database
to the environment variable :envvar:`PYTHONPATH`.
:type name: string
:param name: ``'scf'`` || ``'sapt0'`` || ``'ccsd(t)'`` || etc.
First argument, usually unlabeled. Indicates the computational method
to be applied to the database. May be any valid argument to
:py:func:`~driver.energy`.
:type db_name: string
:param db_name: ``'BASIC'`` || ``'S22'`` || ``'HTBH'`` || etc.
Second argument, usually unlabeled. Indicates the requested database
name, matching (case insensitive) the name of a python file in
``psi4/share/databases`` or :envvar:`PYTHONPATH`. Consult that
directory for available databases and literature citations.
:type func: :ref:`function <op_py_function>`
:param func: |dl| ``energy`` |dr| || ``optimize`` || ``cbs``
Indicates the type of calculation to be performed on each database
member. The default performs a single-point ``energy('name')``, while
        ``optimize`` performs a geometry optimization on each reagent, and
``cbs`` performs a compound single-point energy. If a nested series
of python functions is intended (see :ref:`sec:intercalls`), use
keyword ``db_func`` instead of ``func``.
:type mode: string
:param mode: |dl| ``'continuous'`` |dr| || ``'sow'`` || ``'reap'``
Indicates whether the calculations required to complete the
database are to be run in one file (``'continuous'``) or are to be
farmed out in an embarrassingly parallel fashion
(``'sow'``/``'reap'``). For the latter, run an initial job with
``'sow'`` and follow instructions in its output file.
:type cp: :ref:`boolean <op_py_boolean>`
:param cp: ``'on'`` || |dl| ``'off'`` |dr|
Indicates whether counterpoise correction is employed in computing
interaction energies. Use this option and NOT the :py:func:`~wrappers.cp`
function for BSSE correction in database(). Option available
(See :ref:`sec:availableDatabases`) only for databases of bimolecular complexes.
:type rlxd: :ref:`boolean <op_py_boolean>`
:param rlxd: ``'on'`` || |dl| ``'off'`` |dr|
Indicates whether correction for deformation energy is
employed in computing interaction energies. Option available
(See :ref:`sec:availableDatabases`) only for databases of bimolecular complexes
with non-frozen monomers, e.g., HBC6.
:type symm: :ref:`boolean <op_py_boolean>`
:param symm: |dl| ``'on'`` |dr| || ``'off'``
Indicates whether the native symmetry of the database reagents is
employed (``'on'``) or whether it is forced to :math:`C_1` symmetry
(``'off'``). Some computational methods (e.g., SAPT) require no
symmetry, and this will be set by database().
:type zpe: :ref:`boolean <op_py_boolean>`
:param zpe: ``'on'`` || |dl| ``'off'`` |dr|
Indicates whether zero-point-energy corrections are appended to
single-point energy values. Option valid only for certain
thermochemical databases. Disabled until Hessians ready.
:type benchmark: string
:param benchmark: |dl| ``'default'`` |dr| || ``'S22A'`` || etc.
Indicates whether a non-default set of reference energies, if
available (See :ref:`sec:availableDatabases`), are employed for the
calculation of error statistics.
:type tabulate: array of strings
:param tabulate: |dl| ``[]`` |dr| || ``['scf total energy', 'natom']`` || etc.
Indicates whether to form tables of variables other than the
primary requested energy. Available for any PSI variable.
:type subset: string or array of strings
:param subset:
Indicates a subset of the full database to run. This is a very
flexible option and can be used in three distinct ways, outlined
below. Note that two take a string and the last takes an array.
See `Available Databases`_ for available values.
* ``'small'`` || ``'large'`` || ``'equilibrium'``
Calls predefined subsets of the requested database, either
``'small'``, a few of the smallest database members,
``'large'``, the largest of the database members, or
``'equilibrium'``, the equilibrium geometries for a database
composed of dissociation curves.
* ``'BzBz_S'`` || ``'FaOOFaON'`` || ``'ArNe'`` || ``'HB'`` || etc.
For databases composed of dissociation curves, or otherwise
divided into subsets, individual curves and subsets can be
called by name. Consult the database python files for available
molecular systems (case insensitive).
* ``[1,2,5]`` || ``['1','2','5']`` || ``['BzMe-3.5', 'MeMe-5.0']`` || etc.
Specify a list of database members to run. Consult the
database python files for available molecular systems. This
is the only portion of database input that is case sensitive;
choices for this keyword must match the database python file.
:examples:
>>> # [1] Two-stage SCF calculation on short, equilibrium, and long helium dimer
>>> db('scf','RGC10',cast_up='sto-3g',subset=['HeHe-0.85','HeHe-1.0','HeHe-1.5'], tabulate=['scf total energy','natom'])
>>> # [2] Counterpoise-corrected interaction energies for three complexes in S22
>>> # Error statistics computed wrt an old benchmark, S22A
>>> database('mp2','S22',cp=1,subset=[16,17,8],benchmark='S22A')
>>> # [3] SAPT0 on the neon dimer dissociation curve
>>> db('sapt0',subset='NeNe',cp=0,symm=0,db_name='RGC10')
>>> # [4] Optimize system 1 in database S22, producing tables of scf and mp2 energy
>>> db('mp2','S22',db_func=optimize,subset=[1], tabulate=['mp2 total energy','current energy'])
>>> # [5] CCSD on the smallest systems of HTBH, a hydrogen-transfer database
>>> database('ccsd','HTBH',subset='small', tabulate=['ccsd total energy', 'mp2 total energy'])
"""
lowername = name #TODO
kwargs = p4util.kwargs_lower(kwargs)
# Wrap any positional arguments into kwargs (for intercalls among wrappers)
if not('name' in kwargs) and name:
kwargs['name'] = name #.lower()
if not('db_name' in kwargs) and db_name:
kwargs['db_name'] = db_name
# Establish function to call
func = kwargs.pop('db_func', kwargs.pop('func', energy))
kwargs['db_func'] = func
# Bounce to CP if bsse kwarg (someday)
if kwargs.get('bsse_type', None) is not None:
raise ValidationError("""Database: Cannot specify bsse_type for database. Use the cp keyword withing database instead.""")
optstash = p4util.OptionsState(
['WRITER_FILE_LABEL'],
['SCF', 'REFERENCE'])
# Wrapper wholly defines molecule. discard any passed-in
kwargs.pop('molecule', None)
# Paths to search for database files: here + PSIPATH + library + PYTHONPATH
psidatadir = os.environ.get('PSIDATADIR', None)
#nolongerpredictable psidatadir = __file__ + '/../..' if psidatadir is None else psidatadir
libraryPath = ':' + os.path.abspath(psidatadir) + '/databases'
driver_loc = os.path.dirname(os.path.abspath(__file__))
dbPath = os.path.abspath('.') + \
':' + ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':')]) + \
libraryPath + \
':' + driver_loc # so the databases can "import qcdb"
sys.path = [sys.path[0]] + dbPath.split(':') + sys.path[1:]
# TODO this should be modernized a la interface_cfour
# Define path and load module for requested database
database = p4util.import_ignorecase(db_name)
if database is None:
core.print_out('\nPython module for database %s failed to load\n\n' % (db_name))
core.print_out('\nSearch path that was tried:\n')
core.print_out(", ".join(map(str, sys.path)))
raise ValidationError("Python module loading problem for database " + str(db_name))
else:
dbse = database.dbse
HRXN = database.HRXN
ACTV = database.ACTV
RXNM = database.RXNM
BIND = database.BIND
TAGL = database.TAGL
GEOS = database.GEOS
try:
DATA = database.DATA
except AttributeError:
DATA = {}
user_writer_file_label = core.get_global_option('WRITER_FILE_LABEL')
user_reference = core.get_global_option('REFERENCE')
# Configuration based upon e_name & db_name options
# Force non-supramolecular if needed
if not hasattr(lowername, '__call__') and re.match(r'^.*sapt', lowername):
try:
database.ACTV_SA
except AttributeError:
raise ValidationError('Database %s not suitable for non-supramolecular calculation.' % (db_name))
else:
ACTV = database.ACTV_SA
# Force open-shell if needed
openshell_override = 0
if user_reference in ['RHF', 'RKS']:
try:
database.isOS
except AttributeError:
pass
else:
if yes.match(str(database.isOS)):
openshell_override = 1
core.print_out('\nSome reagents in database %s require an open-shell reference; will be reset to UHF/UKS as needed.\n' % (db_name))
# Configuration based upon database keyword options
# Option symmetry- whether symmetry treated normally or turned off (currently req'd for dfmp2 & dft)
db_symm = kwargs.get('symm', True)
symmetry_override = 0
if db_symm is False:
symmetry_override = 1
elif db_symm is True:
pass
else:
raise ValidationError("""Symmetry mode '%s' not valid.""" % (db_symm))
# Option mode of operation- whether db run in one job or files farmed out
db_mode = kwargs.pop('db_mode', kwargs.pop('mode', 'continuous')).lower()
kwargs['db_mode'] = db_mode
if db_mode == 'continuous':
pass
elif db_mode == 'sow':
pass
elif db_mode == 'reap':
db_linkage = kwargs.get('linkage', None)
if db_linkage is None:
raise ValidationError("""Database execution mode 'reap' requires a linkage option.""")
else:
raise ValidationError("""Database execution mode '%s' not valid.""" % (db_mode))
# Option counterpoise- whether for interaction energy databases run in bsse-corrected or not
db_cp = kwargs.get('cp', False)
if db_cp is True:
try:
database.ACTV_CP
except AttributeError:
raise ValidationError("""Counterpoise correction mode 'yes' invalid for database %s.""" % (db_name))
else:
ACTV = database.ACTV_CP
elif db_cp is False:
pass
else:
raise ValidationError("""Counterpoise correction mode '%s' not valid.""" % (db_cp))
# Option relaxed- whether for non-frozen-monomer interaction energy databases include deformation correction or not?
db_rlxd = kwargs.get('rlxd', False)
if db_rlxd is True:
if db_cp is True:
try:
database.ACTV_CPRLX
database.RXNM_CPRLX
except AttributeError:
raise ValidationError('Deformation and counterpoise correction mode \'yes\' invalid for database %s.' % (db_name))
else:
ACTV = database.ACTV_CPRLX
RXNM = database.RXNM_CPRLX
elif db_cp is False:
try:
database.ACTV_RLX
except AttributeError:
raise ValidationError('Deformation correction mode \'yes\' invalid for database %s.' % (db_name))
else:
ACTV = database.ACTV_RLX
elif db_rlxd is False:
#elif no.match(str(db_rlxd)):
pass
else:
raise ValidationError('Deformation correction mode \'%s\' not valid.' % (db_rlxd))
# Option zero-point-correction- whether for thermochem databases jobs are corrected by zpe
db_zpe = kwargs.get('zpe', False)
if db_zpe is True:
raise ValidationError('Zero-point-correction mode \'yes\' not yet implemented.')
elif db_zpe is False:
pass
else:
raise ValidationError('Zero-point-correction \'mode\' %s not valid.' % (db_zpe))
# Option benchmark- whether error statistics computed wrt alternate reference energies
db_benchmark = 'default'
if 'benchmark' in kwargs:
db_benchmark = kwargs['benchmark']
if db_benchmark.lower() == 'default':
pass
else:
BIND = p4util.getattr_ignorecase(database, 'BIND_' + db_benchmark)
if BIND is None:
raise ValidationError('Special benchmark \'%s\' not available for database %s.' % (db_benchmark, db_name))
# Option tabulate- whether tables of variables other than primary energy method are formed
# TODO db(func=cbs,tabulate=[non-current-energy]) # broken
db_tabulate = []
if 'tabulate' in kwargs:
db_tabulate = kwargs['tabulate']
# Option subset- whether all of the database or just a portion is run
db_subset = HRXN
if 'subset' in kwargs:
db_subset = kwargs['subset']
if isinstance(db_subset, basestring):
if db_subset.lower() == 'small':
try:
database.HRXN_SM
except AttributeError:
raise ValidationError("""Special subset 'small' not available for database %s.""" % (db_name))
else:
HRXN = database.HRXN_SM
elif db_subset.lower() == 'large':
try:
database.HRXN_LG
except AttributeError:
raise ValidationError("""Special subset 'large' not available for database %s.""" % (db_name))
else:
HRXN = database.HRXN_LG
elif db_subset.lower() == 'equilibrium':
try:
database.HRXN_EQ
except AttributeError:
raise ValidationError("""Special subset 'equilibrium' not available for database %s.""" % (db_name))
else:
HRXN = database.HRXN_EQ
else:
HRXN = p4util.getattr_ignorecase(database, db_subset)
if HRXN is None:
HRXN = p4util.getattr_ignorecase(database, 'HRXN_' + db_subset)
if HRXN is None:
raise ValidationError("""Special subset '%s' not available for database %s.""" % (db_subset, db_name))
else:
temp = []
for rxn in db_subset:
if rxn in HRXN:
temp.append(rxn)
else:
raise ValidationError("""Subset element '%s' not a member of database %s.""" % (str(rxn), db_name))
HRXN = temp
temp = []
for rxn in HRXN:
temp.append(ACTV['%s-%s' % (dbse, rxn)])
HSYS = p4util.drop_duplicates(sum(temp, []))
# Sow all the necessary reagent computations
core.print_out("\n\n")
p4util.banner(("Database %s Computation" % (db_name)))
core.print_out("\n")
# write index of calcs to output file
if db_mode == 'continuous':
instructions = """\n The database single-job procedure has been selected through mode='continuous'.\n"""
instructions += """ Calculations for the reagents will proceed in the order below and will be followed\n"""
instructions += """ by summary results for the database.\n\n"""
for rgt in HSYS:
instructions += """ %-s\n""" % (rgt)
instructions += """\n Alternatively, a farming-out of the database calculations may be accessed through\n"""
instructions += """ the database wrapper option mode='sow'/'reap'.\n\n"""
core.print_out(instructions)
# write sow/reap instructions and index of calcs to output file and reap input file
if db_mode == 'sow':
instructions = """\n The database sow/reap procedure has been selected through mode='sow'. In addition\n"""
instructions += """ to this output file (which contains no quantum chemical calculations), this job\n"""
instructions += """ has produced a number of input files (%s-*.in) for individual database members\n""" % (dbse)
instructions += """ and a single input file (%s-master.in) with a database(mode='reap') command.\n""" % (dbse)
instructions += """ The former may look very peculiar since processed and pickled python rather than\n"""
instructions += """ raw input is written. Follow the instructions below to continue.\n\n"""
instructions += """ (1) Run all of the %s-*.in input files on any variety of computer architecture.\n""" % (dbse)
instructions += """ The output file names must be as given below.\n\n"""
for rgt in HSYS:
instructions += """ psi4 -i %-27s -o %-27s\n""" % (rgt + '.in', rgt + '.out')
instructions += """\n (2) Gather all the resulting output files in a directory. Place input file\n"""
instructions += """ %s-master.in into that directory and run it. The job will be trivial in\n""" % (dbse)
instructions += """ length and give summary results for the database in its output file.\n\n"""
instructions += """ psi4 -i %-27s -o %-27s\n\n""" % (dbse + '-master.in', dbse + '-master.out')
instructions += """ Alternatively, a single-job execution of the database may be accessed through\n"""
instructions += """ the database wrapper option mode='continuous'.\n\n"""
core.print_out(instructions)
with open('%s-master.in' % (dbse), 'w') as fmaster:
fmaster.write('# This is a psi4 input file auto-generated from the database() wrapper.\n\n')
fmaster.write("database('%s', '%s', mode='reap', cp='%s', rlxd='%s', zpe='%s', benchmark='%s', linkage=%d, subset=%s, tabulate=%s)\n\n" %
(name, db_name, db_cp, db_rlxd, db_zpe, db_benchmark, os.getpid(), HRXN, db_tabulate))
# Loop through chemical systems
ERGT = {}
ERXN = {}
VRGT = {}
VRXN = {}
for rgt in HSYS:
VRGT[rgt] = {}
# build string of title banner
banners = ''
banners += """core.print_out('\\n')\n"""
banners += """p4util.banner(' Database %s Computation: Reagent %s \\n %s')\n""" % (db_name, rgt, TAGL[rgt])
banners += """core.print_out('\\n')\n\n"""
# build string of lines that defines contribution of rgt to each rxn
actives = ''
actives += """core.print_out(' Database Contributions Map:\\n %s\\n')\n""" % ('-' * 75)
for rxn in HRXN:
db_rxn = dbse + '-' + str(rxn)
if rgt in ACTV[db_rxn]:
actives += """core.print_out(' reagent %s contributes by %.4f to reaction %s\\n')\n""" \
% (rgt, RXNM[db_rxn][rgt], db_rxn)
actives += """core.print_out('\\n')\n\n"""
# build string of commands for options from the input file TODO: handle local options too
commands = ''
commands += """\ncore.set_memory(%s)\n\n""" % (core.get_memory())
for chgdopt in core.get_global_option_list():
if core.has_global_option_changed(chgdopt):
chgdoptval = core.get_global_option(chgdopt)
#chgdoptval = core.get_option(chgdopt)
if isinstance(chgdoptval, basestring):
commands += """core.set_global_option('%s', '%s')\n""" % (chgdopt, chgdoptval)
elif isinstance(chgdoptval, int) or isinstance(chgdoptval, float):
commands += """core.set_global_option('%s', %s)\n""" % (chgdopt, chgdoptval)
else:
pass
#raise ValidationError('Option \'%s\' is not of a type (string, int, float, bool) that can be processed by database wrapper.' % (chgdopt))
# build string of molecule and commands that are dependent on the database
commands += '\n'
if symmetry_override:
commands += """molecule.reset_point_group('c1')\n"""
commands += """molecule.fix_orientation(True)\n"""
commands += """molecule.fix_com(True)\n"""
commands += """molecule.update_geometry()\n"""
if (openshell_override) and (molecule.multiplicity() != 1):
if user_reference == 'RHF':
commands += """core.set_global_option('REFERENCE', 'UHF')\n"""
elif user_reference == 'RKS':
commands += """core.set_global_option('REFERENCE', 'UKS')\n"""
commands += """core.set_global_option('WRITER_FILE_LABEL', '%s')\n""" % \
(user_writer_file_label + ('' if user_writer_file_label == '' else '-') + rgt)
# all modes need to step through the reagents but all for different purposes
# continuous: defines necessary commands, executes energy(method) call, and collects results into dictionary
# sow: opens individual reagent input file, writes the necessary commands, and writes energy(method) call
# reap: opens individual reagent output file, collects results into a dictionary
if db_mode == 'continuous':
exec(banners)
molecule = core.Molecule.create_molecule_from_string(GEOS[rgt].create_psi4_string_from_molecule())
molecule.set_name(rgt)
molecule.update_geometry()
exec(commands)
#print 'MOLECULE LIVES %23s %8s %4d %4d %4s' % (rgt, core.get_global_option('REFERENCE'),
# molecule.molecular_charge(), molecule.multiplicity(), molecule.schoenflies_symbol())
ERGT[rgt] = func(molecule=molecule, **kwargs)
core.print_variables()
exec(actives)
for envv in db_tabulate:
VRGT[rgt][envv.upper()] = core.get_variable(envv)
core.set_global_option("REFERENCE", user_reference)
core.clean()
#core.opt_clean()
core.clean_variables()
elif db_mode == 'sow':
with open('%s.in' % (rgt), 'w') as freagent:
freagent.write('# This is a psi4 input file auto-generated from the database() wrapper.\n\n')
freagent.write(banners)
freagent.write(p4util.format_molecule_for_input(GEOS[rgt], 'dbmol'))
freagent.write(commands)
freagent.write('''\npickle_kw = ("""''')
pickle.dump(kwargs, freagent)
freagent.write('''""")\n''')
freagent.write("""\nkwargs = pickle.loads(pickle_kw)\n""")
freagent.write("""electronic_energy = %s(**kwargs)\n\n""" % (func.__name__))
freagent.write("""core.print_variables()\n""")
freagent.write("""core.print_out('\\nDATABASE RESULT: computation %d for reagent %s """
% (os.getpid(), rgt))
freagent.write("""yields electronic energy %20.12f\\n' % (electronic_energy))\n\n""")
freagent.write("""core.set_variable('NATOM', dbmol.natom())\n""")
for envv in db_tabulate:
freagent.write("""core.print_out('DATABASE RESULT: computation %d for reagent %s """
% (os.getpid(), rgt))
freagent.write("""yields variable value %20.12f for variable %s\\n' % (core.get_variable(""")
freagent.write("""'%s'), '%s'))\n""" % (envv.upper(), envv.upper()))
elif db_mode == 'reap':
ERGT[rgt] = 0.0
for envv in db_tabulate:
VRGT[rgt][envv.upper()] = 0.0
exec(banners)
exec(actives)
try:
freagent = open('%s.out' % (rgt), 'r')
except IOError:
core.print_out('Warning: Output file \'%s.out\' not found.\n' % (rgt))
core.print_out(' Database summary will have 0.0 and **** in its place.\n')
else:
while 1:
line = freagent.readline()
if not line:
if ERGT[rgt] == 0.0:
core.print_out('Warning: Output file \'%s.out\' has no DATABASE RESULT line.\n' % (rgt))
core.print_out(' Database summary will have 0.0 and **** in its place.\n')
break
s = line.split()
if (len(s) != 0) and (s[0:3] == ['DATABASE', 'RESULT:', 'computation']):
if int(s[3]) != db_linkage:
raise ValidationError('Output file \'%s.out\' has linkage %s incompatible with master.in linkage %s.'
% (rgt, str(s[3]), str(db_linkage)))
if s[6] != rgt:
raise ValidationError('Output file \'%s.out\' has nominal affiliation %s incompatible with reagent %s.'
% (rgt, s[6], rgt))
if (s[8:10] == ['electronic', 'energy']):
ERGT[rgt] = float(s[10])
core.print_out('DATABASE RESULT: electronic energy = %20.12f\n' % (ERGT[rgt]))
elif (s[8:10] == ['variable', 'value']):
for envv in db_tabulate:
envv = envv.upper()
if (s[13:] == envv.split()):
VRGT[rgt][envv] = float(s[10])
core.print_out('DATABASE RESULT: variable %s value = %20.12f\n' % (envv, VRGT[rgt][envv]))
freagent.close()
# end sow after writing files
if db_mode == 'sow':
return 0.0
# Reap all the necessary reaction computations
core.print_out("\n")
p4util.banner(("Database %s Results" % (db_name)))
core.print_out("\n")
maxactv = []
for rxn in HRXN:
maxactv.append(len(ACTV[dbse + '-' + str(rxn)]))
maxrgt = max(maxactv)
table_delimit = '-' * (62 + 20 * maxrgt)
tables = ''
# find any reactions that are incomplete
FAIL = collections.defaultdict(int)
for rxn in HRXN:
db_rxn = dbse + '-' + str(rxn)
for i in range(len(ACTV[db_rxn])):
if abs(ERGT[ACTV[db_rxn][i]]) < 1.0e-12:
FAIL[rxn] = 1
# tabulate requested process::environment variables
tables += """ For each VARIABLE requested by tabulate, a 'Reaction Value' will be formed from\n"""
tables += """ 'Reagent' values according to weightings 'Wt', as for the REQUESTED ENERGY below.\n"""
tables += """ Depending on the nature of the variable, this may or may not make any physical sense.\n"""
for rxn in HRXN:
db_rxn = dbse + '-' + str(rxn)
VRXN[db_rxn] = {}
for envv in db_tabulate:
envv = envv.upper()
tables += """\n ==> %s <==\n\n""" % (envv.title())
tables += _tblhead(maxrgt, table_delimit, 2)
for rxn in HRXN:
db_rxn = dbse + '-' + str(rxn)
if FAIL[rxn]:
tables += """\n%23s %8s %8s %8s %8s""" % (db_rxn, '', '****', '', '')
for i in range(len(ACTV[db_rxn])):
tables += """ %16.8f %2.0f""" % (VRGT[ACTV[db_rxn][i]][envv], RXNM[db_rxn][ACTV[db_rxn][i]])
else:
VRXN[db_rxn][envv] = 0.0
for i in range(len(ACTV[db_rxn])):
VRXN[db_rxn][envv] += VRGT[ACTV[db_rxn][i]][envv] * RXNM[db_rxn][ACTV[db_rxn][i]]
tables += """\n%23s %16.8f """ % (db_rxn, VRXN[db_rxn][envv])
for i in range(len(ACTV[db_rxn])):
tables += """ %16.8f %2.0f""" % (VRGT[ACTV[db_rxn][i]][envv], RXNM[db_rxn][ACTV[db_rxn][i]])
tables += """\n %s\n""" % (table_delimit)
# tabulate primary requested energy variable with statistics
count_rxn = 0
minDerror = 100000.0
maxDerror = 0.0
MSDerror = 0.0
MADerror = 0.0
RMSDerror = 0.0
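    # Error bookkeeping for the summary table below: for each complete reaction,
    #   error = (computed reaction energy, kcal/mol) - (reference BIND value, kcal/mol)
    # and over the N complete reactions:
    #   MSD  = sum(error) / N              (mean signed deviation)
    #   MAD  = sum(|error|) / N            (mean absolute deviation)
    #   RMSD = sqrt(sum(error**2) / N)     (root-mean-square deviation)
    # minDerror/maxDerror track the smallest- and largest-magnitude signed errors.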
tables += """\n ==> %s <==\n\n""" % ('Requested Energy')
tables += _tblhead(maxrgt, table_delimit, 1)
for rxn in HRXN:
db_rxn = dbse + '-' + str(rxn)
if FAIL[rxn]:
tables += """\n%23s %8.4f %8s %10s %10s""" % (db_rxn, BIND[db_rxn], '****', '****', '****')
for i in range(len(ACTV[db_rxn])):
tables += """ %16.8f %2.0f""" % (ERGT[ACTV[db_rxn][i]], RXNM[db_rxn][ACTV[db_rxn][i]])
else:
ERXN[db_rxn] = 0.0
for i in range(len(ACTV[db_rxn])):
ERXN[db_rxn] += ERGT[ACTV[db_rxn][i]] * RXNM[db_rxn][ACTV[db_rxn][i]]
error = p4const.psi_hartree2kcalmol * ERXN[db_rxn] - BIND[db_rxn]
tables += """\n%23s %8.4f %8.4f %10.4f %10.4f""" % (db_rxn, BIND[db_rxn], p4const.psi_hartree2kcalmol * ERXN[db_rxn],
error, error * p4const.psi_cal2J)
for i in range(len(ACTV[db_rxn])):
tables += """ %16.8f %2.0f""" % (ERGT[ACTV[db_rxn][i]], RXNM[db_rxn][ACTV[db_rxn][i]])
if abs(error) < abs(minDerror):
minDerror = error
if abs(error) > abs(maxDerror):
maxDerror = error
MSDerror += error
MADerror += abs(error)
RMSDerror += error * error
count_rxn += 1
tables += """\n %s\n""" % (table_delimit)
if count_rxn:
MSDerror /= float(count_rxn)
MADerror /= float(count_rxn)
RMSDerror = math.sqrt(RMSDerror / float(count_rxn))
tables += """%23s %19s %10.4f %10.4f\n""" % ('Minimal Dev', '', minDerror, minDerror * p4const.psi_cal2J)
tables += """%23s %19s %10.4f %10.4f\n""" % ('Maximal Dev', '', maxDerror, maxDerror * p4const.psi_cal2J)
tables += """%23s %19s %10.4f %10.4f\n""" % ('Mean Signed Dev', '', MSDerror, MSDerror * p4const.psi_cal2J)
tables += """%23s %19s %10.4f %10.4f\n""" % ('Mean Absolute Dev', '', MADerror, MADerror * p4const.psi_cal2J)
tables += """%23s %19s %10.4f %10.4f\n""" % ('RMS Dev', '', RMSDerror, RMSDerror * p4const.psi_cal2J)
tables += """ %s\n""" % (table_delimit)
core.set_variable('%s DATABASE MEAN SIGNED DEVIATION' % (db_name), MSDerror)
core.set_variable('%s DATABASE MEAN ABSOLUTE DEVIATION' % (db_name), MADerror)
core.set_variable('%s DATABASE ROOT-MEAN-SQUARE DEVIATION' % (db_name), RMSDerror)
core.print_out(tables)
finalenergy = MADerror
else:
finalenergy = 0.0
optstash.restore()
DB_RGT.clear()
DB_RGT.update(VRGT)
DB_RXN.clear()
DB_RXN.update(VRXN)
return finalenergy
def _tblhead(tbl_maxrgt, tbl_delimit, ttype):
r"""Function that prints the header for the changable-width results tables in db().
*tbl_maxrgt* is the number of reagent columns the table must plan for. *tbl_delimit*
is a string of dashes of the correct length to set off the table. *ttype* is 1 for
tables comparing the computed values to the reference or 2 for simple tabulation
and sum of the computed values.
"""
tbl_str = ''
tbl_str += """ %s""" % (tbl_delimit)
if ttype == 1:
tbl_str += """\n%23s %19s %21s""" % ('Reaction', 'Reaction Energy', 'Reaction Error')
elif ttype == 2:
tbl_str += """\n%23s %19s %17s""" % ('Reaction', 'Reaction Value', '')
for i in range(tbl_maxrgt):
tbl_str += """%20s""" % ('Reagent ' + str(i + 1))
if ttype == 1:
tbl_str += """\n%23s %8s %8s %10s %10s""" % ('', 'Ref', 'Calc', '[kcal/mol]', '[kJ/mol]')
elif ttype == 2:
tbl_str += """\n%65s""" % ('')
for i in range(tbl_maxrgt):
if ttype == 1:
tbl_str += """%20s""" % ('[Eh] Wt')
elif ttype == 2:
tbl_str += """%20s""" % ('Value Wt')
tbl_str += """\n %s""" % (tbl_delimit)
return tbl_str
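# Illustrative sketch (not part of the original driver): the deviation statistics
# tabulated by the database wrapper above reduce to the formulas below, shown for a
# hypothetical list of per-reaction errors (calc - ref) in kcal/mol. Assumes ``math``
# is imported at module level, as it already is for the RMS deviation computed above.
def _example_deviation_stats(errors=(0.8, -1.2, 0.3)):
    msd = sum(errors) / len(errors)                              # mean signed deviation
    mad = sum(abs(e) for e in errors) / len(errors)              # mean absolute deviation
    rmsd = math.sqrt(sum(e * e for e in errors) / len(errors))   # root-mean-square deviation
    return msd, mad, rmsd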
## Aliases ##
db = database
#######################
## End of Database ##
#######################
# Quickly normalize the types for both python 2 and 3
try:
unicode = unicode
except NameError:
# 'unicode' is undefined, must be Python 3
str = str
unicode = str
bytes = bytes
basestring = (str, bytes)
else:
# 'unicode' exists, must be Python 2
str = str
unicode = unicode
bytes = str
basestring = basestring
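# Illustrative check (not part of the original file): with the aliases above,
# ``basestring`` matches both text and byte strings on Python 2 and on Python 3.
assert isinstance(u"abc", basestring)
assert isinstance(b"abc", basestring)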
|
kannon92/psi4
|
psi4/driver/wrapper_database.py
|
Python
|
gpl-2.0
| 35,234
|
[
"Psi4"
] |
18e6a05533257df6a21ebeabb6699a4d0b5086b67343a4a4d7050eabe7fd2a85
|
# plotting
from matplotlib import pyplot as plt;
from matplotlib import colors
import matplotlib as mpl;
from mpl_toolkits.mplot3d import Axes3D
if "bmh" in plt.style.available: plt.style.use("bmh");
# matplotlib objects
from matplotlib import mlab;
from matplotlib import gridspec;
# scientific
import numpy as np;
import scipy as scp;
from scipy import linalg
import scipy.stats;
# table display
import pandas as pd
from IPython.display import display
# python
import random;
# warnings
import warnings
warnings.filterwarnings("ignore")
# rise config
from notebook.services.config import ConfigManager
cm = ConfigManager()
cm.update('livereveal', {
'theme': 'simple',
'start_slideshow_at': 'selected',
'transition':'fade',
'scroll': False
});
def lin_reg_classifier(means, covs, n, outliers):
"""
Least Squares for Classification.
:Parameters:
- `means`: means of multivariate normal distributions used to generate data.
- `covs`: terms of variance-covariance matrix used to determine spread of simulated data.
- `n`: number of samples.
- `outliers`: user-specified outliers to be added to the second simulated dataset.
"""
# generate data
x1, y1 = np.random.multivariate_normal(means[0], covs[0], n[0]).T
x2, y2 = np.random.multivariate_normal(means[1], covs[1], n[1]).T
# add targets
class_1 = [1]*n[0] + [0]*n[1]
class_2 = [0]*n[0] + [1]*n[1]
T = np.mat([class_1, class_2]).T
# add intercept and merge data
ones = np.ones(n[0]+n[1])
a = np.hstack((x1,x2))
b = np.hstack((y1,y2))
X = np.mat([ones, a, b]).T
# obtain weights
w_t = np.dot(T.T, np.linalg.pinv(X).T)
# obtain decision line
decision_line_int = -(w_t.item((0,0)) - w_t.item((1,0)))/(w_t.item((0,2)) - w_t.item((1,2)))
decision_line_slope = - (w_t.item((0,1)) - w_t.item((1,1)))/(w_t.item((0,2)) - w_t.item((1,2)))
# add outliers to the second set of simulated data
extract_x = []
extract_y = []
for i in outliers:
extract_x.append(i[0])
extract_y.append(i[1])
x2_out = np.hstack((x2, extract_x))
y2_out = np.hstack((y2, extract_y))
class_1_out = [1]*n[0] + [0]*n[1] + [0]*len(outliers)
class_2_out = [0]*n[0] + [1]*n[1] + [1]*len(outliers)
T_out = np.array([class_1_out, class_2_out]).T
ones_out = np.ones(n[0]+n[1]+len(outliers))
a_out = np.hstack((x1,x2_out))
b_out = np.hstack((y1,y2_out))
X_out = np.array([ones_out, a_out, b_out]).T
# obtain revised weights and decision line
w_t_out = np.dot(T_out.T, np.linalg.pinv(X_out).T)
decision_line_int_out = -(w_t_out[0][0] - w_t_out[1][0])/(w_t_out[0][2] - w_t_out[1][2])
decision_line_slope_out = - (w_t_out[0][1] - w_t_out[1][1])/(w_t_out[0][2] - w_t_out[1][2])
# plot results
x = np.linspace(np.min(a_out)-3 , np.max(a_out)+3, 100)
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=False, sharey=True)
plt.suptitle('Least Squares for Classification')
ax1.plot(x, decision_line_int+decision_line_slope*x, 'k', linewidth=2)
ax1.plot(x1, y1, 'go', x2, y2, 'bs', alpha=0.4)
ax2.plot(x, decision_line_int_out+decision_line_slope_out*x, 'k', linewidth=2)
ax2.plot(x1, y1, 'go', x2, y2, 'bs', alpha=0.4)
for i in range(len(outliers)):
ax2.plot(outliers[i][0], outliers[i][1], 'bs', alpha=0.4)
fig.set_size_inches(15, 5, forward=True)
ax1.set_xlim([np.min(a_out)-1, np.max(a_out)+1,])
ax2.set_xlim([np.min(a_out)-1, np.max(a_out)+1])
ax1.set_ylim([np.min(b_out)-1, np.max(b_out)+1,])
ax2.set_ylim([np.min(b_out)-1, np.max(b_out)+1])
ax1.set_xlabel('X1')
ax2.set_xlabel('X1')
ax1.set_ylabel('X2')
plt.show()
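# Minimal usage sketch (not part of the original lecture code); the means,
# covariances, sample counts and outlier points below are made-up values chosen
# only to illustrate the call signature of lin_reg_classifier above.
def demo_lin_reg_classifier():
    example_means = [[0, 0], [4, 4]]
    example_covs = [[[1, 0], [0, 1]], [[1, 0], [0, 1]]]
    lin_reg_classifier(example_means, example_covs, n=[50, 50],
                       outliers=[(8, -2), (9, -2.5)])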
def generate_gda(means, covs, num_samples):
num_classes = len(means);
num_samples //= num_classes;
# cheat and draw equal number of samples from each gaussian
samples = [
np.random.multivariate_normal(means[c],covs[c],num_samples).T
for c in range(num_classes)
];
return np.concatenate(samples, axis=1);
def plot_decision_contours(means, covs):
# plt
fig = plt.figure(figsize=(10,6));
ax = fig.gca();
# generate samples
data_x,data_y = generate_gda(means, covs, 1000);
ax.plot(data_x, data_y, 'x');
# dimensions
min_x, max_x = -10,10;
min_y, max_y = -10,10;
# grid
delta = 0.025
x = np.arange(min_x, max_x, delta);
y = np.arange(min_y, max_y, delta);
X, Y = np.meshgrid(x, y);
# bivariate difference of gaussians
mu1,mu2 = means;
sigma1, sigma2 = covs;
# mlab.bivariate_normal was removed from matplotlib; scipy.stats gives the same densities
pos = np.dstack((X, Y));
Z1 = scipy.stats.multivariate_normal(mean=mu1, cov=sigma1).pdf(pos);
Z2 = scipy.stats.multivariate_normal(mean=mu2, cov=sigma2).pdf(pos);
Z = Z2 - Z1;
# contour plot
ax.contour(X, Y, Z, levels=np.linspace(np.min(Z),np.max(Z),10));
cs = ax.contour(X, Y, Z, levels=[0], colors="k", linewidths=5);
plt.clabel(cs, fontsize=10, inline=1, fmt='%1.3f')
# plot settings
ax.set_xlim((min_x,max_x));
ax.set_ylim((min_y,max_y));
# ax.set_title("Gaussian Discriminant Analysis: $P(y=1 | x) - P(y=0 | x)$", fontsize=20)
ax.set_title("Countours: $P(y=1 | x) - P(y=0 | x)$", fontsize=20)
|
eecs445-f16/umich-eecs445-f16
|
lecture07_naive-bayes/Lec07.py
|
Python
|
mit
| 5,343
|
[
"Gaussian"
] |
cdf74970a6800ec942280b22df253582bcc2a2e5d4719587e724a4a3f3bf8a67
|
import random
import math
from .interval import Interval
class Note(object):
"""A single note, defined by a pitch, octave, and (optional) accidentals."""
VALID_PITCHES = ('C', 'D', 'E', 'F', 'G', 'A', 'B')
"""List of valid pitch characters."""
VALID_ACCIDENTALS = ('#', '##', 'b', 'bb', None)
"""List of valid accidental representors."""
def __init__(self, pitch, octave, accidental=None, random_instance=random.Random()):
"""Create a new Note.
Args:
pitch : str
The pitch of the note. Should be one of :attr:`~music_essentials.note.Note.VALID_PITCHES`, but can
be upper or lower case.
octave : int
The octave of the note. Should be in the range [-1, 9].
Kwargs:
accidental : str (default `None`)
The accidental to apply to the note. Should be one of :attr:`~music_essentials.note.Note.VALID_ACCIDENTALS`.
duration : float (default `None`)
The duration of the note, in terms of how many would fit into one bar in common time.
For example, a semibreve has a duration of 1; a quaver has a duration of 8.
dotted : boolean (default `False`)
If true, the duration of the note is multiplied by 1.5.
Returns:
:attr:`~music_essentials.note.Note`
A new note with the given pitch, octave, and accidental.
Raises:
`ValueError: <https://docs.python.org/2/library/exceptions.html#exceptions.ValueError>`_
If an invalid pitch, octave, or accidental is provided.
`TypeError: <https://docs.python.org/2/library/exceptions.html#exceptions.TypeError>`_
If an incorrect type of value is given for pitch, octave, or accidental.
Examples:
>>> n = Note('A', 4, '##')
>>> print(n)
A4##
>>> n = Note('d', 7)
>>> print(n)
D7
>>> n = Note('x', 6)
ValueError: Invalid pitch: x
"""
if not isinstance(pitch, str):
raise TypeError('Expected string for pitch, got: ' + str(pitch))
if pitch.upper() not in Note.VALID_PITCHES:
raise ValueError('Invalid pitch: ' + str(pitch))
try:
int(octave) # test if octave value is a number
except:
raise TypeError('Expected integer for octave, got: ' + str(octave))
if '.' in str(octave): # check that the number doesn't have a decimal place
raise TypeError('Expected integer for octave, got ' + str(octave))
if (int(octave) < -1) or (int(octave) > 9):
raise ValueError('Octave needs to be in the range [-1, 9], got: ' + str(octave))
if accidental is not None:
if accidental.lower() not in Note.VALID_ACCIDENTALS:
raise ValueError('Invalid accidental: ' + str(accidental))
self.pitch = pitch.upper()
self.octave = int(octave)
self.accidental = accidental
self.is_rest = False
self.random_instance = random_instance
if accidental is not None:
self.accidental = self.accidental.lower()
if (self.midi_note_number() < 0) or (self.midi_note_number() > 127):
raise ValueError('Invalid Note parameters \'' + str(self.pitch) + str(self.octave) + str(self.accidental) + '\', results in MIDI note number: ' + str(self.midi_note_number()))
@classmethod
def from_note_string(cls, note_string, random_instance=random.Random()):
"""Create a new Note.
Processes the note string then uses the constructor :attr:`~music_essentials.note.Note.__init__()`.
If the note string is 'r', a :attr:`~music_essentials.note.Rest` is returned.
Args:
note_string : str
A string representing the note to create. Should be in the form:
``<pitch><octave><accidental>``
The pitch of the note should be one of :attr:`~music_essentials.note.Note.VALID_PITCHES`, but can
be upper or lower case.
The octave of the note should be in the range ``[-1, 9]``.
The accidental is optional, but if used should be one of :attr:`~music_essentials.note.Note.VALID_ACCIDENTALS`.
Returns:
:attr:`~music_essentials.note.Note`
A new note with the given pitch, octave, and accidental.
Raises:
`ValueError: <https://docs.python.org/2/library/exceptions.html#exceptions.ValueError>`_
If an invalid pitch, octave, or accidental is provided.
`TypeError: <https://docs.python.org/2/library/exceptions.html#exceptions.TypeError>`_
If the provided note string is not a string.
Examples:
>>> n = Note.from_note_string('A4##')
>>> print(n)
A4##
>>> n = Note.from_note_string('d7')
>>> print(n)
D7
>>> n = Note.from_note_string('x6')
ValueError: Invalid pitch: x
"""
if not isinstance(note_string, str):
raise TypeError('Expected string for note string, got \'' + str(note_string) + '\'')
if note_string == 'r':
return Rest()
pitch = note_string[0]
octave = note_string[1]
accidental = note_string[2:]
if octave == '-':
# octave is negative - offset octave and accidental variables
octave = note_string[1:3]
accidental = note_string[3:]
if len(accidental) == 0:
accidental = None
return cls(pitch, octave, accidental, random_instance)
@classmethod
def from_midi_num(cls, midi_num, random_instance=random.Random()):
"""Create a new note.
Uses the provided MIDI number to set the note parameters.
Args:
midi_num : int
A number in the range [0, 127] representing a Note.
Returns:
:attr:`~music_essentials.note.Note`
A new note with a pitch, octave, and accidental corresponding to the
given MIDI note number.
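Examples:
>>> n = Note.from_midi_num(60)
>>> print(n)
C4
>>> n = Note.from_midi_num(61)
>>> print(n)
C4#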
"""
try:
int(midi_num) # test if the MIDI number value is a number
except:
raise TypeError('Expected integer for MIDI number, got: ' + str(midi_num))
if '.' in str(midi_num): # check that the number doesn't have a decimal place
raise TypeError('Expected integer for MIDI number, got ' + str(midi_num))
if (int(midi_num) < 0) or (int(midi_num) > 127):
raise ValueError('MIDI number needs to be in the range [0, 127], got: ' + str(midi_num))
# key = midi_num % 12; val = (pitch, accidental)
pitch_accidental_mappings = {
0 : ('C', None),
1 : ('C', '#'),
2 : ('D', None),
3 : ('D', '#'),
4 : ('E', None),
5 : ('F', None),
6 : ('F', '#'),
7 : ('G', None),
8 : ('G', '#'),
9 : ('A', None),
10 : ('A', '#'),
11 : ('B', None)
}
octave = int(math.floor(midi_num / 12) - 1)
pitch, accidental = pitch_accidental_mappings[midi_num % 12]
return cls(pitch, octave, accidental, random_instance)
@classmethod
def random_note(cls, lowest_midi_num=0, highest_midi_num=127, method='rand', chance_for_rest=0.01, random_instance=random.Random()):
"""Create and return a random Note within the MIDI note
number range [lowest_midi_num, highest_midi_num].
Args:
lowest_midi_num : int (default 0)
The lowest MIDI number allowed.
highest_midi_num : int (default 127)
The highest MIDI number allowed.
method : str (default 'rand')
The method of random selection to use.
If 'rand', a uniform distribution will be used.
If 'gauss', a gaussian distribution will be used.
chance_for_rest : float (default 0.01)
The probability of returning a Rest instead of a Note, checked before a MIDI number is drawn.
Returns:
:attr:`~music_essentials.note.Note`
A new note with a randomly selected pitch, octave, and accidental.
"""
if random_instance.random() <= chance_for_rest:
return Rest()
midi_num = -1
if method == 'rand':
midi_num = random_instance.randrange(lowest_midi_num, highest_midi_num + 1)
elif method == 'gauss':
mean = lowest_midi_num + math.floor(((highest_midi_num - lowest_midi_num) / 2))
std_dev = math.floor((mean - lowest_midi_num) / 3)
while (midi_num < lowest_midi_num) or (midi_num > highest_midi_num):
midi_num = round(random_instance.gauss(mean, std_dev))
return cls.from_midi_num(midi_num)
def midi_note_number(self):
"""Get the MIDI note number equivalent to this pitch.
Assumes that middle C corresponds to the MIDI note number 60, as
described on `Wikipedia: <https://en.wikipedia.org/wiki/Scientific_pitch_notation#Table_of_note_frequencies>`_.
Returns:
int
The MIDI note number representing this pitch.
Examples:
>>> n = Note.from_note_string('C-1')
>>> print(n.midi_note_number())
0
>>> n = Note.from_note_string('G9')
>>> print(n.midi_note_number())
127
>>> n = Note.from_note_string('B0b')
>>> print(n.midi_note_number())
22
"""
# calculate number based on octave and pitch
midi_num = self.octave * 12
midi_num += Note.VALID_PITCHES.index(self.pitch) * 2
if self.pitch not in ('C', 'D', 'E'):
midi_num -= 1
midi_num += 12
# adjust for accidentals
if self.accidental is not None:
midi_num -= self.accidental.count('b')
midi_num += self.accidental.count('#')
return midi_num
def __add__(self, other):
"""Calculate and return the note found when adding an interval to this note.
Args:
other : :attr:`~music_essentials.interval.Interval`
The interval to add to this note.
Returns:
:attr:`~music_essentials.note.Note`
The new note that comes from adding the provided interval to this note.
Raises:
`TypeError: <https://docs.python.org/2/library/exceptions.html#exceptions.TypeError>`_
If the object to add is not an :attr:`~music_essentials.interval.Interval`.
Examples:
>>> n = Note.from_note_string('C4')
>>> i = Interval.from_interval_string('M2')
>>> print(n + i)
D4
>>> n = Note.from_note_string('C4')
>>> i = Interval.from_interval_string('m14')
>>> print(n + i)
B5b
>>> n = Note.from_note_string('C4')
>>> i = Interval.from_interval_string('aug13')
>>> print(n + i)
A5#
"""
if not isinstance(other, Interval):
raise TypeError('unsupported operand type(s) for +: \'Note\' and \'' + str(other.__class__.__name__) + '\'')
# calculate new pitch
note_pitch_idx = Note.VALID_PITCHES.index(self.pitch)
pitch_diff = (other.size % 7) - 1
if (note_pitch_idx + pitch_diff) > (len(Note.VALID_PITCHES) - 1):
pitch_diff = -7 + pitch_diff
new_pitch = Note.VALID_PITCHES[note_pitch_idx + pitch_diff]
# calculate new octave
base_size = int(other.size)
octave_diff = 0
is_compound = False
while (base_size >= 8):
base_size -= 7
octave_diff += 1
is_compound = True
if Note.VALID_PITCHES.index(new_pitch) < Note.VALID_PITCHES.index(self.pitch):
octave_diff += 1
new_octave = self.octave + octave_diff
# find appropriate accidental
goal_semitone_diff = octave_diff * 12
if not is_compound and octave_diff > 0:
goal_semitone_diff -= 12
if base_size in Interval._PERFECT_INTERVALS_SEMITONES.keys():
goal_semitone_diff += Interval._PERFECT_INTERVALS_SEMITONES[base_size]
if other.interval_type == 'dim':
goal_semitone_diff -= 1
elif other.interval_type == 'aug':
goal_semitone_diff += 1
elif base_size in Interval._MAJOR_INTERVALS_SEMITONES.keys():
goal_semitone_diff += Interval._MAJOR_INTERVALS_SEMITONES[base_size]
if other.interval_type == 'dim':
goal_semitone_diff -= 2
elif other.interval_type == 'm':
goal_semitone_diff -= 1
elif other.interval_type == 'aug':
goal_semitone_diff += 1
for a in Note.VALID_ACCIDENTALS:
new_note = Note(new_pitch, new_octave, a)
diff = new_note.midi_note_number() - self.midi_note_number()
if diff == goal_semitone_diff:
return new_note
raise RuntimeError('FATAL ERROR: Could not complete note + interval operation: ' + str(self) + ' + ' + str(other))
def is_enharmonic(self, other):
"""Check if two notes are `enharmonic <https://en.wikipedia.org/wiki/Enharmonic>`_.
Args:
other : :attr:`~music_essentials.note.Note`
The note to compare this to.
Returns:
bool
True if the two notes represent the same pitch, otherwise false.
Raises:
`ValueError: <https://docs.python.org/2/library/exceptions.html#exceptions.ValueError>`_
If anything other than a :attr:`~music_essentials.note.Note` is given to compare to.
`TypeError: <https://docs.python.org/2/library/exceptions.html#exceptions.TypeError>`_
If the object to compare to is not a :attr:`~music_essentials.note.Note`.
Examples:
>>> n1 = Note('C', 4)
>>> n2 = Note('D', 4)
>>> n1.is_enharmonic(n2)
False
>>> n1 = Note('C', 4, '#')
>>> n2 = Note('D', 4, 'b')
>>> n1.is_enharmonic(n2)
True
>>> n1 = Note('F', 4)
>>> n2 = Note('E', 4, '#')
>>> n1.is_enharmonic(n2)
True
>>> n1 = Note('F', 4)
>>> n2 = Note('G', 4, 'bb')
>>> n1.is_enharmonic(n2)
True
"""
if not isinstance(other, Note) or (isinstance(other, Rest) or isinstance(self, Rest)):
raise TypeError('Can not determine whether ' + str(self) + ' and ' + str(other) + ' are enharmonic')
return self.midi_note_number() == other.midi_note_number()
def __eq__(self, other):
"""Check if this note is equal to another note.
Does not consider `enharmonic notes <https://en.wikipedia.org/wiki/Enharmonic>`_ to be equal.
Args:
other : :attr:`~music_essentials.note.Note`
The note to compare this note to.
Returns:
bool
True if the notes have the same pitch, octave, and accidentals; otherwise false.
Raises:
`TypeError: <https://docs.python.org/2/library/exceptions.html#exceptions.TypeError>`_
If the object to compare to is not a :attr:`~music_essentials.note.Note`.
Examples:
>>> n1 = Note.from_note_string('C4')
>>> n2 = Note('C', 4)
>>> n1 == n2
True
>>> n1 = Note.from_note_string('C4#')
>>> n2 = Note.from_note_string('D4b')
>>> n1 == n2
False
"""
if not isinstance(other, Note) or (isinstance(other, Rest) or isinstance(self, Rest)):
raise TypeError('Can not check equality between Note and \'' + str(other) + '\'')
return (self.pitch == other.pitch) and (self.octave == other.octave) and (self.accidental == other.accidental)
def __ne__(self, other):
"""Check if this note is note equal to another note.
Does not consider `enharmonic notes <https://en.wikipedia.org/wiki/Enharmonic>`_ to be equal.
Args:
other : :attr:`~music_essentials.note.Note`
The note to compare this note to.
Returns:
bool
True if the notes do not have the same pitch, octave, and accidentals; otherwise false.
Raises:
`TypeError: <https://docs.python.org/2/library/exceptions.html#exceptions.TypeError>`_
If the object to compare to is not a :attr:`~music_essentials.note.Note`.
Examples:
>>> n1 = Note.from_note_string('C4')
>>> n2 = Note('C', 4)
>>> n1 != n2
False
>>> n1 = Note.from_note_string('C4#')
>>> n2 = Note.from_note_string('D4b')
>>> n1 != n2
True
"""
return not self.__eq__(other)
def __lt__(self, other):
"""Check if this note is less than another note.
Does not consider `enharmonic notes <https://en.wikipedia.org/wiki/Enharmonic>`_ to be equal.
If two notes are enharmonic, the note with the lower written pitch is considered lower.
Args:
other : :attr:`~music_essentials.note.Note`
The note to compare this note to.
Returns:
bool
True if this note is less than the other, otherwise false.
Raises:
`TypeError: <https://docs.python.org/2/library/exceptions.html#exceptions.TypeError>`_
If the object to compare to is not a :attr:`~music_essentials.note.Note`.
Examples:
>>> n1 = Note.from_note_string('C4')
>>> n2 = Note('C', 4)
>>> n1 < n2
False
>>> n1 = Note.from_note_string('D4')
>>> n2 = Note.from_note_string('G4')
>>> n1 < n2
True
>>> n2 < n1
False
"""
if not isinstance(other, Note) or (isinstance(other, Rest) or isinstance(self, Rest)):
raise TypeError('Can not check equality between Note and \'' + str(other) + '\'')
if self.__eq__(other):
return False
if self.is_enharmonic(other):
if self.octave != other.octave:
return self.octave < other.octave
return Note.VALID_PITCHES.index(self.pitch) < Note.VALID_PITCHES.index(other.pitch)
return self.midi_note_number() < other.midi_note_number()
def __gt__(self, other):
"""Check if this note is greater than another note.
Does not consider `enharmonic notes <https://en.wikipedia.org/wiki/Enharmonic>`_ to be equal.
If two notes are enharmonic, the note with the higher written pitch is considered higher.
Args:
other : :attr:`~music_essentials.note.Note`
The note to compare this note to.
Returns:
bool
True if this note is greater than the other, otherwise false.
Raises:
`TypeError: <https://docs.python.org/2/library/exceptions.html#exceptions.TypeError>`_
If the object to compare to is not a :attr:`~music_essentials.note.Note`.
Examples:
>>> n1 = Note.from_note_string('C4')
>>> n2 = Note('C', 4)
>>> n1 > n2
False
>>> n1 = Note.from_note_string('D4')
>>> n2 = Note.from_note_string('G4')
>>> n1 > n2
False
>>> n2 > n1
True
"""
if not isinstance(other, Note) or (isinstance(other, Rest) or isinstance(self, Rest)):
raise TypeError('Can not check equality between Note and \'' + str(other) + '\'')
if self.__eq__(other):
return False
if self.is_enharmonic(other):
if self.octave != other.octave:
return self.octave > other.octave
return Note.VALID_PITCHES.index(self.pitch) > Note.VALID_PITCHES.index(other.pitch)
return self.midi_note_number() > other.midi_note_number()
def __le__(self, other):
"""Check if this note is less than or equal to another note.
Args:
other : :attr:`~music_essentials.note.Note`
The note to compare this note to.
Returns:
bool
True if this note is less than or equal to the other, otherwise false.
Raises:
`TypeError: <https://docs.python.org/2/library/exceptions.html#exceptions.TypeError>`_
If the object to compare to is not a :attr:`~music_essentials.note.Note`.
Examples:
>>> n1 = Note.from_note_string('C4')
>>> n2 = Note('C', 4)
>>> n1 <= n2
True
>>> n1 = Note.from_note_string('D4')
>>> n2 = Note.from_note_string('G4')
>>> n1 <= n2
True
>>> n2 <= n1
False
"""
return not self.__gt__(other)
def __ge__(self, other):
"""Check if this note is greater than or equal to another note.
Args:
other : :attr:`~music_essentials.note.Note`
The note to compare this note to.
Returns:
bool
True if this note is greater than or equal to the other, otherwise false.
Raises:
`TypeError: <https://docs.python.org/2/library/exceptions.html#exceptions.TypeError>`_
If the object to compare to is not a :attr:`~music_essentials.note.Note`.
Examples:
>>> n1 = Note.from_note_string('C4')
>>> n2 = Note('C', 4)
>>> n1 >= n2
True
>>> n1 = Note.from_note_string('D4')
>>> n2 = Note.from_note_string('G4')
>>> n1 >= n2
False
>>> n2 >= n1
True
"""
return not self.__lt__(other)
def __str__(self):
"""Create a string representation of the note in the form ``<pitch><octave><accidental>``.
Can be used as a note string argument for :attr:`~music_essentials.note.Note.from_note_string()`.
Examples:
>>> n = Note('B', 9, '#')
>>> print(n)
B9#
>>> n = Note('g', 7)
>>> print(n)
G7
>>> n = Note('D', 3, 'B')
>>> print(n)
D3b
"""
s = self.pitch + str(self.octave)
if self.accidental is not None:
s += self.accidental
return s
class Rest(Note):
"""A single note, defined as a period of silence."""
def __init__(self):
"""Create a rest note. Sets the note's pitch, octave, and accidental as `None`."""
self.pitch = None
self.octave = None
self.accidental = None
self.is_rest = True
def midi_note_number(self):
"""Override the MIDI note number method from the parent class.
Returns -1 to indicate that a rest has no MIDI note number.
"""
return -1
def __str__(self):
"""Create a string representation of the rest.
Examples:
>>> r = Rest()
>>> print(r)
r
"""
return 'r'
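# Minimal usage sketch (not part of the original module), kept as an unused helper so
# that importing the module stays side-effect free; it only exercises classes defined
# above and facts they guarantee (for example, middle C maps to MIDI number 60).
def _demo_notes():
    middle_c = Note('C', 4)
    d_flat = Note.from_note_string('D4b')
    assert middle_c.midi_note_number() == 60
    assert Note('C', 4, '#').is_enharmonic(d_flat)
    assert middle_c < Note.from_midi_num(62)   # MIDI 62 is D4
    return middle_c, d_flat, Rest()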
|
charlottepierce/music_essentials
|
music_essentials/note.py
|
Python
|
mit
| 23,672
|
[
"Gaussian"
] |
0567a0da912e52ad7a0d7537e0941f2b85e175a3ec5c9e00275befdd20256429
|
# Author:
# Tests for the yambopy library
#
#
import unittest
import sys
import os
import argparse
import subprocess
import filecmp
from yamboparser import YamboFile, YamboFolder
folder = os.path.dirname(os.path.realpath(__file__))+'/testdata/'
class TestFolder(unittest.TestCase):
def test_folder_list(self):
fold = YamboFolder(folder+'t2_parse_qps/')
assert len (fold.yambofiles)==7
class TestFileT1(unittest.TestCase):
def test_qp_parsing(self):
fl = YamboFile('o-GW_run.10.720.qp',folder+'t1_errors_warnings')
assert len(fl.data.keys()) == 4 # more intelligent test needed
assert fl.type == 'output_gw'
def test_l_parsing(self):
fl = YamboFile('l-GW_run.8.480_em1d_ppa_HF_and_locXC_gw0_rim_cut_CPU_1',folder+'t1_errors_warnings')
assert not fl.data
assert len(fl.warnings) ==1
assert len(fl.errors) == 1
assert fl.type == 'log'
def test_r_parsing(self):
fl = YamboFile('r-GW_run.8.480_em1d_ppa_HF_and_locXC_gw0_rim_cut',folder+'t1_errors_warnings')
assert fl.type=='report'
assert fl.kpoints
assert not fl.data
class TestFileT2(unittest.TestCase):
def test_qp_parsing(self):
fl = YamboFile('o-yambo.qp',folder+'t2_parse_qps')
assert fl.type == 'output_gw'
def test_l_parsing(self):
fl = YamboFile('l-yambo_em1d_HF_and_locXC_gw0',folder+'t2_parse_qps')
assert fl.type == 'log'
def test_r_parsing(self):
fl = YamboFile('r-yambo_em1d_life',folder+'t2_parse_qps')
assert fl.type=='report'
fl = YamboFile('r-yambo_em1d_HF_and_locXC_gw0',folder+'t2_parse_qps')
assert fl.type=='report'
def test_ndb_qp_parsing(self):
fl = YamboFile('ndb.QP',folder+'t3_parse_netcdf')
print "fl type", fl.type
assert fl.type=='netcdf_gw'
def test_ndb_hf_parsing(self):
fl = YamboFile('ndb.HF_and_locXC',folder+'t3_parse_netcdf')
print "fl type", fl.type
assert fl.type=='netcdf_hf'
if __name__ == "__main__":
#t1_errors_warnings
suite = unittest.TestLoader().loadTestsFromTestCase(TestFileT1)
unittest.TextTestRunner(verbosity=2).run(suite)
#t2_parse_qps
suite = unittest.TestLoader().loadTestsFromTestCase(TestFileT2)
unittest.TextTestRunner(verbosity=2).run(suite)
|
henriquemiranda/yambopy
|
tests/parser/test_parser.py
|
Python
|
bsd-3-clause
| 2,368
|
[
"Yambo"
] |
52a37cc7cba2bfeae61a653420c3b1725f2561aa9c437e1946778d7613b44d7c
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""The mdtraj package contains tools for loading and saving molecular dynamics
trajectories in a variety of formats, including Gromacs XTC & TRR, CHARMM/NAMD
DCD, AMBER BINPOS, PDB, and HDF5.
"""
from mdtraj.formats.registry import _FormatRegistry
from mdtraj.formats.xtc import load_xtc
from mdtraj.formats.trr import load_trr
from mdtraj.formats.hdf5 import load_hdf5
from mdtraj.formats.lh5 import load_lh5
from mdtraj.formats.netcdf import load_netcdf
from mdtraj.formats.mdcrd import load_mdcrd
from mdtraj.formats.dcd import load_dcd
from mdtraj.formats.binpos import load_binpos
from mdtraj.formats.pdb import load_pdb
from mdtraj.formats.arc import load_arc
from mdtraj.formats.openmmxml import load_xml
from mdtraj.formats.prmtop import load_prmtop
from mdtraj.formats.psf import load_psf
from mdtraj.formats.mol2 import load_mol2
from mdtraj.formats.amberrst import load_restrt, load_ncrestrt
from mdtraj.formats.lammpstrj import load_lammpstrj
from mdtraj.formats.dtr import load_dtr
from mdtraj.core import element
from mdtraj._rmsd import rmsd
from mdtraj._lprmsd import lprmsd
from mdtraj.core.topology import Topology
from mdtraj.geometry import *
from mdtraj.core.trajectory import *
from mdtraj.nmr import *
import mdtraj.reporters
def test(label='full', verbose=2):
"""Run tests for mdtraj using nose.
Parameters
----------
label : {'fast', 'full'}
Identifies the tests to run. The fast tests take about 10 seconds,
and the full test suite takes about two minutes (as of this writing).
verbose : int, optional
Verbosity value for test outputs, in the range 1-10. Default is 2.
"""
import mdtraj
from mdtraj.testing.nosetester import MDTrajTester
tester = MDTrajTester(mdtraj)
return tester.test(label=label, verbose=verbose, extra_argv=('--exe',))
# prevent nose from discovering this function; otherwise nose would run
# the test suite in an infinite loop
test.__test__ = False
def capi():
import os
import sys
module_path = sys.modules['mdtraj'].__path__[0]
return {
'lib_dir': os.path.join(module_path, 'core', 'lib'),
'include_dir': os.path.join(module_path, 'core', 'lib'),
}
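# Minimal usage sketch (not part of the original package); 'traj.xtc' and 'top.pdb'
# are hypothetical file names used only to illustrate the loading and analysis
# entry points re-exported above.
def _example_usage():
    traj = load('traj.xtc', top='top.pdb')   # `load` is re-exported from mdtraj.core.trajectory
    print(traj.n_frames, traj.n_atoms)
    print(rmsd(traj, traj, frame=0))         # RMSD of every frame to frame 0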
|
kyleabeauchamp/mdtraj
|
mdtraj/__init__.py
|
Python
|
lgpl-2.1
| 3,224
|
[
"Amber",
"CHARMM",
"Gromacs",
"MDTraj",
"NAMD",
"NetCDF"
] |
d85e5ffccc096c2af54af8dd223312e32df95b1b0c6781495d7c54f292de28ae
|