| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12-1.05M | stringlengths 5-86 | stringlengths 4-191 | stringclasses 1 value | stringclasses 15 values | int32 12-1.05M | listlengths 1-23 | stringlengths 64-64 |
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''Density expansion on plane waves'''
import copy
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc import tools
from pyscf.pbc.gto import pseudo, estimate_ke_cutoff, error_for_ke_cutoff
from pyscf.pbc.df import ft_ao
from pyscf.pbc.df import fft_ao2mo
from pyscf.pbc.df.aft import _sub_df_jk_
from pyscf.pbc.lib.kpts_helper import is_zero, gamma_point
from pyscf import __config__
KE_SCALING = getattr(__config__, 'pbc_df_aft_ke_cutoff_scaling', 0.75)
def get_nuc(mydf, kpts=None):
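    # Build the nuclear attraction potential in reciprocal space from the
    # structure factors and the Coulomb kernel, inverse-FFT it onto the uniform
    # real-space grid, then contract it with the AO values block by block.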
if kpts is None:
kpts_lst = numpy.zeros((1,3))
else:
kpts_lst = numpy.reshape(kpts, (-1,3))
cell = mydf.cell
mesh = mydf.mesh
charge = -cell.atom_charges()
Gv = cell.get_Gv(mesh)
SI = cell.get_SI(Gv)
rhoG = numpy.dot(charge, SI)
coulG = tools.get_coulG(cell, mesh=mesh, Gv=Gv)
vneG = rhoG * coulG
vneR = tools.ifft(vneG, mesh).real
vne = [0] * len(kpts_lst)
for ao_ks_etc, p0, p1 in mydf.aoR_loop(mydf.grids, kpts_lst):
ao_ks = ao_ks_etc[0]
for k, ao in enumerate(ao_ks):
vne[k] += lib.dot(ao.T.conj()*vneR[p0:p1], ao)
ao = ao_ks = None
if kpts is None or numpy.shape(kpts) == (3,):
vne = vne[0]
return numpy.asarray(vne)
def get_pp(mydf, kpts=None):
    '''Get the periodic pseudopotential nuc-el AO matrix, with G=0 removed.
    '''
from pyscf import gto
cell = mydf.cell
if kpts is None:
kpts_lst = numpy.zeros((1,3))
else:
kpts_lst = numpy.reshape(kpts, (-1,3))
mesh = mydf.mesh
SI = cell.get_SI()
Gv = cell.get_Gv(mesh)
vpplocG = pseudo.get_vlocG(cell, Gv)
vpplocG = -numpy.einsum('ij,ij->j', SI, vpplocG)
ngrids = len(vpplocG)
# vpploc evaluated in real-space
vpplocR = tools.ifft(vpplocG, mesh).real
vpp = [0] * len(kpts_lst)
for ao_ks_etc, p0, p1 in mydf.aoR_loop(mydf.grids, kpts_lst):
ao_ks = ao_ks_etc[0]
for k, ao in enumerate(ao_ks):
vpp[k] += lib.dot(ao.T.conj()*vpplocR[p0:p1], ao)
ao = ao_ks = None
# vppnonloc evaluated in reciprocal space
fakemol = gto.Mole()
fakemol._atm = numpy.zeros((1,gto.ATM_SLOTS), dtype=numpy.int32)
fakemol._bas = numpy.zeros((1,gto.BAS_SLOTS), dtype=numpy.int32)
ptr = gto.PTR_ENV_START
fakemol._env = numpy.zeros(ptr+10)
fakemol._bas[0,gto.NPRIM_OF ] = 1
fakemol._bas[0,gto.NCTR_OF ] = 1
fakemol._bas[0,gto.PTR_EXP ] = ptr+3
fakemol._bas[0,gto.PTR_COEFF] = ptr+4
    # buf for SPG_lmi up to l=0..3 and nl=3
buf = numpy.empty((48,ngrids), dtype=numpy.complex128)
def vppnl_by_k(kpt):
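        # Nonlocal part: for each atom and angular momentum l, evaluate the
        # separable pseudopotential projectors <p_l|k+G> with the one-shell
        # fakemol, attach the structure factor, and contract the projected AOs
        # with the hl coefficient matrix to accumulate the nonlocal matrix.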
Gk = Gv + kpt
G_rad = lib.norm(Gk, axis=1)
aokG = ft_ao.ft_ao(cell, Gv, kpt=kpt) * (1/cell.vol)**.5
vppnl = 0
for ia in range(cell.natm):
symb = cell.atom_symbol(ia)
if symb not in cell._pseudo:
continue
pp = cell._pseudo[symb]
p1 = 0
for l, proj in enumerate(pp[5:]):
rl, nl, hl = proj
if nl > 0:
fakemol._bas[0,gto.ANG_OF] = l
fakemol._env[ptr+3] = .5*rl**2
fakemol._env[ptr+4] = rl**(l+1.5)*numpy.pi**1.25
pYlm_part = fakemol.eval_gto('GTOval', Gk)
p0, p1 = p1, p1+nl*(l*2+1)
# pYlm is real, SI[ia] is complex
pYlm = numpy.ndarray((nl,l*2+1,ngrids), dtype=numpy.complex128, buffer=buf[p0:p1])
for k in range(nl):
qkl = pseudo.pp._qli(G_rad*rl, l, k)
pYlm[k] = pYlm_part.T * qkl
#:SPG_lmi = numpy.einsum('g,nmg->nmg', SI[ia].conj(), pYlm)
#:SPG_lm_aoG = numpy.einsum('nmg,gp->nmp', SPG_lmi, aokG)
#:tmp = numpy.einsum('ij,jmp->imp', hl, SPG_lm_aoG)
#:vppnl += numpy.einsum('imp,imq->pq', SPG_lm_aoG.conj(), tmp)
if p1 > 0:
SPG_lmi = buf[:p1]
SPG_lmi *= SI[ia].conj()
SPG_lm_aoGs = lib.zdot(SPG_lmi, aokG)
p1 = 0
for l, proj in enumerate(pp[5:]):
rl, nl, hl = proj
if nl > 0:
p0, p1 = p1, p1+nl*(l*2+1)
hl = numpy.asarray(hl)
SPG_lm_aoG = SPG_lm_aoGs[p0:p1].reshape(nl,l*2+1,-1)
tmp = numpy.einsum('ij,jmp->imp', hl, SPG_lm_aoG)
vppnl += numpy.einsum('imp,imq->pq', SPG_lm_aoG.conj(), tmp)
return vppnl * (1./cell.vol)
for k, kpt in enumerate(kpts_lst):
vppnl = vppnl_by_k(kpt)
if gamma_point(kpt):
vpp[k] = vpp[k].real + vppnl.real
else:
vpp[k] += vppnl
if kpts is None or numpy.shape(kpts) == (3,):
vpp = vpp[0]
return numpy.asarray(vpp)
class FFTDF(lib.StreamObject):
'''Density expansion on plane waves
'''
def __init__(self, cell, kpts=numpy.zeros((1,3))):
from pyscf.pbc.dft import gen_grid
from pyscf.pbc.dft import numint
self.cell = cell
self.stdout = cell.stdout
self.verbose = cell.verbose
self.max_memory = cell.max_memory
self.kpts = kpts
self.grids = gen_grid.UniformGrids(cell)
# to mimic molecular DF object
self.blockdim = getattr(__config__, 'pbc_df_df_DF_blockdim', 240)
# The following attributes are not input options.
        # self.exxdiv has no effect. It is set in the get_k_kpts function to
# mimic the KRHF/KUHF object in the call to tools.get_coulG.
self.exxdiv = None
self._numint = numint.KNumInt()
self._rsh_df = {} # Range separated Coulomb DF objects
self._keys = set(self.__dict__.keys())
@property
def mesh(self):
return self.grids.mesh
@mesh.setter
def mesh(self, mesh):
self.grids.mesh = mesh
def reset(self, cell=None):
if cell is not None:
self.cell = cell
self.grids.reset(cell)
self._rsh_df = {}
return self
def dump_flags(self, verbose=None):
logger.info(self, '\n')
logger.info(self, '******** %s ********', self.__class__)
logger.info(self, 'mesh = %s (%d PWs)', self.mesh, numpy.prod(self.mesh))
logger.info(self, 'len(kpts) = %d', len(self.kpts))
logger.debug1(self, ' kpts = %s', self.kpts)
return self
def check_sanity(self):
lib.StreamObject.check_sanity(self)
cell = self.cell
if (cell.dimension < 2 or
(cell.dimension == 2 and cell.low_dim_ft_type == 'inf_vacuum')):
            raise RuntimeError('FFTDF method does not support 0D/1D low-dimensional '
                               'PBC systems. The DF, MDF or AFTDF methods should '
                               'be used instead.\nSee also examples/pbc/31-low_dimensional_pbc.py')
if not cell.has_ecp():
            logger.warn(self, 'FFTDF integrals were requested for an all-electron '
                        'calculation. This often causes large errors.\n'
                        'The recommended methods are DF or MDF. In an SCF '
                        'calculation they can be enabled with\n'
                        ' mf = mf.density_fit()\nor\n'
                        ' mf = mf.mix_density_fit()')
if cell.ke_cutoff is None:
ke_cutoff = tools.mesh_to_cutoff(cell.lattice_vectors(), self.mesh).min()
else:
ke_cutoff = numpy.min(cell.ke_cutoff)
ke_guess = estimate_ke_cutoff(cell, cell.precision)
if ke_cutoff < ke_guess * KE_SCALING:
mesh_guess = tools.cutoff_to_mesh(cell.lattice_vectors(), ke_guess)
logger.warn(self, 'ke_cutoff/mesh (%g / %s) is not enough for FFTDF '
'to get integral accuracy %g.\nCoulomb integral error '
'is ~ %.2g Eh.\nRecommended ke_cutoff/mesh are %g / %s.',
ke_cutoff, self.mesh, cell.precision,
error_for_ke_cutoff(cell, ke_cutoff), ke_guess, mesh_guess)
return self
def aoR_loop(self, grids=None, kpts=None, deriv=0):
if grids is None:
grids = self.grids
cell = self.cell
else:
cell = grids.cell
if grids.non0tab is None:
grids.build(with_non0tab=True)
if kpts is None: kpts = self.kpts
kpts = numpy.asarray(kpts)
if (cell.dimension < 2 or
(cell.dimension == 2 and cell.low_dim_ft_type == 'inf_vacuum')):
            raise RuntimeError('FFTDF method does not support low-dimensional '
                               'PBC systems. The DF, MDF or AFTDF methods should '
                               'be used instead.\nSee also examples/pbc/31-low_dimensional_pbc.py')
max_memory = max(2000, self.max_memory-lib.current_memory()[0])
ni = self._numint
nao = cell.nao_nr()
p1 = 0
for ao_k1_etc in ni.block_loop(cell, grids, nao, deriv, kpts,
max_memory=max_memory):
coords = ao_k1_etc[4]
p0, p1 = p1, p1 + coords.shape[0]
yield ao_k1_etc, p0, p1
get_pp = get_pp
get_nuc = get_nuc
# Note: Special exxdiv by default should not be used for an arbitrary
# input density matrix. When the df object was used with the molecular
# post-HF code, get_jk was often called with an incomplete DM (e.g. the
# core DM in CASCI). An SCF level exxdiv treatment is inadequate for
# post-HF methods.
def get_jk(self, dm, hermi=1, kpts=None, kpts_band=None,
with_j=True, with_k=True, omega=None, exxdiv=None):
from pyscf.pbc.df import fft_jk
if omega is not None: # J/K for RSH functionals
return _sub_df_jk_(self, dm, hermi, kpts, kpts_band,
with_j, with_k, omega, exxdiv)
if kpts is None:
if numpy.all(self.kpts == 0): # Gamma-point J/K by default
kpts = numpy.zeros(3)
else:
kpts = self.kpts
else:
kpts = numpy.asarray(kpts)
vj = vk = None
if kpts.shape == (3,):
vj, vk = fft_jk.get_jk(self, dm, hermi, kpts, kpts_band,
with_j, with_k, exxdiv)
else:
if with_k:
vk = fft_jk.get_k_kpts(self, dm, hermi, kpts, kpts_band, exxdiv)
if with_j:
vj = fft_jk.get_j_kpts(self, dm, hermi, kpts, kpts_band)
return vj, vk
get_eri = get_ao_eri = fft_ao2mo.get_eri
ao2mo = get_mo_eri = fft_ao2mo.general
ao2mo_7d = fft_ao2mo.ao2mo_7d
get_ao_pairs_G = get_ao_pairs = fft_ao2mo.get_ao_pairs_G
get_mo_pairs_G = get_mo_pairs = fft_ao2mo.get_mo_pairs_G
def update_mf(self, mf):
mf = copy.copy(mf)
mf.with_df = self
return mf
################################################################################
# This function mimics the molecular DF.loop function so that the PBC
# gamma-point DF object can be used in molecular code.
def loop(self, blksize=None):
if self.cell.dimension < 3:
            raise RuntimeError('ERIs of 1D and 2D systems are not positive '
                               'definite. Current API only supports positive '
                               'definite ERIs.')
if blksize is None:
blksize = self.blockdim
kpts0 = numpy.zeros((2,3))
coulG = tools.get_coulG(self.cell, numpy.zeros(3), mesh=self.mesh)
ngrids = len(coulG)
ao_pairs_G = self.get_ao_pairs_G(kpts0, compact=True)
ao_pairs_G *= numpy.sqrt(coulG*(self.cell.vol/ngrids**2)).reshape(-1,1)
Lpq = numpy.empty((self.blockdim, ao_pairs_G.shape[1]))
for p0, p1 in lib.prange(0, ngrids, blksize):
Lpq[:p1-p0] = ao_pairs_G[p0:p1].real
yield Lpq[:p1-p0]
Lpq[:p1-p0] = ao_pairs_G[p0:p1].imag
yield Lpq[:p1-p0]
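    # The real and imaginary parts of each G-vector block are yielded as
    # separate rows, which is why get_naoaux below reports 2*ngrids auxiliary
    # functions.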
def get_naoaux(self):
mesh = numpy.asarray(self.mesh)
ngrids = numpy.prod(mesh)
return ngrids * 2
if __name__ == '__main__':
from pyscf.pbc import gto as pbcgto
cell = pbcgto.Cell()
cell.verbose = 0
cell.atom = 'C 0 0 0; C 1 1 1; C 0 2 2; C 2 0 2'
cell.a = numpy.diag([4, 4, 4])
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.mesh = [20]*3
cell.build()
k = numpy.ones(3)*.25
df = FFTDF(cell)
v1 = get_pp(df, k)
print(lib.finger(v1) - (1.8428463642697195-0.10478381725330854j))
v1 = get_nuc(df, k)
print(lib.finger(v1) - (2.3454744614944714-0.12528407127454744j))
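    # A minimal usage sketch (assuming the standard pyscf.pbc.scf.RHF
    # interface): an FFTDF object is normally attached to a mean-field
    # calculation through its with_df attribute.
    from pyscf.pbc import scf
    mf = scf.RHF(cell)
    mf.with_df = df
    mf.with_df.dump_flags()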
| gkc1000/pyscf | pyscf/pbc/df/fft.py | Python | apache-2.0 | 13,481 | ["PySCF"] | bef94cdb3e97e3fe5dd899ba5d698e379c17604bb8abd19767e42a5e19bf59b3 |
def load_microbial_data( GALAXY_DATA_INDEX_DIR, sep='\t' ):
# FIXME: this function is duplicated in the DynamicOptions class. It is used here only to
# set data.name in exec_after_process().
microbe_info= {}
orgs = {}
filename = "%s/microbial_data.loc" % GALAXY_DATA_INDEX_DIR
for i, line in enumerate( open( filename ) ):
line = line.rstrip( '\r\n' )
if line and not line.startswith( '#' ):
fields = line.split( sep )
#read each line, if not enough fields, go to next line
try:
info_type = fields.pop(0)
if info_type.upper() == "ORG":
#ORG 12521 Clostridium perfringens SM101 bacteria Firmicutes CP000312,CP000313,CP000314,CP000315 http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=genomeprj&cmd=Retrieve&dopt=Overview&list_uids=12521
org_num = fields.pop(0)
name = fields.pop(0)
kingdom = fields.pop(0)
group = fields.pop(0)
chromosomes = fields.pop(0)
info_url = fields.pop(0)
link_site = fields.pop(0)
if org_num not in orgs:
orgs[ org_num ] = {}
orgs[ org_num ][ 'chrs' ] = {}
orgs[ org_num ][ 'name' ] = name
orgs[ org_num ][ 'kingdom' ] = kingdom
orgs[ org_num ][ 'group' ] = group
orgs[ org_num ][ 'chromosomes' ] = chromosomes
orgs[ org_num ][ 'info_url' ] = info_url
orgs[ org_num ][ 'link_site' ] = link_site
elif info_type.upper() == "CHR":
#CHR 12521 CP000315 Clostridium perfringens phage phiSM101, complete genome 38092 110684521 CP000315.1
org_num = fields.pop(0)
chr_acc = fields.pop(0)
name = fields.pop(0)
length = fields.pop(0)
gi = fields.pop(0)
gb = fields.pop(0)
info_url = fields.pop(0)
chr = {}
chr[ 'name' ] = name
chr[ 'length' ] = length
chr[ 'gi' ] = gi
chr[ 'gb' ] = gb
chr[ 'info_url' ] = info_url
if org_num not in orgs:
orgs[ org_num ] = {}
orgs[ org_num ][ 'chrs' ] = {}
orgs[ org_num ][ 'chrs' ][ chr_acc ] = chr
elif info_type.upper() == "DATA":
#DATA 12521_12521_CDS 12521 CP000315 CDS bed /home/djb396/alignments/playground/bacteria/12521/CP000315.CDS.bed
uid = fields.pop(0)
org_num = fields.pop(0)
chr_acc = fields.pop(0)
feature = fields.pop(0)
filetype = fields.pop(0)
path = fields.pop(0)
data = {}
data[ 'filetype' ] = filetype
data[ 'path' ] = path
data[ 'feature' ] = feature
if org_num not in orgs:
orgs[ org_num ] = {}
orgs[ org_num ][ 'chrs' ] = {}
if 'data' not in orgs[ org_num ][ 'chrs' ][ chr_acc ]:
orgs[ org_num ][ 'chrs' ][ chr_acc ][ 'data' ] = {}
orgs[ org_num ][ 'chrs' ][ chr_acc ][ 'data' ][ uid ] = data
else: continue
except: continue
for org_num in orgs:
org = orgs[ org_num ]
if org[ 'kingdom' ] not in microbe_info:
microbe_info[ org[ 'kingdom' ] ] = {}
if org_num not in microbe_info[ org[ 'kingdom' ] ]:
microbe_info[ org[ 'kingdom' ] ][org_num] = org
return microbe_info
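# A minimal sketch of how the loader above might be exercised (hypothetical
# tool-data path used for illustration):
#     info = load_microbial_data( '/path/to/tool-data' )
#     for kingdom in info:
#         for org_num, org in info[ kingdom ].items():
#             print org_num, org[ 'name' ], len( org[ 'chrs' ] )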
#post processing, set build for data and add additional data to history
from galaxy import datatypes, config, tools
from shutil import copyfile
def exec_after_process(app, inp_data, out_data, param_dict, tool, stdout, stderr):
base_dataset = out_data.items()[0][1]
history = base_dataset.history
if history == None:
print "unknown history!"
return
kingdom = param_dict.get( 'kingdom', None )
#group = param_dict.get( 'group', None )
org = param_dict.get( 'org', None )
#if not (kingdom or group or org):
if not (kingdom or org):
print "Parameters are not available."
#workflow passes galaxy.tools.parameters.basic.UnvalidatedValue instead of values
if isinstance( kingdom, tools.parameters.basic.UnvalidatedValue ):
kingdom = kingdom.value
if isinstance( org, tools.parameters.basic.UnvalidatedValue ):
org = org.value
GALAXY_DATA_INDEX_DIR = app.config.tool_data_path
microbe_info = load_microbial_data( GALAXY_DATA_INDEX_DIR, sep='\t' )
new_stdout = ""
split_stdout = stdout.split("\n")
basic_name = ""
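    # The tool's stdout is expected to contain tab-separated control lines:
    #   #File1    <description> <chr> <dbkey> <file_type>
    #   #NewFile  <description> <chr> <dbkey> <filepath> <file_type>
    # which drive renaming of the primary dataset and creation of new datasets.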
for line in split_stdout:
fields = line.split("\t")
if fields[0] == "#File1":
description = fields[1]
chr = fields[2]
dbkey = fields[3]
file_type = fields[4]
name, data = out_data.items()[0]
data.set_size()
basic_name = data.name
data.name = data.name + " (" + microbe_info[kingdom][org]['chrs'][chr]['data'][description]['feature'] +" for " + microbe_info[kingdom][org]['name'] + ":" + chr + ")"
data.dbkey = dbkey
data.info = data.name
data = app.datatypes_registry.change_datatype( data, file_type )
data.init_meta()
data.set_peek()
app.model.context.add( data )
app.model.context.flush()
elif fields[0] == "#NewFile":
description = fields[1]
chr = fields[2]
dbkey = fields[3]
filepath = fields[4]
file_type = fields[5]
newdata = app.model.HistoryDatasetAssociation( create_dataset = True, sa_session = app.model.context ) #This import should become a library
newdata.set_size()
newdata.extension = file_type
newdata.name = basic_name + " (" + microbe_info[kingdom][org]['chrs'][chr]['data'][description]['feature'] +" for "+microbe_info[kingdom][org]['name']+":"+chr + ")"
app.model.context.add( newdata )
app.model.context.flush()
app.security_agent.copy_dataset_permissions( base_dataset.dataset, newdata.dataset )
history.add_dataset( newdata )
app.model.context.add( history )
app.model.context.flush()
try:
copyfile(filepath,newdata.file_name)
newdata.info = newdata.name
newdata.state = newdata.states.OK
except:
newdata.info = "The requested file is missing from the system."
newdata.state = newdata.states.ERROR
newdata.dbkey = dbkey
newdata.init_meta()
newdata.set_peek()
app.model.context.flush()
| lappsgrid-incubator/GalaxyMods | tools/data_source/microbial_import_code.py | Python | apache-2.0 | 7,277 | ["Galaxy"] | 40a526ca31ffe7d38ff112cfe7770d5654d1bee2251d00b658e4ec697dc2155c |
#
# Handle the special case of the first scenario
#
self.notebook.switchScenario(0,scenarioType="Powder")
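#
# The script below defines ten Bruggeman powder scenarios in a PTFE matrix
# with volume fractions f = 0.01 to 0.9; only the Legend, Mass fraction and
# Volume fraction change between scenarios. Each Mass fraction appears to be
# the Volume fraction converted with m = f*rho_crystal/(f*rho_crystal +
# (1-f)*rho_matrix), using the matrix density 2.2 set below and the crystal
# density taken from the CASTEP calculation.
#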
#
#
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'castep'
tab.settings['Output file name'] = 'phonon.castep'
tab.settings['Excel file name'] = 'analysis_bruggeman.xlsx'
tab.settings['Script file name'] = 'analysis_bruggeman.py'
tab.settings['QM program'] = ''
#
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = True
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 10.0
tab.settings['Mass definition'] = 'average'
tab.settings['Optical permittivity edited'] = False
tab.sigmas_cm1 = [10.0, 10.0, 10.0, 10.0, 10.0, 10.0]
#
#
tab = self.notebook.scenarios[0]
tab.settings['Legend'] = 'f=0.01'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.01579304466235449
tab.settings['Volume fraction'] = 0.009999999999999998
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Bruggeman'
tab.settings['Particle shape'] = 'Sphere'
#
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[1]
tab.settings['Legend'] = 'f=0.1'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.1500292973489613
tab.settings['Volume fraction'] = 0.09999999999999999
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Bruggeman'
tab.settings['Particle shape'] = 'Sphere'
#
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[2]
tab.settings['Legend'] = 'f=0.2'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.2842572707828141
tab.settings['Volume fraction'] = 0.19999999999999996
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Bruggeman'
tab.settings['Particle shape'] = 'Sphere'
#
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[3]
tab.settings['Legend'] = 'f=0.3'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.405055368745222
tab.settings['Volume fraction'] = 0.30000000000000004
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Bruggeman'
tab.settings['Particle shape'] = 'Sphere'
#
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[4]
tab.settings['Legend'] = 'f=0.4'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.5143431083692053
tab.settings['Volume fraction'] = 0.4
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Bruggeman'
tab.settings['Particle shape'] = 'Sphere'
#
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[5]
tab.settings['Legend'] = 'f=0.5'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.613690836374523
tab.settings['Volume fraction'] = 0.49999999999999994
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Bruggeman'
tab.settings['Particle shape'] = 'Sphere'
#
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[6]
tab.settings['Legend'] = 'f=0.6'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.7043956704831009
tab.settings['Volume fraction'] = 0.6
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Bruggeman'
tab.settings['Particle shape'] = 'Sphere'
#
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[7]
tab.settings['Legend'] = 'f=0.7'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.7875384471385155
tab.settings['Volume fraction'] = 0.7
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Bruggeman'
tab.settings['Particle shape'] = 'Sphere'
#
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[8]
tab.settings['Legend'] = 'f=0.8'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.8640269960076986
tab.settings['Volume fraction'] = 0.8
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Bruggeman'
tab.settings['Particle shape'] = 'Sphere'
#
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[9]
tab.settings['Legend'] = 'f=0.9'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.9346294240088532
tab.settings['Volume fraction'] = 0.9000000000000001
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Bruggeman'
tab.settings['Particle shape'] = 'Sphere'
#
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 400
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
#
tab = self.notebook.viewerTab
tab.settings['Atom scaling'] = 0.5
tab.settings['Maximum displacement'] = 1.0
tab.settings['Bond colour'] = [80, 80, 80, 255]
tab.settings['Bond radius'] = 0.1
tab.settings['Cell colour'] = [255, 0, 0, 255]
tab.settings['Cell radius'] = 0.1
tab.settings['Background colour'] = [120, 120, 120, 255]
tab.settings['Arrow colour'] = [0, 255, 0, 255]
tab.settings['Arrow radius'] = 0.07
tab.settings['Number of phase steps'] = 41
tab.settings['Super Cell'] = [1, 1, 1]
#
#
tab = self.notebook.fitterTab
tab.settings['Excel file name'] = ''
tab.settings['Plot title'] = 'Experimental and Calculated Spectral Comparison'
tab.settings['Fitting type'] = 'Minimise x-correlation'
tab.settings['Number of iterations'] = 20
tab.settings['Frequency scaling factor'] = 1.0
tab.settings['Optimise frequency scaling'] = False
tab.settings['Spectrum scaling'] = False
tab.settings['Spectrum scaling factor'] = 1.0
tab.settings['Independent y-axes'] = True
tab.settings['Spectral difference threshold'] = 0.05
tab.settings['HPFilter lambda'] = 7.0
tab.settings['Baseline removal'] = False
tab.settings['Scenario index'] = 0
#
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 0
tab.settings['Maximum frequency'] = 800
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Number of atoms'] = 1
tab.settings['Plot type'] = 'Powder Molar Absorption'
tab.settings['concentration'] = 86.71312720248292
tab.settings['cell concentration'] = 86.71312720248292
| JohnKendrick/PDielec | Examples/Castep/MgO/application_note_bruggeman.py | Python | mit | 12,568 | ["CASTEP"] | 4d6925ce6b43d5a74447f452b211ef0bc88b99eea7e24eb30bd76522972e3f51 |
#!/usr/local/bin/python2.6
###AltAnalyze
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
#import pkg_resources
#import distutils
import statistics
import sys, string
import os.path
import unique
import update
import UI
import copy
import export
reload(export)
import ExpressionBuilder
reload(ExpressionBuilder)
import ExonAnalyze_module
reload(ExonAnalyze_module)
import ExonAnnotate_module
reload(ExonAnnotate_module)
import ResultsExport_module
import FeatureAlignment
import GO_Elite
import time
import webbrowser
import random
import traceback
import shutil
try:
import multiprocessing as mlp
except Exception:
mlp = None
    print 'Note: Multiprocessing is not supported for this version of Python.'
try:
from scipy import stats
except Exception:
pass ### scipy is not required but is used as a faster implementation of Fisher Exact Test when present
try:
from PIL import Image as PIL_Image
try:
import ImageTk
except Exception:
from PIL import ImageTk
except Exception:
None #print 'Python Imaging Library not installed... using default PNG viewer'
use_Tkinter = 'no'
debug_mode = 'no'
analysis_start_time = time.time()
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
dir_list2 = [] #add in code to prevent folder names from being included
for entry in dir_list:
        if entry[-4:] == ".txt" or entry[-4:] == ".csv" or entry[-4:] == ".TXT":
            dir_list2.append(entry)
return dir_list2
def eliminate_redundant_dict_values(database):
db1 = {}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
def makeUnique(item):
db1 = {}
list1 = []
k = 0
for i in item:
try:
db1[i] = []
except TypeError:
db1[tuple(i)] = []
k = 1
for i in db1:
if k == 0: list1.append(i)
else: list1.append(list(i))
list1.sort()
return list1
def cleanUpLine(line):
line = string.replace(line, '\n', '')
line = string.replace(line, '\c', '')
data = string.replace(line, '\r', '')
data = string.replace(data, '"', '')
return data
def returnLargeGlobalVars():
### Prints all large global variables retained in memory (taking up space)
all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(globals()[var]) > 500:
print var, len(globals()[var])
except Exception:
null = []
def clearObjectsFromMemory(db_to_clear):
db_keys = {}
try:
for key in db_to_clear:
db_keys[key] = []
except Exception:
for key in db_to_clear:
del key ### if key is a list
for key in db_keys:
try:
del db_to_clear[key]
except Exception:
try:
for i in key:
del i ### For lists of tuples
except Exception:
del key ### For plain lists
def importGeneric(filename):
fn = filepath(filename)
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
key_db[t[0]] = t[1:]
return key_db
def importGenericFiltered(filename, filter_db):
fn = filepath(filename)
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
key = t[0]
if key in filter_db: key_db[key] = t[1:]
return key_db
def importGenericFilteredDBList(filename, filter_db):
fn = filepath(filename)
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
try:
null = filter_db[t[0]]
try:
key_db[t[0]].append(t[1])
except KeyError:
key_db[t[0]] = [t[1]]
except Exception:
null = []
return key_db
def importGenericDBList(filename):
fn = filepath(filename)
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
try:
key_db[t[0]].append(t[1])
except KeyError:
key_db[t[0]] = [t[1]]
return key_db
def importExternalDBList(filename):
fn = filepath(filename)
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
try:
key_db[t[0]].append(t[1:])
except Exception:
key_db[t[0]] = [t[1:]]
return key_db
def FindDir(dir, term):
dir_list = unique.read_directory(dir)
dir_list2 = []
dir_list.sort()
for i in dir_list:
if term == i: dir_list2.append(i)
if len(dir_list2) == 0:
for i in dir_list:
if term in i: dir_list2.append(i)
dir_list2.sort()
dir_list2.reverse()
if len(dir_list2) > 0: return dir_list2[0]
else: return ''
def openFile(file_dir):
if os.name == 'nt':
try:
os.startfile('"' + file_dir + '"')
except Exception:
os.system('open "' + file_dir + '"')
elif 'darwin' in sys.platform:
os.system('open "' + file_dir + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + file_dir + '"')
def openCytoscape(parent_dir, application_dir, application_name):
cytoscape_dir = FindDir(parent_dir, application_dir)
cytoscape_dir = filepath(parent_dir + '/' + cytoscape_dir)
app_dir = FindDir(cytoscape_dir, application_name)
app_dir = cytoscape_dir + '/' + app_dir
if 'linux' in sys.platform:
app_dir = app_dir
app_dir2 = cytoscape_dir + '/Cytoscape'
try:
createCytoscapeDesktop(cytoscape_dir)
except Exception:
null = []
dir_list = unique.read_directory(
'/usr/bin/') ### Check to see that JAVA is installed
if 'java' not in dir_list:
            print 'Java not referenced in "/usr/bin/". If not installed,\nplease install and re-try opening Cytoscape'
try:
jar_path = cytoscape_dir + '/cytoscape.jar'
main_path = cytoscape_dir + '/cytoscape.CyMain'
plugins_path = cytoscape_dir + '/plugins'
os.system('java -Dswing.aatext=true -Xss5M -Xmx512M -jar ' +
jar_path + ' ' + main_path + ' -p ' + plugins_path +
' &')
print 'Cytoscape jar opened:', jar_path
except Exception:
print 'OS command to open Java failed.'
try:
try:
openFile(app_dir2)
print 'Cytoscape opened:', app_dir2
except Exception:
os.chmod(app_dir, 0777)
openFile(app_dir2)
except Exception:
try:
openFile(app_dir)
except Exception:
os.chmod(app_dir, 0777)
openFile(app_dir)
else:
try:
openFile(app_dir)
except Exception:
os.chmod(app_dir, 0777)
openFile(app_dir)
def createCytoscapeDesktop(cytoscape_dir):
cyto_ds_output = cytoscape_dir + '/Cytoscape.desktop'
data = export.ExportFile(cyto_ds_output)
cytoscape_desktop = cytoscape_dir + '/Cytoscape'
#cytoscape_desktop = '/hd3/home/nsalomonis/Cytoscape_v2.6.1/Cytoscape'
cytoscape_png = cytoscape_dir + '/.install4j/Cytoscape.png'
#cytoscape_png = '/hd3/home/nsalomonis/Cytoscape_v2.6.1/.install4j/Cytoscape.png'
data.write('[Desktop Entry]' + '\n')
data.write('Type=Application' + '\n')
data.write('Name=Cytoscape' + '\n')
data.write('Exec=/bin/sh "' + cytoscape_desktop + '"' + '\n')
data.write('Icon=' + cytoscape_png + '\n')
data.write('Categories=Application;' + '\n')
data.close()
########### Parse Input Annotations ###########
def ProbesetCalls(array_type, probeset_class, splice_event, constitutive_call,
external_exonid):
include_probeset = 'yes'
if array_type == 'AltMouse':
exonid = splice_event
if filter_probesets_by == 'exon':
            if '-' in exonid or '|' in exonid: ###Therefore the probeset represents an exon-exon junction or multi-exon probeset
include_probeset = 'no'
if filter_probesets_by != 'exon':
if '|' in exonid: include_probeset = 'no'
if constitutive_call == 'yes': include_probeset = 'yes'
else:
if avg_all_for_ss == 'yes' and (probeset_class == 'core' or
len(external_exonid) > 2):
constitutive_call = 'yes'
#if len(splice_event)>2 and constitutive_call == 'yes' and avg_all_for_ss == 'no': constitutive_call = 'no'
if constitutive_call == 'no' and len(splice_event) < 2 and len(
external_exonid) < 2: ###otherwise these are interesting probesets to keep
if filter_probesets_by != 'full':
if filter_probesets_by == 'extended':
if probeset_class == 'full': include_probeset = 'no'
elif filter_probesets_by == 'core':
if probeset_class != 'core': include_probeset = 'no'
return include_probeset, constitutive_call
def EvidenceOfAltSplicing(slicing_annot):
splice_annotations = ["ntron", "xon", "strangeSplice", "Prime", "3", "5",
"C-term"]
as_call = 0
splice_annotations2 = ["ntron", "assette", "strangeSplice", "Prime", "3",
"5"]
for annot in splice_annotations:
if annot in slicing_annot: as_call = 1
if as_call == 1:
if "C-term" in slicing_annot and ("N-" in slicing_annot or
"Promoter" in slicing_annot):
as_call = 0
for annot in splice_annotations2:
if annot in slicing_annot: as_call = 1
elif "bleed" in slicing_annot and ("N-" in slicing_annot or
"Promoter" in slicing_annot):
as_call = 0
for annot in splice_annotations2:
if annot in slicing_annot: as_call = 1
return as_call
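# A small illustrative check (hypothetical annotation strings): an annotation
# such as 'cassette-exon|intron-retention' yields as_call = 1, whereas
# 'alt-C-term|alt-Promoter' alone is reset to 0 by the C-term/Promoter test
# above.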
########### Begin Analyses ###########
class SplicingAnnotationData:
def ArrayType(self):
self._array_type = array_type
return self._array_type
def Probeset(self):
return self._probeset
def setProbeset(self, probeset):
self._probeset = probeset
def ExonID(self):
return self._exonid
def setDisplayExonID(self, exonid):
self._exonid = exonid
def GeneID(self):
return self._geneid
def Symbol(self):
symbol = ''
if self.GeneID() in annotate_db:
y = annotate_db[self.GeneID()]
symbol = y.Symbol()
return symbol
def ExternalGeneID(self):
return self._external_gene
def ProbesetType(self):
###e.g. Exon, junction, constitutive(gene)
return self._probeset_type
def GeneStructure(self):
return self._block_structure
def SecondaryExonID(self):
return self._block_exon_ids
def setSecondaryExonID(self, ids):
self._block_exon_ids = ids
def setLocationData(self, chromosome, strand, probeset_start,
probeset_stop):
self._chromosome = chromosome
self._strand = strand
self._start = probeset_start
self._stop = probeset_stop
def LocationSummary(self):
location = self.Chromosome() + ':' + self.ProbeStart(
) + '-' + self.ProbeStop() + '(' + self.Strand() + ')'
return location
def Chromosome(self):
return self._chromosome
def Strand(self):
return self._strand
def ProbeStart(self):
return self._start
def ProbeStop(self):
return self._stop
def ProbesetClass(self):
        ###e.g. core, extended, full
return self._probest_class
def ExternalExonIDs(self):
return self._external_exonids
def ExternalExonIDList(self):
external_exonid_list = string.split(self.ExternalExonIDs(), '|')
return external_exonid_list
def Constitutive(self):
return self._constitutive_status
def setTranscriptCluster(self, secondary_geneid):
self._secondary_geneid = secondary_geneid
def setNovelExon(self, novel_exon):
self._novel_exon = novel_exon
def NovelExon(self):
return self._novel_exon
def SecondaryGeneID(self):
return self._secondary_geneid
def setExonRegionID(self, exon_region):
self._exon_region = exon_region
def ExonRegionID(self):
return self._exon_region
def SplicingEvent(self):
splice_event = self._splicing_event
if len(splice_event) != 0:
if splice_event[0] == '|': splice_event = splice_event[1:]
return splice_event
def SplicingCall(self):
return self._splicing_call
def SpliceJunctions(self):
return self._splice_junctions
def Delete(self):
del self
def Report(self):
output = self.ArrayType() + '|' + self.ExonID(
) + '|' + self.ExternalGeneID()
return output
def __repr__(self):
return self.Report()
class AltMouseData(SplicingAnnotationData):
def __init__(self, affygene, exons, ensembl, block_exon_ids,
block_structure, probe_type_call):
self._geneid = affygene
self._external_gene = ensembl
self._exonid = exons
self._secondary_geneid = ensembl
self._probeset_type = probe_type_call
self._block_structure = block_structure
self._block_exon_ids = block_exon_ids
self._external_exonids = 'NA'
self._constitutive_status = 'no'
self._splicing_event = ''
self._secondary_geneid = 'NA'
self._exon_region = ''
if self._probeset_type == 'gene': self._constitutive_status = 'yes'
else: self._constitutive_status = 'no'
class AffyExonSTData(SplicingAnnotationData):
def __init__(self, ensembl_gene_id, exon_id, ens_exon_ids,
constitutive_call_probeset, exon_region, splicing_event,
splice_junctions, splicing_call):
self._geneid = ensembl_gene_id
self._external_gene = ensembl_gene_id
self._exonid = exon_id
self._constitutive_status = constitutive_call_probeset #; self._start = probeset_start; self._stop = probeset_stop
self._external_exonids = ens_exon_ids
#self._secondary_geneid = transcript_cluster_id#; self._chromosome = chromosome; self._strand = strand
self._exon_region = exon_region
self._splicing_event = splicing_event
self._splice_junctions = splice_junctions
self._splicing_call = splicing_call
if self._exonid[0] == 'U': self._probeset_type = 'UTR'
elif self._exonid[0] == 'E': self._probeset_type = 'exonic'
elif self._exonid[0] == 'I': self._probeset_type = 'intronic'
class AffyExonSTDataAbbreviated(SplicingAnnotationData):
def __init__(self, ensembl_gene_id, exon_id, splicing_call):
self._geneid = ensembl_gene_id
self._exonid = exon_id
self._splicing_call = splicing_call
def importSplicingAnnotations(array_type, Species, probeset_type,
avg_ss_for_all, root_dir):
global filter_probesets_by
filter_probesets_by = probeset_type
global species
species = Species
global avg_all_for_ss
avg_all_for_ss = avg_ss_for_all
global exon_db
exon_db = {}
global summary_data_db
summary_data_db = {}
global remove_intronic_junctions
remove_intronic_junctions = 'no'
if array_type == 'RNASeq':
probeset_annotations_file = root_dir + 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_junctions.txt'
else:
probeset_annotations_file = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_probesets.txt'
filtered_arrayids = {}
filter_status = 'no'
constitutive_probeset_db, exon_db, genes_being_analyzed = importSplicingAnnotationDatabase(
probeset_annotations_file, array_type, filtered_arrayids,
filter_status)
return exon_db, constitutive_probeset_db
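# A minimal call sketch (hypothetical arguments): for a human exon array one
# might call
#     exon_db, constitutive_db = importSplicingAnnotations('exon', 'Hs', 'core', 'yes', root_dir)
# which reads AltDatabase/Hs/exon/Hs_Ensembl_probesets.txt and returns the
# probeset annotation and constitutive-probeset dictionaries.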
def importSplicingAnnotationDatabase(filename, array_type, filtered_arrayids,
filter_status):
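    # Two passes are supported: with filter_status == 'no' the full annotation
    # file is loaded into the global exon_db, while filter_status == 'yes'
    # returns only the probesets listed in filtered_arrayids (used later to
    # re-import a reduced set and conserve memory).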
begin_time = time.time()
probesets_included_by_new_evidence = 0
export_exon_regions = 'yes'
if 'fake' in array_type:
array_type = string.replace(array_type, '-fake', '')
original_arraytype = 'RNASeq'
else:
original_arraytype = array_type
if filter_status == 'no':
global gene_transcript_cluster_db
gene_transcript_cluster_db = {}
gene_transcript_cluster_db2 = {}
global last_exon_region_db
last_exon_region_db = {}
else:
new_exon_db = {}
fn = filepath(filename)
last_gene = ' '
last_exon_region = ''
constitutive_probeset_db = {}
constitutive_gene = {}
count = 0
x = 0
constitutive_original = {}
#if filter_status == 'yes': exon_db = {}
if array_type == 'AltMouse':
for line in open(fn, 'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
probeset, affygene, exons, transcript_num, transcripts, probe_type_call, ensembl, block_exon_ids, block_structure, comparison_info = string.split(
probeset_data, '\t')
###note: currently exclude comparison_info since not applicable for existing analyses
if x == 0: x = 1
else:
if exons[-1] == '|': exons = exons[0:-1]
if affygene[-1] == '|':
affygene = affygene[0:-1]
constitutive_gene[affygene] = []
if probe_type_call == 'gene':
constitutive_call = 'yes' #looked through the probe annotations and the gene seems to be the most consistent constitutive feature
else:
constitutive_call = 'no'
include_call, constitutive_call = ProbesetCalls(
array_type, '', exons, constitutive_call, '')
if include_call == 'yes':
probe_data = AltMouseData(
affygene, exons, ensembl, block_exon_ids,
block_structure, probe_type_call
) #this used to just have affygene,exon in the values (1/17/05)
exon_db[probeset] = probe_data
if filter_status == 'yes':
new_exon_db[probeset] = probe_data
if constitutive_call == 'yes':
constitutive_probeset_db[probeset] = affygene
genes_being_analyzed = constitutive_gene
else:
for line in open(fn, 'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
if x == 0: x = 1
else:
try:
probeset_id, exon_id, ensembl_gene_id, transcript_cluster_id, chromosome, strand, probeset_start, probeset_stop, affy_class, constitutive_call_probeset, external_exonid, ens_const_exons, exon_region, exon_region_start, exon_region_stop, splicing_event, splice_junctions = string.split(
probeset_data, '\t')
except Exception:
print probeset_data
force_error
if affy_class == 'free':
affy_class = 'full' ### Don't know what the difference is
include_call, constitutive_call = ProbesetCalls(
array_type, affy_class, splicing_event,
constitutive_call_probeset, external_exonid)
#if 'ENSG00000163904:E11.5' in probeset_id: print probeset_data
#print array_type,affy_class,splicing_event,constitutive_call_probeset,external_exonid,constitutive_call,include_call;kill
if array_type == 'junction' and '.' not in exon_id:
exon_id = string.replace(exon_id, '-', '.')
exon_region = string.replace(exon_region, '-', '.')
if ensembl_gene_id != last_gene: new_gene = 'yes'
else: new_gene = 'no'
if filter_status == 'no' and new_gene == 'yes':
if '.' in exon_id: ### Exclude junctions
if '-' not in last_exon_region and 'E' in last_exon_region:
last_exon_region_db[last_gene] = last_exon_region
else:
last_exon_region_db[last_gene] = last_exon_region
last_gene = ensembl_gene_id
if len(exon_region) > 1:
last_exon_region = exon_region ### some probeset not linked to an exon region
                ###Record the transcript clusters associated with each gene to annotate the results later on
if constitutive_call_probeset != constitutive_call:
probesets_included_by_new_evidence += 1 #; print probeset_id,[splicing_event],[constitutive_call_probeset];kill
proceed = 'no'
as_call = 0
if array_type == 'RNASeq' or array_type == 'junction':
include_call = 'yes' ### Constitutive expression is not needed
if remove_intronic_junctions == 'yes':
if 'E' not in exon_id:
include_call = 'no' ### Remove junctions that only have splice-sites within an intron or UTR
if include_call == 'yes' or constitutive_call == 'yes':
#if proceed == 'yes':
as_call = EvidenceOfAltSplicing(splicing_event)
if filter_status == 'no':
probe_data = AffyExonSTDataAbbreviated(
ensembl_gene_id, exon_id, as_call)
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(
transcript_cluster_id)
try:
if export_exon_regions == 'yes':
probe_data.setExonRegionID(exon_region)
except Exception:
null = []
else:
probe_data = AffyExonSTData(
ensembl_gene_id, exon_id, external_exonid,
constitutive_call, exon_region, splicing_event,
splice_junctions, as_call)
probe_data.setLocationData(
chromosome, strand, probeset_start, probeset_stop)
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(
transcript_cluster_id)
else:
probe_data.setNovelExon(affy_class)
if filter_status == 'yes':
try: ### saves memory
null = filtered_arrayids[probeset_id]
new_exon_db[probeset_id] = probe_data
except KeyError:
null = []
else:
exon_db[probeset_id] = probe_data
if constitutive_call == 'yes' and filter_status == 'no': ###only perform function when initially running
constitutive_probeset_db[probeset_id] = ensembl_gene_id
try:
constitutive_gene[ensembl_gene_id].append(
probeset_id)
except Exception:
constitutive_gene[ensembl_gene_id] = [probeset_id]
###Only consider transcript clusters that make up the constitutive portion of the gene or that are alternatively regulated
if array_type != 'RNASeq':
try:
gene_transcript_cluster_db[
ensembl_gene_id].append(
transcript_cluster_id)
except KeyError:
gene_transcript_cluster_db[ensembl_gene_id] = [
transcript_cluster_id
]
if constitutive_call_probeset == 'yes' and filter_status == 'no': ###only perform function when initially running
try:
constitutive_original[ensembl_gene_id].append(
probeset_id)
except KeyError:
constitutive_original[ensembl_gene_id] = [probeset_id]
if array_type != 'RNASeq':
try:
gene_transcript_cluster_db2[
ensembl_gene_id].append(transcript_cluster_id)
except KeyError:
gene_transcript_cluster_db2[ensembl_gene_id] = [
transcript_cluster_id
]
    ###If no constitutive probesets remain for a gene as a result of additional filtering (removing all probesets associated with a splice event), add these back
original_probesets_add = 0
genes_being_analyzed = {}
for gene in constitutive_gene:
genes_being_analyzed[gene] = []
for gene in constitutive_original:
if gene not in constitutive_gene:
genes_being_analyzed[gene] = [gene]
constitutive_gene[gene] = []
original_probesets_add += 1
gene_transcript_cluster_db[gene] = gene_transcript_cluster_db2[
gene]
for probeset in constitutive_original[gene]:
constitutive_probeset_db[probeset] = gene
#if array_type == 'junction' or array_type == 'RNASeq':
### Added the below in 1.16!!!
### If no constitutive probesets for a gene assigned, assign all gene probesets
for probeset in exon_db:
gene = exon_db[probeset].GeneID()
proceed = 'no'
exonid = exon_db[probeset].ExonID()
### Rather than add all probesets, still filter based on whether the probeset is in an annotated exon
if 'E' in exonid and 'I' not in exonid and '_' not in exonid:
proceed = 'yes'
if proceed == 'yes':
if gene not in constitutive_gene:
constitutive_probeset_db[probeset] = gene
genes_being_analyzed[gene] = [gene]
### DO NOT ADD TO constitutive_gene SINCE WE WANT ALL mRNA ALIGNING EXONS/JUNCTIONS TO BE ADDED!!!!
#constitutive_gene[gene]=[]
gene_transcript_cluster_db = eliminate_redundant_dict_values(
gene_transcript_cluster_db)
#if affygene == 'ENSMUSG00000023089': print [abs(fold_change_log)],[log_fold_cutoff];kill
if array_type == 'RNASeq':
import RNASeq
try:
last_exon_region_db = RNASeq.importExonAnnotations(
species, 'distal-exon', '')
except Exception:
null = []
constitutive_original = []
constitutive_gene = []
#clearObjectsFromMemory(exon_db); constitutive_probeset_db=[];genes_being_analyzed=[] ### used to evaluate how much memory objects are taking up
#print 'remove_intronic_junctions:',remove_intronic_junctions
#print constitutive_gene['ENSMUSG00000031170'];kill ### Determine if avg_ss_for_all is working
if original_arraytype == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print len(
exon_db), id_name, 'stored as instances of SplicingAnnotationData in memory'
#print len(constitutive_probeset_db),'array IDs stored as constititive'
#print probesets_included_by_new_evidence, 'array IDs were re-annotated as NOT constitutive based on mRNA evidence'
if array_type != 'AltMouse':
print original_probesets_add, 'genes not viewed as constitutive as a result of filtering', id_name, 'based on splicing evidence, added back'
end_time = time.time()
time_diff = int(end_time - begin_time)
#print filename,"import finished in %d seconds" % time_diff
if filter_status == 'yes': return new_exon_db
else:
summary_data_db['gene_assayed'] = len(genes_being_analyzed)
try:
exportDenominatorGenes(genes_being_analyzed)
except Exception:
null = []
return constitutive_probeset_db, exon_db, genes_being_analyzed
def exportDenominatorGenes(genes_being_analyzed):
goelite_output = root_dir + 'GO-Elite/denominator/AS.denominator.txt'
goelite_data = export.ExportFile(goelite_output)
systemcode = 'En'
goelite_data.write("GeneID\tSystemCode\n")
for gene in genes_being_analyzed:
if array_type == 'AltMouse':
try:
gene = annotate_db[gene].ExternalGeneID()
except KeyError:
null = []
goelite_data.write(gene + '\t' + systemcode + '\n')
try:
goelite_data.close()
except Exception:
null = []
def performExpressionAnalysis(filename, constitutive_probeset_db, exon_db,
annotate_db, dataset_name):
#if analysis_method == 'splicing-index': returnLargeGlobalVars();kill ### used to ensure all large global vars from the reciprocal junction analysis have been cleared from memory
#returnLargeGlobalVars()
"""import list of expression values for arrayids and calculates statistics"""
global fold_dbase
global original_conditions
global normalization_method
stats_dbase = {}
fold_dbase = {}
ex_db = {}
si_db = []
bad_row_import = {}
count = 0
global array_group_name_db
array_group_name_db = {}
global array_group_db
array_group_db = {}
global array_raw_group_values
array_raw_group_values = {}
global original_array_names
original_array_names = []
global max_replicates
global equal_replicates
global array_group_list
array_index_list = [] ###Use this list for permutation analysis
fn = filepath(filename)
line_num = 1
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
probeset = t[0]
if t[0] == '#': null = [] ### Don't import line
elif line_num == 1:
line_num += 1 #makes this value null for the next loop of actual array data
            ###Below occurs if the data is raw as opposed to precomputed
if ':' in t[1]:
array_group_list = []
x = 0 ###gives us an original index value for each entry in the group
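                # Column headers of the form "group:array_name" (for example a
                # hypothetical "control:sample1") are split here so expression
                # values can later be grouped by biological condition.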
for entry in t[1:]:
original_array_names.append(entry)
aa = string.split(entry, ':')
try:
array_group, array_name = aa
except Exception:
array_name = string.join(aa[1:], ':')
array_group = aa[0]
try:
array_group_db[array_group].append(x)
array_group_name_db[array_group].append(array_name)
except KeyError:
array_group_db[array_group] = [x]
array_group_name_db[array_group] = [array_name]
### below only occurs with a new group addition
array_group_list.append(
array_group
) #use this to generate comparisons in the below linked function
x += 1
else:
#try: print data_type
#except Exception,exception:
#print exception
#print traceback.format_exc()
                print_out = 'The AltAnalyze filtered expression file "' + filename + '" is not properly formatted.\n Review formatting requirements if this file was created by another application.\n'
print_out += "\nFirst line\n" + line
try:
UI.WarningWindow(print_out, 'Exit')
print print_out
except Exception:
print print_out
badExit()
else:
#if probeset in exon_db:
#if exon_db[probeset].GeneID() == 'ENSG00000139970':
###Use the index values from above to assign each expression value to a new database
temp_group_array = {}
line_num += 1
for group in array_group_db:
if count == 0: array_index_list.append(array_group_db[group])
for array_index in array_group_db[group]:
try:
exp_val = float(t[array_index + 1])
except Exception:
if 'Gene_ID' not in line:
bad_row_import[probeset] = line
exp_val = 1
###appended is the numerical expression value for each array in the group (temporary array)
try:
temp_group_array[group].append(
exp_val) #add 1 since probeset is the first column
except KeyError:
temp_group_array[group] = [exp_val]
if count == 0:
array_index_list.sort()
count = 1
####store the group database within the probeset database entry
try:
null = exon_db[
probeset
] ###To conserve memory, don't store any probesets not used for downstream analyses (e.g. not linked to mRNAs)
#if 'ENSG00000139970' in probeset:
#print [max_exp]
#print t[1:];kill
#max_exp = max(map(float, t[1:]))
#if len(array_raw_group_values)>10000: break
#if max_exp>math.log(70,2):
array_raw_group_values[probeset] = temp_group_array
except KeyError:
#print probeset
pass
print len(
array_raw_group_values), 'sequence identifiers imported out of', line_num - 1
if len(bad_row_import) > 0:
print len(
bad_row_import), "Rows with an unexplained import error processed and deleted."
print "Example row:"
x = 0
for i in bad_row_import:
if x == 0: print bad_row_import[i]
try:
del array_raw_group_values[i]
except Exception:
null = []
x += 1
### If no gene expression reporting probesets were imported, update constitutive_probeset_db to include all mRNA aligning probesets
cs_genedb = {}
missing_genedb = {}
addback_genedb = {}
rnaseq_cs_gene_db = {}
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
#if gene == 'ENSG00000185008': print [probeset]
try:
null = array_raw_group_values[probeset]
cs_genedb[gene] = []
if gene == probeset:
rnaseq_cs_gene_db[gene] = [] ### If RPKM normalization used, use the gene expression values already calculated
except Exception:
missing_genedb[gene] = [] ### Collect genes possibly missing from the constitutive database (verify next)
for gene in missing_genedb:
try:
null = cs_genedb[gene]
except Exception:
addback_genedb[gene] = []
for probeset in array_raw_group_values:
try:
gene = exon_db[probeset].GeneID()
try:
null = addback_genedb[gene]
if 'I' not in probeset and 'U' not in probeset: ### No intron- or UTR-containing features should be used for constitutive expression
null = string.split(probeset, ':')
if len(null) < 3: ### No trans-gene junctions should be used for constitutive expression
constitutive_probeset_db[probeset] = gene
except Exception:
null = []
except Exception:
null = []
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
#if gene == 'ENSG00000185008': print [[probeset]]
### Only examine values for associated exons when determining RNASeq constitutive expression (when exon data is present)
normalization_method = 'raw'
if array_type == 'RNASeq':
junction_count = 0
constitutive_probeset_db2 = {}
for uid in constitutive_probeset_db:
if '-' in uid: junction_count += 1
if len(rnaseq_cs_gene_db) > 0: ### If filtered RPKM gene-level expression data present, use this instead (and only this)
normalization_method = 'RPKM'
constitutive_probeset_db = {} ### Re-set this database
for gene in rnaseq_cs_gene_db:
constitutive_probeset_db[gene] = gene
elif junction_count != 0 and len(
constitutive_probeset_db) != junction_count:
### occurs when there is a mix of junction and exon IDs
for uid in constitutive_probeset_db:
if '-' not in uid:
constitutive_probeset_db2[uid] = constitutive_probeset_db[
uid]
constitutive_probeset_db = constitutive_probeset_db2
constitutive_probeset_db2 = []
"""
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
if gene == 'ENSG00000185008': print [probeset]
"""
###Build all putative splicing events
global alt_junction_db
global exon_dbase
global critical_exon_db
critical_exon_db = {}
if array_type == 'AltMouse' or (
(array_type == 'junction' or
array_type == 'RNASeq') and explicit_data_type == 'null'):
### Applies to reciprocal junction analyses only
if array_type == 'AltMouse':
alt_junction_db, critical_exon_db, exon_dbase, exon_inclusion_db, exon_db = ExonAnnotate_module.identifyPutativeSpliceEvents(
exon_db, constitutive_probeset_db, array_raw_group_values,
agglomerate_inclusion_probesets, onlyAnalyzeJunctions)
print 'Number of Genes with Examined Splice Events:', len(alt_junction_db)
elif (array_type == 'junction' or
array_type == 'RNASeq') and explicit_data_type == 'null':
import JunctionArray
alt_junction_db, critical_exon_db, exon_dbase, exon_inclusion_db, exon_db = JunctionArray.getPutativeSpliceEvents(
species, array_type, exon_db, agglomerate_inclusion_probesets,
root_dir)
print 'Number of Genes with Examined Splice Events:', len(alt_junction_db)
#alt_junction_db=[]; critical_exon_db=[]; exon_dbase=[]; exon_inclusion_db=[]
if agglomerate_inclusion_probesets == 'yes':
array_raw_group_values = agglomerateInclusionProbesets(
array_raw_group_values, exon_inclusion_db)
exon_inclusion_db = []
### For datasets with high memory requirements (RNASeq), filter the current and new databases
### Begin this function after agglomeration to ensure agglomerated probesets are considered
reciprocal_probesets = {}
if array_type == 'junction' or array_type == 'RNASeq':
for affygene in alt_junction_db:
for event in alt_junction_db[affygene]:
reciprocal_probesets[event.InclusionProbeset()] = []
reciprocal_probesets[event.ExclusionProbeset()] = []
not_evalutated = {}
for probeset in array_raw_group_values:
try:
null = reciprocal_probesets[probeset]
except Exception:
### Don't remove constitutive probesets
try:
null = constitutive_probeset_db[probeset]
except Exception:
not_evalutated[probeset] = []
#print 'Removing',len(not_evalutated),'exon/junction IDs not evaluated for splicing'
for probeset in not_evalutated:
del array_raw_group_values[probeset]
###Check to see if we have precomputed expression data or raw data to be analyzed
x = 0
y = 0
array_raw_group_values2 = {}
probesets_to_delete = [] ### Record deleted probesets
if len(array_raw_group_values) == 0:
print_out = "No genes were considered 'Expressed' based on your input options. Check to make sure that the right species database is indicated and that the right data format has been selected (e.g., non-log versus log expression)."
try:
UI.WarningWindow(print_out, 'Exit')
except Exception:
print print_out
print "Exiting program"
badExit()
elif len(array_raw_group_values) > 0:
###array_group_list should already be unique and correctly sorted (see above)
for probeset in array_raw_group_values:
data_lists = []
for group_name in array_group_list:
data_list = array_raw_group_values[probeset][
group_name
] ###nested database entry access - baseline expression
if global_addition_factor > 0:
data_list = addGlobalFudgeFactor(data_list, 'log')
data_lists.append(data_list)
if len(array_group_list) == 2:
data_list1 = data_lists[0]
data_list2 = data_lists[-1]
avg1 = statistics.avg(data_list1)
avg2 = statistics.avg(data_list2)
log_fold = avg2 - avg1
try:
#t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
#t = abs(t); df = round(df) #Excel doesn't recognize fractions in a DF
#p = statistics.t_probability(t,df)
p = statistics.runComparisonStatistic(
data_list1, data_list2, probability_statistic)
if p == -1:
if len(data_list1) > 1 and len(data_list2) > 1:
print_out = "The probability statistic selected (" + probability_statistic + ") is not compatible with the\nexperimental design. Please consider an alternative statistic or correct the problem.\nExiting AltAnalyze."
try:
UI.WarningWindow(print_out, 'Exit')
except Exception:
print print_out
print "Exiting program"
badExit()
else:
p = 1
except Exception:
p = 1
fold_dbase[probeset] = [0]
fold_dbase[probeset].append(log_fold)
stats_dbase[probeset] = [avg1]
stats_dbase[probeset].append(p)
###replace entries with the two lists for later permutation analysis
if p == -1: ### should be p == 1: not sure why this filter was here, since it mistakenly removes probesets where there is just one array for each group
del fold_dbase[probeset]
del stats_dbase[probeset]
probesets_to_delete.append(probeset)
x += 1
if x == 1:
print 'Bad data detected...', data_list1, data_list2
elif (
avg1 < expression_threshold and
avg2 < expression_threshold and p > p_threshold
) and array_type != 'RNASeq': ### Inserted a filtering option to exclude small-variance, low-expression probesets
del fold_dbase[probeset]
del stats_dbase[probeset]
probesets_to_delete.append(probeset)
x += 1
else:
array_raw_group_values2[probeset] = [data_list1, data_list2
]
else: ###Non-junction analysis can handle more than 2 groups
index = 0
for data_list in data_lists:
try:
array_raw_group_values2[probeset].append(data_list)
except KeyError:
array_raw_group_values2[probeset] = [data_list]
if len(array_group_list) > 2: ### Thus, there is some variance for this probeset
### Create a complete stats_dbase containing all fold changes
if index == 0:
avg_baseline = statistics.avg(data_list)
stats_dbase[probeset] = [avg_baseline]
else:
avg_exp = statistics.avg(data_list)
log_fold = avg_exp - avg_baseline
try:
fold_dbase[probeset].append(log_fold)
except KeyError:
fold_dbase[probeset] = [0, log_fold]
index += 1
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
array_raw_group_values = array_raw_group_values2
array_raw_group_values2 = []
print x, id_name, "excluded prior to analysis... predicted not detected"
global original_avg_const_exp_db
global original_fold_dbase
global avg_const_exp_db
global permute_lists
global midas_db
if len(array_raw_group_values) > 0:
adj_fold_dbase, nonlog_NI_db, conditions, gene_db, constitutive_gene_db, constitutive_fold_change, original_avg_const_exp_db = constitutive_exp_normalization(
fold_dbase, stats_dbase, exon_db, constitutive_probeset_db)
stats_dbase = [] ### No longer needed after this point
original_fold_dbase = fold_dbase
avg_const_exp_db = {}
permute_lists = []
y = 0
original_conditions = conditions
max_replicates, equal_replicates = maxReplicates()
gene_expression_diff_db = constitutive_expression_changes(
constitutive_fold_change, annotate_db
) ###Add in constitutive fold change filter to assess gene expression for ASPIRE
while conditions > y:
avg_const_exp_db = constitutive_exp_normalization_raw(
gene_db, constitutive_gene_db, array_raw_group_values, exon_db,
y, avg_const_exp_db)
y += 1
#print len(avg_const_exp_db),constitutive_gene_db['ENSMUSG00000054850']
###Export Analysis Results for external splicing analysis (e.g. MiDAS format)
if run_MiDAS == 'yes' and normalization_method != 'RPKM': ### RPKM has negative values which will crash MiDAS
status = ResultsExport_module.exportTransitResults(
array_group_list, array_raw_group_values, array_group_name_db,
avg_const_exp_db, adj_fold_dbase, exon_db, dataset_name,
apt_location)
print "Finished exporting input data for MiDAS analysis"
try:
midas_db = ResultsExport_module.importMidasOutput(dataset_name)
except Exception:
midas_db = {
} ### Occurs if there are not enough samples to calculate a MiDAS p-value
else:
midas_db = {}
###Provides all pairwise permuted group comparisons
if array_type == 'AltMouse' or (
(array_type == 'junction' or
array_type == 'RNASeq') and explicit_data_type == 'null'):
permute_lists = statistics.permute_arrays(array_index_list)
### Now remove probesets from the analysis that were used to evaluate gene expression
for probeset in constitutive_probeset_db:
try:
null = reciprocal_probesets[probeset]
except Exception:
try:
del array_raw_group_values[probeset]
except Exception:
null = []
not_evalutated = []
reciprocal_probesets = []
constitutive_probeset_db = []
### Above, all conditions were examined when more than 2 are present... change this so that only the most extreme are analyzed further
if len(array_group_list) > 2 and analysis_method == 'splicing-index' and (
array_type == 'exon' or array_type == 'gene' or
explicit_data_type !=
'null'): ### USED FOR MULTIPLE COMPARISONS
print 'Calculating splicing-index values for multiple group comparisons (please be patient)...',
"""
if len(midas_db)==0:
print_out = 'Warning!!! MiDAS failed to run for multiple groups. Please make\nsure there are biological replicates present for your groups.\nAltAnalyze requires replicates for multi-group (more than two) analyses.'
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()"""
if filter_for_AS == 'yes':
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try:
del nonlog_NI_db[probeset]
except KeyError:
null = []
if export_NI_values == 'yes':
export_exon_regions = 'yes'
### Currently, we don't deal with raw adjusted expression values, just group, so just export the values for each group
summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[:-1] + '.txt'
print "Exporting all normalized intensities to:\n" + summary_output
adjoutput = export.ExportFile(summary_output)
title = string.join(['Gene\tExonID\tprobesetID'] + original_array_names, '\t') + '\n'
adjoutput.write(title)
### Pick which data lists have the most extreme values using the NI_dbase (adjusted folds for each condition)
original_increment = int(len(nonlog_NI_db) / 20)
increment = original_increment
interaction = 0
for probeset in nonlog_NI_db:
if interaction == increment:
increment += original_increment
print '*',
interaction += 1
geneid = exon_db[probeset].GeneID()
ed = exon_db[probeset]
index = 0
NI_list = [] ### Add the group_name to each adj fold value
for NI in nonlog_NI_db[probeset]:
NI_list.append((NI, index))
index += 1 ### set up to sort for the extreme adj folds and get the associated group_name using the index
raw_exp_vals = array_raw_group_values[probeset]
adj_exp_lists = {
} ### Store the adjusted expression values for each group
if geneid in avg_const_exp_db:
k = 0
gi = 0
adj_exp_vals = []
for exp_list in raw_exp_vals:
for exp in exp_list:
adj_exp_val = exp - avg_const_exp_db[geneid][k]
try:
adj_exp_lists[gi].append(adj_exp_val)
except Exception:
adj_exp_lists[gi] = [adj_exp_val]
if export_NI_values == 'yes':
adj_exp_vals.append(str(adj_exp_val))
k += 1
gi += 1
if export_NI_values == 'yes':
#print geneid+'-'+probeset, adj_exp_val, [ed.ExonID()];kill
if export_exon_regions == 'yes':
try: ### This will only work if ExonRegionID is stored in the abbreviated AffyExonSTData object - useful in comparing results between arrays (exon-region centric)
if (
array_type == 'exon' or
array_type == 'gene'
) or '-' not in ed.ExonID(
): ### only include exon entries not junctions
exon_regions = string.split(
ed.ExonRegionID(), '|')
for er in exon_regions:
if len(er) > 0: er = er
else:
try:
er = ed.ExonID()
except Exception:
er = 'NA'
ev = string.join(
[geneid + '\t' + er + '\t' +
probeset] + adj_exp_vals,
'\t') + '\n'
if len(filtered_probeset_db) > 0:
if probeset in filtered_probeset_db:
adjoutput.write(
ev
) ### This is used when we want to restrict to only probesets known to already be changed
else:
adjoutput.write(ev)
except Exception:
ev = string.join(
[geneid + '\t' + 'NA' + '\t' + probeset
] + adj_exp_vals, '\t') + '\n'
adjoutput.write(ev)
NI_list.sort()
examine_pairwise_comparisons = 'yes'
if examine_pairwise_comparisons == 'yes':
k1 = 0
k2 = 0
filtered_NI_comps = []
NI_list_rev = list(NI_list)
NI_list_rev.reverse()
NI1, index1 = NI_list[k1]
NI2, index2 = NI_list_rev[k2]
abs_SI = abs(math.log(NI1 / NI2, 2))
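### abs_SI: absolute log2 ratio between the smallest and largest normalized intensities (NI) across groups for this probeset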
if abs_SI < alt_exon_logfold_cutoff:
### Indicates that no valid matches were identified - hence, exit loop and return an NI_list with no variance
NI_list = [NI_list[0], NI_list[0]]
else:
### The most extreme pair passes the splicing cutoff - check that the gene-level (constitutive) fold change between these groups is below the cutoff (otherwise scan all pairs below)
constit_exp1 = original_avg_const_exp_db[geneid][
index1]
constit_exp2 = original_avg_const_exp_db[geneid][
index2]
ge_fold = constit_exp2 - constit_exp1
#print 'original',abs_SI,k1,k2, ge_fold, constit_exp1, constit_exp2
if abs(ge_fold) < log_fold_cutoff:
filtered_NI_comps.append([abs_SI, k1, k2])
else:
for i1 in NI_list:
k2 = 0
for i2 in NI_list_rev:
NI1, index1 = i1
NI2, index2 = i2
abs_SI = abs(math.log(NI1 / NI2, 2))
#constit_exp1 = original_avg_const_exp_db[geneid][index1]
#constit_exp2 = original_avg_const_exp_db[geneid][index2]
#ge_fold = constit_exp2-constit_exp1
#if abs(ge_fold) < log_fold_cutoff: filtered_NI_comps.append([abs_SI,k1,k2])
#print k1,k2, i1, i2, abs_SI, abs(ge_fold), log_fold_cutoff, alt_exon_logfold_cutoff
if abs_SI < alt_exon_logfold_cutoff: break
else:
constit_exp1 = original_avg_const_exp_db[
geneid][index1]
constit_exp2 = original_avg_const_exp_db[
geneid][index2]
ge_fold = constit_exp2 - constit_exp1
if abs(ge_fold) < log_fold_cutoff:
filtered_NI_comps.append([abs_SI,
k1, k2])
#if k1 == 49 or k1 == 50 or k1 == 51: print probeset, abs_SI, k1, k2, abs(ge_fold),log_fold_cutoff, index1, index2, NI1, NI2, constit_exp1,constit_exp2
k2 += 1
k1 += 1
if len(filtered_NI_comps) > 0:
#print filtered_NI_comps
#print NI_list_rev
#print probeset,geneid
#print len(filtered_NI_comps)
#print original_avg_const_exp_db[geneid]
filtered_NI_comps.sort()
si, k1, k2 = filtered_NI_comps[-1]
NI_list = [NI_list[k1], NI_list_rev[k2]]
"""
NI1,index1 = NI_list[0]; NI2,index2 = NI_list[-1]
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
print probeset, si, ge_fold, NI_list"""
#print k1,k2;sys.exit()
index1 = NI_list[0][1]
index2 = NI_list[-1][1]
nonlog_NI_db[probeset] = [
NI_list[0][0], NI_list[-1][0]
] ### Update the values of this dictionary
data_list1 = array_raw_group_values[probeset][index1]
data_list2 = array_raw_group_values[probeset][index2]
avg1 = statistics.avg(data_list1)
avg2 = statistics.avg(data_list2)
log_fold = avg2 - avg1
group_name1 = array_group_list[index1]
group_name2 = array_group_list[index2]
try:
#t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
#t = abs(t); df = round(df); ttest_exp_p = statistics.t_probability(t,df)
ttest_exp_p = statistics.runComparisonStatistic(
data_list1, data_list2, probability_statistic)
except Exception:
ttest_exp_p = 1
fold_dbase[probeset] = [0]
fold_dbase[probeset].append(log_fold)
if ttest_exp_p == -1:
del fold_dbase[probeset]
probesets_to_delete.append(probeset)
x += 1
elif avg1 < expression_threshold and avg2 < expression_threshold and (
ttest_exp_p > p_threshold and ttest_exp_p != 1
): ### Inserted a filtering option to exclude small variance, low expreession probesets
del fold_dbase[probeset]
probesets_to_delete.append(probeset)
x += 1
else:
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2 - constit_exp1
normInt1 = (avg1 - constit_exp1)
normInt2 = (avg2 - constit_exp2)
adj_fold = normInt2 - normInt1
splicing_index = -1 * adj_fold
abs_splicing_index = abs(splicing_index)
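### splicing-index: the negative of the change in constitutive-normalized probeset intensity between the two most extreme groups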
#print probeset, splicing_index, ge_fold, index1, index2
#normIntList1 = adj_exp_lists[index1]; normIntList2 = adj_exp_lists[index2]
all_nI = []
for g_index in adj_exp_lists:
all_nI.append(adj_exp_lists[g_index])
try:
normIntensityP = statistics.OneWayANOVA(
all_nI
) #[normIntList1,normIntList2] ### This stays an ANOVA independent of the algorithm choosen since groups number > 2
except Exception:
normIntensityP = 'NA'
if (normInt1 * normInt2) < 0: opposite_SI_log_mean = 'yes'
else: opposite_SI_log_mean = 'no'
abs_log_ratio = abs(ge_fold)
if probeset in midas_db:
try:
midas_p = float(midas_db[probeset])
except ValueError:
midas_p = 'NA'
else:
midas_p = 'NA'
#if 'ENSG00000059588' in geneid: print probeset, splicing_index, constit_exp1, constit_exp2, ge_fold,group_name2+'_vs_'+group_name1, index1, index2
if abs_splicing_index > alt_exon_logfold_cutoff and (
midas_p < p_threshold or midas_p == 'NA'
): #and abs_log_ratio>1 and ttest_exp_p<0.05: ###and ge_threshold_count==2
exonid = ed.ExonID()
critical_exon_list = [1, [exonid]]
ped = ProbesetExpressionData(
avg1, avg2, log_fold, adj_fold, ttest_exp_p,
group_name2 + '_vs_' + group_name1)
sid = ExonData(splicing_index, probeset,
critical_exon_list, geneid, normInt1,
normInt2, normIntensityP,
opposite_SI_log_mean)
sid.setConstitutiveExpression(constit_exp1)
sid.setConstitutiveFold(ge_fold)
sid.setProbesetExpressionData(ped)
si_db.append((splicing_index, sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(splicing_index, geneid,
normIntensityP)
ex_db[probeset] = eed
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print len(si_db), id_name, "with evidence of Alternative expression"
original_fold_dbase = fold_dbase
si_db.sort()
summary_data_db['denominator_exp_events'] = len(nonlog_NI_db)
del avg_const_exp_db
del gene_db
del constitutive_gene_db
gene_expression_diff_db = {}
if export_NI_values == 'yes': adjoutput.close()
### Above, all conditions were examined when more than 2 are present... change this so that only the most extreeem are analyzed further
elif len(array_group_list) > 2 and (
array_type == 'junction' or array_type == 'RNASeq' or
array_type == 'AltMouse'): ### USED FOR MULTIPLE COMPARISONS
excluded_probeset_db = {}
group_sizes = []
original_array_indices = permute_lists[0] ###p[0] is the original organization of the group samples prior to permutation
for group in original_array_indices:
group_sizes.append(len(group))
if analysis_method == 'linearregres': ### For linear regression, these scores are non-log
original_array_raw_group_values = copy.deepcopy(
array_raw_group_values)
for probeset in array_raw_group_values:
ls_concatenated = []
for group in array_raw_group_values[probeset]:
ls_concatenated += group
ls_concatenated = statistics.log_fold_conversion_fraction(
ls_concatenated)
array_raw_group_values[probeset] = ls_concatenated
pos1 = 0
pos2 = 0
positions = []
for group in group_sizes:
if pos1 == 0:
pos2 = group
positions.append((pos1, pos2))
else:
pos2 = pos1 + group
positions.append((pos1, pos2))
pos1 = pos2
if export_NI_values == 'yes':
export_exon_regions = 'yes'
### Currently, we don't deal with raw adjusted expression values, just group, so just export the values for each group
summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[:-1] + '.txt'
print "Exporting all normalized intensities to:\n" + summary_output
adjoutput = export.ExportFile(summary_output)
title = string.join(['gene\tprobesets\tExonRegion'] + original_array_names, '\t') + '\n'
adjoutput.write(title)
events_examined = 0
denominator_events = 0
fold_dbase = []
adj_fold_dbase = []
scores_examined = 0
splice_event_list = []
splice_event_list_mx = []
splice_event_list_non_mx = []
event_mx_temp = []
permute_p_values = {}
probeset_comp_db = {} #use this to exclude duplicate mx events
for geneid in alt_junction_db:
affygene = geneid
for event in alt_junction_db[geneid]:
if array_type == 'AltMouse':
#event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
#critical_exon_db[affygene,tuple(critical_exons)] = [1,'E'+str(e1a),'E'+str(e2b)] --- affygene,tuple(event) == key, 1 indicates both are either up or down together
event_call = event[0][0] + '-' + event[1][0]
exon_set1 = event[0][1]
exon_set2 = event[1][1]
probeset1 = exon_dbase[affygene, exon_set1]
probeset2 = exon_dbase[affygene, exon_set2]
critical_exon_list = critical_exon_db[affygene, tuple(
event)]
if array_type == 'junction' or array_type == 'RNASeq':
event_call = 'ei-ex' ### Below objects from JunctionArrayEnsemblRules - class JunctionInformation
probeset1 = event.InclusionProbeset()
probeset2 = event.ExclusionProbeset()
exon_set1 = event.InclusionJunction()
exon_set2 = event.ExclusionJunction()
try:
novel_event = event.NovelEvent()
except Exception:
novel_event = 'known'
critical_exon_list = [1, event.CriticalExonSets()]
key, jd = formatJunctionData(
[probeset1, probeset2], geneid, critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try:
jd.setSymbol(annotate_db[geneid].Symbol())
except Exception:
null = []
#if '|' in probeset1: print probeset1, key,jd.InclusionDisplay();kill
probeset_comp_db[
key] = jd ### This is used for the permutation analysis and domain/mirBS import
dI_scores = []
if probeset1 in nonlog_NI_db and probeset2 in nonlog_NI_db and probeset1 in array_raw_group_values and probeset2 in array_raw_group_values:
events_examined += 1
if analysis_method == 'ASPIRE':
index1 = 0
NI_list1 = []
NI_list2 = [
] ### Add the group_name to each adj fold value
for NI in nonlog_NI_db[probeset1]:
NI_list1.append(NI)
for NI in nonlog_NI_db[probeset2]:
NI_list2.append(NI)
for NI1_g1 in NI_list1:
NI2_g1 = NI_list2[index1]
index2 = 0
for NI1_g2 in NI_list1:
try:
NI2_g2 = NI_list2[index2]
except Exception:
print index1, index2, NI_list1, NI_list2
kill
if index1 != index2:
b1 = NI1_g1
e1 = NI1_g2
b2 = NI2_g1
e2 = NI2_g2
try:
dI = statistics.aspire_stringent(
b1, e1, b2, e2)
Rin = b1 / e1
Rex = b2 / e2
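### Rin and Rex are the group-to-group NI ratios of the inclusion and exclusion junctions; a valid ASPIRE event requires a reciprocal pattern (one ratio > 1, the other < 1)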
if (Rin > 1 and Rex < 1) or (
Rin < 1 and Rex > 1):
if dI < 0:
i1, i2 = index2, index1 ### all scores should indicate upregulation
else:
i1, i2 = index1, index2
dI_scores.append((abs(dI), i1,
i2))
except Exception:
#if array_type != 'RNASeq': ### RNASeq has counts of zero and one that can cause the same result between groups and probesets
#print probeset1, probeset2, b1, e1, b2, e2, index1, index2, events_examined;kill
### Exception - occurs for RNA-Seq but can occur for array data under extremely rare circumstances (Rex=Rin even with different b1,e1 and b2,e2 values)
null = []
index2 += 1
index1 += 1
dI_scores.sort()
if analysis_method == 'linearregres':
log_fold, i1, i2 = getAllPossibleLinearRegressionScores(
probeset1, probeset2, positions, group_sizes)
dI_scores.append((log_fold, i1, i2))
raw_exp_vals1 = original_array_raw_group_values[
probeset1]
raw_exp_vals2 = original_array_raw_group_values[
probeset2]
else:
raw_exp_vals1 = array_raw_group_values[probeset1]
raw_exp_vals2 = array_raw_group_values[probeset2]
adj_exp_lists1 = {}
adj_exp_lists2 = {
} ### Store the adjusted expression values for each group
if geneid in avg_const_exp_db:
gi = 0
l = 0
adj_exp_vals = []
anova_test = []
for exp_list in raw_exp_vals1:
k = 0
anova_group = []
for exp in exp_list:
adj_exp_val1 = exp - avg_const_exp_db[
geneid][l]
try:
adj_exp_lists1[gi].append(adj_exp_val1)
except Exception:
adj_exp_lists1[gi] = [adj_exp_val1]
adj_exp_val2 = raw_exp_vals2[gi][
k] - avg_const_exp_db[geneid][l]
try:
adj_exp_lists2[gi].append(adj_exp_val2)
except Exception:
adj_exp_lists2[gi] = [adj_exp_val2]
anova_group.append(adj_exp_val2 -
adj_exp_val1)
if export_NI_values == 'yes':
#if analysis_method == 'ASPIRE':
adj_exp_vals.append(str(adj_exp_val2 -
adj_exp_val1))
### BELOW CODE PRODUCES THE SAME RESULT!!!!
"""folds1 = statistics.log_fold_conversion_fraction([exp])
folds2 = statistics.log_fold_conversion_fraction([raw_exp_vals2[gi][k]])
lr_score = statistics.convert_to_log_fold(statistics.simpleLinRegress(folds1,folds2))
adj_exp_vals.append(str(lr_score))"""
k += 1
l += 1
gi += 1
anova_test.append(anova_group)
if export_NI_values == 'yes':
if export_exon_regions == 'yes':
exon_regions = string.join(
critical_exon_list[1], '|')
exon_regions = string.split(exon_regions,
'|')
for er in exon_regions:
ev = string.join(
[geneid + '\t' + probeset1 + '-' +
probeset2 + '\t' + er] +
adj_exp_vals, '\t') + '\n'
if len(filtered_probeset_db) > 0:
if probeset1 in filtered_probeset_db and probeset2 in filtered_probeset_db:
adjoutput.write(
ev
) ### This is used when we want to restrict to only probesets known to already be changed
else:
adjoutput.write(ev)
try:
anovaNIp = statistics.OneWayANOVA(
anova_test
) ### This stays an ANOVA independent of the algorithm choosen since groups number > 2
except Exception:
anovaNIp = 'NA'
if len(dI_scores) > 0 and geneid in avg_const_exp_db:
dI, index1, index2 = dI_scores[-1]
count = 0
probesets = [probeset1, probeset2]
index = 0
key, jd = formatJunctionData(
[probeset1, probeset2
], affygene, critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try:
jd.setSymbol(annotate_db[affygene].Symbol(
))
except Exception:
null = []
probeset_comp_db[
key] = jd ### This is used for the permutation analysis and domain/mirBS import
if max_replicates > 2 or equal_replicates == 2:
permute_p_values[(probeset1, probeset2)] = [
anovaNIp, 'NA', 'NA', 'NA'
]
index = 0
for probeset in probesets:
if analysis_method == 'linearregres':
data_list1 = original_array_raw_group_values[
probeset][index1]
data_list2 = original_array_raw_group_values[
probeset][index2]
else:
data_list1 = array_raw_group_values[
probeset][index1]
data_list2 = array_raw_group_values[
probeset][index2]
baseline_exp = statistics.avg(data_list1)
experimental_exp = statistics.avg(data_list2)
fold_change = experimental_exp - baseline_exp
group_name1 = array_group_list[index1]
group_name2 = array_group_list[index2]
try:
ttest_exp_p = statistics.runComparisonStatistic(
data_list1, data_list2,
probability_statistic)
except Exception:
ttest_exp_p = 'NA'
if ttest_exp_p == 1: ttest_exp_p = 'NA'
if index == 0:
try:
adj_fold = statistics.avg(
adj_exp_lists1[
index2]) - statistics.avg(
adj_exp_lists1[index1])
except Exception:
print raw_exp_vals1, raw_exp_vals2, avg_const_exp_db[
geneid]
print probeset, probesets, adj_exp_lists1, adj_exp_lists2, index1, index2
kill
ped1 = ProbesetExpressionData(
baseline_exp, experimental_exp,
fold_change, adj_fold, ttest_exp_p,
group_name2 + '_vs_' + group_name1)
else:
adj_fold = statistics.avg(adj_exp_lists2[
index2]) - statistics.avg(
adj_exp_lists2[index1])
ped2 = ProbesetExpressionData(
baseline_exp, experimental_exp,
fold_change, adj_fold, ttest_exp_p,
group_name2 + '_vs_' + group_name1)
constit_exp1 = original_avg_const_exp_db[
geneid][index1]
constit_exp2 = original_avg_const_exp_db[
geneid][index2]
ge_fold = constit_exp2 - constit_exp1
index += 1
try:
pp1 = statistics.runComparisonStatistic(
adj_exp_lists1[index1],
adj_exp_lists1[index2],
probability_statistic)
pp2 = statistics.runComparisonStatistic(
adj_exp_lists2[index1],
adj_exp_lists2[index2],
probability_statistic)
except Exception:
pp1 = 'NA'
pp2 = 'NA'
if analysis_method == 'ASPIRE' and len(
dI_scores) > 0:
p1 = JunctionExpressionData(
adj_exp_lists1[index1],
adj_exp_lists1[index2], pp1, ped1)
p2 = JunctionExpressionData(
adj_exp_lists2[index1],
adj_exp_lists2[index2], pp2, ped2)
### The ANOVA p-value replaces the pairwise p-value computed below
"""try: baseline_scores, exp_scores, pairwiseNIp = calculateAllASPIREScores(p1,p2)
except Exception: baseline_scores = [0]; exp_scores=[dI]; pairwiseNIp = 0 """
#if pairwiseNIp == 'NA': pairwiseNIp = 0 ### probably comment out
if len(dI_scores) > 0:
scores_examined += 1
if probeset in midas_db:
try:
midas_p = float(midas_db[probeset])
except ValueError:
midas_p = 'NA'
else:
midas_p = 'NA'
if dI > alt_exon_logfold_cutoff and (
anovaNIp < p_threshold or
perform_permutation_analysis == 'yes' or
anovaNIp == 'NA' or anovaNIp == 1
): #and abs_log_ratio>1 and ttest_exp_p<0.05: ###and ge_threshold_count==2
#print [dI, probeset1,probeset2, anovaNIp, alt_exon_logfold_cutoff];kill
ejd = ExonJunctionData(
dI, probeset1, probeset2, pp1, pp2,
'upregulated', event_call,
critical_exon_list, affygene, ped1, ped2)
ejd.setConstitutiveFold(ge_fold)
ejd.setConstitutiveExpression(constit_exp1)
if array_type == 'RNASeq':
ejd.setNovelEvent(novel_event)
splice_event_list.append((dI, ejd))
else:
excluded_probeset_db[
affygene + ':' + critical_exon_list[1][
0]] = probeset1, affygene, dI, 'NA', anovaNIp
statistics.adjustPermuteStats(permute_p_values)
ex_db = splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db
original_fold_dbase = fold_dbase
original_avg_const_exp_db = []
nonlog_NI_db = []
fold_dbase = []
summary_data_db['denominator_exp_events'] = events_examined
del avg_const_exp_db
del gene_db
del constitutive_gene_db
gene_expression_diff_db = {}
if export_NI_values == 'yes': adjoutput.close()
print len(splice_event_list), 'alternative exons out of %s exon events examined' % events_examined
fold_dbase = []
original_fold_dbase = []
exon_db = []
constitutive_gene_db = []
addback_genedb = []
gene_db = []
missing_genedb = []
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return conditions, adj_fold_dbase, nonlog_NI_db, dataset_name, gene_expression_diff_db, midas_db, ex_db, si_db
class ProbesetExpressionData:
def __init__(self, baseline_exp, experimental_exp, fold_change, adj_fold,
ttest_raw_exp, annotation):
self.baseline_exp = baseline_exp
self.experimental_exp = experimental_exp
self.fold_change = fold_change
self.adj_fold = adj_fold
self.ttest_raw_exp = ttest_raw_exp
self.annotation = annotation
def BaselineExp(self):
return str(self.baseline_exp)
def ExperimentalExp(self):
return str(self.experimental_exp)
def FoldChange(self):
return str(self.fold_change)
def AdjFold(self):
return str(self.adj_fold)
def ExpPval(self):
return str(self.ttest_raw_exp)
def Annotation(self):
return self.annotation
def __repr__(self):
return self.BaselineExp() + '|' + self.FoldChange()
def agglomerateInclusionProbesets(array_raw_group_values, exon_inclusion_db):
###Combine expression profiles for inclusion probesets that correspond to the same splice event
for excl_probeset in exon_inclusion_db:
inclusion_event_profiles = []
if len(exon_inclusion_db[excl_probeset]) > 1:
for incl_probeset in exon_inclusion_db[excl_probeset]:
if incl_probeset in array_raw_group_values and excl_probeset in array_raw_group_values:
array_group_values = array_raw_group_values[incl_probeset]
inclusion_event_profiles.append(array_group_values)
#del array_raw_group_values[incl_probeset] ###Remove un-agglomerated original entry
if len(inclusion_event_profiles) > 0: ###Thus, some probesets for this splice event are in the input file
combined_event_profile = combine_profiles(inclusion_event_profiles)
###Combine inclusion probesets into a single ID (identical manner to that in ExonAnnotate_module.identifyPutativeSpliceEvents)
incl_probesets = exon_inclusion_db[excl_probeset]
incl_probesets_str = string.join(incl_probesets, '|')
array_raw_group_values[incl_probesets_str] = combined_event_profile
return array_raw_group_values
def combine_profiles(profile_list):
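### Element-wise average across the input profiles: for each group key, position i of the returned list is the mean of the i-th values from every profile containing that key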
profile_group_sizes = {}
for db in profile_list:
for key in db:
profile_group_sizes[key] = len(db[key])
break
new_profile_db = {}
for key in profile_group_sizes:
x = profile_group_sizes[key] ###number of elements in list for key
new_val_list = []
i = 0
while i < x:
temp_val_list = []
for db in profile_list:
if key in db:
val = db[key][i]
temp_val_list.append(val)
i += 1
val_avg = statistics.avg(temp_val_list)
new_val_list.append(val_avg)
new_profile_db[key] = new_val_list
return new_profile_db
def constitutive_exp_normalization(fold_db, stats_dbase, exon_db,
constitutive_probeset_db):
"""For every expression value, normalize to the expression of the constitutive gene features for that condition,
then store those ratios (probeset_exp/avg_constitutive_exp) and regenerate expression values relative only to the
baseline avg_constitutive_exp, for all conditions, to normalize out gene expression changes"""
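### e.g. assuming log2 units, a (hypothetical) probeset at 7.0 in a condition whose constitutive gene average is 8.0 gives a non-log NI of 2**7 / 2**8 = 0.5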
#print "\nParameters:"
#print "Factor_out_expression_changes:",factor_out_expression_changes
#print "Only_include_constitutive_containing_genes:",only_include_constitutive_containing_genes
#print "\nAdjusting probeset average intensity values to factor out condition specific expression changes for optimal splicing descrimination"
gene_db = {}
constitutive_gene_db = {}
### organize everything by gene
for probeset in fold_db:
conditions = len(fold_db[probeset])
break
remove_diff_exp_genes = remove_transcriptional_regulated_genes
if conditions > 2: remove_diff_exp_genes = 'no'
for probeset in exon_db:
affygene = exon_db[probeset].GeneID(
) #exon_db[probeset] = affygene,exons,ensembl,block_exon_ids,block_structure,comparison_info
if probeset in fold_db:
try:
gene_db[affygene].append(probeset)
except KeyError:
gene_db[affygene] = [probeset]
if probeset in constitutive_probeset_db and (
only_include_constitutive_containing_genes == 'yes' or
factor_out_expression_changes == 'no'):
#the second conditional is used to exclude constitutive data if we wish to use all probesets for
#background normalization rather than just the designated 'gene' probesets.
if probeset in stats_dbase:
try:
constitutive_gene_db[affygene].append(probeset)
except KeyError:
constitutive_gene_db[affygene] = [probeset]
if len(constitutive_gene_db) > 0:
###This is blank when there are no constitutive features and the above condition is implemented
gene_db2 = constitutive_gene_db
else:
gene_db2 = gene_db
avg_const_exp_db = {}
for affygene in gene_db2:
probeset_list = gene_db2[affygene]
x = 0
while x < conditions:
### average all exp values for constitutive probesets for each condition
exp_list = []
for probeset in probeset_list:
probe_fold_val = fold_db[probeset][x]
baseline_exp = stats_dbase[probeset][0]
exp_val = probe_fold_val + baseline_exp
exp_list.append(exp_val)
avg_const_exp = statistics.avg(exp_list)
try:
avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
avg_const_exp_db[affygene] = [avg_const_exp]
x += 1
adj_fold_dbase = {}
nonlog_NI_db = {}
constitutive_fold_change = {}
for affygene in avg_const_exp_db: ###If we only wish to include proper constitutive probes, this will ensure we only examine those genes and probesets that are constitutive
probeset_list = gene_db[affygene]
x = 0
while x < conditions:
exp_list = []
for probeset in probeset_list:
expr_to_subtract = avg_const_exp_db[affygene][x]
baseline_const_exp = avg_const_exp_db[affygene][0]
probe_fold_val = fold_db[probeset][x]
baseline_exp = stats_dbase[probeset][0]
exp_val = probe_fold_val + baseline_exp
exp_val_non_log = statistics.log_fold_conversion_fraction(
exp_val)
expr_to_subtract_non_log = statistics.log_fold_conversion_fraction(
expr_to_subtract)
baseline_const_exp_non_log = statistics.log_fold_conversion_fraction(
baseline_const_exp)
if factor_out_expression_changes == 'yes':
exp_splice_valff = exp_val_non_log / expr_to_subtract_non_log
else: #if no, then we just normalize to the baseline constitutive expression in order to keep gene expression effects (useful if you don't trust constitutive feature expression levels)
exp_splice_valff = exp_val_non_log / baseline_const_exp_non_log
constitutive_fold_diff = expr_to_subtract_non_log / baseline_const_exp_non_log
###To calculate adjusted expression, we need to get the fold change in the constitutive avg (expr_to_subtract/baseline_const_exp) and divide the experimental expression
###By this fold change.
ge_adj_exp_non_log = exp_val_non_log / constitutive_fold_diff #gives a GE adjusted expression
try:
ge_adj_exp = math.log(ge_adj_exp_non_log, 2)
except ValueError:
print probeset, ge_adj_exp_non_log, constitutive_fold_diff, exp_val_non_log, exp_val, baseline_exp, probe_fold_val, dog
adj_probe_fold_val = ge_adj_exp - baseline_exp
### Here we normalize probeset expression to avg-constitutive expression by dividing the probe signal by the average constitutive probe signal (should be < 1)
### referred to as steady-state normalization
if array_type != 'AltMouse' or (
probeset not in constitutive_probeset_db):
"""Can't use constitutive gene features since these have no variance for pearson analysis
Python will approximate numbers to a small decimal point range. If the first fold value is
zero, often, zero will be close to but not exactly zero. Correct below """
try:
adj_fold_dbase[probeset].append(adj_probe_fold_val)
except KeyError:
if abs(adj_probe_fold_val -
0) < 0.0000001: #make zero == exactly to zero
adj_probe_fold_val = 0
adj_fold_dbase[probeset] = [adj_probe_fold_val]
try:
nonlog_NI_db[probeset].append(
exp_splice_valff
) ###ratio of junction exp relative to gene expression at that time-point
except KeyError:
nonlog_NI_db[probeset] = [exp_splice_valff]
n = 0
#if expr_to_subtract_non_log != baseline_const_exp_non_log: ###otherwise this is the first value in the expression array
if x != 0: ###previous expression can produce errors when multiple group averages have identical values
fold_change = expr_to_subtract_non_log / baseline_const_exp_non_log
fold_change_log = math.log(fold_change, 2)
constitutive_fold_change[affygene] = fold_change_log
### If we want to remove any genes from the analysis with large transcriptional changes
### that may lead to false positive splicing calls (different probeset kinetics)
if remove_diff_exp_genes == 'yes':
if abs(fold_change_log) > log_fold_cutoff:
del constitutive_fold_change[affygene]
try:
del adj_fold_dbase[probeset]
except KeyError:
n = 1
try:
del nonlog_NI_db[probeset]
except KeyError:
n = 1
"""elif expr_to_subtract_non_log == baseline_const_exp_non_log: ###This doesn't make sense, since n can't equal 1 if the conditional is false (check this code again later 11/23/07)
if n == 1:
del adj_fold_dbase[probeset]
del nonlog_NI_db[probeset]"""
x += 1
print "Intensity normalization complete..."
if factor_out_expression_changes == 'no':
adj_fold_dbase = fold_db #don't change expression values
print len(constitutive_fold_change), "genes undergoing analysis for alternative splicing/transcription"
summary_data_db['denominator_exp_genes'] = len(constitutive_fold_change)
"""
mir_gene_count = 0
for gene in constitutive_fold_change:
if gene in gene_microRNA_denom: mir_gene_count+=1
print mir_gene_count, "Genes with predicted microRNA binding sites undergoing analysis for alternative splicing/transcription"
"""
global gene_analyzed
gene_analyzed = len(constitutive_gene_db)
return adj_fold_dbase, nonlog_NI_db, conditions, gene_db, constitutive_gene_db, constitutive_fold_change, avg_const_exp_db
class TranscriptionData:
def __init__(self, constitutive_fold, rna_processing_annotation):
self._constitutive_fold = constitutive_fold
self._rna_processing_annotation = rna_processing_annotation
def ConstitutiveFold(self):
return self._constitutive_fold
def ConstitutiveFoldStr(self):
return str(self._constitutive_fold)
def RNAProcessing(self):
return self._rna_processing_annotation
def __repr__(self):
return self.ConstitutiveFoldStr() + '|' + self.RNAProcessing()
def constitutive_expression_changes(constitutive_fold_change, annotate_db):
###Add in constitutive fold change filter to assess gene expression for ASPIRE
gene_expression_diff_db = {}
for affygene in constitutive_fold_change:
constitutive_fold = constitutive_fold_change[affygene]
rna_processing_annotation = ''
if affygene in annotate_db:
if len(annotate_db[affygene].RNAProcessing()) > 4:
rna_processing_annotation = annotate_db[
affygene].RNAProcessing()
###Add in evaluation of RNA-processing/binding factor
td = TranscriptionData(constitutive_fold, rna_processing_annotation)
gene_expression_diff_db[affygene] = td
return gene_expression_diff_db
def constitutive_exp_normalization_raw(gene_db, constitutive_gene_db,
array_raw_group_values, exon_db, y,
avg_const_exp_db):
"""normalize expression for raw expression data (only for non-baseline data)"""
#avg_true_const_exp_db[affygene] = [avg_const_exp]
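### Appends, for each gene, the average constitutive expression of every sample in group y to avg_const_exp_db (and to a per-call temp dictionary used for the ANOVA ratios below)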
temp_avg_const_exp_db = {}
for probeset in array_raw_group_values:
conditions = len(array_raw_group_values[probeset][y])
break #number of raw expresson values to normalize
for affygene in gene_db:
###This is blank when there are no constitutive or the above condition is implemented
if affygene in constitutive_gene_db:
probeset_list = constitutive_gene_db[affygene]
z = 1
else: ###so we can analyze splicing independent of gene expression even if no 'gene' feature is present
probeset_list = gene_db[affygene]
z = 0
x = 0
while x < conditions:
### average all exp values for constitutive probesets for each condition
exp_list = []
for probeset in probeset_list:
try:
exp_val = array_raw_group_values[probeset][y][
x
] ### try statement is used for constitutive probes that were deleted due to filtering in performExpressionAnalysis
except KeyError:
continue
exp_list.append(exp_val)
try:
avg_const_exp = statistics.avg(exp_list)
except Exception:
avg_const_exp = 'null'
if only_include_constitutive_containing_genes == 'yes' and avg_const_exp != 'null':
if z == 1:
try:
avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
avg_const_exp_db[affygene] = [avg_const_exp]
try:
temp_avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
temp_avg_const_exp_db[affygene] = [avg_const_exp]
elif avg_const_exp != 'null': ###***
try:
avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
avg_const_exp_db[affygene] = [avg_const_exp]
try:
temp_avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
temp_avg_const_exp_db[affygene] = [avg_const_exp]
x += 1
if analysis_method == 'ANOVA':
global normalized_raw_exp_ratios
normalized_raw_exp_ratios = {}
for affygene in gene_db:
probeset_list = gene_db[affygene]
for probeset in probeset_list:
x = 0 ### sample index within this group
while x < group_size:
new_ratios = [
] ### Calculate expression ratios relative to constitutive expression
exp_val = array_raw_group_values[probeset][y][x]
const_exp_val = temp_avg_const_exp_db[affygene][x]
###Since the above dictionary is agglomerating all constitutive expression values for permutation,
###we need an unbiased way to grab just those relevant const. exp. vals. (hence the temp dictionary)
#non_log_exp_val = statistics.log_fold_conversion_fraction(exp_val)
#non_log_const_exp_val = statistics.log_fold_conversion_fraction(const_exp_val)
#non_log_exp_ratio = non_log_exp_val/non_log_const_exp_val
log_exp_ratio = exp_val - const_exp_val
try:
normalized_raw_exp_ratios[probeset].append(log_exp_ratio)
except KeyError:
normalized_raw_exp_ratios[probeset] = [log_exp_ratio]
x += 1
return avg_const_exp_db
######### Z Score Analyses #######
class ZScoreData:
def __init__(self, element, changed, measured, zscore, null_z,
gene_symbols):
self._element = element
self._changed = changed
self._measured = measured
self._zscore = zscore
self._null_z = null_z
self._gene_symbols = gene_symbols
def ElementID(self):
return self._element
def Changed(self):
return str(self._changed)
def Measured(self):
return str(self._measured)
def AssociatedWithElement(self):
return str(self._gene_symbols)
def ZScore(self):
return str(self._zscore)
def SetP(self, p):
self._permute_p = p
def PermuteP(self):
return str(self._permute_p)
def SetAdjP(self, adjp):
self._adj_p = adjp
def AdjP(self):
return str(self._adj_p)
def PercentChanged(self):
try:
pc = float(self.Changed()) / float(self.Measured()) * 100
except Exception:
pc = 0
return str(pc)
def NullZ(self):
return self._null_z
def Report(self):
output = self.ElementID()
return output
def __repr__(self):
return self.Report()
class FDRStats(ZScoreData):
def __init__(self, p):
self._permute_p = p
def AdjP(self):
return str(self._adj_p)
def countGenesForElement(permute_input_list, probeset_to_gene,
probeset_element_db):
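### Returns a dictionary mapping each element (e.g. a protein domain or microRNA binding site) to the number of unique genes represented among the input probesets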
element_gene_db = {}
for probeset in permute_input_list:
try:
element_list = probeset_element_db[probeset]
gene = probeset_to_gene[probeset]
for element in element_list:
try:
element_gene_db[element].append(gene)
except KeyError:
element_gene_db[element] = [gene]
except KeyError:
null = []
### Count the number of unique genes per element
for element in element_gene_db:
t = {}
for i in element_gene_db[element]:
t[i] = []
element_gene_db[element] = len(t)
return element_gene_db
def formatGeneSymbolHits(geneid_list):
symbol_list = []
for geneid in geneid_list:
symbol = ''
if geneid in annotate_db: symbol = annotate_db[geneid].Symbol()
if len(symbol) < 1: symbol = geneid
symbol_list.append(symbol)
symbol_str = string.join(symbol_list, ', ')
return symbol_str
def zscore(r, n, N, R):
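### Normal approximation for element over-representation: r = regulated genes with the element, n = measured genes with
### the element, R = all regulated genes, N = all measured genes; e.g. (hypothetical) r=8, n=20, R=100, N=1000 gives z ~ 4.5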
z = (r - n * (R / N)) / math.sqrt(n * (R / N) * (1 - (R / N)) * (1 - (
(n - 1) / (N - 1)))) #z = statistics.zscore(r,n,N,R)
return z
def calculateZScores(hit_count_db, denom_count_db, total_gene_denom_count,
total_gene_hit_count, element_type):
N = float(total_gene_denom_count) ###Genes examined
R = float(total_gene_hit_count) ###AS genes
for element in denom_count_db:
element_denom_gene_count = denom_count_db[element]
n = float(element_denom_gene_count
) ###all genes associated with element
if element in hit_count_db:
element_hit_gene_count = len(hit_count_db[element])
gene_symbols = formatGeneSymbolHits(hit_count_db[element])
r = float(element_hit_gene_count
) ###regulated genes associated with element
else:
r = 0
gene_symbols = ''
try:
z = zscore(r, n, N, R)
except Exception:
z = 0
#print 'error:',element,r,n,N,R; kill
try:
null_z = zscore(0, n, N, R)
except Exception:
null_z = 0
#print 'error:',element,r,n,N,R; kill
zsd = ZScoreData(element, r, n, z, null_z, gene_symbols)
if element_type == 'domain':
original_domain_z_score_data[element] = zsd
elif element_type == 'microRNA':
original_microRNA_z_score_data[element] = zsd
permuted_z_scores[element] = [z]
if perform_element_permutation_analysis == 'no':
### The below is an alternative to the permute t-statistic that is more efficient
p = FishersExactTest(r, n, R, N)
zsd.SetP(p)
return N, R
######### Begin Permutation Analysis #######
def calculatePermuteZScores(permute_element_inputs,
element_denominator_gene_count, N, R):
###Make this code as efficient as possible
for element_input_gene_count in permute_element_inputs:
for element in element_input_gene_count:
r = element_input_gene_count[element]
n = element_denominator_gene_count[element]
try:
z = statistics.zscore(r, n, N, R)
except Exception:
z = 0
permuted_z_scores[element].append(abs(z))
#if element == '0005488':
#a.append(r)
def calculatePermuteStats(original_element_z_score_data):
for element in original_element_z_score_data:
zsd = original_element_z_score_data[element]
z = abs(permuted_z_scores[element][0])
permute_scores = permuted_z_scores[
element][1:] ###Exclude the true value
nullz = zsd.NullZ()
if abs(nullz) == z: ###Only add the nullz values if they can count towards the p-value (if equal to the original z)
null_z_to_add = permutations - len(permute_scores)
permute_scores += [
abs(nullz)
] * null_z_to_add ###Add null_z's in proportion to the number of times no genes were found for that element
if len(permute_scores) > 0:
p = permute_p(permute_scores, z)
else:
p = 1
#if p>1: p=1
zsd.SetP(p)
def FishersExactTest(r, n, R, N):
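### 2x2 contingency table: [[regulated with element, measured-not-regulated with element], [regulated without element, not-regulated without element]]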
a = r
b = n - r
c = R - r
d = N - R - b
table = [[int(a), int(b)], [int(c), int(d)]]
try: ### SciPy version - cuts down runtime by ~1/3rd
oddsratio, pvalue = stats.fisher_exact(table)
return pvalue
except Exception:
ft = fishers_exact_test.FishersExactTest(table)
return ft.two_tail_p()
def adjustPermuteStats(original_element_z_score_data):
#1. Sort ascending the original input p value vector. Call this spval. Keep the original indices so you can sort back.
#2. Define a new vector called tmp. tmp= spval. tmp will contain the BH p values.
#3. m is the length of tmp (also spval)
#4. i=m-1
#5 tmp[ i ]=min(tmp[i+1], min((m/i)*spval[ i ],1)) - second to last, last, last/second to last
#6. i=m-2
#7 tmp[ i ]=min(tmp[i+1], min((m/i)*spval[ i ],1))
#8 repeat step 7 for m-3, m-4,... until i=1
#9. sort tmp back to the original order of the input p values.
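### The above describes the Benjamini-Hochberg step-up adjustment applied to each element's permute p-value
### e.g. (hypothetical) sorted p-values [0.01, 0.02, 0.04] with m=3 adjust to [0.03, 0.03, 0.04]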
spval = []
for element in original_element_z_score_data:
zsd = original_element_z_score_data[element]
p = float(zsd.PermuteP())
spval.append([p, element])
spval.sort()
tmp = spval
m = len(spval)
i = m - 2
x = 0 ###Step 1-4
while i > -1:
tmp[i] = min(tmp[i + 1][0], min(
(float(m) / (i + 1)) * spval[i][0], 1)), tmp[i][1]
i -= 1
for (adjp, element) in tmp:
zsd = original_element_z_score_data[element]
zsd.SetAdjP(adjp)
spval = []
def permute_p(null_list, true_value):
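### Empirical p-value: the fraction of permuted null scores (out of the total number of permutations) at least as large as the observed score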
y = 0
z = 0
x = permutations
for value in null_list:
if value >= true_value: y += 1
#if true_value > 8: global a; a = null_list; print true_value,y,x;kill
return (float(y) / float(x)) ###Multiply probability x2?
######### End Permutation Analysis #######
def exportZScoreData(original_element_z_score_data, element_type):
element_output = root_dir + 'AltResults/AlternativeOutput/' + dataset_name + analysis_method + '-' + element_type + '-zscores.txt'
data = export.ExportFile(element_output)
headers = [element_type + '-Name', 'Number Changed', 'Number Measured',
'Percent Changed', 'Zscore', 'PermuteP', 'AdjP',
'Changed GeneSymbols']
headers = string.join(headers, '\t') + '\n'
data.write(headers)
sort_results = []
#print "Results for",len(original_element_z_score_data),"elements exported to",element_output
for element in original_element_z_score_data:
zsd = original_element_z_score_data[element]
try:
results = [zsd.Changed(), zsd.Measured(), zsd.PercentChanged(),
zsd.ZScore(), zsd.PermuteP(), zsd.AdjP(),
zsd.AssociatedWithElement()]
except AttributeError:
print element, len(permuted_z_scores[element])
kill
results = [element] + results
results = string.join(results, '\t') + '\n'
sort_results.append([float(zsd.PermuteP()), -1 / float(zsd.Measured()),
results])
sort_results.sort()
for values in sort_results:
results = values[2]
data.write(results)
data.close()
def getInputsForPermutationAnalysis(exon_db):
### Filter fold_dbase, which is the proper denominator
probeset_to_gene = {}
denominator_list = []
for probeset in exon_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else: proceed = 'yes'
if proceed == 'yes':
gene = exon_db[probeset].GeneID()
probeset_to_gene[probeset] = gene
denominator_list.append(probeset)
return probeset_to_gene, denominator_list
def getJunctionSplicingAnnotations(regulated_exon_junction_db):
filter_status = 'yes'
########### Import critical exon annotation for junctions, build through the exon array analysis pipeline - link back to probesets
filtered_arrayids = {}
critical_probeset_annotation_db = {}
if array_type == 'RNASeq' and explicit_data_type == 'null':
critical_exon_annotation_file = root_dir + 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_exons.txt'
elif array_type == 'RNASeq' and explicit_data_type != 'null':
critical_exon_annotation_file = root_dir + 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_junctions.txt'
else:
critical_exon_annotation_file = "AltDatabase/" + species + "/" + array_type + "/" + species + "_Ensembl_" + array_type + "_probesets.txt"
critical_exon_annotation_file = filename = getFilteredFilename(
critical_exon_annotation_file)
for uid in regulated_exon_junction_db:
gene = regulated_exon_junction_db[uid].GeneID()
critical_exons = regulated_exon_junction_db[uid].CriticalExons()
"""### It appears that each critical exon for junction arrays can be a concatenation of multiple exons, making this unnecessary
if len(critical_exons)>1 and array_type == 'junction':
critical_exons_joined = string.join(critical_exons,'|')
filtered_arrayids[gene+':'+critical_exon].append(uid)"""
for critical_exon in critical_exons:
try:
try:
filtered_arrayids[gene + ':' + critical_exon].append(uid)
except TypeError:
print gene, critical_exon, uid
kill
except KeyError:
filtered_arrayids[gene + ':' + critical_exon] = [uid]
critical_exon_annotation_db = importSplicingAnnotationDatabase(
critical_exon_annotation_file, 'exon-fake', filtered_arrayids,
filter_status)
null = [
] ###The file is in exon centric format, so designate array_type as exon
for key in critical_exon_annotation_db:
ced = critical_exon_annotation_db[key]
for junction_probesets in filtered_arrayids[key]:
try:
critical_probeset_annotation_db[junction_probesets].append(
ced) ###use for splicing and Exon annotations
except KeyError:
critical_probeset_annotation_db[junction_probesets] = [ced]
for junction_probesets in critical_probeset_annotation_db:
if len(critical_probeset_annotation_db[
junction_probesets]) > 1: ###Thus multiple exons associated, must combine annotations
exon_ids = []
external_exonids = []
exon_regions = []
splicing_events = []
for ed in critical_probeset_annotation_db[junction_probesets]:
ensembl_gene_id = ed.GeneID()
transcript_cluster_id = ed.ExternalGeneID()
exon_ids.append(ed.ExonID())
external_exonids.append(ed.ExternalExonIDs())
exon_regions.append(ed.ExonRegionID())
se = string.split(ed.SplicingEvent(), '|')
for i in se:
splicing_events.append(i)
splicing_events = unique.unique(splicing_events
) ###remove duplicate entries
exon_id = string.join(exon_ids, '|')
external_exonid = string.join(external_exonids, '|')
exon_region = string.join(exon_regions, '|')
splicing_event = string.join(splicing_events, '|')
probe_data = AffyExonSTData(ensembl_gene_id, exon_id,
external_exonid, '', exon_region,
splicing_event, '', '')
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(transcript_cluster_id)
critical_probeset_annotation_db[junction_probesets] = probe_data
else:
critical_probeset_annotation_db[
junction_probesets] = critical_probeset_annotation_db[
junction_probesets][0]
return critical_probeset_annotation_db
def determineExternalType(external_probeset_db):
external_probeset_db2 = {}
if 'TC' in external_probeset_db:
temp_index = {}
i = 0
type = 'JETTA'
for name in external_probeset_db['TC'][0]:
temp_index[name] = i
i += 1
if 'PS:norm_expr_fold_change' in temp_index:
NI_fold_index = temp_index['PS:norm_expr_fold_change']
if 'MADS:pv_1over2' in temp_index:
MADS_p1_index = temp_index['MADS:pv_1over2']
if 'MADS:pv_2over1' in temp_index:
MADS_p2_index = temp_index['MADS:pv_2over1']
if 'TC:expr_fold_change' in temp_index:
MADS_p2_index = temp_index['MADS:pv_2over1']
if 'PsId' in temp_index: ps_index = temp_index['PsId']
for tc in external_probeset_db:
for list in external_probeset_db[tc]:
try:
NI_fold = float(list[NI_fold_index])
except Exception:
NI_fold = 1
try:
MADSp1 = float(list[MADS_p1_index])
except Exception:
MADSp1 = 1
try:
MADSp2 = float(list[MADS_p2_index])
except Exception:
MADSp2 = 1
if MADSp1 < MADSp2: pval = MADSp1
else: pval = MADSp2
probeset = list[ps_index]
external_probeset_db2[probeset] = NI_fold, pval
else:
type = 'generic'
a = []
b = []
for id in external_probeset_db:
#print external_probeset_db[id]
try:
a.append(abs(float(external_probeset_db[id][0][0])))
except Exception:
null = []
try:
b.append(abs(float(external_probeset_db[id][0][1])))
except Exception:
null = []
a.sort()
b.sort()
pval_index = None
score_index = None
if len(a) > 0:
if max(a) > 1: score_index = 0
else: pval_index = 0
if len(b) > 0:
if max(b) > 1: score_index = 1
else: pval_index = 1
for id in external_probeset_db:
if score_index != None:
score = external_probeset_db[id][0][score_index]
else:
score = 1
if pval_index != None:
pval = external_probeset_db[id][0][pval_index]
else:
pval = 1
external_probeset_db2[id] = score, pval
return external_probeset_db2, type
def importExternalProbesetData(dataset_dir):
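    # Import externally computed splicing scores/p-values, map them onto the probesets in
    # exon_db, and package each hit as an ExonData object in splice_event_list; probesets
    # without an external score are stored in excluded_probeset_db (used by DomainGraph).
    # (Summary inferred from the code below.)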
excluded_probeset_db = {}
splice_event_list = []
p_value_call = {}
permute_p_values = {}
gene_expression_diff_db = {}
analyzed_probeset_db = {}
external_probeset_db = importExternalDBList(dataset_dir)
external_probeset_db, ext_type = determineExternalType(
external_probeset_db)
for probeset in exon_db:
analyzed_probeset_db[probeset] = []
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db) > 0:
temp_db = {}
for probeset in analyzed_probeset_db:
temp_db[probeset] = []
for probeset in temp_db:
try:
filtered_probeset_db[probeset]
except KeyError:
del analyzed_probeset_db[probeset]
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing annotation)
if filter_for_AS == 'yes':
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try:
del analyzed_probeset_db[probeset]
except KeyError:
null = []
for probeset in analyzed_probeset_db:
ed = exon_db[probeset]
geneid = ed.GeneID()
td = TranscriptionData('', '')
gene_expression_diff_db[geneid] = td
if probeset in external_probeset_db:
exonid = ed.ExonID()
critical_exon_list = [1, [exonid]]
splicing_index, normIntensityP = external_probeset_db[probeset]
group1_ratios = []
group2_ratios = []
exp_log_ratio = ''
ttest_exp_p = ''
normIntensityP = ''
opposite_SI_log_mean = ''
sid = ExonData(splicing_index, probeset, critical_exon_list,
geneid, group1_ratios, group2_ratios,
normIntensityP, opposite_SI_log_mean)
splice_event_list.append((splicing_index, sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(0, geneid, 'NA')
excluded_probeset_db[probeset] = eed
print len(splice_event_list), 'pre-filtered external results imported...\n'
return splice_event_list, p_value_call, permute_p_values, excluded_probeset_db, gene_expression_diff_db
def splicingAnalysisAlgorithms(nonlog_NI_db, fold_dbase, dataset_name,
gene_expression_diff_db, exon_db, ex_db, si_db,
dataset_dir):
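    # High-level flow of this function (summarized from the code below): (1) obtain the
    # significant splicing events from the selected algorithm (external results,
    # ASPIRE/linearregres, splicing-index or FIRMA), (2) annotate them with protein domain
    # and microRNA binding-site data, (3) run element over-representation (z-score and
    # optional permutation) analyses, and (4) export exon/junction-level and gene-level
    # result files.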
protein_exon_feature_db = {}
global regulated_exon_junction_db
global critical_exon_annotation_db
global probeset_comp_db
probeset_comp_db = {}
if original_conditions == 2:
print "Beginning to run", analysis_method, "algorithm on", dataset_name[
0:-1], "data"
if run_from_scratch == 'Annotate External Results':
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db, gene_expression_diff_db = importExternalProbesetData(
dataset_dir)
elif analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
original_exon_db = exon_db
if original_conditions > 2:
splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db = ex_db
splice_event_list, p_value_call, permute_p_values, exon_db, regulated_exon_junction_db = furtherProcessJunctionScores(
splice_event_list, probeset_comp_db, permute_p_values)
else:
splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db = analyzeJunctionSplicing(
nonlog_NI_db)
splice_event_list, p_value_call, permute_p_values, exon_db, regulated_exon_junction_db = furtherProcessJunctionScores(
splice_event_list, probeset_comp_db, permute_p_values)
elif analysis_method == 'splicing-index':
regulated_exon_junction_db = {}
if original_conditions > 2:
excluded_probeset_db = ex_db
splice_event_list = si_db
clearObjectsFromMemory(ex_db)
clearObjectsFromMemory(si_db)
ex_db = []
si_db = []
permute_p_values = {}
p_value_call = ''
else:
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db = analyzeSplicingIndex(
fold_dbase)
elif analysis_method == 'FIRMA':
regulated_exon_junction_db = {}
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db = FIRMAanalysis(
fold_dbase)
global permuted_z_scores
permuted_z_scores = {}
global original_domain_z_score_data
original_domain_z_score_data = {}
global original_microRNA_z_score_data
original_microRNA_z_score_data = {}
nonlog_NI_db = [] ### Clear memory of this large dictionary
try:
clearObjectsFromMemory(original_avg_const_exp_db)
clearObjectsFromMemory(array_raw_group_values)
except Exception:
null = []
try:
clearObjectsFromMemory(avg_const_exp_db)
except Exception:
null = []
try:
clearObjectsFromMemory(alt_junction_db)
except Exception:
null = []
try:
clearObjectsFromMemory(fold_dbase)
fold_dbase = []
except Exception:
null = []
microRNA_full_exon_db, microRNA_count_db, gene_microRNA_denom = ExonAnalyze_module.importmicroRNADataExon(
species, array_type, exon_db, microRNA_prediction_method,
explicit_data_type, root_dir)
#print "MicroRNA data imported"
if use_direct_domain_alignments_only == 'yes':
protein_ft_db_len, domain_associated_genes = importProbesetAligningDomains(
exon_db, 'gene')
else:
protein_ft_db_len, domain_associated_genes = importProbesetProteinCompDomains(
exon_db, 'gene', 'exoncomp')
if perform_element_permutation_analysis == 'yes':
probeset_to_gene, denominator_list = getInputsForPermutationAnalysis(
exon_db)
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
exon_gene_array_translation_file = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_' + array_type + '-exon_probesets.txt'
try:
exon_array_translation_db = importGeneric(
exon_gene_array_translation_file)
except Exception:
exon_array_translation_db = {} ### Not present for all species
exon_hits = {}
clearObjectsFromMemory(probeset_comp_db)
probeset_comp_db = []
    ###Run analyses in ExonAnalyze_module to assess functional changes
for (score, ed) in splice_event_list:
geneid = ed.GeneID()
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
pl = string.split(ed.Probeset1(), '|')
probeset1 = pl[0] ### When agglomerated, this is important
uid = (probeset1, ed.Probeset2())
else:
uid = ed.Probeset1()
gene_exon = geneid, uid
exon_hits[gene_exon] = ed
#print probeset1,ed.Probeset1(),ed.Probeset2(),gene_exon,ed.CriticalExons()
dataset_name_original = analysis_method + '-' + dataset_name[8:-1]
global functional_attribute_db
global protein_features
### Possibly Block-out code for DomainGraph export
    ########### Re-import the exon_db for significant entries with full annotations
exon_db = {}
filtered_arrayids = {}
    filter_status = 'yes' ###Use this as a means to save memory (import multiple times - only storing different types of relevant information)
for (score, entry) in splice_event_list:
try:
probeset = original_exon_db[entry.Probeset1()].Probeset()
except Exception:
probeset = entry.Probeset1()
pl = string.split(probeset, '|')
probeset = pl[0]
filtered_arrayids[probeset] = [
] ### When agglomerated, this is important
if array_type == 'AltMouse' or (
(array_type == 'junction' or
array_type == 'RNASeq') and explicit_data_type == 'null'):
try:
probeset = entry.Probeset2()
filtered_arrayids[probeset] = []
except AttributeError:
null = [] ###occurs when running Splicing
exon_db = importSplicingAnnotationDatabase(probeset_annotations_file,
array_type, filtered_arrayids,
filter_status)
null = [
] ###replace existing exon_db (probeset_annotations_file should be a global)
###domain_gene_changed_count_db is the number of genes for each domain that are found for regulated probesets
if array_type == 'AltMouse' or (
(array_type == 'junction' or
array_type == 'RNASeq') and explicit_data_type == 'null'):
if use_direct_domain_alignments_only == 'yes':
protein_features, domain_gene_changed_count_db, functional_attribute_db = importProbesetAligningDomains(
regulated_exon_junction_db, 'probeset')
else:
protein_features, domain_gene_changed_count_db, functional_attribute_db = importProbesetProteinCompDomains(
regulated_exon_junction_db, 'probeset', 'exoncomp')
else:
if use_direct_domain_alignments_only == 'yes':
protein_features, domain_gene_changed_count_db, functional_attribute_db = importProbesetAligningDomains(
exon_db, 'probeset')
else:
protein_features, domain_gene_changed_count_db, functional_attribute_db = importProbesetProteinCompDomains(
exon_db, 'probeset', 'exoncomp')
filtered_microRNA_exon_db = ExonAnalyze_module.filterMicroRNAProbesetAssociations(
microRNA_full_exon_db, exon_hits)
microRNA_full_exon_db = []
###add microRNA data to functional_attribute_db
microRNA_hit_gene_count_db = {}
all_microRNA_gene_hits = {}
microRNA_attribute_db = {}
probeset_mirBS_db = {}
for (
affygene, uid
) in filtered_microRNA_exon_db: ###example ('G7091354', 'E20|') [('hsa-miR-130a', 'Pbxip1'), ('hsa-miR-130a', 'Pbxip1'
###3-1-08
miR_list = []
microRNA_symbol_list = filtered_microRNA_exon_db[(affygene, uid)]
for mir_key in microRNA_symbol_list:
microRNA, gene_symbol, miR_seq, miR_sources = mir_key
#if 'ENS' in microRNA: print microRNA; kill ### bug in some miRNA annotations introduced in the build process
specific_microRNA_tuple = (microRNA, '~')
try:
microRNA_hit_gene_count_db[microRNA].append(affygene)
except KeyError:
microRNA_hit_gene_count_db[microRNA] = [affygene]
###Create a database with the same structure as "protein_exon_feature_db"(below) for over-representation analysis (direction specific), after linking up splice direction data
try:
microRNA_attribute_db[(affygene, uid)].append(
specific_microRNA_tuple)
except KeyError:
microRNA_attribute_db[(affygene, uid)] = [
specific_microRNA_tuple
]
miR_data = microRNA + ':' + miR_sources
miR_list.append(miR_data) ###Add miR information to the record
function_type = (
'miR-sequence: ' + '(' + miR_data + ')' + miR_seq, '~'
) ###Add miR sequence information to the sequence field of the report
try:
functional_attribute_db[(affygene, uid)].append(function_type)
except KeyError:
functional_attribute_db[(affygene, uid)] = [function_type]
#print (affygene,uid), [function_type];kill
if perform_element_permutation_analysis == 'yes':
try:
probeset_mirBS_db[uid].append(microRNA)
except KeyError:
probeset_mirBS_db[uid] = [microRNA]
miR_str = string.join(miR_list, ',')
miR_str = '(' + miR_str + ')'
function_type = ('microRNA-target' + miR_str, '~')
try:
functional_attribute_db[(affygene, uid)].append(function_type)
except KeyError:
functional_attribute_db[(affygene, uid)] = [function_type]
all_microRNA_gene_hits[affygene] = []
###Replace the gene list for each microRNA hit with count data
microRNA_hit_gene_count_db = eliminate_redundant_dict_values(
microRNA_hit_gene_count_db)
    ###Combine any additional feature alignment info identified from 'ExonAnalyze_module.characterizeProteinLevelExonChanges' (e.g. from Ensembl or junction-based queries rather than exon-specific)
    ###with this database of (Gene,Exon)=[(functional element 1,'~'),(functional element 2,'~')] for downstream result file annotations
domain_hit_gene_count_db = {}
all_domain_gene_hits = {}
probeset_domain_db = {}
for entry in protein_features:
gene, uid = entry
for data_tuple in protein_features[entry]:
domain, call = data_tuple
try:
protein_exon_feature_db[entry].append(data_tuple)
except KeyError:
protein_exon_feature_db[entry] = [data_tuple]
try:
domain_hit_gene_count_db[domain].append(gene)
except KeyError:
domain_hit_gene_count_db[domain] = [gene]
all_domain_gene_hits[gene] = []
if perform_element_permutation_analysis == 'yes':
try:
probeset_domain_db[uid].append(domain)
except KeyError:
probeset_domain_db[uid] = [domain]
protein_features = []
domain_gene_changed_count_db = []
###Replace the gene list for each microRNA hit with count data
domain_hit_gene_count_db = eliminate_redundant_dict_values(
domain_hit_gene_count_db)
############ Perform Element Over-Representation Analysis ############
"""Domain/FT Fishers-Exact test: with "protein_exon_feature_db" (transformed to "domain_hit_gene_count_db") we can analyze over-representation of domain/features WITHOUT taking into account exon-inclusion or exclusion
Do this using: "domain_associated_genes", which contains domain tuple ('Tyr_pkinase', 'IPR001245') as a key and count in unique genes as the value in addition to
Number of genes linked to splice events "regulated" (SI and Midas p<0.05), number of genes with constitutive probesets
MicroRNA Fishers-Exact test: "filtered_microRNA_exon_db" contains gene/exon to microRNA data. For each microRNA, count the representation in spliced genes microRNA (unique gene count - make this from the mentioned file)
Do this using: "microRNA_count_db"""
domain_gene_counts = {} ### Get unique gene counts for each domain
for domain in domain_associated_genes:
domain_gene_counts[domain] = len(domain_associated_genes[domain])
total_microRNA_gene_hit_count = len(all_microRNA_gene_hits)
total_microRNA_gene_denom_count = len(gene_microRNA_denom)
Nm, Rm = calculateZScores(microRNA_hit_gene_count_db, microRNA_count_db,
total_microRNA_gene_denom_count,
total_microRNA_gene_hit_count, 'microRNA')
gene_microRNA_denom = []
summary_data_db['miRNA_gene_denom'] = total_microRNA_gene_denom_count
summary_data_db['miRNA_gene_hits'] = total_microRNA_gene_hit_count
summary_data_db['alt_events'] = len(splice_event_list)
total_domain_gene_hit_count = len(all_domain_gene_hits)
total_domain_gene_denom_count = protein_ft_db_len ###genes connected to domain annotations
Nd, Rd = calculateZScores(domain_hit_gene_count_db, domain_gene_counts,
total_domain_gene_denom_count,
total_domain_gene_hit_count, 'domain')
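    # Nm/Rm and Nd/Rd are the totals returned by calculateZScores (presumably the denominator
    # and hit gene counts for miRNAs and domains respectively); they are reused below when
    # re-scoring the permuted input sets.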
microRNA_hit_gene_counts = {}
gene_to_miR_db = {
} ### Get unique gene counts for each miR and the converse
for microRNA in microRNA_hit_gene_count_db:
microRNA_hit_gene_counts[microRNA] = len(microRNA_hit_gene_count_db[
microRNA])
for gene in microRNA_hit_gene_count_db[microRNA]:
try:
gene_to_miR_db[gene].append(microRNA)
except KeyError:
gene_to_miR_db[gene] = [microRNA]
gene_to_miR_db = eliminate_redundant_dict_values(gene_to_miR_db)
if perform_element_permutation_analysis == 'yes':
###Begin Domain/microRNA Permute Analysis
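        # Permutation scheme (as implemented below): randomly draw the same number of
        # probesets as were called regulated from the full denominator set, re-count the
        # genes hit per domain and per miRNA binding site, and repeat 'permutations' times
        # to build empirical null distributions for the observed z-scores.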
input_count = len(
splice_event_list
) ### Number of probesets or probeset pairs (junction array) alternatively regulated
original_increment = int(permutations / 20)
increment = original_increment
start_time = time.time()
print 'Permuting the Domain/miRBS analysis %d times' % permutations
x = 0
permute_domain_inputs = []
permute_miR_inputs = []
while x < permutations:
if x == increment:
increment += original_increment
print '*',
permute_input_list = random.sample(denominator_list, input_count)
x += 1
permute_domain_input_gene_counts = countGenesForElement(
permute_input_list, probeset_to_gene, probeset_domain_db)
permute_domain_inputs.append(permute_domain_input_gene_counts)
permute_miR_input_gene_counts = countGenesForElement(
permute_input_list, probeset_to_gene, probeset_mirBS_db)
permute_miR_inputs.append(permute_miR_input_gene_counts)
calculatePermuteZScores(permute_domain_inputs, domain_gene_counts, Nd,
Rd)
calculatePermuteZScores(permute_miR_inputs, microRNA_hit_gene_counts,
Nm, Rm)
calculatePermuteStats(original_domain_z_score_data)
calculatePermuteStats(original_microRNA_z_score_data)
adjustPermuteStats(original_domain_z_score_data)
adjustPermuteStats(original_microRNA_z_score_data)
exportZScoreData(original_domain_z_score_data, 'ft-domain')
exportZScoreData(original_microRNA_z_score_data, 'microRNA')
end_time = time.time()
time_diff = int(end_time - start_time)
print "Enrichment p-values for Domains/miRBS calculated in %d seconds" % time_diff
denominator_list = []
try:
clearObjectsFromMemory(original_microRNA_z_score_data)
except Exception:
null = []
microRNA_hit_gene_count_db = {}
microRNA_hit_gene_counts = {}
clearObjectsFromMemory(permuted_z_scores)
permuted_z_scores = []
original_domain_z_score_data = []
if (array_type == 'AltMouse' or
((array_type == 'junction' or
array_type == 'RNASeq') and explicit_data_type ==
'null')) and analysis_method != 'splicing-index':
critical_probeset_annotation_db = getJunctionSplicingAnnotations(
regulated_exon_junction_db)
probeset_aligning_db = importProbesetAligningDomains(
regulated_exon_junction_db, 'perfect_match')
else:
probeset_aligning_db = importProbesetAligningDomains(exon_db,
'perfect_match')
############ Export exon/junction level results ############
splice_event_db = {}
protein_length_list = []
aspire_gene_results = {}
critical_gene_exons = {}
unique_exon_event_db = {}
comparison_count = {}
direct_domain_gene_alignments = {}
functional_attribute_db2 = {}
protein_exon_feature_db2 = {}
microRNA_exon_feature_db2 = {}
external_exon_annot = {}
gene_exon_region = {}
gene_smallest_p = {}
gene_splice_event_score = {}
alternatively_reg_tc = {}
aspire_output = root_dir + 'AltResults/AlternativeOutput/' + dataset_name + analysis_method + '-exon-inclusion-results.txt'
data = export.ExportFile(aspire_output)
goelite_output = root_dir + 'GO-Elite/AltExon/AS.' + dataset_name + analysis_method + '.txt'
goelite_data = export.ExportFile(goelite_output)
gcn = 0
#print 'LENGTH OF THE GENE ANNOTATION DATABASE',len(annotate_db)
if array_type != 'AltMouse':
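        # For non-AltMouse platforms, several companion files are written alongside the main
        # results: a DomainGraph input, inclusion- and exclusion-specific hit lists plus a
        # denominator list (the SRFinder_* exports under GO-Elite/), and a ProcessedSpliceData
        # file that mirrors the DomainGraph export without probeset ID conversion.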
DG_output = root_dir + 'AltResults/DomainGraph/' + dataset_name + analysis_method + '-DomainGraph.txt'
DG_data = export.ExportFile(DG_output)
### Write out only the inclusion hits to a subdir
SRFinder_inclusion = root_dir + 'GO-Elite/exon/' + dataset_name + analysis_method + '-inclusion.txt'
SRFinder_in_data = export.ExportFile(SRFinder_inclusion)
SRFinder_in_data.write('probeset\tSystemCode\tdeltaI\tp-value\n')
### Write out only the exclusion hits to a subdir
SRFinder_exclusion = root_dir + 'GO-Elite/exon/' + dataset_name + analysis_method + '-exclusion.txt'
SRFinder_ex_data = export.ExportFile(SRFinder_exclusion)
SRFinder_ex_data.write('probeset\tSystemCode\tdeltaI\tp-value\n')
### Write out only the denominator set to a subdir
SRFinder_denom = root_dir + 'GO-Elite/exon_denominator/' + species + '-' + array_type + '.txt'
SRFinder_denom_data = export.ExportFile(SRFinder_denom)
SRFinder_denom_data.write('probeset\tSystemCode\n')
ens_version = unique.getCurrentGeneDatabaseVersion()
ProcessedSpliceData_output = string.replace(
DG_output, 'DomainGraph', 'ProcessedSpliceData'
) ### This is the same as the DG export but without converting the probeset IDs for non-exon arrays
ProcessedSpliceData_data = export.ExportFile(
ProcessedSpliceData_output)
if ens_version == '':
try:
elite_db_versions = UI.returnDirectoriesNoReplace(
'/AltDatabase')
if len(elite_db_versions) > 0:
ens_version = elite_db_versions[0]
except Exception:
null = []
ens_version = string.replace(ens_version, 'EnsMart', 'ENS_')
DG_data.write(ens_version + "\n")
DG_data.write(
"Probeset\tGeneID\tRegulation call\tSI\tSI p-value\tMiDAS p-value\n")
ProcessedSpliceData_data.write("ExonID(s)\tGeneID\tRegulation call\t" +
analysis_method + "\t" + analysis_method
+ " p-value\tMiDAS p-value\n")
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
if perform_permutation_analysis == 'yes':
p_value_type = 'permutation-values'
else:
p_value_type = 'FDR-' + p_value_call
if array_type == 'AltMouse':
gene_name = 'AffyGene'
extra_transcript_annotation = 'block_structure'
extra_exon_annotation = 'splice_event_description'
if array_type == 'junction' or array_type == 'RNASeq':
gene_name = 'Ensembl'
extra_transcript_annotation = 'transcript cluster ID'
extra_exon_annotation = 'distal exon-region-ID'
goelite_data.write(
"GeneID\tSystemCode\tscore\tp-value\tSymbol\tExonIDs\n")
if array_type == 'RNASeq':
id1 = 'junctionID-1'
id2 = 'junctionID-2'
loc_column = 'exon/junction locations'
extra_transcript_annotation = 'Known/Novel Feature'
else:
id1 = 'probeset1'
id2 = 'probeset2'
loc_column = 'probeset locations'
title = [gene_name, analysis_method, 'symbol', 'description', 'exons1',
'exons2', 'regulation_call', 'event_call', id1, 'norm-p1',
id2, 'norm-p2', 'fold1', 'fold2']
title += ['adj-fold1', 'adj-fold2', extra_transcript_annotation,
'critical_up_exons', 'critical_down_exons',
'functional_prediction', 'uniprot-ens_feature_predictions']
title += ['peptide_predictions', 'exp1', 'exp2',
'ens_overlapping_domains', 'constitutive_baseline_exp',
p_value_call, p_value_type, 'permutation-false-positives']
title += ['gene-expression-change', extra_exon_annotation,
'ExternalExonIDs', 'ExonRegionID', 'SplicingEvent',
'ExonAnnotationScore', 'large_splicing_diff', loc_column]
else:
goelite_data.write(
"GeneID\tSystemCode\tSI\tSI p-value\tMiDAS p-value\tSymbol\tExonID\n")
if analysis_method == 'splicing-index':
NIpval = 'SI_rawp'
splicing_score = 'Splicing-Index'
lowestp = 'lowest_p (MIDAS or SI)'
AdjPcolumn = 'Deviation-Value'
#AdjPcolumn = 'SI_adjp'
else:
NIpval = 'FIRMA_rawp'
splicing_score = 'FIRMA_fold'
lowestp = 'lowest_p (MIDAS or FIRMA)'
AdjPcolumn = 'Deviation-Value'
#AdjPcolumn = 'FIRMA_adjp'
if array_type == 'RNASeq':
id1 = 'junctionID'
pval_column = 'junction p-value'
loc_column = 'junction location'
else:
id1 = 'probeset'
pval_column = 'probeset p-value'
loc_column = 'probeset location'
if array_type == 'RNASeq': secondary_ID_title = 'Known/Novel Feature'
else: secondary_ID_title = 'alternative gene ID'
title = ['Ensembl', splicing_score, 'symbol', 'description', 'exons',
'regulation_call', id1, pval_column, lowestp, 'midas p-value',
'fold', 'adjfold']
title += ['up_exons', 'down_exons', 'functional_prediction',
'uniprot-ens_feature_predictions', 'peptide_predictions',
'ens_overlapping_domains', 'baseline_probeset_exp']
title += ['constitutive_baseline_exp', NIpval, AdjPcolumn,
'gene-expression-change']
        title += [secondary_ID_title, 'ensembl exons', 'constitutive exon',
'exon-region-ID', 'exon annotations',
'distal exon-region-ID', loc_column]
title = string.join(title, '\t') + '\n'
try:
if original_conditions > 2:
title = string.replace(title, 'regulation_call',
'conditions_compared')
except Exception:
null = []
data.write(title)
### Calculate adjusted normalized intensity p-values
fdr_exon_stats = {}
if analysis_method != 'ASPIRE' and 'linearregres' not in analysis_method:
for (score, entry
) in splice_event_list: ### These are all "significant entries"
fds = FDRStats(entry.TTestNormalizedRatios())
fdr_exon_stats[entry.Probeset1()] = fds
for probeset in excluded_probeset_db: ### These are all "non-significant entries"
fds = FDRStats(excluded_probeset_db[
probeset].TTestNormalizedRatios())
fdr_exon_stats[probeset] = fds
try:
adjustPermuteStats(fdr_exon_stats)
except Exception:
null = []
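    # fdr_exon_stats now holds FDRStats objects keyed by probeset; adjustPermuteStats is
    # presumed to attach a multiple-testing adjusted p-value (retrieved later via AdjP())
    # to each entry. If the adjustment fails, the raw values are left untouched.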
    ### Calculate the score average and stdev for each gene, used later to derive a Deviation Value
gene_deviation_db = {}
for (score, entry) in splice_event_list:
dI = entry.Score()
geneID = entry.GeneID()
try:
gene_deviation_db[geneID].append(dI)
except Exception:
gene_deviation_db[geneID] = [dI]
for i in excluded_probeset_db:
entry = excluded_probeset_db[i]
try:
dI = entry.Score()
geneID = entry.GeneID()
except Exception:
geneID = entry[1]
dI = entry[-1]
try:
gene_deviation_db[geneID].append(dI)
except Exception:
None ### Don't include genes with no hits
for geneID in gene_deviation_db:
try:
avg_dI = statistics.avg(gene_deviation_db[geneID])
stdev_dI = statistics.stdev(gene_deviation_db[geneID])
gene_deviation_db[geneID] = avg_dI, stdev_dI
except Exception:
gene_deviation_db[geneID] = 'NA', 'NA'
event_count = 0
for (score, entry) in splice_event_list:
event_count += 1
dI = entry.Score()
probeset1 = entry.Probeset1()
regulation_call = entry.RegulationCall()
event_call = entry.EventCall()
critical_exon_list = entry.CriticalExonTuple()
probeset1_display = probeset1
selected_probeset = probeset1
if agglomerate_inclusion_probesets == 'yes':
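            # When inclusion probesets were agglomerated, probeset1 is a composite ID; map it
            # back to the representative probeset stored in original_exon_db so the expression
            # and annotation lookups below use a real probeset (inferred from the branches below).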
if array_type == 'AltMouse':
exons1 = original_exon_db[probeset1].ExonID()
try:
probeset1 = original_exon_db[probeset1].Probeset()
except Exception:
null = []
else:
probeset1 = probeset1
exons1 = original_exon_db[probeset1].ExonID()
try:
selected_probeset = original_exon_db[probeset1].Probeset()
except Exception:
selected_probeset = probeset1
else:
try:
exons1 = exon_db[probeset1].ExonID()
except Exception:
print probeset1, len(exon_db)
for i in exon_db:
print i
break
kill
critical_probeset_list = [selected_probeset]
affygene = entry.GeneID()
### Calculate deviation value for each exon
avg_dI, stdev_dI = gene_deviation_db[affygene]
try:
DV = deviation(
dI, avg_dI, stdev_dI
) ### Note: the dI values are always in log2 space, independent of platform
except Exception:
DV = 'NA'
if affygene in annotate_db:
description = annotate_db[affygene].Description()
symbol = annotate_db[affygene].Symbol()
else:
description = ''
symbol = ''
ped1 = entry.ProbesetExprData1()
adjfold1 = ped1.AdjFold()
exp1 = ped1.BaselineExp()
fold1 = ped1.FoldChange()
rawp1 = ped1.ExpPval()
### Get Constitutive expression values
baseline_const_exp = entry.ConstitutiveExpression(
        ) ### For multiple group comparisons
#if affygene in gene_expression_diff_db: mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
try:
mean_fold_change = str(
entry.ConstitutiveFold()
) ### For multi-condition analyses, the gene expression is dependent on the conditions compared
except Exception:
mean_fold_change = gene_expression_diff_db[
affygene].ConstitutiveFoldStr()
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
probeset2 = entry.Probeset2()
exons2 = exon_db[probeset2].ExonID()
rawp1 = str(entry.TTestNormalizedRatios())
rawp2 = str(entry.TTestNormalizedRatios2())
critical_probeset_list.append(probeset2)
ped2 = entry.ProbesetExprData2()
adjfold2 = ped2.AdjFold()
exp2 = ped2.BaselineExp()
fold2 = ped2.FoldChange()
try:
location_summary = original_exon_db[
selected_probeset].LocationSummary(
) + '|' + original_exon_db[probeset2].LocationSummary()
except Exception:
try:
location_summary = exon_db[
selected_probeset].LocationSummary() + '|' + exon_db[
probeset2].LocationSummary()
except Exception:
location_summary = ''
if array_type == 'AltMouse':
extra_transcript_annotation = exon_db[probeset1].GeneStructure(
)
else:
try:
extra_exon_annotation = last_exon_region_db[affygene]
except KeyError:
extra_exon_annotation = ''
try:
tc1 = original_exon_db[probeset1].SecondaryGeneID()
tc2 = original_exon_db[probeset2].SecondaryGeneID(
) ### Transcript Cluster
probeset_tc = makeUnique([tc1, tc2])
extra_transcript_annotation = string.join(probeset_tc, '|')
try:
alternatively_reg_tc[affygene] += probeset_tc
except KeyError:
alternatively_reg_tc[affygene] = probeset_tc
except Exception:
extra_transcript_annotation = ''
if array_type == 'RNASeq':
try:
extra_transcript_annotation = entry.NovelEvent(
) ### Instead of secondary gene ID, list known vs. novel reciprocal junction annotation
except Exception:
None
exp_list = [float(exp1), float(exp2), float(exp1) + float(fold1),
float(exp2) + float(fold2)]
exp_list.sort()
exp_list.reverse()
probeset_tuple = (probeset1, probeset2)
else:
try:
exp_list = [float(exp1), float(exp1) + float(fold1)]
exp_list.sort()
exp_list.reverse()
except Exception:
exp_list = ['']
probeset_tuple = (probeset1)
highest_exp = exp_list[0]
        ###Use permuted p-value or lowest expression junction p-value based on the situation
###This p-value is used to filter out aspire events for further analyses
if len(p_value_call) > 0:
if probeset_tuple in permute_p_values:
lowest_raw_p, pos_permute, total_permute, false_pos = permute_p_values[
probeset_tuple]
else:
lowest_raw_p = "NA"
pos_permute = "NA"
total_permute = "NA"
false_pos = "NA"
else:
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
raw_p_list = [
entry.TTestNormalizedRatios(),
entry.TTestNormalizedRatios2()
] #raw_p_list = [float(rawp1),float(rawp2)]; raw_p_list.sort()
else:
try:
raw_p_list = [
float(entry.TTestNormalizedRatios())
] ###Could also be rawp1, but this is more appropriate
except Exception:
raw_p_list = [1] ### Occurs when p='NA'
raw_p_list.sort()
lowest_raw_p = raw_p_list[0]
pos_permute = "NA"
total_permute = "NA"
false_pos = "NA"
if perform_permutation_analysis == 'yes':
p_value_extra = str(pos_permute) + ' out of ' + str(total_permute)
else:
p_value_extra = str(pos_permute)
up_exons = ''
down_exons = ''
up_exon_list = []
down_exon_list = []
gene_exon_list = []
exon_data = critical_exon_list
variable = exon_data[0]
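        # Direction assignment (per the branches below): when a single critical exon set is
        # flagged (variable == 1) the exons inherit the overall regulation call; otherwise the
        # event is a reciprocal junction pair and the sign of adjfold1 decides which of the two
        # critical exons is treated as up- versus down-regulated.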
if variable == 1 and regulation_call == 'upregulated':
for exon in exon_data[1]:
up_exons = up_exons + exon + ','
up_exon_list.append(exon)
key = affygene, exon + '|'
gene_exon_list.append(key)
elif variable == 1 and regulation_call == 'downregulated':
for exon in exon_data[1]:
down_exons = down_exons + exon + ','
down_exon_list.append(exon)
key = affygene, exon + '|'
gene_exon_list.append(key)
else:
try:
exon1 = exon_data[1][0]
exon2 = exon_data[1][1]
except Exception:
print exon_data
kill
if adjfold1 > 0:
up_exons = up_exons + exon1 + ','
down_exons = down_exons + exon2 + ','
up_exon_list.append(exon1)
down_exon_list.append(exon2)
key = affygene, exon1 + '|'
gene_exon_list.append(key)
key = affygene, exon2 + '|'
gene_exon_list.append(key)
else:
up_exons = up_exons + exon2 + ','
down_exons = down_exons + exon1 + ','
up_exon_list.append(exon2)
down_exon_list.append(exon1)
key = affygene, exon1 + '|'
gene_exon_list.append(key)
key = affygene, exon2 + '|'
gene_exon_list.append(key)
up_exons = up_exons[0:-1]
down_exons = down_exons[0:-1]
try: ### Get comparisons group annotation data for multigroup comparison analyses
if original_conditions > 2:
try:
regulation_call = ped1.Annotation()
except Exception:
null = []
except Exception:
null = []
###Format functional results based on exon level fold change
null = []
#global a; a = exon_hits; global b; b=microRNA_attribute_db; kill
"""if 'G7100684@J934332_RC@j_at' in critical_probeset_list:
print probeset1, probeset2, gene, critical_probeset_list, 'blah'
if ('G7100684', ('G7100684@J934333_RC@j_at', 'G7100684@J934332_RC@j_at')) in functional_attribute_db:
print functional_attribute_db[('G7100684', ('G7100684@J934333_RC@j_at', 'G7100684@J934332_RC@j_at'))];blah
blah"""
new_functional_attribute_str, functional_attribute_list2, seq_attribute_str, protein_length_list = format_exon_functional_attributes(
affygene, critical_probeset_list, functional_attribute_db,
up_exon_list, down_exon_list, protein_length_list)
new_uniprot_exon_feature_str, uniprot_exon_feature_list, null, null = format_exon_functional_attributes(
affygene, critical_probeset_list, protein_exon_feature_db,
up_exon_list, down_exon_list, null)
null, microRNA_exon_feature_list, null, null = format_exon_functional_attributes(
affygene, critical_probeset_list, microRNA_attribute_db,
up_exon_list, down_exon_list, null)
if len(new_functional_attribute_str) == 0:
new_functional_attribute_str = ' '
if len(new_uniprot_exon_feature_str) == 0:
new_uniprot_exon_feature_str = ' '
if len(seq_attribute_str) > 12000:
seq_attribute_str = 'The sequence is too long to report for spreadsheet analysis'
### Add entries to a database to quantify the number of reciprocal isoforms regulated
reciprocal_isoform_data = [len(critical_exon_list[1]),
critical_exon_list[1], event_call,
regulation_call]
try:
float((lowest_raw_p))
except ValueError:
lowest_raw_p = 0
if (float((lowest_raw_p)) <= p_threshold or
false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try:
unique_exon_event_db[affygene].append(reciprocal_isoform_data)
except KeyError:
unique_exon_event_db[affygene] = [reciprocal_isoform_data]
### Add functional attribute information to a new database
for item in uniprot_exon_feature_list:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p)) <= p_threshold or false_pos <
2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try:
protein_exon_feature_db2[affygene, attribute].append(exon)
except KeyError:
protein_exon_feature_db2[affygene, attribute] = [exon]
### Add functional attribute information to a new database
"""Database not used for exon/junction data export but for over-representation analysis (direction specific)"""
for item in microRNA_exon_feature_list:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p)) <= p_threshold or false_pos <
2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try:
microRNA_exon_feature_db2[affygene, attribute].append(exon)
except KeyError:
microRNA_exon_feature_db2[affygene, attribute] = [exon]
### Add functional attribute information to a new database
for item in functional_attribute_list2:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p)) <= p_threshold or false_pos <
2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try:
functional_attribute_db2[affygene, attribute].append(exon)
except KeyError:
functional_attribute_db2[affygene, attribute] = [exon]
try:
abs_fold = abs(float(mean_fold_change))
fold_direction = 'down'
fold1_direction = 'down'
fold2_direction = 'down'
large_splicing_diff1 = 0
large_splicing_diff2 = 0
large_splicing_diff = 'null'
opposite_splicing_pattern = 'no'
if float(mean_fold_change) > 0: fold_direction = 'up'
if float(fold1) > 0: fold1_direction = 'up'
if fold1_direction != fold_direction:
if float(fold1) > float(mean_fold_change):
large_splicing_diff1 = float(fold1) - float(
mean_fold_change)
except Exception:
fold_direction = ''
large_splicing_diff = ''
opposite_splicing_pattern = ''
if analysis_method != 'ASPIRE' and 'linearregres' not in analysis_method:
ed = exon_db[probeset1]
else:
try:
ed = critical_probeset_annotation_db[selected_probeset,
probeset2]
except KeyError:
try:
ed = exon_db[
selected_probeset
] ###not useful data here, but the objects need to exist
except IOError:
ed = original_exon_db[probeset1]
ucsc_splice_annotations = ["retainedIntron", "cassetteExon",
"strangeSplice", "altFivePrime",
"altThreePrime", "altPromoter",
"bleedingExon"]
custom_annotations = ["alt-3'", "alt-5'", "alt-C-term", "alt-N-term",
"cassette-exon", "cassette-exon",
"exon-region-exclusion", "intron-retention",
"mutually-exclusive-exon", "trans-splicing"]
custom_exon_annotations_found = 'no'
ucsc_annotations_found = 'no'
exon_annot_score = 0
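        # Annotation confidence score used below: 2 = no known splicing annotation,
        # 3 = custom (AltAnalyze) annotation only, 4 = UCSC annotation only, 5 = both;
        # a score of 1 is assigned later (splicing-index/FIRMA path) when the event spans
        # multiple transcript clusters.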
if len(ed.SplicingEvent()) > 0:
for annotation in ucsc_splice_annotations:
if annotation in ed.SplicingEvent():
ucsc_annotations_found = 'yes'
for annotation in custom_annotations:
if annotation in ed.SplicingEvent():
custom_exon_annotations_found = 'yes'
if custom_exon_annotations_found == 'yes' and ucsc_annotations_found == 'no':
exon_annot_score = 3
elif ucsc_annotations_found == 'yes' and custom_exon_annotations_found == 'no':
exon_annot_score = 4
elif ucsc_annotations_found == 'yes' and custom_exon_annotations_found == 'yes':
exon_annot_score = 5
else:
exon_annot_score = 2
try:
gene_splice_event_score[affygene].append(
exon_annot_score) ###store for gene level results
except KeyError:
gene_splice_event_score[affygene] = [exon_annot_score]
try:
gene_exon_region[affygene].append(
ed.ExonRegionID()) ###store for gene level results
except KeyError:
gene_exon_region[affygene] = [ed.ExonRegionID()]
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
if float(fold2) > 0: fold2_direction = 'up'
if fold2_direction != fold_direction:
if float(fold2) > float(mean_fold_change):
large_splicing_diff2 = float(fold2) - float(
mean_fold_change)
if abs(large_splicing_diff2) > large_splicing_diff1:
large_splicing_diff = str(large_splicing_diff2)
else:
large_splicing_diff = str(large_splicing_diff1)
if fold1_direction != fold2_direction and abs(float(
fold1)) > 0.4 and abs(float(fold2)) > 0.4 and abs(float(
mean_fold_change)) < max([float(fold2), float(fold1)]):
opposite_splicing_pattern = 'yes'
            ### Annotate splicing events based on exon_structure data
if array_type == 'AltMouse':
extra_exon_annotation = ExonAnnotate_module.annotate_splice_event(
exons1, exons2, extra_transcript_annotation)
try:
splice_event_db[extra_exon_annotation] += 1
except KeyError:
splice_event_db[extra_exon_annotation] = 1
try:
direct_domain_alignments = probeset_aligning_db[
selected_probeset, probeset2]
try:
direct_domain_gene_alignments[
affygene] += ', ' + direct_domain_alignments
except KeyError:
direct_domain_gene_alignments[
affygene] = direct_domain_alignments
except KeyError:
direct_domain_alignments = ' '
splicing_event = ed.SplicingEvent()
if array_type == 'RNASeq':
splicing_event = checkForTransSplicing(probeset1_display,
splicing_event)
splicing_event = checkForTransSplicing(probeset2,
splicing_event)
exp1 = covertLogExpressionToNonLog(exp1)
exp2 = covertLogExpressionToNonLog(exp2)
baseline_const_exp = covertLogExpressionToNonLog(
baseline_const_exp)
fold1 = covertLogFoldToNonLog(fold1)
fold2 = covertLogFoldToNonLog(fold2)
adjfold1 = covertLogFoldToNonLog(adjfold1)
adjfold2 = covertLogFoldToNonLog(adjfold2)
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
### Annotate splicing events based on pre-computed and existing annotations
values = [affygene, dI, symbol, fs(description), exons1, exons2,
regulation_call, event_call, probeset1_display, rawp1,
probeset2, rawp2, fold1, fold2, adjfold1, adjfold2]
values += [extra_transcript_annotation, up_exons, down_exons,
fs(new_functional_attribute_str),
fs(new_uniprot_exon_feature_str), fs(seq_attribute_str),
exp1, exp2, fs(direct_domain_alignments)]
values += [str(baseline_const_exp), str(lowest_raw_p),
p_value_extra, str(false_pos), mean_fold_change,
extra_exon_annotation]
values += [ed.ExternalExonIDs(), ed.ExonRegionID(), splicing_event,
str(exon_annot_score), large_splicing_diff,
location_summary]
exon_sets = abs(float(
dI)), regulation_call, event_call, exons1, exons2, ''
            ### Export significant reciprocal junction pairs and scores
values_ps = [probeset1 + '|' + probeset2, affygene, 'changed', dI,
'NA', str(lowest_raw_p)]
values_ps = string.join(values_ps, '\t') + '\n'
try:
ProcessedSpliceData_data.write(values_ps)
except Exception:
None
values_ge = [affygene, 'En', dI, str(lowest_raw_p), symbol,
probeset1_display + ' | ' + probeset2]
values_ge = string.join(values_ge, '\t') + '\n'
if array_type == 'junction' or array_type == 'RNASeq': ### Only applies to reciprocal junction sensitive platforms (but not currently AltMouse)
goelite_data.write(values_ge)
if array_type == 'junction' or array_type == 'RNASeq': ### Only applies to reciprocal junction sensitive platforms (but not currently AltMouse)
try:
exon_probeset = exon_array_translation_db[
affygene + ':' + exon_data[1][0]][0]
probeset1 = exon_probeset
gcn += 1
except Exception:
probeset1 = None #probeset1 = affygene+':'+exon_data[1][0]
try:
null = int(probeset1
) ### Must be an int to work in DomainGraph
values_dg = [probeset1, affygene, 'changed', dI, 'NA',
str(lowest_raw_p)]
values_dg = string.join(values_dg, '\t') + '\n'
if array_type == 'junction' or array_type == 'RNASeq':
DG_data.write(values_dg)
values_srf = string.join(
[probeset1, 'Ae', dI, str(lowest_raw_p)], '\t') + '\n'
if float(dI) > 0:
SRFinder_ex_data.write(values_srf)
elif float(dI) < 0:
SRFinder_in_data.write(values_srf)
except Exception:
null = []
else:
si_pvalue = lowest_raw_p
if si_pvalue == 1: si_pvalue = 'NA'
if probeset1 in midas_db:
midas_p = str(midas_db[probeset1])
if float(midas_p) < lowest_raw_p:
lowest_raw_p = float(midas_p
) ###This is the lowest and SI-pvalue
else:
midas_p = ''
###Determine what type of exon-annotations are present to assign a confidence score
if affygene in annotate_db: ###Determine the transcript clusters used to comprise a splice event (genes and exon specific)
try:
gene_tc = annotate_db[affygene].TranscriptClusterIDs()
try:
probeset_tc = [ed.SecondaryGeneID()]
except Exception:
probeset_tc = [affygene]
for transcript_cluster in gene_tc:
probeset_tc.append(transcript_cluster)
probeset_tc = makeUnique(probeset_tc)
except Exception:
probeset_tc = ''
gene_tc = ''
else:
try:
try:
probeset_tc = [ed.SecondaryGeneID()]
except Exception:
probeset_tc = [affygene]
probeset_tc = makeUnique(probeset_tc)
except Exception:
probeset_tc = ''
gene_tc = ''
cluster_number = len(probeset_tc)
try:
alternatively_reg_tc[affygene] += probeset_tc
except KeyError:
alternatively_reg_tc[affygene] = probeset_tc
try:
last_exon_region = last_exon_region_db[affygene]
except KeyError:
last_exon_region = ''
if cluster_number > 1: exon_annot_score = 1
direct_domain_alignments = ' '
if array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null':
try:
direct_domain_alignments = probeset_aligning_db[probeset1]
try:
direct_domain_gene_alignments[
affygene] += ', ' + direct_domain_alignments
except KeyError:
direct_domain_gene_alignments[
affygene] = direct_domain_alignments
except KeyError:
direct_domain_alignments = ' '
else:
try:
direct_domain_alignments = probeset_aligning_db[
affygene + ':' + exons1]
except KeyError:
direct_domain_alignments = ''
if array_type == 'RNASeq':
exp1 = covertLogExpressionToNonLog(exp1)
baseline_const_exp = covertLogExpressionToNonLog(
baseline_const_exp)
fold1 = covertLogFoldToNonLog(fold1)
adjfold1 = covertLogFoldToNonLog(adjfold1)
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
try:
adj_SIp = fdr_exon_stats[probeset1].AdjP()
except Exception:
adj_SIp = 'NA'
try:
secondary_geneid = ed.SecondaryGeneID()
except Exception:
secondary_geneid = affygene
if array_type == 'RNASeq':
secondary_geneid = ed.NovelExon()
### Write Splicing Index results
values = [affygene, dI, symbol, fs(description), exons1,
regulation_call, probeset1, rawp1, str(lowest_raw_p),
midas_p, fold1, adjfold1]
values += [up_exons, down_exons, fs(new_functional_attribute_str),
fs(new_uniprot_exon_feature_str), fs(seq_attribute_str),
fs(direct_domain_alignments), exp1]
values += [str(baseline_const_exp), str(si_pvalue), DV,
mean_fold_change, secondary_geneid,
ed.ExternalExonIDs()]
values += [ed.Constitutive(), ed.ExonRegionID(),
ed.SplicingEvent(), last_exon_region,
ed.LocationSummary()] #str(exon_annot_score)
if probeset1 in filtered_probeset_db:
values += filtered_probeset_db[probeset1]
exon_sets = abs(float(
dI)), regulation_call, event_call, exons1, exons1, midas_p
probeset = probeset1 ### store original ID (gets converted below)
### Write DomainGraph results
try:
midas_p = str(midas_db[probeset1])
except KeyError:
midas_p = 'NA'
### Export significant exon/junction IDs and scores
values_ps = [probeset1, affygene, 'changed', dI, 'NA',
str(lowest_raw_p)]
values_ps = string.join(values_ps, '\t') + '\n'
try:
ProcessedSpliceData_data.write(values_ps)
except Exception:
None
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
if (array_type == 'junction' or array_type ==
'RNASeq') and explicit_data_type == 'null':
try:
exon_probeset = exon_array_translation_db[
affygene + ':' + exon_data[1][0]][0]
probeset1 = exon_probeset
gcn += 1
except Exception:
probeset1 = None ### don't write out a line
else:
try:
exon_probeset = exon_array_translation_db[probeset1][0]
probeset1 = exon_probeset
gcn += 1
except Exception:
probeset1 = None
#null=[]; #print gcn, probeset1;kill - force an error - new in version 2.0.8
try:
null = int(probeset1)
values_dg = [probeset1, affygene, 'changed', dI,
str(si_pvalue), midas_p]
values_dg = string.join(values_dg, '\t') + '\n'
DG_data.write(values_dg)
values_srf = string.join(
[probeset1, 'Ae', dI, str(lowest_raw_p)], '\t') + '\n'
if float(dI) > 0:
SRFinder_ex_data.write(values_srf)
elif float(dI) < 0:
SRFinder_in_data.write(values_srf)
except Exception:
null = []
values_ge = [affygene, 'En', dI, str(si_pvalue), midas_p, symbol,
probeset]
values_ge = string.join(values_ge, '\t') + '\n'
goelite_data.write(values_ge)
if len(ed.SplicingEvent()) > 2:
try:
external_exon_annot[affygene].append(ed.SplicingEvent())
except KeyError:
external_exon_annot[affygene] = [ed.SplicingEvent()]
try:
values = string.join(values, '\t') + '\n'
except Exception:
print values
kill
data.write(values)
###Process data for gene level reports
if float((lowest_raw_p
)) <= p_threshold or false_pos < 2 or lowest_raw_p == 1:
try:
comparison_count[affygene] += 1
except KeyError:
comparison_count[affygene] = 1
try:
aspire_gene_results[affygene].append(exon_sets)
except KeyError:
aspire_gene_results[affygene] = [exon_sets]
for exon in up_exon_list:
exon_info = exon, 'upregulated'
try:
critical_gene_exons[affygene].append(exon_info)
except KeyError:
critical_gene_exons[affygene] = [exon_info]
for exon in down_exon_list:
exon_info = exon, 'downregulated'
try:
critical_gene_exons[affygene].append(exon_info)
except KeyError:
critical_gene_exons[affygene] = [exon_info]
data.close()
print event_count, analysis_method, "results written to:", aspire_output, '\n'
try:
clearObjectsFromMemory(original_exon_db)
except Exception:
null = []
exon_array_translation_db = []
original_exon_db = []
probeset_to_gene = []
### Finish writing the DomainGraph export file with non-significant probesets
if array_type != 'AltMouse':
for probeset in excluded_probeset_db:
eed = excluded_probeset_db[probeset]
try:
midas_p = str(midas_db[probeset])
except KeyError:
midas_p = 'NA'
### Export significant exon/junction IDs and scores
try:
values_ps = [probeset, eed.GeneID(), 'UC', eed.Score(),
str(eed.TTestNormalizedRatios()), midas_p]
except Exception:
excl_probeset, geneid, score, rawp, pvalue = eed
values_ps = [probeset, geneid, 'UC', str(score), str(rawp),
str(pvalue)]
values_ps = string.join(values_ps, '\t') + '\n'
ProcessedSpliceData_data.write(values_ps)
### Write DomainGraph results
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
try:
exon_probeset = exon_array_translation_db[probeset][0]
probeset = exon_probeset
gcn += 1
except Exception:
probeset = None
# null=[] - force an error - new in version 2.0.8
try:
values_dg = [probeset, eed.GeneID(), 'UC', eed.Score(),
str(eed.TTestNormalizedRatios()), midas_p]
except Exception:
try:
excl_probeset, geneid, score, rawp, pvalue = eed
if ':' in probeset:
probeset = excl_probeset ### Example: ENSMUSG00000029213:E2.1, make this just the numeric exclusion probeset - Not sure if DG handles non-numeric
values_dg = [probeset, geneid, 'UC', str(score), str(rawp),
str(pvalue)]
except Exception:
None
try:
null = int(probeset)
values_dg = string.join(values_dg, '\t') + '\n'
DG_data.write(values_dg)
except Exception:
null = []
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
for id in exon_array_translation_db:
SRFinder_denom_data.write(exon_array_translation_db[id] +
'\tAe\n')
else:
for probeset in original_exon_db:
SRFinder_denom_data.write(probeset + '\tAe\n')
DG_data.close()
SRFinder_in_data.close()
SRFinder_ex_data.close()
SRFinder_denom_data.close()
for affygene in direct_domain_gene_alignments:
domains = string.split(direct_domain_gene_alignments[affygene], ', ')
domains = unique.unique(domains)
domains = string.join(domains, ', ')
direct_domain_gene_alignments[affygene] = domains
    ### functional_attribute_db2 is reorganized below, so first save the current database under another name (functional_attribute_db) and use that one for later lookups
functional_attribute_db = functional_attribute_db2
functional_attribute_db2 = reorganize_attribute_entries(
functional_attribute_db2, 'no')
external_exon_annot = eliminate_redundant_dict_values(external_exon_annot)
protein_exon_feature_db = protein_exon_feature_db2
protein_exon_feature_db2 = reorganize_attribute_entries(
protein_exon_feature_db2, 'no')
############ Export Gene Data ############
up_splice_val_genes = 0
down_dI_genes = 0
diff_exp_spliced_genes = 0
diff_spliced_rna_factor = 0
ddI = 0
udI = 0
summary_data_db['direct_domain_genes'] = len(direct_domain_gene_alignments)
summary_data_db['alt_genes'] = len(aspire_gene_results)
critical_gene_exons = eliminate_redundant_dict_values(critical_gene_exons)
aspire_output_gene = root_dir + 'AltResults/AlternativeOutput/' + dataset_name + analysis_method + '-exon-inclusion-GENE-results.txt'
data = export.ExportFile(aspire_output_gene)
if array_type == 'AltMouse': goelite_data.write("GeneID\tSystemCode\n")
title = ['AffyGene', 'max_dI', 'midas-p (corresponding)', 'symbol',
'external gene ID', 'description', 'regulation_call',
'event_call']
title += ['number_of_comparisons', 'num_effected_exons', 'up_exons',
'down_exons', 'functional_attribute',
'uniprot-ens_exon_features', 'direct_domain_alignments']
title += ['pathways', 'mean_fold_change', 'exon-annotations',
'exon-region IDs', 'alternative gene ID',
'splice-annotation score']
title = string.join(title, '\t') + '\n'
data.write(title)
for affygene in aspire_gene_results:
if affygene in annotate_db:
description = annotate_db[affygene].Description()
symbol = annotate_db[affygene].Symbol()
ensembl = annotate_db[affygene].ExternalGeneID()
if array_type != 'AltMouse' and array_type != 'RNASeq':
transcript_clusters = alternatively_reg_tc[affygene]
transcript_clusters = makeUnique(transcript_clusters)
transcript_clusters = string.join(transcript_clusters, '|')
else:
transcript_clusters = affygene
rna_processing_factor = annotate_db[affygene].RNAProcessing()
else:
description = ''
symbol = ''
ensembl = affygene
rna_processing_factor = ''
transcript_clusters = ''
if ensembl in go_annotations:
wpgo = go_annotations[ensembl]
goa = wpgo.Combined()
else:
goa = ''
if array_type == 'AltMouse':
if len(ensembl) > 0: goelite_data.write(ensembl + '\tL\n')
try:
gene_splice_event_score[affygene].sort()
top_se_score = str(gene_splice_event_score[affygene][-1])
except KeyError:
top_se_score = 'NA'
try:
gene_regions = gene_exon_region[affygene]
gene_regions = makeUnique(gene_regions)
gene_regions = string.join(gene_regions, '|')
except KeyError:
gene_regions = 'NA'
if analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
number_of_comparisons = str(comparison_count[affygene])
else:
number_of_comparisons = 'NA'
results_list = aspire_gene_results[affygene]
results_list.sort()
results_list.reverse()
max_dI = str(results_list[0][0])
regulation_call = results_list[0][1]
event_call = results_list[0][2]
midas_p = results_list[0][-1]
num_critical_exons = str(len(critical_gene_exons[affygene]))
try:
direct_domain_annots = direct_domain_gene_alignments[affygene]
except KeyError:
direct_domain_annots = ' '
down_exons = ''
up_exons = ''
down_list = []
up_list = []
for exon_info in critical_gene_exons[affygene]:
exon = exon_info[0]
call = exon_info[1]
if call == 'downregulated':
down_exons = down_exons + exon + ','
down_list.append(exon)
ddI += 1
if call == 'upregulated':
up_exons = up_exons + exon + ','
up_list.append(exon)
udI += 1
down_exons = down_exons[0:-1]
up_exons = up_exons[0:-1]
up_exons = add_a_space(up_exons)
down_exons = add_a_space(down_exons)
functional_annotation = ''
if affygene in functional_attribute_db2:
number_of_functional_attributes = str(len(functional_attribute_db2[
affygene]))
attribute_list = functional_attribute_db2[affygene]
attribute_list.sort()
for attribute_exon_info in attribute_list:
exon_attribute = attribute_exon_info[0]
exon_list = attribute_exon_info[1]
functional_annotation = functional_annotation + exon_attribute
exons = '('
for exon in exon_list:
exons = exons + exon + ','
exons = exons[0:-1] + '),'
if add_exons_to_annotations == 'yes':
functional_annotation = functional_annotation + exons
else:
functional_annotation = functional_annotation + ','
functional_annotation = functional_annotation[0:-1]
uniprot_exon_annotation = ''
if affygene in protein_exon_feature_db2:
number_of_functional_attributes = str(len(protein_exon_feature_db2[
affygene]))
attribute_list = protein_exon_feature_db2[affygene]
attribute_list.sort()
for attribute_exon_info in attribute_list:
exon_attribute = attribute_exon_info[0]
exon_list = attribute_exon_info[1]
uniprot_exon_annotation = uniprot_exon_annotation + exon_attribute
exons = '('
for exon in exon_list:
exons = exons + exon + ','
exons = exons[0:-1] + '),'
if add_exons_to_annotations == 'yes':
uniprot_exon_annotation = uniprot_exon_annotation + exons
else:
uniprot_exon_annotation = uniprot_exon_annotation + ','
uniprot_exon_annotation = uniprot_exon_annotation[0:-1]
if len(uniprot_exon_annotation) == 0: uniprot_exon_annotation = ' '
if len(functional_annotation) == 0: functional_annotation = ' '
if affygene in gene_expression_diff_db:
mean_fold_change = gene_expression_diff_db[
affygene].ConstitutiveFoldStr()
try:
if abs(float(mean_fold_change)) > log_fold_cutoff:
diff_exp_spliced_genes += 1
except Exception:
diff_exp_spliced_genes = diff_exp_spliced_genes
else:
mean_fold_change = 'NC'
if len(rna_processing_factor) > 2: diff_spliced_rna_factor += 1
###Add annotations for where in the gene structure these exons are (according to Ensembl)
if affygene in external_exon_annot:
external_gene_annot = string.join(external_exon_annot[affygene],
', ')
else:
external_gene_annot = ''
if array_type == 'RNASeq':
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
values = [affygene, max_dI, midas_p, symbol, ensembl, fs(description),
regulation_call, event_call, number_of_comparisons]
values += [num_critical_exons, up_exons, down_exons,
functional_annotation]
values += [fs(uniprot_exon_annotation), fs(direct_domain_annots),
fs(goa), mean_fold_change, external_gene_annot,
gene_regions, transcript_clusters, top_se_score]
values = string.join(values, '\t') + '\n'
data.write(values)
### Use results for summary statistics
if len(up_list) > len(down_list): up_splice_val_genes += 1
else: down_dI_genes += 1
data.close()
print "Gene-level results written"
    ### 'yes' here indicates that although the truncation events will initially be filtered out, they will later be added
    ### back in without the non-truncation annotations, if no second database is supplied (in this case functional_attribute_db again).
    ### IF WE WANT TO FILTER OUT NON-NMD ENTRIES WHEN NMD IS PRESENT (FOR A GENE), functional_attribute_db MUST BE INCLUDED AS THE SECOND VARIABLE!!!
    ### Currently, 'yes' does nothing.
functional_annotation_db, null = grab_summary_dataset_annotations(
functional_attribute_db, '', 'yes')
upregulated_genes = 0
downregulated_genes = 0
###Calculate the number of upregulated and downregulated genes
for affygene in gene_expression_diff_db:
fold_val = gene_expression_diff_db[affygene].ConstitutiveFold()
try:
if float(fold_val) > log_fold_cutoff: upregulated_genes += 1
elif abs(float(fold_val)) > log_fold_cutoff:
downregulated_genes += 1
except Exception:
null = []
upregulated_rna_factor = 0
downregulated_rna_factor = 0
###Calculate the total number of putative RNA-processing/binding factors differentially regulated
for affygene in gene_expression_diff_db:
gene_fold = gene_expression_diff_db[affygene].ConstitutiveFold()
rna_processing_factor = gene_expression_diff_db[
affygene].RNAProcessing()
if len(rna_processing_factor) > 1:
if gene_fold > log_fold_cutoff: upregulated_rna_factor += 1
elif abs(gene_fold) > log_fold_cutoff:
downregulated_rna_factor += 1
###Generate three files for downstream functional summary
### functional_annotation_db2 is output to the same function as functional_annotation_db, ranked_uniprot_list_all to get all ranked uniprot annotations,
### and ranked_uniprot_list_coding_only to get only coding ranked uniprot annotations
functional_annotation_db2, ranked_uniprot_list_all = grab_summary_dataset_annotations(
protein_exon_feature_db, '', '') #functional_attribute_db
null, ranked_uniprot_list_coding_only = grab_summary_dataset_annotations(
protein_exon_feature_db, functional_attribute_db, ''
) #functional_attribute_db
functional_attribute_db = []
protein_exon_feature_db = []
    ###Summarize changes in avg protein length for each splice event
up_protein_list = []
down_protein_list = []
protein_length_fold_diff = []
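    # For each event, compare the predicted protein lengths of the down- and up-regulated
    # isoforms: collect length fold-changes when both proteins exceed 10 aa, then report the
    # median fold difference, group means/stdevs and a comparison statistic (see the loop and
    # statistics calls below).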
for [down_protein, up_protein] in protein_length_list:
up_protein = float(up_protein)
down_protein = float(down_protein)
down_protein_list.append(down_protein)
up_protein_list.append(up_protein)
if up_protein > 10 and down_protein > 10:
fold_change = up_protein / down_protein
protein_length_fold_diff.append(fold_change)
median_fold_diff = statistics.median(protein_length_fold_diff)
try:
down_avg = int(statistics.avg(down_protein_list))
up_avg = int(statistics.avg(up_protein_list))
except Exception:
down_avg = 0
up_avg = 0
try:
try:
down_std = int(statistics.stdev(down_protein_list))
up_std = int(statistics.stdev(up_protein_list))
        except ValueError: ###If 'null' is returned from stdev
down_std = 0
up_std = 0
except Exception:
down_std = 0
up_std = 0
if len(down_protein_list) > 1 and len(up_protein_list) > 1:
try:
#t,df,tails = statistics.ttest(down_protein_list,up_protein_list,2,3)
#t = abs(t);df = round(df)
#print 'ttest t:',t,'df:',df
#p = str(statistics.t_probability(t,df))
p = str(statistics.runComparisonStatistic(
down_protein_list, up_protein_list, probability_statistic))
#print dataset_name,p
except Exception:
p = 'NA'
        if p == '1' or p == '1.0': p = 'NA'
else: p = 'NA'
###Calculate unique reciprocal isoforms for exon-inclusion, exclusion and mutual-exclusive events
unique_exon_inclusion_count = 0
unique_exon_exclusion_count = 0
unique_mutual_exclusive_count = 0
unique_exon_event_db = eliminate_redundant_dict_values(
unique_exon_event_db)
for affygene in unique_exon_event_db:
isoform_entries = unique_exon_event_db[affygene]
possibly_redundant = []
non_redundant = []
check_for_redundant = []
for entry in isoform_entries:
if entry[0] == 1: ### If there is only one regulated exon
possibly_redundant.append(entry)
else:
non_redundant.append(entry)
critical_exon_list = entry[1]
for exon in critical_exon_list:
check_for_redundant.append(exon)
for entry in possibly_redundant:
exon = entry[1][0]
if exon not in check_for_redundant:
non_redundant.append(entry)
for entry in non_redundant:
if entry[2] == 'ei-ex':
if entry[3] == 'upregulated': unique_exon_inclusion_count += 1
else: unique_exon_exclusion_count += 1
else: unique_mutual_exclusive_count += 1
udI = unique_exon_inclusion_count
ddI = unique_exon_exclusion_count
mx = unique_mutual_exclusive_count
###Add splice event information to the functional_annotation_db
for splice_event in splice_event_db:
count = splice_event_db[splice_event]
functional_annotation_db.append((splice_event, count))
if analysis_method == 'splicing-index' or analysis_method == 'FIRMA':
udI = 'NA'
ddI = 'NA'
summary_results_db[dataset_name[0:-1]] = udI, ddI, mx, up_splice_val_genes, down_dI_genes, (
up_splice_val_genes + down_dI_genes
), upregulated_genes, downregulated_genes, diff_exp_spliced_genes, upregulated_rna_factor, downregulated_rna_factor, diff_spliced_rna_factor, down_avg, down_std, up_avg, up_std, p, median_fold_diff, functional_annotation_db
result_list = exportComparisonSummary(dataset_name, summary_data_db, 'log')
###Re-set this variable (useful for testing purposes)
clearObjectsFromMemory(gene_expression_diff_db)
clearObjectsFromMemory(splice_event_list)
clearObjectsFromMemory(si_db)
si_db = []
clearObjectsFromMemory(fdr_exon_stats)
try:
clearObjectsFromMemory(excluded_probeset_db)
clearObjectsFromMemory(ex_db)
ex_db = []
except Exception:
ex_db = []
clearObjectsFromMemory(exon_db)
#clearObjectsFromMemory(annotate_db)
critical_probeset_annotation_db = []
gene_expression_diff_db = []
domain_associated_genes = []
permute_p_values = []
permute_miR_inputs = []
seq_attribute_str = []
microRNA_count_db = []
excluded_probeset_db = []
fdr_exon_stats = []
splice_event_list = []
critical_exon_db_len = len(
critical_exon_db
) #; critical_exon_db=[] deleting here will cause a global instance problem
all_domain_gene_hits = []
gene_splice_event_score = []
unique_exon_event_db = []
probeset_aligning_db = []
ranked_uniprot_list_all = []
filtered_microRNA_exon_db = []
permute_domain_inputs = []
functional_annotation_db2 = []
functional_attribute_db2 = []
protein_length_list = []
ranked_uniprot_list_coding_only = []
miR_str = []
permute_input_list = []
microRNA_exon_feature_db2 = []
alternatively_reg_tc = []
direct_domain_gene_alignments = []
aspire_gene_results = []
domain_gene_counts = []
functional_annotation = []
protein_exon_feature_db2 = []
microRNA_attribute_db = []
probeset_mirBS_db = []
exon_hits = []
critical_gene_exons = []
gene_exon_region = []
exon_db = []
external_exon_annot = []
values = []
down_protein_list = []
functional_annotation_db = []
protein_length_fold_diff = []
comparison_count = []
filtered_arrayids = []
domain_hit_gene_count_db = []
up_protein_list = []
probeset_domain_db = []
try:
goelite_data.close()
except Exception:
null = []
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return summary_results_db, summary_results_db2, aspire_output, aspire_output_gene, critical_exon_db_len
def deviation(dI, avg_dI, stdev_dI):
dI = covertLogFoldToNonLogFloat(dI)
avg_dI = covertLogFoldToNonLogFloat(avg_dI)
stdev_dI = covertLogFoldToNonLogFloat(stdev_dI)
return str(abs((dI - avg_dI) / stdev_dI))
def covertLogExpressionToNonLog(log_val):
if normalization_method == 'RPKM':
nonlog_val = (math.pow(2, float(log_val)))
else:
nonlog_val = (math.pow(2, float(log_val))) - 1
return str(nonlog_val)
def covertLogFoldToNonLog(log_val):
try:
if float(log_val) < 0:
nonlog_val = (-1 / math.pow(2, (float(log_val))))
else:
nonlog_val = (math.pow(2, float(log_val)))
except Exception:
nonlog_val = log_val
return str(nonlog_val)
def covertLogFoldToNonLogFloat(log_val):
if float(log_val) < 0: nonlog_val = (-1 / math.pow(2, (float(log_val))))
else: nonlog_val = (math.pow(2, float(log_val)))
return nonlog_val
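### Illustrative only: the sign convention used by the conversion helpers above is
###   covertLogFoldToNonLog('1.0')  -> '2.0'   (2-fold up)
###   covertLogFoldToNonLog('-1.0') -> '-2.0'  (2-fold down)
### while covertLogExpressionToNonLog simply unlogs an expression value (2**x, minus 1 unless normalization_method is 'RPKM')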
def checkForTransSplicing(uid, splicing_event):
pl = string.split(uid, ':')
if len(pl) > 2:
if pl[0] not in pl[1]: ### Two different genes
if len(splicing_event) > 0: splicing_event += '|trans-splicing'
            else: splicing_event = 'trans-splicing'
return splicing_event
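### Example (hypothetical IDs, for illustration only): a UID such as 'GeneA:GeneB:E1.1-E2.1' references two
### different genes, so checkForTransSplicing appends the 'trans-splicing' flag to the splicing_event annotation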
def fs(text):
    ### Wraps a text entry in quotes so an internal comma is not treated as a delimiter
return '"' + text + '"'
def analyzeSplicingIndex(fold_dbase):
"""The Splicing Index (SI) represents the log ratio of the exon intensities between the two tissues after normalization
to the gene intensities in each sample: SIi = log2((e1i/g1j)/(e2i/g2j)), for the i-th exon of the j-th gene in tissue
type 1 or 2. The splicing indices are then subjected to a t-test to probe for differential inclusion of the exon into the gene.
In order to determine if the change in isoform expression was statistically significant, a simple two-tailed t-test was carried
out on the isoform ratios by grouping the 10 samples from either "tumor" or "normal" tissue.
The method ultimately producing the highest proportion of true positives was to retain only: a) exons with a DABG p-value < 0.05,
b) genes with a signal > 70, c) exons with a log ratio between tissues (i.e., the gene-level normalized fold change) > 0.5,
d) Splicing Index p-values < 0.005 and e) Core exons.
Gardina PJ, Clark TA, Shimada B, Staples MK, Yang Q, Veitch J, Schweitzer A, Awad T, Sugnet C, Dee S, Davies C, Williams A, Turpaz Y.
Alternative splicing and differential gene expression in colon cancer detected by a whole genome exon array.
BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196
"""
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db) > 0:
temp_db = {}
for probeset in fold_dbase:
temp_db[probeset] = []
for probeset in temp_db:
try:
filtered_probeset_db[probeset]
except KeyError:
del fold_dbase[probeset]
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing annotation)
if filter_for_AS == 'yes':
proceed = 0
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try:
del fold_dbase[probeset]
except KeyError:
null = []
    ### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
        summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[:-1] + '.txt'
data = export.ExportFile(summary_output)
title = string.join(['gene\tExonID\tprobesets'] + original_array_names,
'\t') + '\n'
data.write(title)
print 'Calculating splicing-index values (please be patient)...',
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
    print len(fold_dbase), id_name, 'being examined'
###original_avg_const_exp_db contains constitutive mean expression values per group: G6953871 [7.71, 7.66]
###array_raw_group_values: Raw expression values in list of groups: G7072464@J935416_RC@j_at ([1.79, 2.16, 2.22], [1.68, 2.24, 1.97, 1.92, 2.12])
###avg_const_exp_db contains the raw constitutive expression values in a single list
splicing_index_hash = []
excluded_probeset_db = {}
denominator_probesets = 0
interaction = 0
original_increment = int(len(exon_db) / 20)
increment = original_increment
for probeset in exon_db:
ed = exon_db[probeset]
#include_probeset = ed.IncludeProbeset()
if interaction == increment:
increment += original_increment
print '*',
interaction += 1
include_probeset = 'yes' ###Moved this filter to import of the probeset relationship file
###Examines user input parameters for inclusion of probeset types in the analysis
if include_probeset == 'yes':
geneid = ed.GeneID()
if probeset in fold_dbase and geneid in original_avg_const_exp_db: ###used to search for array_raw_group_values, but when filtered by expression changes, need to filter by adj_fold_dbase
denominator_probesets += 1
###Includes probesets with a calculated constitutive expression value for each gene and expression data for that probeset
group_index = 0
si_interim_group_db = {}
si_interim_group_str_db = {}
ge_threshold_count = 0
value_count = 0
for group_values in array_raw_group_values[probeset]:
"""gene_expression_value = math.pow(2,original_avg_const_exp_db[geneid][group_index])
###Check to see if gene expression is > threshod for both conditions
if gene_expression_value>gene_expression_threshold:ge_threshold_count+=1"""
value_index = 0
ratio_hash = []
ratio_str_hash = []
for value in group_values: ###Calculate normalized ratio's for each condition and save raw values for later permutation
#exp_val = math.pow(2,value);ge_val = math.pow(2,avg_const_exp_db[geneid][value_count]) ###To calculate a ttest we need the raw constitutive expression values, these are not in group list form but are all in a single list so keep count.
exp_val = value
ge_val = avg_const_exp_db[geneid][value_count]
exp_ratio = exp_val - ge_val
ratio_hash.append(exp_ratio)
ratio_str_hash.append(str(exp_ratio))
value_index += 1
value_count += 1
si_interim_group_db[group_index] = ratio_hash
si_interim_group_str_db[group_index] = ratio_str_hash
group_index += 1
group1_ratios = si_interim_group_db[0]
group2_ratios = si_interim_group_db[1]
group1_mean_ratio = statistics.avg(group1_ratios)
group2_mean_ratio = statistics.avg(group2_ratios)
if export_NI_values == 'yes':
try:
er = ed.ExonID()
except Exception:
er = 'NA'
ev = string.join(
[geneid + '\t' + er + '\t' + probeset
] + si_interim_group_str_db[0] +
si_interim_group_str_db[1], '\t') + '\n'
data.write(ev)
#if ((math.log(group1_mean_ratio,2))*(math.log(group2_mean_ratio,2)))<0: opposite_SI_log_mean = 'yes'
if (group1_mean_ratio * group2_mean_ratio) < 0:
opposite_SI_log_mean = 'yes'
else:
opposite_SI_log_mean = 'no'
try:
if calculate_normIntensity_p == 'yes':
try:
normIntensityP = statistics.runComparisonStatistic(
group1_ratios, group2_ratios,
probability_statistic)
except Exception:
normIntensityP = 'NA' ### Occurs when analyzing two groups with no variance
else:
                        normIntensityP = 'NA' ### Set to an always significant value
if normIntensityP == 1: normIntensityP = 'NA'
splicing_index = group1_mean_ratio - group2_mean_ratio
abs_splicing_index = abs(splicing_index)
#if probeset == '3061323': print abs_splicing_index,normIntensityP,ed.ExonID(),group1_mean_ratio,group2_mean_ratio,math.log(group1_mean_ratio,2),math.log(group2_mean_ratio,2),((math.log(group1_mean_ratio,2))*(math.log(group2_mean_ratio,2))),opposite_SI_log_mean; kill
if probeset in midas_db:
try:
midas_p = float(midas_db[probeset])
except ValueError:
midas_p = 0
#if abs_splicing_index>1 and normIntensityP < 0.05: print probeset,normIntensityP, abs_splicing_index;kill
else:
midas_p = 0
#print ed.GeneID(),ed.ExonID(),probeset,splicing_index,normIntensityP,midas_p,group1_ratios,group2_ratios
if abs_splicing_index > alt_exon_logfold_cutoff and (
normIntensityP < p_threshold or
normIntensityP == 'NA' or
normIntensityP == 1) and midas_p < p_threshold:
exonid = ed.ExonID()
critical_exon_list = [1, [exonid]]
constit_exp1 = original_avg_const_exp_db[geneid][0]
constit_exp2 = original_avg_const_exp_db[geneid][1]
ge_fold = constit_exp2 - constit_exp1
### Re-define all of the pairwise values now that the two Splicing-Index groups to report have been determined
data_list1 = array_raw_group_values[probeset][0]
data_list2 = array_raw_group_values[probeset][1]
baseline_exp = statistics.avg(data_list1)
experimental_exp = statistics.avg(data_list2)
fold_change = experimental_exp - baseline_exp
try:
ttest_exp_p = statistics.runComparisonStatistic(
data_list1, data_list2, probability_statistic)
except Exception:
ttest_exp_p = 1
normInt1 = (baseline_exp - constit_exp1)
normInt2 = (experimental_exp - constit_exp2)
adj_fold = normInt2 - normInt1
ped = ProbesetExpressionData(
baseline_exp, experimental_exp, fold_change,
adj_fold, ttest_exp_p, '')
sid = ExonData(splicing_index, probeset,
critical_exon_list, geneid,
group1_ratios, group2_ratios,
normIntensityP, opposite_SI_log_mean)
sid.setConstitutiveExpression(constit_exp1)
sid.setConstitutiveFold(ge_fold)
sid.setProbesetExpressionData(ped)
splicing_index_hash.append((splicing_index, sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(splicing_index, geneid,
normIntensityP)
excluded_probeset_db[probeset] = eed
except Exception:
                    null = [] ### If this occurs, then most likely, the exon and constitutive probeset are the same
print 'Splicing Index analysis complete'
if export_NI_values == 'yes': data.close()
splicing_index_hash.sort()
splicing_index_hash.reverse()
    print len(splicing_index_hash), id_name, "with evidence of Alternative expression"
p_value_call = ''
permute_p_values = {}
summary_data_db['denominator_exp_events'] = denominator_probesets
return splicing_index_hash, p_value_call, permute_p_values, excluded_probeset_db
def importResiduals(filename, probe_probeset_db):
fn = filepath(filename)
key_db = {}
x = 0
prior_uid = ''
uid_gene_db = {}
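    ### Assumed input format (illustrative only, not from a real file): each data row is a '<uid>-<probe>' identifier
    ### followed by tab-delimited residual values, e.g. 'GENE1-probe7\t0.12\t-0.30\t...'; rows are grouped by uid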
for line in open(fn, 'rU').xreadlines():
if x == 0 and line[0] == '#': null = []
elif x == 0: x += 1
else:
data = cleanUpLine(line)
t = string.split(data, '\t')
uid = t[0]
uid, probe = string.split(uid, '-')
try:
probeset = probe_probeset_db[probe]
residuals = t[1:]
if uid == prior_uid:
try:
uid_gene_db[probeset].append(
residuals
) ### Don't need to keep track of the probe ID
except KeyError:
uid_gene_db[probeset] = [residuals]
else: ### Hence, we have finished storing all residual data for that gene
if len(uid_gene_db) > 0:
calculateFIRMAScores(uid_gene_db)
uid_gene_db = {}
try:
uid_gene_db[probeset].append(
residuals
) ### Don't need to keep track of the probe ID
except KeyError:
uid_gene_db[probeset] = [residuals]
prior_uid = uid
except Exception:
null = []
### For the last gene imported
if len(uid_gene_db) > 0: calculateFIRMAScores(uid_gene_db)
def calculateFIRMAScores(uid_gene_db):
probeset_residuals = {}
all_gene_residuals = []
total_probes = 0
for probeset in uid_gene_db:
residuals_list = uid_gene_db[probeset]
sample_db = {}
total_probes += len(residuals_list)
### For all probes in a probeset, calculate the median residual for each sample
for residuals in residuals_list:
index = 0
for residual in residuals:
try:
sample_db[index].append(float(residual))
except KeyError:
sample_db[index] = [float(residual)]
all_gene_residuals.append(float(residual))
index += 1
for index in sample_db:
median_residual = statistics.median(sample_db[index])
sample_db[index] = median_residual
probeset_residuals[probeset] = sample_db
### Calculate the Median absolute deviation
"""http://en.wikipedia.org/wiki/Absolute_deviation
    The median absolute deviation (also MAD) is the median of the absolute deviations from the median. It is a robust estimator of dispersion.
For the example {2, 2, 3, 4, 14}: 3 is the median, so the absolute deviations from the median are {1, 1, 0, 1, 11} (or reordered as
{0, 1, 1, 1, 11}) with a median absolute deviation of 1, in this case unaffected by the value of the outlier 14.
Here, the global gene median will be expressed as res_gene_median.
"""
res_gene_median = statistics.median(all_gene_residuals)
subtracted_residuals = []
for residual in all_gene_residuals:
subtracted_residuals.append(abs(res_gene_median - residual))
gene_MAD = statistics.median(subtracted_residuals)
#if '3263614' in probeset_residuals: print len(all_gene_residuals),all_gene_residuals
for probeset in probeset_residuals:
sample_db = probeset_residuals[probeset]
for index in sample_db:
median_residual = sample_db[index]
try:
firma_score = median_residual / gene_MAD
sample_db[index] = firma_score
except Exception:
null = []
#if probeset == '3263614': print index, median_residual, firma_score, gene_MAD
firma_scores[probeset] = sample_db
def importProbeToProbesets(fold_dbase):
#print "Importing probe-to-probeset annotations (please be patient)..."
filename = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_probeset-probes.txt'
probeset_to_include = {}
gene2examine = {}
    ### Although we want to restrict the analysis to probesets in fold_dbase, we don't want to affect the FIRMA model - filter later
for probeset in fold_dbase:
try:
ed = exon_db[probeset]
gene2examine[ed.GeneID()] = []
except Exception:
null = []
for gene in original_avg_const_exp_db:
gene2examine[gene] = []
for probeset in exon_db:
ed = exon_db[probeset]
geneid = ed.GeneID()
if geneid in gene2examine:
gene2examine[geneid].append(
probeset) ### Store these so we can break things up
probeset_to_include[probeset] = []
probeset_probe_db = importGenericFilteredDBList(filename,
probeset_to_include)
### Get Residuals filename and verify it's presence
#print "Importing comparison residuals..."
filename_objects = string.split(dataset_name[:-1], '.p')
filename = filename_objects[0] + '.txt'
if len(array_group_list) == 2:
filename = import_dir = root_dir + 'AltExpression/FIRMA/residuals/' + array_type + '/' + species + '/' + filename
else:
filename = import_dir = root_dir + 'AltExpression/FIRMA/FullDatasets/' + array_type + '/' + species + '/' + filename
status = verifyFile(filename)
if status != 'found':
        print_out = 'The residual file: '
        print_out += filename
        print_out += ' was not found in the default location.\nPlease re-run the analysis from the beginning.'
try:
UI.WarningWindow(print_out, 'Exit')
except Exception:
print print_out
print traceback.format_exc()
badExit()
print "Calculating FIRMA scores..."
    input_count = len(gene2examine) ### Number of probesets or probeset pairs (junction array) alternatively regulated
original_increment = int(input_count / 20)
increment = original_increment
start_time = time.time()
x = 0
probe_probeset_db = {}
gene_count = 0
total_gene_count = 0
max_gene_count = 3000
round = 1
for gene in gene2examine:
gene_count += 1
total_gene_count += 1
x += 1
#if x == increment: increment+=original_increment; print '*',
for probeset in gene2examine[gene]:
for probe in probeset_probe_db[probeset]:
probe_probeset_db[probe] = probeset
if gene_count == max_gene_count:
### Import residuals and calculate primary sample/probeset FIRMA scores
importResiduals(filename, probe_probeset_db)
#print max_gene_count*round,"genes"
print '*',
gene_count = 0
probe_probeset_db = {}
round += 1 ### Reset these variables and re-run
probeset_probe_db = {}
### Analyze residuals for the remaining probesets (< max_gene_count)
importResiduals(filename, probe_probeset_db)
end_time = time.time()
time_diff = int(end_time - start_time)
print "FIRMA scores calculted for", total_gene_count, "genes in %d seconds" % time_diff
def FIRMAanalysis(fold_dbase):
"""The FIRMA method calculates a score for each probeset and for each samples within a group of arrays, independent
of group membership. However, in AltAnalyze, these analyses are performed dependent on group. The FIRMA score is calculated
by obtaining residual values (residuals is a variable for each probe that can't be explained by the GC content or intensity
of that probe) from APT, for all probes corresponding to a metaprobeset (Ensembl gene in AltAnalyze). These probe residuals
are imported and the ratio of the median residual per probeset per sample divided by the absolute standard deviation of the
median of all probes for all samples for that gene."""
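    ### Sketch of the computation performed in calculateFIRMAScores above: for probeset i, sample j and gene g,
    ### FIRMA_score[i][j] = median_residual[i][j] / MAD(all probe residuals of g); larger absolute scores flag
    ### probe behavior for that sample that the gene-level model does not explain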
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db) > 0:
temp_db = {}
for probeset in fold_dbase:
temp_db[probeset] = []
for probeset in temp_db:
try:
filtered_probeset_db[probeset]
except KeyError:
del fold_dbase[probeset]
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing annotation)
if filter_for_AS == 'yes':
proceed = 0
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try:
del fold_dbase[probeset]
except KeyError:
null = []
#print 'Beginning FIRMA analysis (please be patient)...'
    ### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
        sample_names_ordered = [] ### note: Can't use original_array_names since the order is potentially different (FIRMA stores sample data as indices within dictionary keys)
for group_name in array_group_list: ### THIS LIST IS USED TO MAINTAIN CONSISTENT GROUP ORDERING DURING ANALYSIS
for sample_name in array_group_name_db[group_name]:
sample_names_ordered.append(sample_name)
        summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[:-1] + '.txt'
data = export.ExportFile(summary_output)
title = string.join(['gene-probesets'] + sample_names_ordered,
'\t') + '\n'
data.write(title)
### Import probes for probesets to be analyzed
global firma_scores
firma_scores = {}
importProbeToProbesets(fold_dbase)
    print 'FIRMA scores obtained for', len(firma_scores), 'probesets.'
### Group sample scores for each probeset and calculate statistics
firma_hash = []
excluded_probeset_db = {}
denominator_probesets = 0
interaction = 0
original_increment = int(len(firma_scores) / 20)
increment = original_increment
for probeset in firma_scores:
if probeset in fold_dbase: ### Filter based on expression
ed = exon_db[probeset]
geneid = ed.GeneID()
if interaction == increment:
increment += original_increment
print '*',
interaction += 1
denominator_probesets += 1
sample_db = firma_scores[probeset]
###Use the index values from performExpressionAnalysis to assign each expression value to a new database
firma_group_array = {}
for group_name in array_group_db:
for array_index in array_group_db[group_name]:
firma_score = sample_db[array_index]
try:
firma_group_array[group_name].append(firma_score)
except KeyError:
firma_group_array[group_name] = [firma_score]
###array_group_list should already be unique and correctly sorted (see above)
firma_lists = []
index = 0
for group_name in array_group_list:
firma_list = firma_group_array[group_name]
if len(array_group_list) > 2:
firma_list = statistics.avg(firma_list), firma_list, index
firma_lists.append(firma_list)
index += 1
if export_NI_values == 'yes': ### DO THIS HERE SINCE firma_lists IS SORTED BELOW!!!!
try:
er = ed.ExonID()
except Exception:
er = 'NA'
export_list = [geneid + '\t' + er + '\t' + probeset]
export_list2 = []
for firma_ls in firma_lists:
if len(array_group_list) > 2:
                        firma_ls = firma_ls[1] ### See above modification of firma_list object for multiple group analysis
export_list += firma_ls
for i in export_list:
export_list2.append(str(i))
ev = string.join(export_list2, '\t') + '\n'
data.write(ev)
if len(array_group_list) == 2:
firma_list1 = firma_lists[0]
firma_list2 = firma_lists[-1]
firma_avg1 = statistics.avg(firma_list1)
firma_avg2 = statistics.avg(firma_list2)
index1 = 0
index2 = 1 ### Only two groups, thus only two indeces
            else: ### The below code deals with identifying the comparisons which yield the greatest FIRMA difference
firma_lists.sort()
index1 = firma_lists[0][-1]
index2 = firma_lists[-1][-1]
firma_list1 = firma_lists[0][1]
firma_list2 = firma_lists[-1][1]
firma_avg1 = firma_lists[0][0]
firma_avg2 = firma_lists[-1][0]
if calculate_normIntensity_p == 'yes':
try:
normIntensityP = statistics.runComparisonStatistic(
firma_list1, firma_list2, probability_statistic)
except Exception:
normIntensityP = 'NA' ### Occurs when analyzing two groups with no variance
else:
normIntensityP = 'NA'
if normIntensityP == 1: normIntensityP = 'NA'
firma_fold_change = firma_avg2 - firma_avg1
firma_fold_change = -1 * firma_fold_change ### Make this equivalent to Splicing Index fold which is also relative to experimental not control
if (firma_avg2 * firma_avg1) < 0: opposite_FIRMA_scores = 'yes'
else: opposite_FIRMA_scores = 'no'
if probeset in midas_db:
try:
midas_p = float(midas_db[probeset])
except ValueError:
midas_p = 0
else:
midas_p = 0
#if probeset == '3263614': print firma_fold_change, normIntensityP, midas_p,'\n',firma_list1, firma_list2, [p_threshold];kill
if abs(firma_fold_change) > alt_exon_logfold_cutoff and (
normIntensityP < p_threshold or
normIntensityP == 'NA') and midas_p < p_threshold:
exonid = ed.ExonID()
critical_exon_list = [1, [exonid]]
#gene_expression_values = original_avg_const_exp_db[geneid]
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2 - constit_exp1
### Re-define all of the pairwise values now that the two FIRMA groups to report have been determined
data_list1 = array_raw_group_values[probeset][index1]
data_list2 = array_raw_group_values[probeset][index2]
baseline_exp = statistics.avg(data_list1)
experimental_exp = statistics.avg(data_list2)
fold_change = experimental_exp - baseline_exp
group_name1 = array_group_list[index1]
group_name2 = array_group_list[index2]
try:
ttest_exp_p = statistics.runComparisonStatistic(
data_list1, data_list2, probability_statistic)
except Exception:
ttest_exp_p = 1
normInt1 = (baseline_exp - constit_exp1)
normInt2 = (experimental_exp - constit_exp2)
adj_fold = normInt2 - normInt1
ped = ProbesetExpressionData(
baseline_exp, experimental_exp, fold_change, adj_fold,
ttest_exp_p, group_name2 + '_vs_' + group_name1)
fid = ExonData(firma_fold_change, probeset, critical_exon_list,
geneid, data_list1, data_list2, normIntensityP,
opposite_FIRMA_scores)
fid.setConstitutiveExpression(constit_exp1)
fid.setConstitutiveFold(ge_fold)
fid.setProbesetExpressionData(ped)
firma_hash.append((firma_fold_change, fid))
#print [[[probeset,firma_fold_change,normIntensityP,p_threshold]]]
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(firma_fold_change, geneid,
normIntensityP)
excluded_probeset_db[probeset] = eed
print 'FIRMA analysis complete'
if export_NI_values == 'yes': data.close()
firma_hash.sort()
firma_hash.reverse()
    print len(firma_hash), "Probesets with evidence of Alternative expression out of", len(excluded_probeset_db) + len(firma_hash)
p_value_call = ''
permute_p_values = {}
summary_data_db['denominator_exp_events'] = denominator_probesets
return firma_hash, p_value_call, permute_p_values, excluded_probeset_db
def getFilteredFilename(filename):
if array_type == 'junction':
filename = string.replace(filename, '.txt', '-filtered.txt')
return filename
def getExonVersionFilename(filename):
original_filename = filename
if array_type == 'junction' or array_type == 'RNASeq':
if explicit_data_type != 'null':
filename = string.replace(filename, array_type,
array_type + '/' + explicit_data_type)
### Make sure the file exists, otherwise, use the original
file_status = verifyFile(filename)
#print [[filename,file_status]]
if file_status != 'found': filename = original_filename
return filename
def importProbesetAligningDomains(exon_db, report_type):
filename = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_domain_aligning_probesets.txt'
filename = getFilteredFilename(filename)
probeset_aligning_db = importGenericDBList(filename)
filename = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_indirect_domain_aligning_probesets.txt'
filename = getFilteredFilename(filename)
probeset_indirect_aligning_db = importGenericDBList(filename)
if array_type == 'AltMouse' or (
(array_type == 'junction' or
array_type == 'RNASeq') and explicit_data_type == 'null'):
new_exon_db = {}
splicing_call_db = {}
for probeset_pair in exon_db:
### For junction analyses exon_db is really regulated_exon_junction_db, containing the inclusion,exclusion probeset tuple and an object as values
ed = exon_db[probeset_pair]
geneid = ed.GeneID()
critical_exons = ed.CriticalExons()
for exon in critical_exons:
new_key = geneid + ':' + exon
try:
new_exon_db[new_key].append(probeset_pair)
except KeyError:
new_exon_db[new_key] = [probeset_pair]
try:
splicing_call_db[new_key].append(ed.SplicingCall())
except KeyError:
splicing_call_db[new_key] = [ed.SplicingCall()]
for key in new_exon_db:
probeset_pairs = new_exon_db[key]
            probeset_pair = probeset_pairs[0] ### grab one of the probeset pairs
ed = exon_db[probeset_pair]
geneid = ed.GeneID()
jd = SimpleJunctionData(
geneid, '', '', '', probeset_pairs
) ### use only those necessary fields for this function (probeset pairs will be called as CriticalExons)
splicing_call_db[key].sort()
splicing_call = splicing_call_db[key][-1]
            jd.setSplicingCall(splicing_call) ### Bug from 1.15 to have key be new_key?
new_exon_db[key] = jd
exon_db = new_exon_db
gene_protein_ft_db = {}
domain_gene_count_db = {}
protein_functional_attribute_db = {}
probeset_aligning_db2 = {}
splicing_call_db = []
new_exon_db = [] ### Clear memory
for probeset in exon_db:
#if probeset == '107650':
#if probeset in probeset_aligning_db: print probeset_aligning_db[probeset];kill
if probeset in probeset_aligning_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else: proceed = 'yes'
gene = exon_db[probeset].GeneID()
new_domain_list = []
new_domain_list2 = []
if report_type == 'gene' and proceed == 'yes':
for domain in probeset_aligning_db[probeset]:
try:
domain_gene_count_db[domain].append(gene)
except KeyError:
domain_gene_count_db[domain] = [gene]
try:
gene_protein_ft_db[gene].append(domain)
except KeyError:
gene_protein_ft_db[gene] = [domain]
elif proceed == 'yes':
if array_type == 'AltMouse' or (
(array_type == 'junction' or
array_type == 'RNASeq') and explicit_data_type == 'null'):
probeset_list = exon_db[probeset].CriticalExons()
else:
probeset_list = [probeset]
for id in probeset_list:
for domain in probeset_aligning_db[probeset]:
new_domain_list.append('(direct)' + domain)
new_domain_list2.append((domain, '+'))
new_domain_list = unique.unique(new_domain_list)
new_domain_list_str = string.join(new_domain_list, ', ')
gene_protein_ft_db[gene, id] = new_domain_list2
probeset_aligning_db2[id] = new_domain_list_str
#print exon_db['107650']
for probeset in exon_db:
if probeset in probeset_indirect_aligning_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else: proceed = 'yes'
gene = exon_db[probeset].GeneID()
new_domain_list = []
new_domain_list2 = []
if report_type == 'gene' and proceed == 'yes':
for domain in probeset_indirect_aligning_db[probeset]:
try:
domain_gene_count_db[domain].append(gene)
except KeyError:
domain_gene_count_db[domain] = [gene]
try:
gene_protein_ft_db[gene].append(domain)
except KeyError:
gene_protein_ft_db[gene] = [domain]
elif proceed == 'yes':
if array_type == 'AltMouse' or (
(array_type == 'junction' or
array_type == 'RNASeq') and explicit_data_type == 'null'):
probeset_list = exon_db[probeset].CriticalExons()
else:
probeset_list = [probeset]
for id in probeset_list:
for domain in probeset_indirect_aligning_db[probeset]:
new_domain_list.append('(indirect)' + domain)
new_domain_list2.append((domain, '-'))
new_domain_list = unique.unique(new_domain_list)
new_domain_list_str = string.join(new_domain_list, ', ')
gene_protein_ft_db[gene, id] = new_domain_list2
probeset_aligning_db2[id] = new_domain_list_str
domain_gene_count_db = eliminate_redundant_dict_values(
domain_gene_count_db)
gene_protein_ft_db = eliminate_redundant_dict_values(gene_protein_ft_db)
if analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
clearObjectsFromMemory(exon_db)
exon_db = []
try:
clearObjectsFromMemory(new_exon_db)
except Exception:
null = []
probeset_indirect_aligning_db = []
probeset_aligning_db = []
if report_type == 'perfect_match':
gene_protein_ft_db = []
domain_gene_count_db = []
protein_functional_attribute_db = []
return probeset_aligning_db2
elif report_type == 'probeset':
probeset_aligning_db2 = []
return gene_protein_ft_db, domain_gene_count_db, protein_functional_attribute_db
else:
probeset_aligning_db2 = []
protein_functional_attribute_db = []
probeset_aligning_db2 = []
len_gene_protein_ft_db = len(gene_protein_ft_db)
gene_protein_ft_db = []
return len_gene_protein_ft_db, domain_gene_count_db
def importProbesetProteinCompDomains(exon_db, report_type, comp_type):
filename = 'AltDatabase/' + species + '/' + array_type + '/probeset-domain-annotations-' + comp_type + '.txt'
if (array_type == 'junction' or
array_type == 'RNASeq') and explicit_data_type != 'null':
filename = getFilteredFilename(filename)
filename = getExonVersionFilename(filename)
probeset_aligning_db = importGeneric(filename)
filename = 'AltDatabase/' + species + '/' + array_type + '/probeset-protein-annotations-' + comp_type + '.txt'
if (array_type == 'junction' or
array_type == 'RNASeq') and explicit_data_type != 'null':
filename = getFilteredFilename(filename)
filename = getExonVersionFilename(filename)
gene_protein_ft_db = {}
domain_gene_count_db = {}
for probeset in exon_db:
initial_proceed = 'no'
original_probeset = probeset
if probeset in probeset_aligning_db: initial_proceed = 'yes'
elif array_type == 'AltMouse' or (
(array_type == 'junction' or
array_type == 'RNASeq') and explicit_data_type == 'null'):
### For junction analyses exon_db is really regulated_exon_junction_db, containing the inclusion,exclusion probeset tuple and an object as values
if '|' in probeset[0]:
probeset1 = string.split(probeset[0], '|')[0]
probeset = probeset1, probeset[1]
try:
alternate_probeset_id = exon_db[probeset].InclusionLookup()
probeset = alternate_probeset_id, probeset[1]
except Exception:
null = []
probeset_joined = string.join(probeset, '|')
#print [probeset_joined],[probeset]
if probeset_joined in probeset_aligning_db:
initial_proceed = 'yes'
probeset = probeset_joined
elif probeset[0] in probeset_aligning_db:
initial_proceed = 'yes'
probeset = probeset[0]
elif probeset[1] in probeset_aligning_db:
initial_proceed = 'yes'
probeset = probeset[1]
#else: for i in probeset_aligning_db: print [i];kill
if initial_proceed == 'yes':
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[original_probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else: proceed = 'yes'
new_domain_list = []
gene = exon_db[original_probeset].GeneID()
if report_type == 'gene' and proceed == 'yes':
for domain_data in probeset_aligning_db[probeset]:
try:
domain, call = string.split(domain_data, '|')
except Exception:
values = string.split(domain_data, '|')
domain = values[0]
                        call = values[-1] ### occurs when a | exists in the annotations from UniProt
try:
domain_gene_count_db[domain].append(gene)
except KeyError:
domain_gene_count_db[domain] = [gene]
try:
gene_protein_ft_db[gene].append(domain)
except KeyError:
gene_protein_ft_db[gene] = [domain]
elif proceed == 'yes':
for domain_data in probeset_aligning_db[probeset]:
domain, call = string.split(domain_data, '|')
new_domain_list.append((domain, call))
#new_domain_list = string.join(new_domain_list,', ')
gene_protein_ft_db[gene, original_probeset] = new_domain_list
domain_gene_count_db = eliminate_redundant_dict_values(
domain_gene_count_db)
probeset_aligning_db = [] ### Clear memory
probeset_aligning_protein_db = importGeneric(filename)
probeset_pairs = {
} ### Store all possible probeset pairs as single probesets for protein-protein associations
for probeset in exon_db:
if len(probeset) == 2:
for p in probeset:
probeset_pairs[p] = probeset
if report_type == 'probeset':
### Below code was re-written to be more memory efficient by not storing all data in probeset-domain-annotations-*comp*.txt via generic import
protein_functional_attribute_db = {}
probeset_protein_associations = {}
protein_db = {}
for probeset in exon_db:
initial_proceed = 'no'
original_probeset = probeset
if probeset in probeset_aligning_protein_db:
initial_proceed = 'yes'
elif array_type == 'AltMouse' or (
(array_type == 'junction' or
array_type == 'RNASeq') and explicit_data_type == 'null'):
if '|' in probeset[0]:
probeset1 = string.split(probeset[0], '|')[0]
probeset = probeset1, probeset[1]
try:
alternate_probeset_id = exon_db[probeset].InclusionLookup()
probeset = alternate_probeset_id, probeset[1]
except Exception:
null = []
probeset_joined = string.join(probeset, '|')
#print [probeset_joined],[probeset]
if probeset_joined in probeset_aligning_protein_db:
initial_proceed = 'yes'
probeset = probeset_joined
elif probeset[0] in probeset_aligning_protein_db:
initial_proceed = 'yes'
probeset = probeset[0]
elif probeset[1] in probeset_aligning_protein_db:
initial_proceed = 'yes'
probeset = probeset[1]
#else: for i in probeset_aligning_db: print [i];kill
if initial_proceed == 'yes':
protein_data_list = probeset_aligning_protein_db[probeset]
new_protein_list = []
gene = exon_db[original_probeset].GeneID()
for protein_data in protein_data_list:
protein_info, call = string.split(protein_data, '|')
if 'AA:' in protein_info:
protein_info_r = string.replace(protein_info, ')', '*')
protein_info_r = string.replace(protein_info_r, '(',
'*')
protein_info_r = string.split(protein_info_r, '*')
null_protein = protein_info_r[1]
hit_protein = protein_info_r[3]
probeset_protein_associations[
original_probeset] = null_protein, hit_protein, call
protein_db[null_protein] = []
protein_db[hit_protein] = []
new_protein_list.append((protein_info, call))
#new_protein_list = string.join(new_domain_list,', ')
protein_functional_attribute_db[
gene, original_probeset] = new_protein_list
filename = 'AltDatabase/' + species + '/' + array_type + '/SEQUENCE-protein-dbase_' + comp_type + '.txt'
filename = getExonVersionFilename(filename)
protein_seq_db = importGenericFiltered(filename, protein_db)
for key in protein_functional_attribute_db:
gene, probeset = key
try:
null_protein, hit_protein, call = probeset_protein_associations[
probeset]
null_seq = protein_seq_db[null_protein][0]
hit_seq = protein_seq_db[hit_protein][0]
seq_attr = 'sequence: ' + '(' + null_protein + ')' + null_seq + ' -> ' + '(' + hit_protein + ')' + hit_seq
protein_functional_attribute_db[key].append((seq_attr, call))
except KeyError:
null = []
protein_seq_db = []
probeset_aligning_protein_db = []
return gene_protein_ft_db, domain_gene_count_db, protein_functional_attribute_db
else:
probeset_aligning_protein_db = []
len_gene_protein_ft_db = len(gene_protein_ft_db)
gene_protein_ft_db = []
return len_gene_protein_ft_db, domain_gene_count_db
class SimpleJunctionData:
def __init__(self, geneid, probeset1, probeset2, probeset1_display,
critical_exon_list):
self._geneid = geneid
self._probeset1 = probeset1
self._probeset2 = probeset2
self._probeset1_display = probeset1_display
self._critical_exon_list = critical_exon_list
def GeneID(self):
return self._geneid
def Probeset1(self):
return self._probeset1
def Probeset2(self):
return self._probeset2
def InclusionDisplay(self):
return self._probeset1_display
def CriticalExons(self):
return self._critical_exon_list
def setSplicingCall(self, splicing_call):
#self._splicing_call = EvidenceOfAltSplicing(slicing_annot)
self._splicing_call = splicing_call
def setSymbol(self, symbol):
self.symbol = symbol
def Symbol(self):
return self.symbol
def SplicingCall(self):
return self._splicing_call
def setInclusionLookup(self, incl_junction_probeset):
self.incl_junction_probeset = incl_junction_probeset
def InclusionLookup(self):
return self.incl_junction_probeset
def formatJunctionData(probesets, affygene, critical_exon_list):
    if '|' in probesets[0]: ### Only return the first inclusion probeset (agglomerated probesets)
incl_list = string.split(probesets[0], '|')
incl_probeset = incl_list[0]
excl_probeset = probesets[1]
else:
incl_probeset = probesets[0]
excl_probeset = probesets[1]
jd = SimpleJunctionData(affygene, incl_probeset, excl_probeset,
probesets[0], critical_exon_list)
key = incl_probeset, excl_probeset
return key, jd
class JunctionExpressionData:
def __init__(self, baseline_norm_exp, exper_norm_exp, pval, ped):
self.baseline_norm_exp = baseline_norm_exp
self.exper_norm_exp = exper_norm_exp
self.pval = pval
self.ped = ped
def ConNI(self):
ls = []
for i in self.logConNI():
ls.append(math.pow(2, i))
return ls
def ExpNI(self):
ls = []
for i in self.logExpNI():
ls.append(math.pow(2, i))
return ls
def ConNIAvg(self):
return math.pow(2, statistics.avg(self.logConNI()))
def ExpNIAvg(self):
return math.pow(2, statistics.avg(self.logExpNI()))
def logConNI(self):
return self.baseline_norm_exp
def logExpNI(self):
return self.exper_norm_exp
def Pval(self):
return self.pval
def ProbesetExprData(self):
return self.ped
def __repr__(self):
        return str(self.ConNI()) + '|' + str(self.ExpNI())
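### Note: ConNI()/ExpNI() above unlog the stored log2 normalized intensities, e.g. a logConNI of [-1.0, 0.0, 1.0]
### becomes [0.5, 1.0, 2.0]; calculateAllASPIREScores below operates on these linear-scale ratios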
def calculateAllASPIREScores(p1, p2):
b1o = p1.ConNIAvg()
b2o = p2.ConNIAvg()
e1o = p1.ExpNIAvg()
e2o = p2.ExpNIAvg()
original_score = statistics.aspire_stringent(b1o, e1o, b2o, e2o)
index = 0
    baseline_scores = [] ### Loop through each control ratio and compare to control ratio mean
for b1 in p1.ConNI():
b2 = p2.ConNI()[index]
score = statistics.aspire_stringent(b2, e2o, b1, e1o)
index += 1
baseline_scores.append(score)
index = 0
    exp_scores = [] ### Loop through each experimental ratio and compare to control ratio mean
for e1 in p1.ExpNI():
e2 = p2.ExpNI()[index]
score = statistics.aspire_stringent(b1o, e1, b2o, e2)
index += 1
exp_scores.append(score)
try:
aspireP = statistics.runComparisonStatistic(
baseline_scores, exp_scores, probability_statistic)
except Exception:
aspireP = 'NA' ### Occurs when analyzing two groups with no variance
if aspireP == 1: aspireP = 'NA'
"""
if aspireP<0.05 and oscore>0.2 and statistics.avg(exp_scores)<0:
index=0
for e1 in p1.ExpNI():
e2 = p2.ExpNI()[index]
score = statistics.aspire_stringent(b1,e1,b2,e2)
print p1.ExpNI(), p2.ExpNI(); print e1, e2
print e1o,e2o; print b1, b2; print score, original_score
print exp_scores, statistics.avg(exp_scores); kill"""
return baseline_scores, exp_scores, aspireP
def stringListConvert(ls):
ls2 = []
for i in ls:
ls2.append(str(i))
return ls2
def analyzeJunctionSplicing(nonlog_NI_db):
group_sizes = []
    original_array_indices = permute_lists[0] ### p[0] is the original organization of the group samples prior to permutation
for group in original_array_indices:
group_sizes.append(len(group))
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db) > 0:
temp_db = {}
for probeset in nonlog_NI_db:
temp_db[probeset] = []
for probeset in temp_db:
try:
filtered_probeset_db[probeset]
except KeyError:
del nonlog_NI_db[probeset]
    ### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
global NIdata_export
        summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[:-1] + '.txt'
NIdata_export = export.ExportFile(summary_output)
title = string.join(
['inclusion-probeset', 'exclusion-probeset'
] + original_array_names, '\t') + '\n'
NIdata_export.write(title)
### Calculate a probeset p-value adjusted for constitutive expression levels (taken from splicing index method)
xl = 0
probeset_normIntensity_db = {}
for probeset in array_raw_group_values:
ed = exon_db[probeset]
geneid = ed.GeneID()
xl += 1
#if geneid in alt_junction_db and geneid in original_avg_const_exp_db: ### Don't want this filter since it causes problems for Trans-splicing
group_index = 0
si_interim_group_db = {}
ge_threshold_count = 0
value_count = 0
        ### Prepare normalized expression lists for reciprocal-junction algorithms
if geneid in avg_const_exp_db:
for group_values in array_raw_group_values[probeset]:
value_index = 0
ratio_hash = []
for value in group_values: ###Calculate normalized ratio's for each condition and save raw values for later permutation
exp_val = value
ge_val = avg_const_exp_db[geneid][value_count]
exp_ratio = exp_val - ge_val
ratio_hash.append(exp_ratio)
value_index += 1
value_count += 1
si_interim_group_db[group_index] = ratio_hash
group_index += 1
group1_ratios = si_interim_group_db[0]
group2_ratios = si_interim_group_db[1]
### Calculate and store simple expression summary stats
data_list1 = array_raw_group_values[probeset][0]
data_list2 = array_raw_group_values[probeset][1]
baseline_exp = statistics.avg(data_list1)
experimental_exp = statistics.avg(data_list2)
fold_change = experimental_exp - baseline_exp
#group_name1 = array_group_list[0]; group_name2 = array_group_list[1]
try:
ttest_exp_p = statistics.runComparisonStatistic(
data_list1, data_list2, probability_statistic)
except Exception:
ttest_exp_p = 'NA'
if ttest_exp_p == 1: ttest_exp_p = 'NA'
adj_fold = statistics.avg(group2_ratios) - statistics.avg(
group1_ratios)
ped = ProbesetExpressionData(baseline_exp, experimental_exp,
fold_change, adj_fold, ttest_exp_p,
'')
try:
try:
normIntensityP = statistics.runComparisonStatistic(
group1_ratios, group2_ratios, probability_statistic)
except Exception:
#print group1_ratios,group2_ratios,array_raw_group_values[probeset],avg_const_exp_db[geneid];kill
normIntensityP = 'NA' ###occurs for constitutive probesets
except Exception:
normIntensityP = 0
if normIntensityP == 1: normIntensityP = 'NA'
ji = JunctionExpressionData(group1_ratios, group2_ratios,
normIntensityP, ped)
            probeset_normIntensity_db[probeset] = ji ### store and access this below
#if probeset == 'G6899622@J916374@j_at': print normIntensityP,group1_ratios,group2_ratios;kill
###Concatenate the two raw expression groups into a single list for permutation analysis
ls_concatenated = []
for group in array_raw_group_values[probeset]:
for entry in group:
ls_concatenated.append(entry)
if analysis_method == 'linearregres': ###Convert out of log space
ls_concatenated = statistics.log_fold_conversion_fraction(
ls_concatenated)
array_raw_group_values[probeset] = ls_concatenated
s = 0
t = 0
y = ''
denominator_events = 0
excluded_probeset_db = {}
splice_event_list = []
splice_event_list_mx = []
splice_event_list_non_mx = []
    event_mx_temp = [] #use this to exclude duplicate mx events
    permute_p_values = {}
for affygene in alt_junction_db:
if affygene in original_avg_const_exp_db:
constit_exp1 = original_avg_const_exp_db[affygene][0]
constit_exp2 = original_avg_const_exp_db[affygene][1]
ge_fold = constit_exp2 - constit_exp1
for event in alt_junction_db[affygene]:
if array_type == 'AltMouse':
#event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
#critical_exon_db[affygene,tuple(critical_exons)] = [1,'E'+str(e1a),'E'+str(e2b)] --- affygene,tuple(event) == key, 1 indicates both are either up or down together
event_call = event[0][0] + '-' + event[1][0]
exon_set1 = event[0][1]
exon_set2 = event[1][1]
probeset1 = exon_dbase[affygene, exon_set1]
probeset2 = exon_dbase[affygene, exon_set2]
critical_exon_list = critical_exon_db[affygene, tuple(
event)]
if array_type == 'junction' or array_type == 'RNASeq':
event_call = 'ei-ex' ### Below objects from JunctionArrayEnsemblRules - class JunctionInformation
probeset1 = event.InclusionProbeset()
probeset2 = event.ExclusionProbeset()
exon_set1 = event.InclusionJunction()
exon_set2 = event.ExclusionJunction()
try:
novel_event = event.NovelEvent()
except Exception:
novel_event = 'known'
critical_exon_list = [1, event.CriticalExonSets()]
key, jd = formatJunctionData(
[probeset1, probeset2], affygene, critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try:
jd.setSymbol(annotate_db[affygene].Symbol())
except Exception:
null = []
#if '|' in probeset1: print probeset1, key,jd.InclusionDisplay();kill
                probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
#print probeset1,probeset2, critical_exon_list,event_call,exon_set1,exon_set2;kill
if probeset1 in nonlog_NI_db and probeset2 in nonlog_NI_db:
denominator_events += 1
try:
p1 = probeset_normIntensity_db[probeset1]
p2 = probeset_normIntensity_db[probeset2]
except Exception:
print probeset1, probeset2
p1 = probeset_normIntensity_db[probeset1]
p2 = probeset_normIntensity_db[probeset2]
#if '|' in probeset1: print
pp1 = p1.Pval()
pp2 = p2.Pval()
baseline_ratio1 = p1.ConNIAvg()
experimental_ratio1 = p1.ExpNIAvg()
baseline_ratio2 = p2.ConNIAvg()
experimental_ratio2 = p2.ExpNIAvg()
ped1 = p1.ProbesetExprData()
ped2 = p2.ProbesetExprData()
Rin = ''
Rex = ''
                    r = 0 ###Variable used to determine if we should take the absolute value of dI for mutually exclusive events
if event_call == 'ei-ex': #means probeset1 is an exon inclusion and probeset2 is an exon exclusion
Rin = baseline_ratio1 / experimental_ratio1 # Rin=A/C
                        Rex = baseline_ratio2 / experimental_ratio2 # Rex=B/D
I1 = baseline_ratio1 / (
baseline_ratio1 + baseline_ratio2)
I2 = experimental_ratio1 / (
experimental_ratio1 + experimental_ratio2)
###When Rex is larger, the exp_ratio for exclusion is decreased in comparison to baseline.
###Thus, increased inclusion (when Rin is small, inclusion is big)
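                        ### Illustrative numbers only: baseline_ratio1=2.0, experimental_ratio1=1.0 gives Rin=2.0, while
                        ### baseline_ratio2=0.5, experimental_ratio2=1.0 gives Rex=0.5; inclusion therefore falls in the
                        ### experimental group and the event is called 'downregulated' below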
if (Rin > 1 and Rex < 1): y = 'downregulated'
elif (Rin < 1 and Rex > 1): y = 'upregulated'
elif (Rex < Rin): y = 'downregulated'
else: y = 'upregulated'
temp_list = []
if event_call == 'mx-mx':
temp_list.append(exon_set1)
temp_list.append(exon_set2)
temp_list.sort()
if (
affygene, temp_list
) not in event_mx_temp: #use this logic to prevent mx entries being added more than once
event_mx_temp.append((affygene, temp_list))
                            ###Arbitrarily choose which exon-set will be Rin or Rex; this does not matter for mutually exclusive events since abs(dI) is taken below
Rin = baseline_ratio1 / experimental_ratio1 # Rin=A/C
                            Rex = baseline_ratio2 / experimental_ratio2 # Rex=B/D
I1 = baseline_ratio1 / (
baseline_ratio1 + baseline_ratio2)
I2 = experimental_ratio1 / (
experimental_ratio1 + experimental_ratio2)
y = 'mutually-exclusive'
r = 1
if analysis_method == 'ASPIRE' and Rex != '':
#if affygene == 'ENSMUSG00000000126': print Rin, Rex, probeset1, probeset2
if (Rin > 1 and Rex < 1) or (Rin < 1 and Rex > 1):
s += 1
in1 = ((Rex - 1.0) * Rin) / (Rex - Rin)
in2 = (Rex - 1.0) / (Rex - Rin)
                            dI = ((in2 - in1) + (I2 - I1)) / 2.0 # modified to give proper exon inclusion
dI = dI * (
-1
) ### Reverse the fold to make equivalent to splicing-index and FIRMA scores
try:
baseline_scores, exp_scores, aspireP = calculateAllASPIREScores(
p1, p2)
except Exception:
baseline_scores = [0]
exp_scores = [dI]
aspireP = 0
if export_NI_values == 'yes':
baseline_scores = stringListConvert(
baseline_scores)
exp_scores = stringListConvert(exp_scores)
ev = string.join(
[probeset1, probeset2] + baseline_scores +
exp_scores, '\t') + '\n'
NIdata_export.write(ev)
if max_replicates > 2 or equal_replicates == 2:
permute_p_values[(probeset1, probeset2)] = [
aspireP, 'NA', 'NA', 'NA'
]
if r == 1:
dI = abs(
dI
) ###Occurs when event is mutually exclusive
#if abs(dI)>alt_exon_logfold_cutoff: print [dI],pp1,pp2,aspireP;kill
#print [affygene,dI,pp1,pp2,aspireP,event.CriticalExonSets(),probeset1,probeset2,alt_exon_logfold_cutoff,p_threshold]
if ((pp1 < p_threshold or
pp2 < p_threshold) or pp1 == 1 or pp1 == 'NA'
) and abs(
dI) > alt_exon_logfold_cutoff: ###Require that the splice event have a constitutive corrected p less than the user defined threshold
ejd = ExonJunctionData(
dI, probeset1, probeset2, pp1, pp2, y,
event_call, critical_exon_list, affygene,
ped1, ped2)
"""if probeset1 == 'ENSMUSG00000033335:E16.1-E17.1' and probeset2 == 'ENSMUSG00000033335:E16.1-E19.1':
print [dI,pp1,pp2,p_threshold,alt_exon_logfold_cutoff]
print baseline_scores, exp_scores, [aspireP]#;sys.exit()"""
ejd.setConstitutiveExpression(constit_exp1)
ejd.setConstitutiveFold(ge_fold)
if perform_permutation_analysis == 'yes':
splice_event_list.append((dI, ejd))
elif aspireP < permute_p_threshold or aspireP == 'NA':
splice_event_list.append((dI, ejd))
#if abs(dI)>.2: print probeset1, probeset2, critical_exon_list, [exon_set1], [exon_set2]
#if dI>.2 and aspireP<0.05: print baseline_scores,exp_scores,aspireP, statistics.avg(exp_scores), dI
elif array_type == 'junction' or array_type == 'RNASeq':
excluded_probeset_db[
affygene + ':' + event.CriticalExonSets(
)[0]] = probeset1, affygene, dI, 'NA', aspireP
if array_type == 'RNASeq':
try:
ejd.setNovelEvent(novel_event)
except Exception:
None
if analysis_method == 'linearregres' and Rex != '':
s += 1
log_fold, linregressP, rsqrd_status = getLinearRegressionScores(
probeset1, probeset2, group_sizes)
log_fold = log_fold ### Reverse the fold to make equivalent to splicing-index and FIRMA scores
if max_replicates > 2 or equal_replicates == 2:
permute_p_values[(probeset1, probeset2)] = [
linregressP, 'NA', 'NA', 'NA'
]
if rsqrd_status == 'proceed':
if ((pp1 < p_threshold or
pp2 < p_threshold) or pp1 == 1 or pp1 == 'NA'
) and abs(
log_fold) > alt_exon_logfold_cutoff: ###Require that the splice event have a constitutive corrected p less than the user defined threshold
ejd = ExonJunctionData(
log_fold, probeset1, probeset2, pp1, pp2,
y, event_call, critical_exon_list,
affygene, ped1, ped2)
ejd.setConstitutiveExpression(constit_exp1)
ejd.setConstitutiveFold(ge_fold)
if perform_permutation_analysis == 'yes':
splice_event_list.append((log_fold, ejd))
elif linregressP < permute_p_threshold:
splice_event_list.append((log_fold, ejd))
#if probeset1 == 'G6990053@762121_762232_at' and probeset2 == 'G6990053@J926254@j_at':
#print event_call, critical_exon_list,affygene, Rin, Rex, y, temp_list;kill
elif array_type == 'junction' or array_type == 'RNASeq':
excluded_probeset_db[
affygene + ':' + event.CriticalExonSets(
)[0]] = probeset1, affygene, log_fold, 'NA', linregressP
if array_type == 'RNASeq':
try:
ejd.setNovelEvent(novel_event)
except Exception:
None
else:
t += 1
clearObjectsFromMemory(probeset_normIntensity_db)
probeset_normIntensity_db = {}
### Potentially large memory object containing summary stats for all probesets
statistics.adjustPermuteStats(permute_p_values)
summary_data_db['denominator_exp_events'] = denominator_events
print "Number of exon-events analyzed:", s
print "Number of exon-events excluded:", t
return splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db
def maxReplicates():
replicates = 0
greater_than_two = 0
greater_than_one = 0
group_sizes = []
for probeset in array_raw_group_values:
for group_values in array_raw_group_values[probeset]:
try:
replicates += len(group_values)
group_sizes.append(len(group_values))
if len(group_values) > 2: greater_than_two += 1
elif len(group_values) > 1: greater_than_one += 1
except Exception:
replicates += len(array_raw_group_values[probeset])
break
break
group_sizes = unique.unique(group_sizes)
if len(group_sizes) == 1: equal_replicates = group_sizes[0]
else: equal_replicates = 0
max_replicates = replicates / float(original_conditions)
if max_replicates < 2.01:
if greater_than_two > 0 and greater_than_one > 0: max_replicates = 3
return max_replicates, equal_replicates
def furtherProcessJunctionScores(splice_event_list, probeset_comp_db,
permute_p_values):
splice_event_list.sort()
splice_event_list.reverse()
print "filtered %s scores:" % analysis_method, len(splice_event_list)
if perform_permutation_analysis == 'yes':
###*********BEGIN PERMUTATION ANALYSIS*********
if max_replicates > 2 or equal_replicates == 2:
splice_event_list, p_value_call, permute_p_values = permuteSplicingScores(
splice_event_list)
else:
print "WARNING...Not enough replicates to perform permutation analysis."
p_value_call = ''
permute_p_values = {}
else:
if max_replicates > 2 or equal_replicates == 2:
if probability_statistic == 'unpaired t-test':
p_value_call = analysis_method + '-OneWayAnova'
else:
p_value_call = analysis_method + '-' + probability_statistic
else:
if probability_statistic == 'unpaired t-test':
p_value_call = 'OneWayAnova'
permute_p_values = {}
else:
p_value_call = probability_statistic
permute_p_values = {}
    print len(splice_event_list), 'alternative events after subsequent filtering (optional)'
    ### Get ExonJunction annotations
junction_splicing_annot_db = getJunctionSplicingAnnotations(
probeset_comp_db)
regulated_exon_junction_db = {}
new_splice_event_list = []
if filter_for_AS == 'yes':
print "Filtering for evidence of Alternative Splicing"
for (fold, ejd) in splice_event_list:
proceed = 'no'
if filter_for_AS == 'yes':
try:
ja = junction_splicing_annot_db[ejd.Probeset1(), ejd.Probeset2(
)]
splicing_call = ja.SplicingCall()
if splicing_call == 1: proceed = 'yes'
except KeyError:
proceed = 'no'
else:
proceed = 'yes'
if proceed == 'yes':
key, jd = formatJunctionData(
[ejd.Probeset1(), ejd.Probeset2()
], ejd.GeneID(), ejd.CriticalExons())
            regulated_exon_junction_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
new_splice_event_list.append((fold, ejd))
### Add junction probeset lookup for reciprocal junctions composed of an exonid (not in protein database currently)
            if array_type == 'RNASeq' and '-' not in key[0]: ### Thus, it is an exon compared to a junction
events = alt_junction_db[ejd.GeneID()]
for ji in events:
if (ji.InclusionProbeset(), ji.ExclusionProbeset()) == key:
jd.setInclusionLookup(
ji.InclusionLookup()
) ### This is the source junction from which the exon ID comes from
probeset_comp_db[ji.InclusionLookup(),
ji.ExclusionProbeset()] = jd
#print ji.InclusionProbeset(),ji.ExclusionProbeset(),' ',ji.InclusionLookup()
if filter_for_AS == 'yes':
print len(
new_splice_event_list), "remaining after filtering for evidence of Alternative splicing"
filtered_exon_db = {}
for junctions in probeset_comp_db:
rj = probeset_comp_db[
junctions
] ### Add splicing annotations to the AltMouse junction DBs (needed for permutation analysis statistics and filtering)
try:
ja = junction_splicing_annot_db[junctions]
splicing_call = ja.SplicingCall()
rj.setSplicingCall(ja.SplicingCall())
except KeyError:
rj.setSplicingCall(0)
if filter_for_AS == 'yes': filtered_exon_db[junctions] = rj
for junctions in regulated_exon_junction_db:
rj = regulated_exon_junction_db[junctions]
try:
ja = junction_splicing_annot_db[junctions]
rj.setSplicingCall(ja.SplicingCall())
except KeyError:
rj.setSplicingCall(0)
if filter_for_AS == 'yes': probeset_comp_db = filtered_exon_db
try:
clearObjectsFromMemory(alt_junction_db)
except Exception:
null = []
return new_splice_event_list, p_value_call, permute_p_values, probeset_comp_db, regulated_exon_junction_db
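### SplicingScoreData is the shared container for per-event splicing
### statistics (score, reciprocal probesets, gene, critical exons, regulation
### and event calls, normalized-intensity p-values). ExonJunctionData,
### ExonData and ExcludedExonData below populate different subsets of these
### fields.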
class SplicingScoreData:
def Method(self):
###e.g. ASPIRE
return self._method
def Score(self):
return str(self._score)
def Probeset1(self):
return self._probeset1
def Probeset2(self):
return self._probeset2
def RegulationCall(self):
return self._regulation_call
def GeneID(self):
return self._geneid
def CriticalExons(self):
return self._critical_exon_list[1]
def CriticalExonTuple(self):
return self._critical_exon_list
def TTestNormalizedRatios(self):
return self._normIntensityP
def TTestNormalizedRatios2(self):
return self._normIntensityP2
def setConstitutiveFold(self, exp_log_ratio):
self._exp_log_ratio = exp_log_ratio
def ConstitutiveFold(self):
return str(self._exp_log_ratio)
def setConstitutiveExpression(self, const_baseline):
self.const_baseline = const_baseline
def ConstitutiveExpression(self):
return str(self.const_baseline)
def setProbesetExpressionData(self, ped):
self.ped1 = ped
def ProbesetExprData1(self):
return self.ped1
def ProbesetExprData2(self):
return self.ped2
def setNovelEvent(self, novel_event):
self._novel_event = novel_event
def NovelEvent(self):
return self._novel_event
def EventCall(self):
###e.g. Exon inclusion (ei) Exon exclusion (ex), ei-ex, reported in that direction
return self._event_call
def Report(self):
output = self.Method() + '|' + self.GeneID() + '|' + string.join(
self.CriticalExons(), '|')
return output
def __repr__(self):
return self.Report()
class ExonJunctionData(SplicingScoreData):
def __init__(self, score, probeset1, probeset2, probeset1_p, probeset2_p,
regulation_call, event_call, critical_exon_list, affygene,
ped1, ped2):
self._score = score
self._probeset1 = probeset1
self._probeset2 = probeset2
self._regulation_call = regulation_call
self._event_call = event_call
self._critical_exon_list = critical_exon_list
self._geneid = affygene
self._method = analysis_method
self._normIntensityP = probeset1_p
self._normIntensityP2 = probeset2_p
self.ped1 = ped1
self.ped2 = ped2
class ExonData(SplicingScoreData):
def __init__(self, splicing_index, probeset, critical_exon_list, geneid,
group1_ratios, group2_ratios, normIntensityP,
opposite_SI_log_mean):
self._score = splicing_index
self._probeset1 = probeset
self._opposite_SI_log_mean = opposite_SI_log_mean
self._critical_exon_list = critical_exon_list
self._geneid = geneid
self._baseline_ratio1 = group1_ratios
self._experimental_ratio1 = group2_ratios
self._normIntensityP = normIntensityP
self._method = analysis_method
self._event_call = 'exon-inclusion'
if splicing_index > 0:
regulation_call = 'downregulated' ###Since baseline is the numerator ratio
else:
regulation_call = 'upregulated'
self._regulation_call = regulation_call
def OppositeSIRatios(self):
return self._opposite_SI_log_mean
class ExcludedExonData(ExonData):
def __init__(self, splicing_index, geneid, normIntensityP):
self._score = splicing_index
self._geneid = geneid
self._normIntensityP = normIntensityP
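### For analyses with more than two groups: compute the linear-regression log
### fold for every ordered pair of group index positions and return the
### largest absolute score along with the two group indexes (ordered so the
### reported score reflects upregulation).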
def getAllPossibleLinearRegressionScores(probeset1, probeset2, positions,
group_sizes):
### Get Raw expression values for the two probests
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
all_possible_scores = []
index1 = 0 ### Perform all possible pairwise comparisons between groups (not sure how this will work for 10+ groups)
for (pos1a, pos2a) in positions:
index2 = 0
for (pos1b, pos2b) in positions:
if pos1a != pos1b:
p1_g1 = p1_exp[pos1a:pos2a]
p1_g2 = p1_exp[pos1b:pos2b]
p2_g1 = p2_exp[pos1a:pos2a]
p2_g2 = p2_exp[pos1b:pos2b]
#log_fold, linregressP, rsqrd = getAllLinearRegressionScores(probeset1,probeset2,p1_g1,p2_g1,p1_g2,p2_g2,len(group_sizes)) ### Used to calculate a pairwise group pvalue
log_fold, rsqrd = performLinearRegression(p1_g1, p2_g1, p1_g2,
p2_g2)
if log_fold < 0:
i1, i2 = index2, index1 ### all scores should indicate upregulation
else:
i1, i2 = index1, index2
all_possible_scores.append((abs(log_fold), i1, i2))
index2 += 1
index1 += 1
all_possible_scores.sort()
try:
log_fold, index1, index2 = all_possible_scores[-1]
except Exception:
log_fold = 0
index1 = 0
index2 = 0
return log_fold, index1, index2
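### Two-group case: split the raw probeset values at group_sizes[0] and
### delegate to getAllLinearRegressionScores() for the fold, p-value and
### rsqrd call.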
def getLinearRegressionScores(probeset1, probeset2, group_sizes):
### Get Raw expression values for the two probests
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
try:
p1_g1 = p1_exp[:group_sizes[0]]
p1_g2 = p1_exp[group_sizes[0]:]
p2_g1 = p2_exp[:group_sizes[0]]
p2_g2 = p2_exp[group_sizes[0]:]
except Exception:
print probeset1, probeset2
print p1_exp
print p2_exp
print group_sizes
force_kill
log_fold, linregressP, rsqrd = getAllLinearRegressionScores(
probeset1, probeset2, p1_g1, p2_g1, p1_g2, p2_g2, 2)
return log_fold, linregressP, rsqrd
def getAllLinearRegressionScores(probeset1, probeset2, p1_g1, p2_g1, p1_g2,
p2_g2, groups):
log_fold, rsqrd = performLinearRegression(p1_g1, p2_g1, p1_g2, p2_g2)
try:
### Repeat for each sample versus baselines to calculate a p-value
index = 0
group1_scores = []
for p1_g1_sample in p1_g1:
p2_g1_sample = p2_g1[index]
log_f, rs = performLinearRegression(p1_g1, p2_g1, [p1_g1_sample],
[p2_g1_sample])
group1_scores.append(log_f)
index += 1
index = 0
group2_scores = []
for p1_g2_sample in p1_g2:
p2_g2_sample = p2_g2[index]
log_f, rs = performLinearRegression(p1_g1, p2_g1, [p1_g2_sample],
[p2_g2_sample])
group2_scores.append(log_f)
index += 1
try:
linregressP = statistics.runComparisonStatistic(
group1_scores, group2_scores, probability_statistic)
except Exception:
linregressP = 0
group1_scores = [0]
group2_scores = [log_fold]
if linregressP == 1: linregressP = 0
except Exception:
linregressP = 0
group1_scores = [0]
group2_scores = [log_fold]
if export_NI_values == 'yes' and groups == 2:
group1_scores = stringListConvert(group1_scores)
group2_scores = stringListConvert(group2_scores)
ev = string.join([probeset1, probeset2] + group1_scores +
group2_scores, '\t') + '\n'
NIdata_export.write(ev)
return log_fold, linregressP, rsqrd
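### performLinearRegression() is the core of the 'linearregres' method: a
### regression slope relating the two reciprocal probesets' expression is fit
### separately for the baseline and experimental groups (rlm via R when
### use_R == 'yes', otherwise a simple least-squares fit) and the ratio of the
### two slopes is converted to a log fold via statistics.convert_to_log_fold.
### Conceptual example (hypothetical numbers): g1_slope=1.0, g2_slope=2.0 ->
### log fold of the ratio 2.0.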
def performLinearRegression(p1_g1, p2_g1, p1_g2, p2_g2):
return_rsqrd = 'no'
if use_R == 'yes': ###Uses the RLM algorithm
#print "Performing Linear Regression analysis using rlm."
g1_slope = statistics.LinearRegression(p1_g1, p2_g1, return_rsqrd)
g2_slope = statistics.LinearRegression(p1_g2, p2_g2, return_rsqrd)
else: ###Uses a basic least squared method
#print "Performing Linear Regression analysis using python specific methods."
g1_slope = statistics.simpleLinRegress(p1_g1, p2_g1)
g2_slope = statistics.simpleLinRegress(p1_g2, p2_g2)
log_fold = statistics.convert_to_log_fold(g2_slope / g1_slope)
rsqrd = 'proceed'
#if g1_rsqrd > 0 and g2_rsqrd > 0: rsqrd = 'proceed'
#else: rsqrd = 'hault'
return log_fold, rsqrd
########### Permutation Analysis Functions ###########
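### Re-compute the slope-ratio log fold after shuffling samples into the
### permuted baseline/experimental groups defined by p (see permute_samples
### below).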
def permuteLinearRegression(probeset1, probeset2, p):
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
p1_g1, p1_g2 = permute_samples(p1_exp, p)
p2_g1, p2_g2 = permute_samples(p2_exp, p)
return_rsqrd = 'no'
if use_R == 'yes': ###Uses the RLM algorithm
g1_slope = statistics.LinearRegression(p1_g1, p2_g1, return_rsqrd)
g2_slope = statistics.LinearRegression(p1_g2, p2_g2, return_rsqrd)
else: ###Uses a basic least squared method
g1_slope = statistics.simpleLinRegress(p1_g1, p2_g1)
g2_slope = statistics.simpleLinRegress(p1_g2, p2_g2)
log_fold = statistics.convert_to_log_fold(g2_slope / g1_slope)
return log_fold
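### permuteSplicingScores() derives empirical p-values: for each retained
### event the score is re-computed over every permuted sample grouping in
### permute_lists and compared to the observed score with
### statistics.permute_p. For 'linearregres', events failing the permutation
### threshold are removed from the results.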
def permuteSplicingScores(splice_event_list):
p_value_call = 'lowest_raw_p'
permute_p_values = {}
splice_event_list2 = []
if len(permute_lists) > 0:
#tuple_data in splice_event_list = dI,probeset1,probeset2,y,event_call,critical_exon_list
all_samples = []
a = 0
for (score, x) in splice_event_list:
###NOTE: This reference dI differs slightly from the below calculated, since the values are calculated from raw relative ratios rather than the avg
###Solution: Use the first calculated dI as the reference
score = score * (
-1
) ### Reverse the score to make equivalent to splicing-index and FIRMA scores
ref_splice_val = score
probeset1 = x.Probeset1()
probeset2 = x.Probeset2()
affygene = x.GeneID()
y = 0
p_splice_val_dist = []
count = 0
return_rsqrd = 'no'
for p in permute_lists: ###There are two lists in each entry
count += 1
permute = 'yes'
if analysis_method == 'ASPIRE':
p_splice_val = permute_ASPIRE_filtered(
affygene, probeset1, probeset2, p, y, ref_splice_val,
x)
elif analysis_method == 'linearregres':
slope_ratio = permuteLinearRegression(probeset1, probeset2,
p)
p_splice_val = slope_ratio
if p_splice_val != 'null':
p_splice_val_dist.append(p_splice_val)
y += 1
p_splice_val_dist.sort()
new_ref_splice_val = str(abs(ref_splice_val))
new_ref_splice_val = float(
new_ref_splice_val[0:8]
) #otherwise won't match up the scores correctly
if analysis_method == 'linearregres':
if ref_splice_val < 0:
p_splice_val_dist2 = []
for val in p_splice_val_dist:
p_splice_val_dist2.append(-1 * val)
p_splice_val_dist = p_splice_val_dist2
p_splice_val_dist.reverse()
p_val, pos_permute, total_permute, greater_than_true_permute = statistics.permute_p(
p_splice_val_dist, new_ref_splice_val, len(permute_lists))
#print p_val,ref_splice_val, pos_permute, total_permute, greater_than_true_permute,p_splice_val_dist[-3:];kill
###When two groups are of equal size, there will be 2 pos_permutes rather than 1
if len(permute_lists[0][0]) == len(permute_lists[0][1]):
greater_than_true_permute = (
pos_permute / 2) - 1 #size of the two groups are equal
else:
greater_than_true_permute = (pos_permute) - 1
if analysis_method == 'linearregres':
greater_than_true_permute = (
pos_permute
) - 1 ###since this is a one sided test, unlike ASPIRE
###Below equation is fine if the population is large
permute_p_values[(probeset1, probeset2)] = [
p_val, pos_permute, total_permute, greater_than_true_permute
]
###Remove non-significant linear regression results
if analysis_method == 'linearregres':
if p_val <= permute_p_threshold or greater_than_true_permute < 2:
splice_event_list2.append(
(score, x)) ###<= since many p=0.05
print "Number of permutation p filtered splice event:", len(
splice_event_list2)
if len(permute_p_values) > 0: p_value_call = 'permuted_aspire_p-value'
if analysis_method == 'linearregres':
splice_event_list = splice_event_list2
return splice_event_list, p_value_call, permute_p_values
def permute_ASPIRE_filtered(affygene, probeset1, probeset2, p, y,
ref_splice_val, x):
### Get raw expression values for each permuted group for the two probesets
b1, e1 = permute_dI(array_raw_group_values[probeset1], p)
try:
b2, e2 = permute_dI(array_raw_group_values[probeset2], p)
except IndexError:
print probeset2, array_raw_group_values[probeset2], p
kill
### Get the average constitutive expression values (averaged per-sample across probesets) for each permuted group
try:
bc, ec = permute_dI(avg_const_exp_db[affygene], p)
except IndexError:
print affygene, avg_const_exp_db[affygene], p
kill
if factor_out_expression_changes == 'no':
ec = bc
### Analyze the averaged ratio's of junction expression relative to permuted constitutive expression
try:
p_splice_val = abs(statistics.aspire_stringent(b1 / bc, e1 / ec, b2 /
bc, e2 / ec)
) ### This the permuted ASPIRE score
except Exception:
p_splice_val = 0
#print p_splice_val, ref_splice_val, probeset1, probeset2, affygene; dog
if y == 0: ###The first permutation is always the real one
### Grab the absolute number with small number of decimal places
try:
new_ref_splice_val = str(p_splice_val)
new_ref_splice_val = float(new_ref_splice_val[0:8])
ref_splice_val = str(abs(ref_splice_val))
ref_splice_val = float(ref_splice_val[0:8])
y += 1
except ValueError:
###Only get this error if your ref_splice_val is a null
print y, probeset1, probeset2
print ref_splice_val, new_ref_splice_val, p
print b1 / bc, e1 / ec, b2 / bc, e2 / ec
print(b1 / bc) / (e1 / ec), (b2 / bc) / (e2 / ec)
print x[7], x[8], x[9], x[10]
kill
return p_splice_val
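### Split the per-sample values in 'a' into permuted baseline/experimental
### lists using the two index lists in p. Illustrative example (hypothetical
### values): a=[5,6,7,8], p=([0,2],[1,3]) -> baseline=[5,7], experimental=[6,8].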
def permute_samples(a, p):
baseline = []
experimental = []
for p_index in p[0]:
baseline.append(a[p_index]
) ###Append expression values for each permuted list
for p_index in p[1]:
experimental.append(a[p_index])
return baseline, experimental
def permute_dI(all_samples, p):
baseline, experimental = permute_samples(all_samples, p)
#if get_non_log_avg == 'no':
gb = statistics.avg(baseline)
ge = statistics.avg(experimental
) ###Group avg baseline, group avg experimental value
gb = statistics.log_fold_conversion_fraction(gb)
ge = statistics.log_fold_conversion_fraction(ge)
#else:
#baseline = statistics.log_fold_conversion_fraction(baseline); experimental = statistics.log_fold_conversion_fraction(experimental)
#gb = statistics.avg(baseline); ge = statistics.avg(experimental) ###Group avg baseline, group avg experimental value
return gb, ge
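### Build readable functional-attribute strings for a gene's critical exons:
### each attribute is prefixed with (+), (-) or (~) depending on whether the
### exon is up- or down-regulated and on the annotation's call, and predicted
### protein length changes (AA: attributes) are collected separately.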
def format_exon_functional_attributes(affygene, critical_probeset_list,
functional_attribute_db, up_exon_list,
down_exon_list, protein_length_list):
### Add functional attributes
functional_attribute_list2 = []
new_functional_attribute_str = ''
new_seq_attribute_str = ''
new_functional_attribute_list = []
if array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null':
critical_probesets = critical_probeset_list[0]
else:
critical_probesets = tuple(critical_probeset_list)
key = affygene, critical_probesets
if key in functional_attribute_db:
###Grab exon IDs corresponding to the critical probesets
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
try:
critical_exons = regulated_exon_junction_db[
critical_probesets].CriticalExons() ###For junction arrays
except Exception:
print key, functional_attribute_db[key]
kill
else:
critical_exons = [exon_db[critical_probesets].ExonID()
] ###For exon arrays
for exon in critical_exons:
for entry in functional_attribute_db[key]:
x = 0
functional_attribute = entry[0]
call = entry[1] # +, -, or ~
if ('AA:' in functional_attribute) or (
'ref' in functional_attribute):
x = 1
if exon in up_exon_list:
### design logic to determine whether up or down regulation promotes the functional change (e.g. NMD)
if 'ref' in functional_attribute:
new_functional_attribute = '(~)' + functional_attribute
data_tuple = new_functional_attribute, exon
elif call == '+' or call == '~':
new_functional_attribute = '(+)' + functional_attribute
data_tuple = new_functional_attribute, exon
elif call == '-':
new_functional_attribute = '(-)' + functional_attribute
data_tuple = new_functional_attribute, exon
if 'AA:' in functional_attribute and '?' not in functional_attribute:
functional_attribute_temp = functional_attribute[3:]
if call == '+' or call == '~':
val1, val2 = string.split(
functional_attribute_temp, '->')
else:
val2, val1 = string.split(
functional_attribute_temp, '->')
val1, null = string.split(val1, '(')
val2, null = string.split(val2, '(')
protein_length_list.append([val1, val2])
elif exon in down_exon_list:
if 'ref' in functional_attribute:
new_functional_attribute = '(~)' + functional_attribute
data_tuple = new_functional_attribute, exon
elif call == '+' or call == '~':
new_functional_attribute = '(-)' + functional_attribute
data_tuple = new_functional_attribute, exon
elif call == '-':
new_functional_attribute = '(+)' + functional_attribute
data_tuple = new_functional_attribute, exon
if 'AA:' in functional_attribute and '?' not in functional_attribute:
functional_attribute_temp = functional_attribute[3:]
if call == '+' or call == '~':
val2, val1 = string.split(
functional_attribute_temp, '->')
else:
val1, val2 = string.split(
functional_attribute_temp, '->')
val1, null = string.split(val1, '(')
val2, null = string.split(val2, '(')
protein_length_list.append([val1, val2])
if x == 0 or (exclude_protein_details != 'yes'):
try:
new_functional_attribute_list.append(
new_functional_attribute)
except UnboundLocalError:
print entry
print up_exon_list, down_exon_list
print exon, critical_exons
print critical_probesets, (key, affygene,
critical_probesets)
for i in functional_attribute_db:
print i, functional_attribute_db[i]
kill
###remove protein sequence prediction_data
if 'sequence' not in data_tuple[0]:
if x == 0 or exclude_protein_details == 'no':
functional_attribute_list2.append(data_tuple)
###Get rid of duplicates, but maintain non-alphabetical order
new_functional_attribute_list2 = []
for entry in new_functional_attribute_list:
if entry not in new_functional_attribute_list2:
new_functional_attribute_list2.append(entry)
new_functional_attribute_list = new_functional_attribute_list2
#new_functional_attribute_list = unique.unique(new_functional_attribute_list)
#new_functional_attribute_list.sort()
for entry in new_functional_attribute_list:
if 'sequence' in entry:
new_seq_attribute_str = new_seq_attribute_str + entry + ','
else:
new_functional_attribute_str = new_functional_attribute_str + entry + ','
new_seq_attribute_str = new_seq_attribute_str[0:-1]
new_functional_attribute_str = new_functional_attribute_str[0:-1]
return new_functional_attribute_str, functional_attribute_list2, new_seq_attribute_str, protein_length_list
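### Tally how many genes carry each functional annotation, excluding (or, if
### requested, specifically including) genes whose comparison annotations
### indicate truncation, fragment or NMD-related changes.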
def grab_summary_dataset_annotations(functional_attribute_db, comparison_db,
include_truncation_results_specifically):
###If a second filtering database present, filter the 1st database based on protein length changes
fa_db = {}
cp_db = {
} ###index the geneids for efficient recall in the next segment of code
for (affygene, annotation) in functional_attribute_db:
try:
fa_db[affygene].append(annotation)
except KeyError:
fa_db[affygene] = [annotation]
for (affygene, annotation) in comparison_db:
try:
cp_db[affygene].append(annotation)
except KeyError:
cp_db[affygene] = [annotation]
functional_attribute_db_exclude = {}
for affygene in fa_db:
if affygene in cp_db:
for annotation2 in cp_db[affygene]:
if ('trunc' in annotation2) or ('frag' in annotation2) or (
'NMDs' in annotation2):
try:
functional_attribute_db_exclude[affygene].append(
annotation2)
except KeyError:
functional_attribute_db_exclude[affygene] = [
annotation2
]
functional_annotation_db = {}
for (affygene, annotation) in functional_attribute_db:
### if we wish to filter the 1st database based on protein length changes
if affygene not in functional_attribute_db_exclude:
try:
functional_annotation_db[annotation] += 1
except KeyError:
functional_annotation_db[annotation] = 1
elif include_truncation_results_specifically == 'yes':
for annotation_val in functional_attribute_db_exclude[affygene]:
try:
functional_annotation_db[annotation_val] += 1
except KeyError:
functional_annotation_db[annotation_val] = 1
annotation_list = []
annotation_list_ranked = []
for annotation in functional_annotation_db:
if 'micro' not in annotation:
count = functional_annotation_db[annotation]
annotation_list.append((annotation, count))
annotation_list_ranked.append((count, annotation))
annotation_list_ranked.sort()
annotation_list_ranked.reverse()
return annotation_list, annotation_list_ranked
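### Reorganize (gene, attribute)-keyed data into a per-gene dictionary of
### (attribute, exon list) entries and, when requested, build direction-
### specific (inclusion '+' versus exclusion '-') attribute-to-gene hit-count
### databases for over-representation analysis.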
def reorganize_attribute_entries(attribute_db1,
build_attribute_direction_databases):
attribute_db2 = {}
inclusion_attributes_hit_count = {}
exclusion_attributes_hit_count = {}
genes_with_inclusion_attributes = {}
genes_with_exclusion_attributes = {}
###This database has unique gene, attribute information. No attribute will now be represented more than once per gene
for key in attribute_db1:
###Make gene the key and attribute (functional elements or protein information), along with the associated exons the values
affygene = key[0]
exon_attribute = key[1]
exon_list = attribute_db1[key]
exon_list = unique.unique(exon_list)
exon_list.sort()
attribute_exon_info = exon_attribute, exon_list #e.g. 5'UTR, [E1,E2,E3]
try:
attribute_db2[affygene].append(attribute_exon_info)
except KeyError:
attribute_db2[affygene] = [attribute_exon_info]
###Separate out attribute data by direction for over-representation analysis
if build_attribute_direction_databases == 'yes':
direction = exon_attribute[1:2]
unique_gene_attribute = exon_attribute[3:]
if direction == '+':
try:
inclusion_attributes_hit_count[
unique_gene_attribute].append(affygene)
except KeyError:
inclusion_attributes_hit_count[unique_gene_attribute] = [
affygene
]
genes_with_inclusion_attributes[affygene] = []
if direction == '-':
try:
exclusion_attributes_hit_count[
unique_gene_attribute].append(affygene)
except KeyError:
exclusion_attributes_hit_count[unique_gene_attribute] = [
affygene
]
genes_with_exclusion_attributes[affygene] = []
inclusion_attributes_hit_count = eliminate_redundant_dict_values(
inclusion_attributes_hit_count)
exclusion_attributes_hit_count = eliminate_redundant_dict_values(
exclusion_attributes_hit_count)
"""for key in inclusion_attributes_hit_count:
inclusion_attributes_hit_count[key] = len(inclusion_attributes_hit_count[key])
for key in exclusion_attributes_hit_count:
exclusion_attributes_hit_count[key] = len(exclusion_attributes_hit_count[key])"""
if build_attribute_direction_databases == 'yes':
return attribute_db2, inclusion_attributes_hit_count, genes_with_inclusion_attributes, exclusion_attributes_hit_count, genes_with_exclusion_attributes
else:
return attribute_db2
########### Misc. Functions ###########
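### Return a copy of the database with duplicate values removed and each value
### list sorted.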
def eliminate_redundant_dict_values(database):
db1 = {}
for key in database:
        values = unique.unique(database[key])
        values.sort()
        db1[key] = values
return db1
def add_a_space(text):
    if len(text) < 1:
        text = ' '
    return text
def convertToLog2(data_list):
return map(lambda x: math.log(float(x), 2), data_list)
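### Add the global fudge factor to every value; for log-scale data the values
### are first converted out of log space (statistics.log_fold_conversion_fraction),
### offset, and then re-log2 transformed with convertToLog2().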
def addGlobalFudgeFactor(data_list, data_type):
new_list = []
if data_type == 'log':
for item in data_list:
new_item = statistics.log_fold_conversion_fraction(item)
new_list.append(float(new_item) + global_addition_factor)
new_list = convertToLog2(new_list)
else:
for item in data_list:
new_list.append(float(item) + global_addition_factor)
return new_list
def copyDirectoryPDFs(root_dir, AS='AS'):
directories = ['AltResults/AlternativeOutputDirectoryDescription.pdf',
'AltResultsDirectoryDescription.pdf',
'ClusteringDirectoryDescription.pdf',
'ExpressionInputDirectoryDescription.pdf',
'ExpressionOutputDirectoryDescription.pdf',
'GO-Elite/GO-Elite_resultsDirectoryDescription.pdf',
'GO-EliteDirectoryDescription.pdf',
'RootDirectoryDescription.pdf']
import shutil
for dir in directories:
file = string.split(dir, '/')[-1]
proceed = True
if 'AltResult' in dir and AS != 'AS': proceed = False
if proceed:
try:
shutil.copyfile(
filepath('Documentation/DirectoryDescription/' + file),
filepath(root_dir + dir))
except Exception:
pass
def restrictProbesets(dataset_name):
### Take a file with probesets and only perform the splicing-analysis on these (e.g. those already identified from a previous run with a specific pattern)
    ### Allows for proper denominator when calculating z-scores for microRNA and protein-domain ORA
probeset_list_filename = import_dir = '/AltDatabaseNoVersion/filtering'
filtered_probeset_db = {}
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
try:
dir_list = read_directory(import_dir)
fn_dir = filepath(import_dir[1:])
except Exception:
dir_list = []
fn_dir = ''
if len(dir_list) > 0:
for file in dir_list:
if file[:-4] in dataset_name:
fn = fn_dir + '/' + file
fn = string.replace(fn, 'AltDatabase', 'AltDatabaseNoVersion')
filtered_probeset_db = importGeneric(fn)
print len(
filtered_probeset_db), id_name, "will be used to restrict analysis..."
return filtered_probeset_db
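### RunAltAnalyze() is the per-dataset driver: it locates the AltExpression
### input files for the selected array type and species, imports the splicing
### annotation database, filters the expression data, runs the selected
### splicing algorithm for each comparison and returns the summary results.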
def RunAltAnalyze():
#print altanalyze_files
#print '!!!!!starting to run alt-exon analysis'
#returnLargeGlobalVars()
global annotate_db
annotate_db = {}
global splice_event_list
splice_event_list = []
residuals_dirlist = []
global dataset_name
global constitutive_probeset_db
global exon_db
dir_list2 = []
import_dir2 = ''
if array_type == 'AltMouse':
import_dir = root_dir + 'AltExpression/' + array_type
elif array_type == 'exon':
import_dir = root_dir + 'AltExpression/ExonArray/' + species + '/'
elif array_type == 'gene':
import_dir = root_dir + 'AltExpression/GeneArray/' + species + '/'
elif array_type == 'junction':
import_dir = root_dir + 'AltExpression/JunctionArray/' + species + '/'
else:
import_dir = root_dir + 'AltExpression/' + array_type + '/' + species + '/'
#if analysis_method == 'ASPIRE' or analysis_method == 'linearregres' or analysis_method == 'splicing-index':
if array_type != 'AltMouse':
gene_annotation_file = "AltDatabase/ensembl/" + species + "/" + species + "_Ensembl-annotations.txt"
else:
gene_annotation_file = "AltDatabase/" + species + "/" + array_type + "/" + array_type + "_gene_annotations.txt"
annotate_db = ExonAnalyze_module.import_annotations(gene_annotation_file,
array_type)
###Import probe-level associations
exon_db = {}
filtered_arrayids = {}
filter_status = 'no'
try:
constitutive_probeset_db, exon_db, genes_being_analyzed = importSplicingAnnotationDatabase(
probeset_annotations_file, array_type, filtered_arrayids,
filter_status)
except IOError:
print_out = 'The annotation database: \n' + probeset_annotations_file + '\nwas not found. Ensure this file was not deleted and that the correct species has been selected.'
try:
UI.WarningWindow(print_out, 'Exit')
print print_out
except Exception:
print print_out
print traceback.format_exc()
badExit()
run = 0
### Occurs when analyzing multiple conditions rather than performing a simple pair-wise comparison
if run_from_scratch == 'Annotate External Results': import_dir = root_dir
elif analyze_all_conditions == 'all groups':
import_dir = string.replace(import_dir, 'AltExpression',
'AltExpression/FullDatasets')
if array_type == 'AltMouse':
import_dir = string.replace(import_dir, 'FullDatasets/AltMouse',
'FullDatasets/AltMouse/Mm')
elif analyze_all_conditions == 'both':
import_dir2 = string.replace(import_dir, 'AltExpression',
'AltExpression/FullDatasets')
if array_type == 'AltMouse':
import_dir2 = string.replace(import_dir2, 'FullDatasets/AltMouse',
'FullDatasets/AltMouse/Mm')
try:
dir_list2 = read_directory(
import_dir2
) #send a sub_directory to a function to identify all files in a directory
except Exception:
try:
if array_type == 'exon': array_type_dir = 'ExonArray'
elif array_type == 'gene': array_type_dir = 'GeneArray'
                elif array_type == 'junction': array_type_dir = 'JunctionArray'
else: array_type_dir = array_type
import_dir2 = string.replace(
import_dir2, 'AltExpression/' + array_type_dir + '/' +
species + '/', '')
import_dir2 = string.replace(
import_dir2, 'AltExpression/' + array_type_dir + '/', '')
dir_list2 = read_directory(import_dir2)
except Exception:
print_out = 'The expression files were not found. Please make\nsure you selected the correct species and array type.\n\nselected species: ' + species + '\nselected array type: ' + array_type + '\nselected directory:' + import_dir2
try:
UI.WarningWindow(print_out, 'Exit')
print print_out
except Exception:
print print_out
print traceback.format_exc()
badExit()
try:
dir_list = read_directory(
import_dir
) #send a sub_directory to a function to identify all files in a directory
except Exception:
try:
if array_type == 'exon': array_type_dir = 'ExonArray'
elif array_type == 'gene': array_type_dir = 'GeneArray'
elif array_type == 'junction': array_type_dir = 'JunctionArray'
else: array_type_dir = array_type
import_dir = string.replace(
import_dir,
'AltExpression/' + array_type_dir + '/' + species + '/', '')
import_dir = string.replace(
import_dir, 'AltExpression/' + array_type_dir + '/', '')
try:
dir_list = read_directory(import_dir)
except Exception:
import_dir = root_dir
dir_list = read_directory(
root_dir
) ### Occurs when reading in an AltAnalyze filtered file under certain conditions
except Exception:
print_out = 'The expression files were not found. Please make\nsure you selected the correct species and array type.\n\nselected species: ' + species + '\nselected array type: ' + array_type + '\nselected directory:' + import_dir
try:
UI.WarningWindow(print_out, 'Exit')
except Exception:
print print_out
print traceback.format_exc()
badExit()
dir_list += dir_list2
### Capture the corresponding files in the residual dir to make sure these files exist for all comparisons - won't if FIRMA was run on some files
if analysis_method == 'FIRMA':
try:
residual_dir = root_dir + 'AltExpression/FIRMA/residuals/' + array_type + '/' + species + '/'
residuals_dirlist = read_directory(residual_dir)
except Exception:
null = []
try:
residual_dir = root_dir + 'AltExpression/FIRMA/FullDatasets/' + array_type + '/' + species + '/'
residuals_dirlist += read_directory(residual_dir)
except Exception:
null = []
dir_list_verified = []
for file in residuals_dirlist:
for filename in dir_list:
if file[:-4] in filename: dir_list_verified.append(filename)
dir_list = unique.unique(dir_list_verified)
junction_biotype = 'no'
if array_type == 'RNASeq':
### Check to see if user data includes junctions or just exons
for probeset in exon_db:
if '-' in probeset:
junction_biotype = 'yes'
break
if junction_biotype == 'no' and analysis_method != 'splicing-index' and array_type == 'RNASeq':
dir_list = [] ### DON'T RUN ALTANALYZE WHEN JUST ANALYZING EXON DATA
print 'No junction data to summarize... proceeding with exon analysis\n'
elif len(dir_list) == 0:
print_out = 'No expression files available in the input directory:\n' + root_dir
try:
UI.WarningWindow(print_out, 'Exit')
print print_out
except Exception:
print print_out
badExit()
dir_list = filterAltExpressionFiles(
dir_list, altanalyze_files
) ### Looks to see if the AltExpression files are for this run or from an older run
for altanalyze_input in dir_list: #loop through each file in the directory to output results
###Import probe-level associations
if 'cel_files' in altanalyze_input:
print_out = 'The AltExpression directory containing the necessary import file(s) is missing. Please verify the correct parameters and input directory were selected. If this error persists, contact us.'
try:
UI.WarningWindow(print_out, 'Exit')
print print_out
except Exception:
print print_out
badExit()
if run > 0: ### Only re-set these databases after the run when batch analysing multiple files
exon_db = {}
filtered_arrayids = {}
filter_status = 'no' ###Use this as a means to save memory (import multiple times - only storing different types relevant information)
constitutive_probeset_db, exon_db, genes_being_analyzed = importSplicingAnnotationDatabase(
probeset_annotations_file, array_type, filtered_arrayids,
filter_status)
if altanalyze_input in dir_list2:
dataset_dir = import_dir2 + '/' + altanalyze_input ### Then not a pairwise comparison
else:
dataset_dir = import_dir + '/' + altanalyze_input
dataset_name = altanalyze_input[:-4] + '-'
print "Beginning to process", dataset_name[0:-1]
        ### If the user wants to restrict the analysis to preselected probesets (e.g., limma or FIRMA analysis selected)
global filtered_probeset_db
filtered_probeset_db = {}
try:
filtered_probeset_db = restrictProbesets(dataset_name)
except Exception:
null = []
if run_from_scratch != 'Annotate External Results':
###Import expression data and stats and filter the expression data based on fold and p-value OR expression threshold
try:
conditions, adj_fold_dbase, nonlog_NI_db, dataset_name, gene_expression_diff_db, midas_db, ex_db, si_db = performExpressionAnalysis(
dataset_dir, constitutive_probeset_db, exon_db,
annotate_db, dataset_name)
except IOError:
#except Exception,exception:
#print exception
print traceback.format_exc()
                print_out = 'The AltAnalyze filtered expression file "' + dataset_name + '" is not properly formatted. Review formatting requirements if this file was created by another application.'
try:
UI.WarningWindow(print_out, 'Exit')
print print_out
except Exception:
print print_out
badExit()
else:
conditions = 0
adj_fold_dbase = {}
nonlog_NI_db = {}
gene_expression_diff_db = {}
ex_db = {}
si_db = {}
defineEmptyExpressionVars(exon_db)
adj_fold_dbase = original_fold_dbase
###Run Analysis
summary_results_db, summary_results_db2, aspire_output, aspire_output_gene, number_events_analyzed = splicingAnalysisAlgorithms(
nonlog_NI_db, adj_fold_dbase, dataset_name,
gene_expression_diff_db, exon_db, ex_db, si_db, dataset_dir)
aspire_output_list.append(aspire_output)
aspire_output_gene_list.append(aspire_output_gene)
try:
clearObjectsFromMemory(exon_db)
clearObjectsFromMemory(constitutive_probeset_db)
constitutive_probeset_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(last_exon_region_db)
last_exon_region_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(adj_fold_dbase)
adj_fold_dbase = []
clearObjectsFromMemory(nonlog_NI_db)
nonlog_NI_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(gene_expression_diff_db)
gene_expression_diff_db = []
clearObjectsFromMemory(midas_db)
midas_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(ex_db)
ex_db = []
clearObjectsFromMemory(si_db)
si_db = []
except Exception:
null = []
try:
run += 1
except Exception:
run = 1
if run > 0: ###run = 0 if no filtered expression data present
try:
return summary_results_db, aspire_output_gene_list, number_events_analyzed
except Exception:
            print_out = 'AltAnalyze was unable to find an expression dataset to analyze in:\n' + import_dir + '\nor\n' + import_dir2 + '\nPlease re-run and select a valid input directory.'
try:
UI.WarningWindow(print_out, 'Exit')
print print_out
except Exception:
print print_out
badExit()
else:
try:
clearObjectsFromMemory(exon_db)
clearObjectsFromMemory(constitutive_probeset_db)
constitutive_probeset_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(last_exon_region_db)
last_exon_region_db = []
except Exception:
null = []
return None
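### Restrict dir_list to the AltExpression files generated in the current run
### (current_files); if no filenames were provided, all files are kept.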
def filterAltExpressionFiles(dir_list, current_files):
dir_list2 = []
try:
if len(current_files) == 0:
current_files = dir_list ###if no filenames input
for altanalzye_input in dir_list: #loop through each file in the directory to output results
if altanalzye_input in current_files:
dir_list2.append(altanalzye_input)
dir_list = dir_list2
except Exception:
dir_list = dir_list
return dir_list
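### Initialize the global fold/critical-exon/MIDAS databases with empty
### placeholders for every probeset; used when no filtered expression data is
### imported (e.g., 'Annotate External Results').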
def defineEmptyExpressionVars(exon_db):
global fold_dbase
fold_dbase = {}
global original_fold_dbase
global critical_exon_db
critical_exon_db = {}
global midas_db
midas_db = {}
global max_replicates
global equal_replicates
max_replicates = 0
equal_replicates = 0
for probeset in exon_db:
fold_dbase[probeset] = '', ''
original_fold_dbase = fold_dbase
def universalPrintFunction(print_items):
log_report = open(log_file, 'a')
for item in print_items:
        if commandLineMode == 'no': ### Command-line has its own log file write method (Logger)
log_report.write(item + '\n')
else:
print item
log_report.close()
class StatusWindow:
def __init__(self, root, expr_var, alt_var, goelite_var, additional_var,
exp_file_location_db):
root.title('AltAnalyze version 2.0.9.3 beta')
statusVar = StringVar(
) ### Class method for Tkinter. Description: "Value holder for strings variables."
self.root = root
height = 450
width = 500
if os.name != 'nt':
height = 500
width = 600
self.sf = PmwFreeze.ScrolledFrame(root,
labelpos='n',
label_text='Results Status Window',
usehullsize=1,
hull_width=width,
hull_height=height)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(), tag_text='Output')
group.pack(fill='both', expand=1, padx=10, pady=0)
Label(group.interior(),
width=190,
height=552,
justify=LEFT,
bg='black',
fg='white',
anchor=NW,
padx=5,
pady=5,
textvariable=statusVar).pack(fill=X,
expand=Y)
status = StringVarFile(statusVar, root) ### Likely captures the stdout
sys.stdout = status
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
fl.setSTDOUT(sys.stdout)
root.after(
100, AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var,
exp_file_location_db, root))
try:
root.protocol("WM_DELETE_WINDOW", self.deleteWindow)
root.mainloop()
except Exception:
pass
def deleteWindow(self):
try:
self.root.destroy()
except Exception:
pass
def quit(self):
try:
self.root.quit()
self.root.destroy()
except Exception:
pass
sys.exit()
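### Assemble the per-comparison summary lines (genes/events examined,
### alternatively regulated events, domain and microRNA overlaps) from
### summary_data_dbase, writing them to the log when return_type == 'log' and
### returning them as a list for display.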
def exportComparisonSummary(dataset_name, summary_data_dbase, return_type):
log_report = open(log_file, 'a')
result_list = []
for key in summary_data_dbase:
if key != 'QC': ### The value is a list of strings
summary_data_dbase[key] = str(summary_data_dbase[key])
d = 'Dataset name: ' + dataset_name[:-1]
result_list.append(d + '\n')
d = summary_data_dbase['gene_assayed'] + ':\tAll genes examined'
result_list.append(d)
d = summary_data_dbase[
'denominator_exp_genes'] + ':\tExpressed genes examined for AS'
result_list.append(d)
if explicit_data_type == 'exon-only':
d = summary_data_dbase[
'alt_events'] + ':\tAlternatively regulated probesets'
result_list.append(d)
d = summary_data_dbase[
'denominator_exp_events'] + ':\tExpressed probesets examined'
result_list.append(d)
elif (array_type == 'AltMouse' or array_type == 'junction' or
array_type == 'RNASeq') and (explicit_data_type == 'null' or
return_type == 'print'):
d = summary_data_dbase[
'alt_events'] + ':\tAlternatively regulated junction-pairs'
result_list.append(d)
d = summary_data_dbase[
'denominator_exp_events'] + ':\tExpressed junction-pairs examined'
result_list.append(d)
else:
d = summary_data_dbase[
'alt_events'] + ':\tAlternatively regulated probesets'
result_list.append(d)
d = summary_data_dbase[
'denominator_exp_events'] + ':\tExpressed probesets examined'
result_list.append(d)
d = summary_data_dbase[
'alt_genes'] + ':\tAlternatively regulated genes (ARGs)'
result_list.append(d)
    d = summary_data_dbase[
        'direct_domain_genes'] + ':\tARGs - overlapping with domain/motifs'
    result_list.append(d)
    d = summary_data_dbase[
        'miRNA_gene_hits'] + ':\tARGs - overlapping with microRNA binding sites'
result_list.append(d)
result_list2 = []
for d in result_list:
if explicit_data_type == 'exon-only':
d = string.replace(d, 'probeset', 'exon')
elif array_type == 'RNASeq':
d = string.replace(d, 'probeset', 'junction')
result_list2.append(d)
result_list = result_list2
if return_type == 'log':
for d in result_list:
log_report.write(d + '\n')
log_report.write('\n')
log_report.close()
return result_list
class SummaryResultsWindow:
def __init__(self, tl, analysis_type, output_dir, dataset_name,
output_type, summary_data_dbase):
def showLink(event):
try:
idx = int(
event.widget.tag_names(CURRENT)[1]
) ### This is just the index provided below (e.g., str(0))
#print [self.LINKS[idx]]
if 'http://' in self.LINKS[idx]:
webbrowser.open(self.LINKS[idx])
elif self.LINKS[idx][-1] == '/':
self.openSuppliedDirectory(self.LINKS[idx])
else:
### Instead of using this option to open a hyperlink (which is what it should do), we can open another Tk window
try:
self.viewPNGFile(self.LINKS[idx]
) ### ImageTK PNG viewer
except Exception:
try:
                            self.ShowImageMPL(self.LINKS[idx]
                                              ) ### MatPlotLib based display
except Exception:
self.openPNGImage(self.LINKS[idx]
) ### Native OS PNG viewer
#self.DisplayPlots(self.LINKS[idx]) ### GIF based dispaly
except Exception:
null = [] ### anomalous error
self.emergency_exit = False
self.LINKS = []
self.tl = tl
self.tl.title('AltAnalyze version 2.0.9 beta')
self.analysis_type = analysis_type
filename = 'Config/icon.gif'
fn = filepath(filename)
img = PhotoImage(file=fn)
can = Canvas(tl)
can.pack(side='top')
can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
use_scroll = 'yes'
try:
runGOElite = run_GOElite
except Exception:
runGOElite = 'decide_later'
if 'QC' in summary_data_dbase:
graphic_links = summary_data_dbase[
'QC'
] ### contains hyperlinks to QC and Clustering plots
if len(graphic_links) == 0:
del summary_data_dbase[
'QC'
] ### This can be added if an analysis fails
else:
graphic_links = []
label_text_str = 'AltAnalyze Result Summary'
height = 150
width = 500
if analysis_type == 'AS' or 'QC' in summary_data_dbase: height = 330
if analysis_type == 'AS' and 'QC' in summary_data_dbase: height = 330
self.sf = PmwFreeze.ScrolledFrame(tl,
labelpos='n',
label_text=label_text_str,
usehullsize=1,
hull_width=width,
hull_height=height)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
txt = Text(self.frame, bg='gray', width=150, height=80)
txt.pack(expand=True, fill="both")
#txt.insert(END, 'Primary Analysis Finished....\n')
txt.insert(END, 'Results saved to:\n' + output_dir + '\n')
f = Font(family="System", size=12, weight="bold")
txt.tag_config("font", font=f)
i = 0
copyDirectoryPDFs(output_dir, AS=analysis_type)
if analysis_type == 'AS':
txt.insert(END, '\n')
result_list = exportComparisonSummary(dataset_name,
summary_data_dbase, 'print')
for d in result_list:
txt.insert(END, d + '\n')
if 'QC' in summary_data_dbase and len(graphic_links) > 0:
txt.insert(END, '\nQC and Expression Clustering Plots', "font")
txt.insert(END, '\n\n 1) ')
for (name, file_dir) in graphic_links:
txt.insert(END, name, ('link', str(i)))
if len(graphic_links) > (i + 1):
txt.insert(END, '\n %s) ' % str(i + 2))
self.LINKS.append(file_dir)
i += 1
txt.insert(END, '\n\nView all primary plots in the folder ')
txt.insert(END, 'DataPlots', ('link', str(i)))
i += 1
self.LINKS.append(output_dir + 'DataPlots/')
else:
url = 'http://code.google.com/p/altanalyze/'
            self.LINKS = [url, ''] ### keep as a list so later appends (e.g., GO-Elite link) work
txt.insert(END, '\nFor more information see the ')
txt.insert(END, "AltAnalyze Online Help", ('link', str(0)))
txt.insert(END, '\n\n')
if runGOElite == 'run-immediately':
txt.insert(
END, '\n\nView all pathway enrichment results in the folder ')
txt.insert(END, 'GO-Elite', ('link', str(i)))
i += 1
self.LINKS.append(output_dir + 'GO-Elite/')
if analysis_type == 'AS':
txt.insert(END, '\n\nView all splicing plots in the folder ')
txt.insert(END, 'ExonPlots', ('link', str(i)))
i += 1
self.LINKS.append(output_dir + 'ExonPlots/')
txt.tag_config('link', foreground="blue", underline=1)
txt.tag_bind('link', '<Button-1>', showLink)
txt.insert(END, '\n\n')
open_results_folder = Button(tl,
text='Results Folder',
command=self.openDirectory)
open_results_folder.pack(side='left', padx=5, pady=5)
if analysis_type == 'AS':
#self.dg_url = 'http://www.altanalyze.org/domaingraph.htm'
self.dg_url = 'http://www.altanalyze.org/domaingraph.htm'
dg_pdf_file = 'Documentation/domain_graph.pdf'
dg_pdf_file = filepath(dg_pdf_file)
self.dg_pdf_file = dg_pdf_file
text_button = Button(tl,
text='Start DomainGraph in Cytoscape',
command=self.SelectCytoscapeTopLevel)
text_button.pack(side='right', padx=5, pady=5)
self.output_dir = output_dir + "AltResults"
self.whatNext_url = 'http://code.google.com/p/altanalyze/wiki/AnalyzingASResults' #http://www.altanalyze.org/what_next_altexon.htm'
whatNext_pdf = 'Documentation/what_next_alt_exon.pdf'
whatNext_pdf = filepath(whatNext_pdf)
self.whatNext_pdf = whatNext_pdf
if output_type == 'parent':
self.output_dir = output_dir ###Used for fake datasets
else:
if pathway_permutations == 'NA':
self.output_dir = output_dir + "ExpressionOutput"
else:
self.output_dir = output_dir
self.whatNext_url = 'http://code.google.com/p/altanalyze/wiki/AnalyzingGEResults' #'http://www.altanalyze.org/what_next_expression.htm'
whatNext_pdf = 'Documentation/what_next_GE.pdf'
whatNext_pdf = filepath(whatNext_pdf)
self.whatNext_pdf = whatNext_pdf
what_next = Button(tl, text='What Next?', command=self.whatNextlinkout)
what_next.pack(side='right', padx=5, pady=5)
quit_buttonTL = Button(tl, text='Close View', command=self.close)
quit_buttonTL.pack(side='right', padx=5, pady=5)
continue_to_next_win = Button(text='Continue',
command=self.continue_win)
continue_to_next_win.pack(side='right', padx=10, pady=10)
quit_button = Button(root, text='Quit', command=self.quit)
quit_button.pack(side='right', padx=5, pady=5)
button_text = 'Help'
help_url = 'http://www.altanalyze.org/help_main.htm'
self.help_url = filepath(help_url)
pdf_help_file = 'Documentation/AltAnalyze-Manual.pdf'
pdf_help_file = filepath(pdf_help_file)
self.pdf_help_file = pdf_help_file
help_button = Button(root, text=button_text, command=self.Helplinkout)
help_button.pack(side='left', padx=5, pady=5)
if self.emergency_exit == False:
self.tl.protocol("WM_DELETE_WINDOW", self.tldeleteWindow)
self.tl.mainloop() ###Needed to show graphic
else:
""" This shouldn't have to be called, but is when the topLevel window isn't closed first
specifically if a PNG file is opened. the sys.exitfunc() should work but doesn't.
work on this more later """
#AltAnalyzeSetup('no')
try:
self._tls.quit()
self._tls.destroy()
except Exception:
None
            try:
                self._tlx.quit()
                self._tlx.destroy()
            except Exception:
                None
try:
self.tl.quit()
self.tl.destroy()
except Exception:
None
try:
root.quit()
root.destroy()
except Exception:
None
UI.getUpdatedParameters(array_type, species,
'Process Expression file', output_dir)
sys.exit(
) ### required when opening PNG files on Windows to continue (not sure why)
#sys.exitfunc()
def tldeleteWindow(self):
try:
self.tl.quit()
self.tl.destroy()
except Exception:
self.tl.destroy()
def deleteTLWindow(self):
self.emergency_exit = True
try:
self._tls.quit()
self._tls.destroy()
except Exception:
None
try:
self._tlx.quit()
self._tlx.destroy()
except Exception:
None
self.tl.quit()
self.tl.destroy()
sys.exitfunc()
def deleteWindow(self):
self.emergency_exit = True
try:
self._tls.quit()
self._tls.destroy()
except Exception:
None
try:
self._tlx.quit()
self._tlx.destroy()
except Exception:
None
try:
self.tl.quit()
self.tl.destroy()
except Exception:
None
sys.exitfunc()
def continue_win(self):
self.emergency_exit = True
try:
self._tls.quit()
self._tls.destroy()
except Exception:
None
try:
self._tlx.quit()
self._tlx.destroy()
except Exception:
None
try:
self.tl.quit()
self.tl.destroy()
except Exception:
pass
root.quit()
root.destroy()
try:
self.tl.grid_forget()
except Exception:
None
try:
root.grid_forget()
except Exception:
None
sys.exitfunc()
def openDirectory(self):
if os.name == 'nt':
try:
os.startfile('"' + self.output_dir + '"')
except Exception:
os.system('open "' + self.output_dir + '"')
elif 'darwin' in sys.platform:
os.system('open "' + self.output_dir + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + self.output_dir + '/"')
def openSuppliedDirectory(self, dir):
if os.name == 'nt':
try:
                os.startfile('"' + dir + '"')
except Exception:
os.system('open "' + dir + '"')
elif 'darwin' in sys.platform:
os.system('open "' + dir + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + dir + '/"')
def DGlinkout(self):
try:
altanalyze_path = filepath('') ### Find AltAnalye's path
altanalyze_path = altanalyze_path[:-1]
except Exception:
null = []
if os.name == 'nt':
parent_dir = 'C:/Program Files'
application_dir = 'Cytoscape_v'
application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform:
parent_dir = '/Applications'
application_dir = 'Cytoscape_v'
application_name = 'Cytoscape.app'
elif 'linux' in sys.platform:
parent_dir = '/opt'
application_dir = 'Cytoscape_v'
application_name = 'Cytoscape'
try:
openCytoscape(altanalyze_path, application_dir, application_name)
except Exception:
null = []
try:
self._tls.destroy()
except Exception:
None
try: ###Remove this cytoscape as the default
file_location_defaults = UI.importDefaultFileLocations()
del file_location_defaults['CytoscapeDir']
UI.exportDefaultFileLocations(file_location_defaults)
except Exception:
null = []
self.GetHelpTopLevel(self.dg_url, self.dg_pdf_file)
def Helplinkout(self):
self.GetHelpTopLevel(self.help_url, self.pdf_help_file)
def whatNextlinkout(self):
self.GetHelpTopLevel(self.whatNext_url, self.whatNext_pdf)
def ShowImageMPL(self, file_location):
""" Visualization method using MatPlotLib """
try:
import matplotlib
import matplotlib.pyplot as pylab
except Exception:
#print 'Graphical output mode disabled (requires matplotlib, numpy and scipy)'
None
fig = pylab.figure()
pylab.subplots_adjust(left=0.0,
right=1.0,
top=1.0,
bottom=0.00
) ### Fill the plot area left to right
ax = fig.add_subplot(111)
ax.set_xticks([]) ### Hides ticks
ax.set_yticks([])
img = pylab.imread(file_location)
imgplot = pylab.imshow(img)
pylab.show()
def viewPNGFile(self, png_file_dir):
""" View PNG file within a PMW Tkinter frame """
import ImageTk
tlx = Toplevel()
self._tlx = tlx
sf = PmwFreeze.ScrolledFrame(tlx,
labelpos='n',
label_text='',
usehullsize=1,
hull_width=800,
hull_height=550)
sf.pack(padx=0, pady=0, fill='both', expand=1)
frame = sf.interior()
tlx.title(png_file_dir)
img = ImageTk.PhotoImage(file=png_file_dir)
can = Canvas(frame)
can.pack(fill=BOTH, padx=0, pady=0)
w = img.width()
h = height = img.height()
can.config(width=w, height=h)
can.create_image(2, 2, image=img, anchor=NW)
tlx.mainloop()
def openPNGImage(self, png_file_dir):
if os.name == 'nt':
try:
os.startfile('"' + png_file_dir + '"')
except Exception:
os.system('open "' + png_file_dir + '"')
elif 'darwin' in sys.platform:
os.system('open "' + png_file_dir + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + png_file_dir + '"')
def DisplayPlots(self, file_location):
""" Native Tkinter method - Displays a gif file in a standard TopLevel window (nothing fancy) """
tls = Toplevel()
self._tls = tls
nulls = '\t\t\t\t'
tls.title('AltAnalyze Plot Visualization')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos='n',
label_text='',
usehullsize=1,
hull_width=520,
hull_height=500)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(), tag_text=file_location)
group.pack(fill='both', expand=1, padx=10, pady=0)
img = PhotoImage(file=filepath(file_location))
can = Canvas(group.interior())
can.pack(side='left', padx=10, pady=20)
can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
tls.mainloop()
def GetHelpTopLevel(self, url, pdf_file):
try:
config_db = UI.importConfigFile()
ask_for_help = config_db['help'] ### hide_selection_option
except Exception:
ask_for_help = 'null'
config_db = {}
self.pdf_file = pdf_file
self.url = url
if ask_for_help == 'null':
message = ''
self.message = message
self.online_help = 'Online Documentation'
self.pdf_help = 'Local PDF File'
tls = Toplevel()
self._tls = tls
nulls = '\t\t\t\t'
tls.title('Please select one of the options')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos='n',
label_text='',
usehullsize=1,
hull_width=320,
hull_height=200)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(), tag_text='Options')
group.pack(fill='both', expand=1, padx=10, pady=0)
filename = 'Config/icon.gif'
fn = filepath(filename)
img = PhotoImage(file=fn)
can = Canvas(group.interior())
can.pack(side='left', padx=10, pady=20)
can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
l1 = Label(group.interior(), text=nulls)
l1.pack(side='bottom')
text_button2 = Button(group.interior(),
text=self.online_help,
command=self.openOnlineHelp)
text_button2.pack(side='top', padx=5, pady=5)
try:
text_button = Button(group.interior(),
text=self.pdf_help,
command=self.openPDFHelp)
text_button.pack(side='top', padx=5, pady=5)
except Exception:
text_button = Button(group.interior(),
text=self.pdf_help,
command=self.openPDFHelp)
text_button.pack(side='top', padx=5, pady=5)
text_button3 = Button(group.interior(),
text='No Thanks',
command=self.skipHelp)
text_button3.pack(side='top', padx=5, pady=5)
c = Checkbutton(group.interior(),
text="Apply these settings each time",
command=self.setHelpConfig)
c.pack(side='bottom', padx=5, pady=0)
tls.mainloop()
try:
tls.destroy()
except Exception:
None
else:
file_location_defaults = UI.importDefaultFileLocations()
try:
help_choice = file_location_defaults['HelpChoice'].Location()
if help_choice == 'PDF': self.openPDFHelp()
elif help_choice == 'http': self.openOnlineHelp()
else: self.skip()
except Exception:
self.openPDFHelp() ### Open PDF if there's a problem
def SelectCytoscapeTopLevel(self):
try:
config_db = UI.importConfigFile()
cytoscape_type = config_db['cytoscape'] ### hide_selection_option
except Exception:
cytoscape_type = 'null'
config_db = {}
if cytoscape_type == 'null':
message = ''
self.message = message
tls = Toplevel()
self._tls = tls
nulls = '\t\t\t\t'
tls.title('Cytoscape Automatic Start Options')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos='n',
label_text='',
usehullsize=1,
hull_width=420,
hull_height=200)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(), tag_text='Options')
group.pack(fill='both', expand=1, padx=10, pady=0)
filename = 'Config/cyto-logo-smaller.gif'
fn = filepath(filename)
img = PhotoImage(file=fn)
can = Canvas(group.interior())
can.pack(side='left', padx=10, pady=5)
can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
#"""
self.local_cytoscape = 'AltAnalyze Bundled Version'
self.custom_cytoscape = 'Previously Installed Version'
l1 = Label(group.interior(), text=nulls)
l1.pack(side='bottom')
l3 = Label(group.interior(),
text='Select version of Cytoscape to open:')
l3.pack(side='top', pady=5)
"""
self.local_cytoscape = ' No '; self.custom_cytoscape = ' Yes '
l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
l2 = Label(group.interior(), text='Note: Cytoscape can take up-to a minute to initalize', fg="red"); l2.pack(side = 'top', padx = 5, pady = 0)
"""
text_button2 = Button(group.interior(),
text=self.local_cytoscape,
command=self.DGlinkout)
text_button2.pack(padx=5, pady=5)
try:
text_button = Button(group.interior(),
text=self.custom_cytoscape,
command=self.getPath)
text_button.pack(padx=5, pady=5)
except Exception:
text_button = Button(group.interior(),
text=self.custom_cytoscape,
command=self.getPath)
text_button.pack(padx=5, pady=5)
l2 = Label(
group.interior(),
                text='Note: Cytoscape can take up to a minute to initialize',
fg="blue")
l2.pack(side='bottom', padx=5, pady=0)
c = Checkbutton(
group.interior(),
text="Apply these settings each time and don't show again",
command=self.setCytoscapeConfig)
c.pack(side='bottom', padx=5, pady=0)
#c2 = Checkbutton(group.interior(), text = "Open PDF of DomainGraph help rather than online help", command=self.setCytoscapeConfig); c2.pack(side = 'bottom', padx = 5, pady = 0)
tls.mainloop()
try:
tls.destroy()
except Exception:
None
else:
file_location_defaults = UI.importDefaultFileLocations()
try:
cytoscape_app_dir = file_location_defaults[
'CytoscapeDir'].Location()
openFile(cytoscape_app_dir)
except Exception:
try:
altanalyze_path = filepath('')
altanalyze_path = altanalyze_path[:-1]
except Exception:
altanalyze_path = ''
application_dir = 'Cytoscape_v'
if os.name == 'nt': application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform:
application_name = 'Cytoscape.app'
elif 'linux' in sys.platform:
application_name = 'Cytoscape'
try:
openCytoscape(altanalyze_path, application_dir,
application_name)
except Exception:
null = []
def setCytoscapeConfig(self):
config_db = {}
config_db['cytoscape'] = 'hide_selection_option'
UI.exportConfigFile(config_db)
def setHelpConfig(self):
config_db = {}
config_db['help'] = 'hide_selection_option'
UI.exportConfigFile(config_db)
def getPath(self):
file_location_defaults = UI.importDefaultFileLocations()
if os.name == 'nt':
parent_dir = 'C:/Program Files'
application_dir = 'Cytoscape_v'
application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform:
parent_dir = '/Applications'
application_dir = 'Cytoscape_v'
application_name = 'Cytoscape.app'
elif 'linux' in sys.platform:
parent_dir = '/opt'
application_dir = 'Cytoscape_v'
application_name = 'Cytoscape'
try:
self.default_dir = file_location_defaults['CytoscapeDir'].Location(
)
self.default_dir = string.replace(self.default_dir, '//', '/')
self.default_dir = string.replace(self.default_dir, '\\', '/')
self.default_dir = string.join(
string.split(self.default_dir, '/')[:-1], '/')
except Exception:
dir = FindDir(parent_dir, application_dir)
dir = filepath(parent_dir + '/' + dir)
self.default_dir = filepath(parent_dir)
try:
dirPath = tkFileDialog.askdirectory(parent=self._tls,
initialdir=self.default_dir)
except Exception:
self.default_dir = ''
try:
dirPath = tkFileDialog.askdirectory(
parent=self._tls,
initialdir=self.default_dir)
except Exception:
try:
dirPath = tkFileDialog.askdirectory(parent=self._tls)
except Exception:
dirPath = ''
try:
#print [dirPath],application_name
app_dir = dirPath + '/' + application_name
if 'linux' in sys.platform:
try:
createCytoscapeDesktop(cytoscape_dir)
except Exception:
null = []
dir_list = unique.read_directory(
'/usr/bin/') ### Check to see that JAVA is installed
if 'java' not in dir_list:
                        print 'Java not found in /usr/bin/. If Java is not installed,\nplease install it and re-try opening Cytoscape'
try:
jar_path = dirPath + '/cytoscape.jar'
main_path = dirPath + '/cytoscape.CyMain'
plugins_path = dirPath + '/plugins'
os.system('java -Dswing.aatext=true -Xss5M -Xmx512M -jar '
+ jar_path + ' ' + main_path + ' -p ' +
plugins_path + ' &')
print 'Cytoscape jar opened:', jar_path
except Exception:
print 'OS command to open Java failed.'
try:
openFile(app_dir2)
print 'Cytoscape opened:', app_dir2
except Exception:
openFile(app_dir)
else:
openFile(app_dir)
try:
file_location_defaults['CytoscapeDir'].SetLocation(app_dir)
except Exception:
fl = UI.FileLocationData('', app_dir, 'all')
file_location_defaults['CytoscapeDir'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
except Exception:
null = []
try:
self._tls.destroy()
except Exception:
None
self.GetHelpTopLevel(self.dg_url, self.dg_pdf_file)
def openOnlineHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:
file_location_defaults['HelpChoice'].SetLocation('http')
except Exception:
fl = UI.FileLocationData('', 'http', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
webbrowser.open(self.url)
#except Exception: null=[]
try:
self._tls.destroy()
except Exception:
None
def skipHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:
file_location_defaults['HelpChoice'].SetLocation('skip')
except Exception:
fl = UI.FileLocationData('', 'skip', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
try:
self._tls.destroy()
except Exception:
None
def openPDFHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:
file_location_defaults['HelpChoice'].SetLocation('PDF')
except Exception:
fl = UI.FileLocationData('', 'PDF', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
if os.name == 'nt':
try:
os.startfile('"' + self.pdf_file + '"')
except Exception:
os.system('open "' + self.pdf_file + '"')
elif 'darwin' in sys.platform:
os.system('open "' + self.pdf_file + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + self.pdf_file + '"')
try:
self._tls.destroy()
except Exception:
None
def quit(self):
root.quit()
root.destroy()
sys.exit()
def close(self):
#self.tl.quit() #### This was causing multiple errors in 2.0.7 - evaluate more!
self.tl.destroy()
class StringVarFile:
def __init__(self, stringVar, window):
self.__newline = 0
self.__stringvar = stringVar
self.__window = window
def write(self, s):
try:
log_report = open(log_file, 'a')
log_report.write(s)
log_report.close() ### Variable to record each print statement
new = self.__stringvar.get()
for c in s:
#if c == '\n': self.__newline = 1
if c == '\k':
                    self.__newline = 1 ### This should not be found and thus results in a continuous feed rather than replacing a single line
else:
if self.__newline:
new = ""
self.__newline = 0
new = new + c
self.set(new)
except Exception:
pass
def set(self, s):
self.__stringvar.set(s)
self.__window.update()
def get(self):
return self.__stringvar.get()
def flush(self):
pass
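# Illustrative usage sketch (not part of the original control flow): because StringVarFile
# implements write()/flush(), it can stand in for sys.stdout so that print statements are
# mirrored to the log file and to a Tkinter StringVar displayed in the GUI. Assuming a Tk
# window and StringVar already exist, the redirection would look roughly like:
#     status_var = StringVar()
#     sys.stdout = StringVarFile(status_var, window)  # 'window' is whatever widget should update()
#     print 'processing chromosome 1...'              # now appended to status_var and the log file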
def timestamp():
import datetime
today = str(datetime.date.today())
today = string.split(today, '-')
today = today[0] + '' + today[1] + '' + today[2]
time_stamp = string.replace(time.ctime(), ':', '')
time_stamp = string.replace(time_stamp, ' ', ' ')
time_stamp = string.split(
time_stamp, ' ') ###Use a time-stamp as the output dir (minus the day)
time_stamp = today + '-' + time_stamp[3]
return time_stamp
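# For reference, timestamp() concatenates today's date with the clock time taken from
# time.ctime(), so a run on 2014-06-01 at 14:30:05 would yield roughly '20140601-143005'
# (the exact value depends on how time.ctime() formats the current time).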
def callWXPython():
import wx
import AltAnalyzeViewer
app = wx.App(False)
AltAnalyzeViewer.remoteViewer(app)
def AltAnalyzeSetup(skip_intro):
global apt_location
global root_dir
global log_file
global summary_data_db
summary_data_db = {}
reload(UI)
global probability_statistic
global commandLineMode
commandLineMode = 'no'
if 'remoteViewer' == skip_intro:
if os.name == 'nt':
callWXPython()
elif os.name == 'ntX':
package_path = filepath('python')
win_package_path = string.replace(package_path, 'python',
'AltAnalyzeViewer.exe')
import subprocess
subprocess.call([win_package_path])
sys.exit()
elif os.name == 'posix':
package_path = filepath('python')
#mac_package_path = string.replace(package_path,'python','AltAnalyze.app/Contents/MacOS/python')
#os.system(mac_package_path+' RemoteViewer.py');sys.exit()
mac_package_path = string.replace(
package_path, 'python',
'AltAnalyzeViewer.app/Contents/MacOS/AltAnalyzeViewer')
import subprocess
subprocess.call([mac_package_path])
sys.exit()
"""
import threading
import wx
app = wx.PySimpleApp()
t = threading.Thread(target=callWXPython)
t.setDaemon(1)
t.start()
s = 1
queue = mlp.Queue()
proc = mlp.Process(target=callWXPython) ### passing sys.stdout unfortunately doesn't work to pass the Tk string
proc.start()
sys.exit()
"""
reload(UI)
expr_var, alt_var, additional_var, goelite_var, exp_file_location_db = UI.getUserParameters(
skip_intro,
Multi=mlp)
"""except Exception:
if 'SystemExit' not in str(traceback.format_exc()):
expr_var, alt_var, additional_var, goelite_var, exp_file_location_db = UI.getUserParameters('yes')
else: sys.exit()"""
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
apt_location = fl.APTLocation()
root_dir = fl.RootDir()
try:
probability_statistic = fl.ProbabilityStatistic()
except Exception:
probability_statistic = 'unpaired t-test'
time_stamp = timestamp()
log_file = filepath(root_dir + 'AltAnalyze_report-' + time_stamp + '.log')
log_report = open(log_file, 'w')
log_report.close()
if use_Tkinter == 'yes' and debug_mode == 'no':
try:
global root
root = Tk()
StatusWindow(root, expr_var, alt_var, goelite_var, additional_var,
exp_file_location_db)
root.destroy()
except Exception, exception:
try:
print traceback.format_exc()
badExit()
except Exception:
sys.exit()
else:
AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var,
exp_file_location_db, '')
def badExit():
print "\n...exiting AltAnalyze due to unexpected error"
try:
time_stamp = timestamp()
print_out = "Unknown error encountered during data processing.\nPlease see logfile in:\n\n" + log_file + "\nand report to genmapp@gladstone.ucsf.edu."
try:
if len(log_file) > 0:
if commandLineMode == 'no':
if os.name == 'nt':
try:
os.startfile('"' + log_file + '"')
except Exception:
os.system('open "' + log_file + '"')
elif 'darwin' in sys.platform:
os.system('open "' + log_file + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + log_file + '"')
if commandLineMode == 'no':
try:
UI.WarningWindow(print_out, 'Error Encountered!')
root.destroy()
except Exception:
print print_out
except Exception:
sys.exit()
except Exception:
sys.exit()
sys.exit()
def AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var,
exp_file_location_db, root):
### Hard-coded defaults
w = 'Agilent'
x = 'Affymetrix'
y = 'Ensembl'
z = 'any'
data_source = y
constitutive_source = z
    manufacturer = x ### The constitutive source is only honored when it is Ensembl; otherwise, Affymetrix is used (even if it is the default)
### Get default options for ExpressionBuilder and AltAnalyze
start_time = time.time()
test_goelite = 'no'
test_results_pannel = 'no'
global species
global array_type
global expression_data_format
global use_R
use_R = 'no'
global analysis_method
global p_threshold
global filter_probeset_types
global permute_p_threshold
global perform_permutation_analysis
global export_NI_values
global run_MiDAS
global analyze_functional_attributes
global microRNA_prediction_method
global calculate_normIntensity_p
global pathway_permutations
global avg_all_for_ss
global analyze_all_conditions
global remove_intronic_junctions
global agglomerate_inclusion_probesets
global expression_threshold
global factor_out_expression_changes
global only_include_constitutive_containing_genes
global remove_transcriptional_regulated_genes
global add_exons_to_annotations
global exclude_protein_details
global filter_for_AS
global use_direct_domain_alignments_only
global run_from_scratch
global explicit_data_type
explicit_data_type = 'null'
global altanalyze_files
altanalyze_files = []
species, array_type, manufacturer, constitutive_source, dabg_p, raw_expression_threshold, avg_all_for_ss, expression_data_format, include_raw_data, run_from_scratch, perform_alt_analysis = expr_var
analysis_method, p_threshold, filter_probeset_types, alt_exon_fold_variable, gene_expression_cutoff, remove_intronic_junctions, permute_p_threshold, perform_permutation_analysis, export_NI_values, analyze_all_conditions = alt_var
calculate_normIntensity_p, run_MiDAS, use_direct_domain_alignments_only, microRNA_prediction_method, filter_for_AS, additional_algorithms = additional_var
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, pathway_permutations, mod, returnPathways = goelite_var
original_remove_intronic_junctions = remove_intronic_junctions
if run_from_scratch == 'Annotate External Results':
analysis_method = 'external'
if returnPathways == 'no' or returnPathways == 'None':
returnPathways = None
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
try:
exon_exp_threshold = fl.ExonExpThreshold()
except Exception:
exon_exp_threshold = 'NA'
try:
gene_exp_threshold = fl.GeneExpThreshold()
except Exception:
gene_exp_threshold = 'NA'
try:
exon_rpkm_threshold = fl.ExonRPKMThreshold()
except Exception:
exon_rpkm_threshold = 'NA'
try:
rpkm_threshold = fl.RPKMThreshold() ### Gene-Level
except Exception:
rpkm_threshold = 'NA'
fl.setJunctionExpThreshold(
raw_expression_threshold
) ### For RNA-Seq, this specifically applies to exon-junctions
try:
predictGroups = fl.predictGroups()
except Exception:
predictGroups = False
try:
if fl.excludeLowExpressionExons(): excludeLowExpExons = 'yes'
else: excludeLowExpExons = 'no'
except Exception:
excludeLowExpExons = 'no'
if test_goelite == 'yes': ### It can be difficult to get error warnings from GO-Elite, unless run here
results_dir = filepath(fl.RootDir())
elite_input_dirs = [
'AltExonConfirmed', 'AltExon', 'regulated', 'upregulated',
'downregulated'
] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir + 'GO-Elite/' + elite_dir, results_dir + 'GO-Elite/denominator', results_dir + 'GO-Elite/' + elite_dir
variables = species, mod, pathway_permutations, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, returnPathways, file_dirs, root
GO_Elite.remoteAnalysis(variables, 'non-UI', Multi=mlp)
global perform_element_permutation_analysis
global permutations
perform_element_permutation_analysis = 'yes'
permutations = 2000
analyze_functional_attributes = 'yes' ### Do this by default (shouldn't substantially increase runtime)
if run_from_scratch != 'Annotate External Results' and (
array_type != "3'array" and array_type != 'RNASeq'):
if run_from_scratch != 'Process AltAnalyze filtered':
try:
raw_expression_threshold = float(raw_expression_threshold)
except Exception:
raw_expression_threshold = 1
if raw_expression_threshold < 1:
raw_expression_threshold = 1
print "Expression threshold < 1, forcing to be a minimum of 1."
try:
dabg_p = float(dabg_p)
except Exception:
dabg_p = 0
if dabg_p == 0 or dabg_p > 1:
print "Invalid dabg-p value threshold entered,(", dabg_p, ") setting to default of 0.05"
dabg_p = 0.05
if use_direct_domain_alignments_only == 'direct-alignment':
use_direct_domain_alignments_only = 'yes'
if run_from_scratch == 'Process CEL files': expression_data_format = 'log'
print "Beginning AltAnalyze Analysis... Format:", expression_data_format
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print_items = []
#print [permute_p_threshold]; sys.exit()
print_items.append(
"AltAnalyze version 2.0.9 - Expression Analysis Parameters Being Used...")
print_items.append('\t' + 'database' + ': ' +
unique.getCurrentGeneDatabaseVersion())
print_items.append('\t' + 'species' + ': ' + species)
print_items.append('\t' + 'method' + ': ' + array_type)
print_items.append('\t' + 'manufacturer' + ': ' + manufacturer)
print_items.append('\t' + 'probability_statistic' + ': ' +
probability_statistic)
print_items.append('\t' + 'constitutive_source' + ': ' +
constitutive_source)
print_items.append('\t' + 'dabg_p' + ': ' + str(dabg_p))
if array_type == 'RNASeq':
print_items.append('\t' + 'junction expression threshold' + ': ' + str(
raw_expression_threshold))
print_items.append('\t' + 'exon_exp_threshold' + ': ' + str(
exon_exp_threshold))
print_items.append('\t' + 'gene_exp_threshold' + ': ' + str(
gene_exp_threshold))
print_items.append('\t' + 'exon_rpkm_threshold' + ': ' + str(
exon_rpkm_threshold))
print_items.append('\t' + 'gene_rpkm_threshold' + ': ' + str(
rpkm_threshold))
print_items.append('\t' + 'exclude low expressing exons for RPKM' +
': ' + excludeLowExpExons)
else:
print_items.append('\t' + 'raw_expression_threshold' + ': ' + str(
raw_expression_threshold))
print_items.append('\t' + 'avg_all_for_ss' + ': ' + avg_all_for_ss)
print_items.append('\t' + 'expression_data_format' + ': ' +
expression_data_format)
print_items.append('\t' + 'include_raw_data' + ': ' + include_raw_data)
print_items.append('\t' + 'run_from_scratch' + ': ' + run_from_scratch)
print_items.append('\t' + 'perform_alt_analysis' + ': ' +
perform_alt_analysis)
if avg_all_for_ss == 'yes': cs_type = 'core'
else: cs_type = 'constitutive'
print_items.append('\t' + 'calculate_gene_expression_using' + ': ' +
cs_type)
print_items.append("Alternative Exon Analysis Parameters Being Used...")
print_items.append('\t' + 'analysis_method' + ': ' + analysis_method)
print_items.append('\t' + 'p_threshold' + ': ' + str(p_threshold))
print_items.append('\t' + 'filter_data_types' + ': ' +
filter_probeset_types)
print_items.append('\t' + 'alt_exon_fold_variable' + ': ' + str(
alt_exon_fold_variable))
print_items.append('\t' + 'gene_expression_cutoff' + ': ' + str(
gene_expression_cutoff))
print_items.append('\t' + 'remove_intronic_junctions' + ': ' +
remove_intronic_junctions)
print_items.append('\t' + 'avg_all_for_ss' + ': ' + avg_all_for_ss)
print_items.append('\t' + 'permute_p_threshold' + ': ' + str(
permute_p_threshold))
print_items.append('\t' + 'perform_permutation_analysis' + ': ' +
perform_permutation_analysis)
print_items.append('\t' + 'export_NI_values' + ': ' + export_NI_values)
print_items.append('\t' + 'run_MiDAS' + ': ' + run_MiDAS)
print_items.append('\t' + 'use_direct_domain_alignments_only' + ': ' +
use_direct_domain_alignments_only)
print_items.append('\t' + 'microRNA_prediction_method' + ': ' +
microRNA_prediction_method)
print_items.append('\t' + 'analyze_all_conditions' + ': ' +
analyze_all_conditions)
print_items.append('\t' + 'filter_for_AS' + ': ' + filter_for_AS)
if pathway_permutations == 'NA': run_GOElite = 'decide_later'
else: run_GOElite = 'run-immediately'
print_items.append('\t' + 'run_GOElite' + ': ' + run_GOElite)
universalPrintFunction(print_items)
if commandLineMode == 'yes':
print 'Running command line mode:', commandLineMode
summary_data_db['gene_assayed'] = 0
summary_data_db['denominator_exp_genes'] = 0
summary_data_db['alt_events'] = 0
summary_data_db['denominator_exp_events'] = 0
summary_data_db['alt_genes'] = 0
summary_data_db['direct_domain_genes'] = 0
summary_data_db['miRNA_gene_denom'] = 0
summary_data_db['miRNA_gene_hits'] = 0
if test_results_pannel == 'yes': ### It can be difficult to get error warnings from GO-Elite, unless run here
graphic_links = []
graphic_links.append(['test', 'Config/AltAnalyze_structure-RNASeq.jpg'
])
summary_data_db['QC'] = graphic_links
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
dataset = 'test'
results_dir = ''
print "Analysis Complete\n"
if root != '' and root != None:
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel()
SummaryResultsWindow(tl, 'GE', results_dir, dataset, 'parent',
summary_data_db)
root.destroy()
sys.exit()
global export_go_annotations
global aspire_output_list
global aspire_output_gene_list
global filter_probesets_by
global global_addition_factor
global onlyAnalyzeJunctions
global log_fold_cutoff
global aspire_cutoff
global annotation_system
global alt_exon_logfold_cutoff
"""dabg_p = 0.75; data_type = 'expression' ###used for expression analysis when dealing with AltMouse arrays
a = "3'array"; b = "exon"; c = "AltMouse"; e = "custom"; array_type = c
l = 'log'; n = 'non-log'; expression_data_format = l
hs = 'Hs'; mm = 'Mm'; dr = 'Dr'; rn = 'Rn'; species = mm
include_raw_data = 'yes'; expression_threshold = 70 ### Based on suggestion from BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196, for hu-exon 1.0 st array
avg_all_for_ss = 'no' ###Default is 'no' since we don't want all probes averaged for the exon arrays"""
###### Run ExpressionBuilder ######
"""ExpressionBuilder is used to:
(1) extract out gene expression values, provide gene annotations, and calculate summary gene statistics
(2) filter probesets based DABG p-values and export to pair-wise comparison files
(3) build array annotations files matched to gene structure features (e.g. exons, introns) using chromosomal coordinates
options 1-2 are executed in remoteExpressionBuilder and option 3 is by running ExonArrayEnsembl rules"""
try:
additional_algorithm = additional_algorithms.Algorithm()
additional_score = additional_algorithms.Score()
except Exception:
additional_algorithm = 'null'
additional_score = 'null'
if analysis_method == 'FIRMA': analyze_metaprobesets = 'yes'
elif additional_algorithm == 'FIRMA': analyze_metaprobesets = 'yes'
else: analyze_metaprobesets = 'no'
### Check to see if this is a real or FAKE (used for demonstration purposes) dataset
if run_from_scratch == 'Process CEL files' or 'Feature Extraction' in run_from_scratch:
for dataset in exp_file_location_db:
if run_from_scratch == 'Process CEL files':
fl = exp_file_location_db[dataset]
pgf_file = fl.InputCDFFile()
results_dir = filepath(fl.RootDir())
if '_demo' in pgf_file: ### Thus we are running demo CEL files and want to quit immediately
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
try:
print "Analysis Complete\n"
if root != '' and root != None:
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel()
SummaryResultsWindow(tl, 'AS', results_dir,
dataset, 'parent',
summary_data_db)
except Exception:
null = []
skip_intro = 'yes'
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
reload(UI)
UI.getUpdatedParameters(array_type, species,
run_from_scratch, results_dir)
try:
AltAnalyzeSetup('no')
except Exception:
sys.exit()
if 'CEL files' in run_from_scratch:
import APT
try:
try:
APT.probesetSummarize(exp_file_location_db,
analyze_metaprobesets,
filter_probeset_types, species, root)
if analyze_metaprobesets == 'yes':
analyze_metaprobesets = 'no' ### Re-run the APT analysis to obtain probeset rather than gene-level results (only the residuals are needed from a metaprobeset run)
APT.probesetSummarize(
exp_file_location_db, analyze_metaprobesets,
filter_probeset_types, species, root)
except Exception:
import platform
print "Trying to change APT binary access privileges"
for dataset in exp_file_location_db: ### Instance of the Class ExpressionFileLocationData
fl = exp_file_location_db[dataset]
apt_dir = fl.APTLocation()
if '/bin' in apt_dir:
apt_file = apt_dir + '/apt-probeset-summarize' ### if the user selects an APT directory
elif os.name == 'nt':
apt_file = apt_dir + '/PC/' + platform.architecture()[
0] + '/apt-probeset-summarize.exe'
elif 'darwin' in sys.platform:
apt_file = apt_dir + '/Mac/apt-probeset-summarize'
elif 'linux' in sys.platform:
if '32bit' in platform.architecture():
apt_file = apt_dir + '/Linux/32bit/apt-probeset-summarize'
elif '64bit' in platform.architecture():
apt_file = apt_dir + '/Linux/64bit/apt-probeset-summarize'
apt_file = filepath(apt_file)
os.chmod(apt_file, 0777)
midas_dir = string.replace(
apt_file, 'apt-probeset-summarize', 'apt-midas')
os.chmod(midas_dir, 0777)
APT.probesetSummarize(exp_file_location_db,
analysis_method,
filter_probeset_types, species, root)
except Exception:
            print_out = 'AltAnalyze encountered an unexpected error while running Affymetrix\n'
            print_out += 'Power Tools (APT). Additional information may be found in the directory\n'
            print_out += '"ExpressionInput/APT" in the output directory. You may also encounter issues\n'
            print_out += 'if you are logged into an account with restricted privileges.\n\n'
            print_out += 'If this issue cannot be resolved, contact AltAnalyze help or run RMA outside\n'
            print_out += 'of AltAnalyze and import the results using the analysis option "expression file".\n'
print traceback.format_exc()
try:
UI.WarningWindow(print_out, 'Exit')
root.destroy()
sys.exit()
except Exception:
print print_out
sys.exit()
elif 'Feature Extraction' in run_from_scratch:
import ProcessAgilentArrays
try:
ProcessAgilentArrays.agilentSummarize(exp_file_location_db)
except Exception:
print_out = 'Agilent array import and processing failed... see error log for details...'
print traceback.format_exc()
try:
UI.WarningWindow(print_out, 'Exit')
root.destroy()
sys.exit()
except Exception:
print print_out
sys.exit()
reload(ProcessAgilentArrays)
if run_from_scratch == 'Process RNA-seq reads' or run_from_scratch == 'buildExonExportFiles':
import RNASeq
reload(RNASeq)
import RNASeq
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
### The below function aligns splice-junction coordinates to Ensembl exons from BED Files and
### exports AltAnalyze specific databases that are unique to this dataset to the output directory
try:
fastq_folder = fl.RunKallisto()
except Exception:
print traceback.format_exc()
if len(fastq_folder) > 0:
try:
RNASeq.runKallisto(species,
dataset,
root_dir,
fastq_folder,
returnSampleNames=False)
biotypes = 'ran'
except Exception:
biotypes = 'failed'
else:
analyzeBAMs = False
bedFilesPresent = False
dir_list = unique.read_directory(fl.BEDFileDir())
for file in dir_list:
if '.bam' in string.lower(file):
analyzeBAMs = True
if '.bed' in string.lower(file):
bedFilesPresent = True
if analyzeBAMs and bedFilesPresent == False:
import multiBAMtoBED
bam_dir = fl.BEDFileDir()
refExonCoordinateFile = filepath('AltDatabase/ensembl/' +
species + '/' + species +
'_Ensembl_exon.txt')
outputExonCoordinateRefBEDfile = bam_dir + '/BedRef/' + species + '_' + string.replace(
dataset, 'exp.', '')
analysisType = ['exon', 'junction', 'reference']
#analysisType = ['junction']
multiBAMtoBED.parallelBAMProcessing(
bam_dir,
refExonCoordinateFile,
outputExonCoordinateRefBEDfile,
analysisType=analysisType,
useMultiProcessing=fl.multiThreading(),
MLP=mlp,
root=root)
biotypes = RNASeq.alignExonsAndJunctionsToEnsembl(
species,
exp_file_location_db,
dataset,
Multi=mlp)
if biotypes == 'failed':
print_out = 'No valid chromosomal positions in the input BED or BioScope files. Exiting AltAnalyze.'
#print traceback.format_exc()
try:
UI.WarningWindow(print_out, 'Exit')
root.destroy()
sys.exit()
except Exception:
print print_out
sys.exit()
#print '!!!!!back inside AltAnalyze'
#returnLargeGlobalVars()
reload(RNASeq)
#print '!!!!!again'
#returnLargeGlobalVars()
if root_dir in biotypes:
print_out = 'Exon-level BED coordinate predictions exported to:\n' + biotypes
print_out += '\n\nAfter obtaining exon expression estimates, rename exon BED files to\n'
print_out += 'match the junction name (e.g., Sample1__exon.bed and Sample1__junction.bed)\n'
print_out += 'and re-run AltAnalyze (see tutorials at http://altanalyze.org for help).'
UI.InfoWindow(print_out, 'Export Complete')
try:
root.destroy()
sys.exit()
except Exception:
sys.exit()
if predictGroups == True:
expFile = fl.ExpFile()
if array_type == 'RNASeq':
exp_threshold = 100
rpkm_threshold = 10
else:
exp_threshold = 200
rpkm_threshold = 8
RNASeq.singleCellRNASeqWorkflow(species,
array_type,
expFile,
mlp,
exp_threshold=exp_threshold,
rpkm_threshold=rpkm_threshold)
goelite_run = False
if run_from_scratch == 'Process Expression file' or run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or 'Feature Extraction' in run_from_scratch:
if fl.NormMatrix(
) == 'quantile' and 'Feature Extraction' not in run_from_scratch:
import NormalizeDataset
try:
NormalizeDataset.normalizeDataset(fl.ExpFile())
except Exception:
print "Normalization failed for unknown reasons..."
#"""
status = ExpressionBuilder.remoteExpressionBuilder(
species, array_type, dabg_p, raw_expression_threshold,
avg_all_for_ss, expression_data_format, manufacturer,
constitutive_source, data_source, include_raw_data,
perform_alt_analysis, ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype,
exp_file_location_db, root)
reload(ExpressionBuilder) ### Clears Memory
#"""
graphics = []
if fl.MarkerFinder() == 'yes':
            ### Identify putative condition-specific marker genes
import markerFinder
fl.setOutputDir(root_dir) ### This needs to be set here
exp_file = fl.ExpFile()
if array_type != "3'array":
exp_file = string.replace(exp_file, '.txt',
'-steady-state.txt')
markerFinder_inputs = [
exp_file, fl.DatasetFile()
] ### Output a replicate and non-replicate version
markerFinder_inputs = [
exp_file
] ### Only considers the replicate and not mean analysis (recommended)
for input_exp_file in markerFinder_inputs:
                ### This applies to an ExpressionOutput DATASET file composed of gene expression values (averages already present)
try:
output_dir = markerFinder.getAverageExpressionValues(
input_exp_file, array_type
) ### Either way, make an average annotated file from the DATASET file
except Exception:
print "Unknown MarkerFinder failure (possible filename issue or data incompatibility)..."
print traceback.format_exc()
continue
if 'DATASET' in input_exp_file:
group_exp_file = string.replace(input_exp_file, 'DATASET',
'AVERAGE')
else:
group_exp_file = (input_exp_file, output_dir
) ### still analyze the primary sample
compendiumType = 'protein_coding'
if expression_data_format == 'non-log': logTransform = True
else: logTransform = False
try:
markerFinder.analyzeData(group_exp_file,
species,
array_type,
compendiumType,
AdditionalParameters=fl,
logTransform=logTransform)
except Exception:
None
### Generate heatmaps (unclustered - order by markerFinder)
try:
graphics = markerFinder.generateMarkerHeatMaps(
fl,
array_type,
graphics=graphics)
except Exception:
print traceback.format_exc()
remove_intronic_junctions = original_remove_intronic_junctions ### This var gets reset when running FilterDABG
try:
summary_data_db['QC'] = fl.GraphicLinks(
) + graphics ### provides links for displaying QC and clustering plots
except Exception:
            null = [] ### Visualization support through matplotlib either not present or visualization options excluded
#print '!!!!!finished expression builder'
#returnLargeGlobalVars()
expression_data_format = 'log' ### This variable is set from non-log in FilterDABG when present (version 1.16)
try:
parent_dir = fl.RootDir() + '/GO-Elite/regulated/'
dir_list = read_directory(parent_dir)
for file in dir_list:
input_file_dir = parent_dir + '/' + file
inputType = 'IDs'
interactionDirs = ['WikiPathways', 'KEGG', 'BioGRID',
'TFTargets']
output_dir = parent_dir
degrees = 'direct'
input_exp_file = input_file_dir
gsp = UI.GeneSelectionParameters(species, array_type,
manufacturer)
gsp.setGeneSet('None Selected')
gsp.setPathwaySelect('')
gsp.setGeneSelection('')
gsp.setOntologyID('')
gsp.setIncludeExpIDs(True)
UI.networkBuilder(input_file_dir, inputType, output_dir,
interactionDirs, degrees, input_exp_file,
gsp, '')
except Exception:
print traceback.format_exc()
if status == 'stop':
### See if the array and species are compatible with GO-Elite analysis
system_codes = UI.getSystemInfo()
go_elite_analysis_supported = 'yes'
species_names = UI.getSpeciesInfo()
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
results_dir = filepath(fl.RootDir())
### Perform GO-Elite Analysis
if pathway_permutations != 'NA':
try:
print '\nBeginning to run GO-Elite analysis on alternative exon results'
elite_input_dirs = [
'AltExonConfirmed', 'AltExon', 'regulated',
'upregulated', 'downregulated'
] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir + 'GO-Elite/' + elite_dir, results_dir + 'GO-Elite/denominator', results_dir + 'GO-Elite/' + elite_dir
input_dir = results_dir + 'GO-Elite/' + elite_dir
variables = species, mod, pathway_permutations, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, returnPathways, file_dirs, root
try:
input_files = read_directory(
input_dir) ### Are there any files to analyze?
except Exception:
input_files = []
if len(input_files) > 0:
try:
GO_Elite.remoteAnalysis(variables,
'non-UI',
Multi=mlp)
goelite_run = True
except Exception, e:
print e
print "GO-Elite analysis failed"
try:
GO_Elite.moveMAPPFinderFiles(file_dirs[0])
except Exception:
print 'Input GO-Elite files could NOT be moved.'
try:
GO_Elite.moveMAPPFinderFiles(file_dirs[1])
except Exception:
print 'Input GO-Elite files could NOT be moved.'
except Exception:
pass
if goelite_run == False:
print 'No GO-Elite input files to analyze (check your criterion).'
print_out = 'Analysis complete. Gene expression\nsummary exported to "ExpressionOutput".'
try:
if use_Tkinter == 'yes':
print "Analysis Complete\n"
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel()
SummaryResultsWindow(tl, 'GE', results_dir, dataset,
'parent', summary_data_db)
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
if go_elite_analysis_supported == 'yes':
UI.getUpdatedParameters(array_type, species,
run_from_scratch,
file_dirs)
try:
AltAnalyzeSetup('no')
except Exception:
print traceback.format_exc()
sys.exit()
else:
print '\n' + print_out
sys.exit()
except Exception:
#print 'Failed to report status through GUI.'
sys.exit()
else:
            altanalyze_files = status[1] ### These files are the comparison files to analyze
elif run_from_scratch == 'update DBs':
null = [] ###Add link to new module here (possibly)
#updateDBs(species,array_type)
sys.exit()
if perform_alt_analysis != 'expression': ###Thus perform_alt_analysis = 'both' or 'alt' (default when skipping expression summary step)
###### Run AltAnalyze ######
global dataset_name
global summary_results_db
global summary_results_db2
summary_results_db = {}
summary_results_db2 = {}
aspire_output_list = []
aspire_output_gene_list = []
onlyAnalyzeJunctions = 'no'
agglomerate_inclusion_probesets = 'no'
filter_probesets_by = 'NA'
if array_type == 'AltMouse' or (
(array_type == 'junction' or
array_type == 'RNASeq') and explicit_data_type == 'null'):
if filter_probeset_types == 'junctions-only':
onlyAnalyzeJunctions = 'yes'
elif filter_probeset_types == 'combined-junctions':
agglomerate_inclusion_probesets = 'yes'
onlyAnalyzeJunctions = 'yes'
elif filter_probeset_types == 'exons-only':
analysis_method = 'splicing-index'
filter_probesets_by = 'exon'
if filter_probeset_types == 'combined-junctions' and array_type == 'junction' or array_type == 'RNASeq':
filter_probesets_by = 'all'
else:
filter_probesets_by = filter_probeset_types
c = 'Ensembl'
d = 'Entrez Gene'
annotation_system = c
expression_threshold = 0 ###This is different than the raw_expression_threshold (probably shouldn't filter so set to 0)
if analysis_method == 'linearregres-rlm':
analysis_method = 'linearregres'
use_R = 'yes'
if gene_expression_cutoff < 1:
gene_expression_cutoff = 2 ### A number less than one is invalid
print "WARNING!!!! Invalid gene expression fold cutoff entered,\nusing the default value of 2, must be greater than 1."
log_fold_cutoff = math.log(float(gene_expression_cutoff), 2)
if analysis_method != 'ASPIRE' and analysis_method != 'none':
if p_threshold <= 0 or p_threshold > 1:
p_threshold = 0.05 ### A number less than one is invalid
print "WARNING!!!! Invalid alternative exon p-value threshold entered,\nusing the default value of 0.05."
if alt_exon_fold_variable < 1:
alt_exon_fold_variable = 1 ### A number less than one is invalid
print "WARNING!!!! Invalid alternative exon fold cutoff entered,\nusing the default value of 2, must be greater than 1."
try:
alt_exon_logfold_cutoff = math.log(
float(alt_exon_fold_variable), 2)
except Exception:
alt_exon_logfold_cutoff = 1
else:
alt_exon_logfold_cutoff = float(alt_exon_fold_variable)
global_addition_factor = 0
export_junction_comparisons = 'no' ### No longer accessed in this module - only in update mode through a different module
factor_out_expression_changes = 'yes' ### Use 'no' if data is normalized already or no expression normalization for ASPIRE desired
only_include_constitutive_containing_genes = 'yes'
remove_transcriptional_regulated_genes = 'yes'
add_exons_to_annotations = 'no'
exclude_protein_details = 'no'
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
annotation_system = d
if 'linear' in analysis_method: analysis_method = 'linearregres'
if 'aspire' in analysis_method: analysis_method = 'ASPIRE'
if array_type == 'AltMouse': species = 'Mm'
#if export_NI_values == 'yes': remove_transcriptional_regulated_genes = 'no'
###Saves run-time while testing the software (global variable stored)
#import_dir = '/AltDatabase/affymetrix/'+species
#dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
### Get Ensembl-GO and pathway annotations from GO-Elite files
universalPrintFunction(["Importing GO-Elite pathway/GO annotations"])
global go_annotations
go_annotations = {}
import BuildAffymetrixAssociations
go_annotations = BuildAffymetrixAssociations.getEnsemblAnnotationsFromGOElite(
species)
global probeset_annotations_file
if array_type == 'RNASeq':
probeset_annotations_file = root_dir + 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_junctions.txt'
elif array_type == 'AltMouse':
probeset_annotations_file = 'AltDatabase/' + species + '/' + array_type + '/' + 'MASTER-probeset-transcript.txt'
else:
probeset_annotations_file = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_probesets.txt'
#"""
if analysis_method != 'none':
analysis_summary = RunAltAnalyze(
) ### Only run if analysis methods is specified (only available for RNA-Seq and junction analyses)
else:
analysis_summary = None
if analysis_summary != None:
summary_results_db, aspire_output_gene_list, number_events_analyzed = analysis_summary
summary_data_db2 = copy.deepcopy(summary_data_db)
for i in summary_data_db2:
            del summary_data_db[i] ### If we reset the variable it violates its global declaration... do this instead
#universalPrintFunction(['Alternative Exon Results for Junction Comparisons:'])
#for i in summary_data_db: universalPrintFunction([i+' '+ str(summary_data_db[i])])
exportSummaryResults(summary_results_db, analysis_method,
aspire_output_list, aspire_output_gene_list,
annotate_db, array_type,
number_events_analyzed, root_dir)
else:
### Occurs for RNASeq when no junctions are present
summary_data_db2 = {}
if array_type == 'junction' or array_type == 'RNASeq':
        #Reanalyze junction array data separately for individual probesets rather than reciprocal junctions
if array_type == 'junction': explicit_data_type = 'exon'
elif array_type == 'RNASeq': explicit_data_type = 'junction'
else: report_single_probeset_results = 'no'
### Obtain exon analysis defaults
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults(
'exon', species)
analysis_method, null, filter_probeset_types, null, null, alt_exon_fold_variable, null, null, null, null, null, null, null, calculate_normIntensity_p, null = alt_exon_defaults
filter_probesets_by = filter_probeset_types
if additional_algorithm == 'splicing-index' or additional_algorithm == 'FIRMA':
analysis_method = additional_algorithm
#print [analysis_method], [filter_probeset_types], [p_threshold], [alt_exon_fold_variable]
try:
alt_exon_logfold_cutoff = math.log(
float(additional_score), 2)
except Exception:
alt_exon_logfold_cutoff = 1
agglomerate_inclusion_probesets = 'no'
try:
summary_results_db, aspire_output_gene_list, number_events_analyzed = RunAltAnalyze(
)
exportSummaryResults(
summary_results_db, analysis_method,
aspire_output_list, aspire_output_gene_list,
annotate_db, 'exon', number_events_analyzed, root_dir)
if len(summary_data_db2) == 0:
summary_data_db2 = summary_data_db
explicit_data_type = 'exon-only'
#universalPrintFunction(['Alternative Exon Results for Individual Probeset Analyses:'])
#for i in summary_data_db: universalPrintFunction([i+' '+ str(summary_data_db[i])])
except Exception:
print traceback.format_exc()
None
#"""
### Perform dPSI Analysis
try:
if 'counts.' in fl.CountsFile(): pass
else:
dir_list = read_directory(fl.RootDir() + 'ExpressionInput')
for file in dir_list:
if 'exp.' in file and 'steady-state' not in file:
fl.setExpFile(fl.RootDir() + 'ExpressionInput/' + file)
#print [fl.RootDir()+'ExpressionInput/'+file]
except Exception:
search_dir = fl.RootDir() + '/ExpressionInput'
files = unique.read_directory(fl.RootDir() + '/ExpressionInput')
for file in files:
if 'exp.' in file and 'steady-state.txt' not in file:
fl.setExpFile(search_dir + '/' + file)
try:
#"""
try:
graphic_links2, cluster_input_file = ExpressionBuilder.unbiasedComparisonSpliceProfiles(
fl.RootDir(),
species,
array_type,
expFile=fl.CountsFile(),
min_events=0,
med_events=1)
except Exception:
pass
#"""
inputpsi = fl.RootDir(
) + 'AltResults/AlternativeOutput/' + species + '_' + array_type + '_top_alt_junctions-PSI-clust.txt'
### Calculate ANOVA p-value stats based on groups
matrix, compared_groups, original_data = statistics.matrixImport(
inputpsi)
matrix_pvalues = statistics.runANOVA(inputpsi, matrix,
compared_groups)
anovaFilteredDir = statistics.returnANOVAFiltered(
inputpsi, original_data, matrix_pvalues)
graphic_link1 = ExpressionBuilder.exportHeatmap(anovaFilteredDir)
try:
summary_data_db2['QC'] += graphic_link1
except Exception:
summary_data_db2['QC'] = graphic_link1
except Exception:
print traceback.format_exc()
import RNASeq
try:
graphic_link = RNASeq.compareExonAndJunctionResults(
species, array_type, summary_results_db, root_dir)
try:
summary_data_db2['QC'] += graphic_link
except Exception:
summary_data_db2['QC'] = graphic_link
except Exception:
print traceback.format_exc()
#"""
### Export the top 15 spliced genes
try:
altresult_dir = fl.RootDir() + '/AltResults/'
splicing_results_root = altresult_dir + '/Clustering/'
dir_list = read_directory(splicing_results_root)
gene_string = ''
altanalyze_results_folder = altresult_dir + '/RawSpliceData/' + species
### Lookup the raw expression dir
expression_results_folder = string.replace(
altresult_dir, 'AltResults', 'ExpressionInput')
expression_dir = UI.getValidExpFile(expression_results_folder)
try:
altresult_dir = UI.getValidSplicingScoreFile(
altanalyze_results_folder)
except Exception, e:
print traceback.format_exc()
for file in dir_list:
if 'AltExonConfirmed' in file:
gene_dir = splicing_results_root + '/' + file
genes = UI.importGeneList(
gene_dir,
limit=50) ### list of gene IDs or symbols
gene_string = gene_string + ',' + genes
print 'Imported genes from', file, '\n'
show_introns = False
analysisType = 'plot'
for file in dir_list:
if 'Combined-junction-exon-evidence' in file and 'top' not in file:
gene_dir = splicing_results_root + '/' + file
try:
isoform_dir = UI.exportJunctionList(
gene_dir,
limit=50) ### list of gene IDs or symbols
except Exception:
print traceback.format_exc()
UI.altExonViewer(species, array_type, expression_dir, gene_string,
show_introns, analysisType, None)
print 'completed'
UI.altExonViewer(species, array_type, altresult_dir, gene_string,
show_introns, analysisType, None)
print 'completed'
except Exception:
print traceback.format_exc()
try:
top_PSI_junction = inputpsi[:-4] + '-ANOVA.txt'
isoform_dir2 = UI.exportJunctionList(
top_PSI_junction,
limit=50) ### list of gene IDs or symbols
except Exception:
print traceback.format_exc()
try:
analyzeBAMs = False
dir_list = unique.read_directory(fl.RootDir())
for file in dir_list:
if '.bam' in string.lower(file):
analyzeBAMs = True
if analyzeBAMs:
### Create sashimi plot index
import SashimiIndex
SashimiIndex.remoteIndexing(species, fl)
import SashimiPlot
print 'Exporting Sashimi Plots for the top-predicted splicing events... be patient'
try:
SashimiPlot.remoteSashimiPlot(
species, fl, fl.RootDir(), isoform_dir
) ### assuming the bam files are in the root-dir
except Exception:
pass
print 'completed'
SashimiPlot.remoteSashimiPlot(
species, fl, fl.RootDir(), isoform_dir2
) ### assuming the bam files are in the root-dir
print 'completed'
else:
print 'No BAM files present in the root directory... skipping SashimiPlot analysis...'
except Exception:
print traceback.format_exc()
try:
clearObjectsFromMemory(exon_db)
clearObjectsFromMemory(constitutive_probeset_db)
clearObjectsFromMemory(go_annotations)
clearObjectsFromMemory(original_microRNA_z_score_data)
clearObjectsFromMemory(last_exon_region_db)
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
except Exception:
null = []
#print '!!!!!finished'
#returnLargeGlobalVars()
end_time = time.time()
time_diff = int(end_time - start_time)
universalPrintFunction(["Analyses finished in %d seconds" % time_diff])
#universalPrintFunction(["Hit Enter/Return to exit AltAnalyze"])
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
results_dir = filepath(fl.RootDir())
### Perform GO-Elite Analysis
if pathway_permutations != 'NA':
goelite_run = False
print '\nBeginning to run GO-Elite analysis on alternative exon results'
elite_input_dirs = [
'AltExonConfirmed', 'AltExon', 'regulated', 'upregulated',
'downregulated'
] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir + 'GO-Elite/' + elite_dir, results_dir + 'GO-Elite/denominator', results_dir + 'GO-Elite/' + elite_dir
input_dir = results_dir + 'GO-Elite/' + elite_dir
try:
input_files = read_directory(
input_dir) ### Are there any files to analyze?
except Exception:
input_files = []
if len(input_files) > 0:
variables = species, mod, pathway_permutations, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, returnPathways, file_dirs, root
try:
GO_Elite.remoteAnalysis(variables, 'non-UI', Multi=mlp)
goelite_run = True
except Exception, e:
print e
print "GO-Elite analysis failed"
try:
GO_Elite.moveMAPPFinderFiles(file_dirs[0])
except Exception:
print 'Input GO-Elite files could NOT be moved.'
try:
GO_Elite.moveMAPPFinderFiles(file_dirs[1])
except Exception:
print 'Input GO-Elite files could NOT be moved.'
if goelite_run == False:
print 'No GO-Elite input files to analyze (check your criterion).'
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
try:
if root != '' and root != None:
print "Analysis Complete\n"
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel()
SummaryResultsWindow(tl, 'AS', results_dir, dataset_name,
'specific', summary_data_db2)
except Exception:
print traceback.format_exc()
pass #print 'Failed to open GUI.'
skip_intro = 'yes'
if root != '' and root != None:
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
try:
UI.getUpdatedParameters(array_type, species, run_from_scratch,
file_dirs)
except Exception:
pass
try:
AltAnalyzeSetup('no')
except Exception:
sys.exit()
def exportSummaryResults(summary_results_db, analysis_method,
aspire_output_list, aspire_output_gene_list,
annotate_db, array_type, number_events_analyzed,
root_dir):
try:
ResultsExport_module.outputSummaryResults(summary_results_db, '',
analysis_method, root_dir)
#ResultsExport_module.outputSummaryResults(summary_results_db2,'-uniprot_attributes',analysis_method)
ResultsExport_module.compareAltAnalyzeResults(
aspire_output_list, annotate_db, number_events_analyzed, 'no',
analysis_method, array_type, root_dir)
ResultsExport_module.compareAltAnalyzeResults(
aspire_output_gene_list, annotate_db, '', 'yes', analysis_method,
array_type, root_dir)
except UnboundLocalError:
print "...No results to summarize" ###Occurs if there is a problem parsing these files
def checkGOEliteProbesets(fn, species):
### Get all probesets in GO-Elite files
mod_source = 'Ensembl' + '-' + 'Affymetrix'
import gene_associations
try:
ensembl_to_probeset_id = gene_associations.getGeneToUid(species,
mod_source)
except Exception:
ensembl_to_probeset_id = {}
mod_source = 'EntrezGene' + '-' + 'Affymetrix'
try:
entrez_to_probeset_id = gene_associations.getGeneToUid(species,
mod_source)
except Exception:
entrez_to_probeset_id = {}
probeset_db = {}
for gene in ensembl_to_probeset_id:
for probeset in ensembl_to_probeset_id[gene]:
probeset_db[probeset] = []
for gene in entrez_to_probeset_id:
for probeset in entrez_to_probeset_id[gene]:
probeset_db[probeset] = []
###Import an Affymetrix array annotation file (from http://www.affymetrix.com) and parse out annotations
csv_probesets = {}
x = 0
y = 0
fn = filepath(fn)
status = 'no'
for line in open(fn, 'r').readlines():
probeset_data = string.replace(line, '\n', '') #remove endline
probeset_data = string.replace(probeset_data, '---', '')
affy_data = string.split(probeset_data[1:-1], '","')
if x == 0 and line[0] != '#':
x = 1
affy_headers = affy_data
for header in affy_headers:
y = 0
while y < len(affy_headers):
if 'Probe Set ID' in affy_headers[
y] or 'probeset_id' in affy_headers[y]:
ps = y
y += 1
elif x == 1:
try:
probeset = affy_data[ps]
csv_probesets[probeset] = []
except Exception:
null = []
for probeset in csv_probesets:
if probeset in probeset_db:
status = 'yes'
break
return status
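# Note on the input parsed by checkGOEliteProbesets(): it assumes a quoted, comma-delimited
# Affymetrix annotation CSV whose header row contains a 'Probe Set ID' (or 'probeset_id')
# column and whose comment lines start with '#'. A hypothetical pair of lines for illustration:
#     "Probe Set ID","Gene Symbol","Species Scientific Name"
#     "1007_s_at","DDR1","Homo sapiens"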
class SpeciesData:
def __init__(self, abrev, species, systems, taxid):
self._abrev = abrev
self._species = species
self._systems = systems
self._taxid = taxid
def SpeciesCode(self):
return self._abrev
def SpeciesName(self):
return self._species
def Systems(self):
return self._systems
def TaxID(self):
return self._taxid
def __repr__(self):
        return self.SpeciesCode() + '|' + self.SpeciesName()
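# Minimal usage sketch (values are examples only): each SpeciesData entry pairs a two-letter
# species code with its full name, the compatible MOD systems and the NCBI taxonomy ID, e.g.:
#     sd = SpeciesData('Hs', 'Homo sapiens', ['Ensembl', 'EntrezGene'], '9606')
#     sd.SpeciesCode()  # -> 'Hs'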
def getSpeciesInfo():
### Used by AltAnalyze
UI.importSpeciesInfo()
species_names = {}
for species_full in species_codes:
sc = species_codes[species_full]
abrev = sc.SpeciesCode()
species_names[abrev] = species_full
return species_codes, species_names
def importGOEliteSpeciesInfo():
filename = 'Config/goelite_species.txt'
x = 0
fn = filepath(filename)
species_codes = {}
for line in open(fn, 'rU').readlines():
data = cleanUpLine(line)
abrev, species, taxid, compatible_mods = string.split(data, '\t')
if x == 0: x = 1
else:
compatible_mods = string.split(compatible_mods, '|')
sd = SpeciesData(abrev, species, compatible_mods, taxid)
species_codes[species] = sd
return species_codes
def exportGOEliteSpeciesInfo(species_codes):
fn = filepath('Config/goelite_species.txt')
data = open(fn, 'w')
x = 0
header = string.join(
['species_code', 'species_name', 'tax_id', 'compatible_algorithms'
], '\t') + '\n'
data.write(header)
for species in species_codes:
if 'other' not in species and 'all-' not in species:
sd = species_codes[species]
mods = string.join(sd.Systems(), '|')
values = [sd.SpeciesCode(), sd.SpeciesName(), sd.TaxID(), mods]
values = string.join(values, '\t') + '\n'
data.write(values)
data.close()
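# The file written above is plain tab-delimited text. Re-using the example species from the
# sketch above, importGOEliteSpeciesInfo() would expect rows shaped like (tabs shown as gaps):
#     species_code    species_name    tax_id    compatible_algorithms
#     Hs              Homo sapiens    9606      Ensembl|EntrezGene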
def TimeStamp():
time_stamp = time.localtime()
year = str(time_stamp[0])
month = str(time_stamp[1])
day = str(time_stamp[2])
if len(month) < 2: month = '0' + month
if len(day) < 2: day = '0' + day
return year + month + day
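# TimeStamp() differs from timestamp() defined earlier: it returns only a zero-padded date
# string, e.g. time.localtime() on June 1, 2014 gives '20140601' (no clock time appended).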
def verifyFile(filename):
status = 'not found'
try:
fn = filepath(filename)
for line in open(fn, 'rU').xreadlines():
status = 'found'
break
except Exception:
status = 'not found'
return status
def verifyFileLength(filename):
count = 0
try:
fn = filepath(filename)
for line in open(fn, 'rU').xreadlines():
count += 1
if count > 9: break
except Exception:
null = []
return count
def verifyGroupFileFormat(filename):
correct_format = False
try:
fn = filepath(filename)
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
if len(string.split(data, '\t')) == 3:
correct_format = True
break
except Exception:
correct_format = False
return correct_format
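# verifyGroupFileFormat() only checks that some line splits into exactly three tab-separated
# fields. A hypothetical groups-file line that would satisfy the check:
#     sample1.bed<TAB>1<TAB>wild-type
# (the sample/group-number/group-name meaning of those fields follows the usual AltAnalyze
#  groups-file convention and is assumed here rather than verified by this function).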
def displayHelp():
fn = filepath('Documentation/commandline.txt')
print '\n################################################\nAltAnalyze Command-Line Help'
for line in open(fn, 'rU').readlines():
print cleanUpLine(line)
print '\n################################################ - END HELP'
sys.exit()
def searchDirectory(directory, var):
directory = unique.filepath(directory)
files = unique.read_directory(directory)
version = unique.getCurrentGeneDatabaseVersion()
for file in files:
if var in file:
location = string.split(directory + '/' + file, version)[1][1:]
return [location]
###### Command Line Functions (AKA Headless Mode) ######
def commandLineRun():
print 'Running commandline options'
import getopt
#/hd3/home/nsalomonis/normalization/mir1 - boxer
#python AltAnalyze.py --species Mm --arraytype "3'array" --celdir "C:/CEL" --output "C:/CEL" --expname miR1_column --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --species Hs --arraytype "3'array" --FEdir "C:/FEfiles" --output "C:/FEfiles" --channel_to_extract "green/red ratio" --expname cancer --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --celdir "C:/CEL" --output "C:/CEL" --expname miR1_column
#open ./AltAnalyze.app --celdir "/Users/nsalomonis/Desktop" --output "/Users/nsalomonis/Desktop" --expname test
#python AltAnalyze.py --species Mm --arraytype "3'array" --expdir "C:/CEL/ExpressionInput/exp.miR1_column.txt" --output "C:/CEL" --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --species Mm --platform RNASeq --bedDir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles" --groupdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/ExpressionInput/groups.test.txt" --compdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/ExpressionInput/comps.test.txt" --output "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles" --expname "test"
#python AltAnalyze.py --species Mm --platform RNASeq --filterdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/" --output "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles"
#python AltAnalyze.py --expdir "/Users/nsalomonis/Desktop/Nathan/ExpressionInput/exp.test.txt" --exonMapFile "/Users/nsalomonis/Desktop/Nathan/hgu133_probe.txt" --species Hs --platform "3'array" --output "/Users/nsalomonis/Desktop/Nathan"
global apt_location
global root_dir
global probability_statistic
global log_file
global summary_data_db
summary_data_db = {}
###required
marker_finder = 'no'
manufacturer = 'Affymetrix'
constitutive_source = 'Ensembl'
ensembl_version = 'current'
species_code = None
species = None
main_input_folder = None
output_dir = None
array_type = None
input_annotation_file = None
groups_file = None
comps_file = None
input_cdf_file = None
exp_name = None
run_GOElite = 'yes'
visualize_qc_results = 'yes'
run_lineage_profiler = 'yes'
input_exp_file = ''
cel_file_dir = ''
input_stats_file = ''
input_filtered_dir = ''
external_annotation_dir = ''
xhyb_remove = 'no'
update_method = []
update_dbs = 'no'
analyze_all_conditions = 'no'
return_all = 'no'
additional_array_types = []
remove_intronic_junctions = 'no'
ignore_built_species = 'no'
build_exon_bedfile = 'no'
compendiumType = 'protein_coding'
probability_statistic = 'unpaired t-test'
specific_array_type = None
additional_resources = [None]
wpid = None
mod = 'Ensembl'
transpose = False
input_file_dir = None
denom_file_dir = None
image_export = []
selected_species = [
'Hs', 'Mm', 'Rn'
    ] ### These are the species for which additional array types are currently supported
selected_platforms = ['AltMouse', 'exon', 'gene', 'junction']
returnPathways = 'no'
compendiumPlatform = 'gene'
exonMapFile = None
    platformType = None ### This option is used to store the original platform type
perform_alt_analysis = 'no'
mappedExonAnalysis = False ### Map the original IDs to the RNA-Seq exon database (when True)
microRNA_prediction_method = None
pipelineAnalysis = True
OntologyID = ''
PathwaySelection = ''
GeneSetSelection = ''
interactionDirs = []
inputType = 'ID list'
Genes = ''
degrees = 'direct'
includeExpIDs = True
update_interactions = False
data_type = 'raw expression'
batch_effects = 'no'
channel_to_extract = None
normalization = False
justShowTheseIDs = ''
display = False
accessoryAnalysis = ''
modelSize = None
geneModel = False
run_from_scratch = None
systemToUse = None ### For other IDs
custom_reference = False
multiThreading = True
genesToReport = 60
correlateAll = True
expression_data_format = 'log'
runICGS = False
IDtype = None
runKallisto = False
original_arguments = sys.argv
arguments = []
for arg in original_arguments:
arg = string.replace(arg, '\xe2\x80\x9c', ''
) ### These are non-standard forward quotes
arg = string.replace(arg, '\xe2\x80\x9d', ''
) ### These are non-standard reverse quotes
arg = string.replace(arg, '\xe2\x80\x93', '-'
) ### These are non-standard dashes
arg = string.replace(arg, '\x96', '-'
) ### These are non-standard dashes
arg = string.replace(arg, '\x93', ''
) ### These are non-standard forward quotes
arg = string.replace(arg, '\x94', ''
) ### These are non-standard reverse quotes
arguments.append(arg)
print '\nArguments input:', arguments, '\n'
if '--help' in arguments[1:] or '--h' in arguments[1:]:
try:
displayHelp() ### Print out a help file and quit
except Exception:
print 'See: http://www.altanalyze.org for documentation and command-line help'
sys.exit()
if 'AltAnalyze' in arguments[1]:
arguments = arguments[
1:
] ### Occurs on Ubuntu with the location of AltAnalyze being added to sys.argv (exclude this since no argument provided for this var)
try:
options, remainder = getopt.getopt(
arguments[1:], '',
['species=', 'mod=', 'elitepval=', 'elitepermut=', 'method=',
'zscore=', 'pval=', 'num=', 'runGOElite=', 'denom=', 'output=',
'arraytype=', 'celdir=', 'expdir=', 'output=', 'statdir=',
'filterdir=', 'cdfdir=', 'csvdir=', 'expname=', 'dabgp=',
'rawexp=', 'avgallss=', 'logexp=', 'inclraw=', 'runalt=',
'altmethod=', 'altp=', 'probetype=', 'altscore=', 'GEcutoff=',
'exportnormexp=', 'calcNIp=', 'runMiDAS=', 'GEcutoff=',
'GEelitepval=', 'mirmethod=', 'ASfilter=', 'vendor=',
'GEelitefold=', 'update=', 'version=', 'analyzeAllGroups=',
'GEeliteptype=', 'force=', 'resources_to_analyze=',
'dataToAnalyze=', 'returnAll=', 'groupdir=', 'compdir=',
'annotatedir=', 'additionalScore=', 'additionalAlgorithm=',
'noxhyb=', 'platform=', 'bedDir=', 'altpermutep=', 'altpermute=',
'removeIntronOnlyJunctions=', 'normCounts=',
'buildExonExportFile=', 'groupStat=', 'compendiumPlatform=',
'rpkm=', 'exonExp=', 'specificArray=', 'ignoreBuiltSpecies=',
'ORAstat=', 'outputQCPlots=', 'runLineageProfiler=', 'input=',
'image=', 'wpid=', 'additional=', 'row_method=', 'column_method=',
'row_metric=', 'column_metric=', 'color_gradient=', 'transpose=',
'returnPathways=', 'compendiumType=', 'exonMapFile=', 'geneExp=',
'labels=', 'contrast=', 'plotType=', 'geneRPKM=', 'exonRPKM=',
'runMarkerFinder=', 'update_interactions=', 'includeExpIDs=',
'degrees=', 'genes=', 'inputType=', 'interactionDirs=',
'GeneSetSelection=', 'PathwaySelection=', 'OntologyID=',
'dataType=', 'combat=', 'channelToExtract=', 'showIntrons=',
'display=', 'join=', 'uniqueOnly=', 'accessoryAnalysis=',
'inputIDType=', 'outputIDType=', 'FEdir=', 'channelToExtract=',
'AltResultsDir=', 'geneFileDir=', 'AltResultsDir=', 'modelSize=',
'geneModel=', 'reference=', 'multiThreading=', 'multiProcessing=',
'genesToReport=', 'correlateAll=', 'normalization=',
'justShowTheseIDs=', 'direction=', 'analysisType=', 'algorithm=',
'rho=', 'clusterGOElite=', 'geneSetName=', 'runICGS=', 'IDtype=',
'CountsCutoff=', 'FoldDiff=', 'SamplesDiffering=',
'removeOutliers=',
'featurestoEvaluate=', 'restrictBy=', 'ExpressionCutoff=',
'excludeCellCycle=', 'runKallisto=', 'fastq_dir=', 'FDR='])
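### Every long option above ends with '=', so getopt requires a value for each flag; a flag supplied without a value raises getopt.GetoptError, reported in the except block below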
except Exception:
print traceback.format_exc()
print "There is an error in the supplied command-line arguments (each flag requires an argument)"
sys.exit()
for opt, arg in options:
#print [opt, arg]
if opt == '--species': species = arg
elif opt == '--arraytype':
if array_type != None: additional_array_types.append(arg)
else:
array_type = arg
platform = array_type
if specific_array_type == None: specific_array_type = platform
elif opt == '--exonMapFile':
perform_alt_analysis = 'yes' ### Perform alternative exon analysis
exonMapFile = arg
elif opt == '--specificArray':
specific_array_type = arg ### e.g., hGlue
elif opt == '--celdir':
cel_file_dir = arg
elif opt == '--bedDir':
cel_file_dir = arg
elif opt == '--FEdir':
cel_file_dir = arg
elif opt == '--expdir':
input_exp_file = arg
elif opt == '--statdir':
input_stats_file = arg
elif opt == '--filterdir':
input_filtered_dir = arg
elif opt == '--groupdir':
groups_file = arg
elif opt == '--compdir':
comps_file = arg
elif opt == '--cdfdir':
input_cdf_file = arg
elif opt == '--csvdir':
input_annotation_file = arg
elif opt == '--expname':
exp_name = arg
elif opt == '--output':
output_dir = arg
elif opt == '--vendor':
manufacturer = arg
elif opt == '--runICGS':
runICGS = True
elif opt == '--IDtype':
IDtype = arg
elif opt == '--ignoreBuiltSpecies':
ignore_built_species = arg
elif opt == '--platform':
if array_type != None: additional_array_types.append(arg)
else:
array_type = arg
platform = array_type
if specific_array_type == None: specific_array_type = platform
elif opt == '--update':
update_dbs = 'yes'
update_method.append(arg)
elif opt == '--version':
ensembl_version = arg
elif opt == '--compendiumPlatform':
compendiumPlatform = arg ### platform for which the LineageProfiler compendium is built on
elif opt == '--force':
force = arg
elif opt == '--input':
input_file_dir = arg
pipelineAnalysis = False ### If this option is entered, only perform the indicated analysis
elif opt == '--image':
image_export.append(arg)
elif opt == '--wpid':
wpid = arg
elif opt == '--mod':
mod = arg
elif opt == '--runKallisto':
if arg == 'yes' or string.lower(arg) == 'true':
runKallisto = True
elif opt == '--fastq_dir':
input_fastq_dir = arg
elif opt == '--additional':
if additional_resources[0] == None:
additional_resources = []
additional_resources.append(arg)
else:
additional_resources.append(arg)
elif opt == '--transpose':
if arg == 'True': transpose = True
elif opt == '--runLineageProfiler': ###Variable declared here and later (independent analysis here or pipelined with other analyses later)
run_lineage_profiler = arg
elif opt == '--compendiumType': ### protein-coding, ncRNA, or exon
compendiumType = arg
elif opt == '--denom':
denom_file_dir = arg ### Indicates that GO-Elite is run independent from AltAnalyze itself
elif opt == '--accessoryAnalysis':
accessoryAnalysis = arg
elif opt == '--channelToExtract':
channel_to_extract = arg
elif opt == '--genesToReport':
genesToReport = int(arg)
elif opt == '--correlateAll':
correlateAll = True
elif opt == '--direction':
direction = arg
elif opt == '--logexp':
expression_data_format = arg
elif opt == '--geneRPKM':
rpkm_threshold = arg
elif opt == '--multiThreading' or opt == '--multiProcessing':
multiThreading = arg
if multiThreading == 'yes': multiThreading = True
elif 'rue' in multiThreading: multiThreading = True
else: multiThreading = False
if 'other' in manufacturer or 'Other' in manufacturer:
### For other IDs
systemToUse = array_type
if array_type == None:
print 'Please indicate an ID type as --platform when setting vendor equal to "Other IDs"'
sys.exit()
array_type = "3'array"
if array_type == 'RNASeq': manufacturer = array_type
if platformType == None: platformType = array_type
if perform_alt_analysis == 'yes':
if platform == "3'array":
mappedExonAnalysis = True
cel_file_dir = input_exp_file
exp_name = export.findFilename(input_exp_file)
exp_name = string.replace(exp_name, '.txt', '')
exp_name = string.replace(exp_name, 'exp.', '')
input_exp_file = ''
### To perform alternative exon analyses for platforms without a dedicated database, appropriate mapping info or array-type data must be provided
### (will need to perform downstream testing for unsupported Affymetrix exon, gene and junction arrays)
if exonMapFile == None and specific_array_type == None and cel_file_dir == '':
print_out = "\nUnable to run!!! Please designate either a specific platfrom (e.g., --specificArray hgU133_2), select CEL files, or an "
print_out += "exon-level mapping file location (--exonMapFile C:/mapping.txt) to perform alternative exon analyses for this platform."
### Will need to check here to see if the platform is supported (local or online files) OR wait until an error is encountered later
######## Perform analyses independent from AltAnalyze database centric analyses that require additional parameters
if len(image_export) > 0 or len(accessoryAnalysis) > 0 or runICGS:
if runICGS:
#python AltAnalyze.py --runICGS yes --expdir "/Users/saljh8/Desktop/demo/Myoblast/ExpressionInput/exp.myoblast.txt" --platform "3'array" --species Hs --GeneSetSelection BioMarkers --PathwaySelection Heart --column_method hopach --rho 0.4 --ExpressionCutoff 200 --justShowTheseIDs "NKX2-5 T TBX5" --FoldDiff 10 --SamplesDiffering 3 --excludeCellCycle conservative
try:
species = species
except Exception:
print 'Please designate a species before continuing (e.g., --species Hs)'
try:
array_type = array_type
except Exception:
print 'Please designate a platform before continuing (e.g., --platform exon)'
if len(cel_file_dir) > 0:
values = species, exp_file_location_db, dataset, mlp_instance
StatusWindow(
values, 'preProcessRNASeq'
) ### proceed to run the full discovery analysis here!!!
else:
if len(input_exp_file) > 0: pass
else:
print 'Please indicate a source folder or expression file (e.g., --expdir /dataset/singleCells.txt)'
if array_type == 'Other' or 'Other' in array_type:
if ':' in array_type:
array_type, IDtype = string.split(array_type, ':')
array_type = "3'array"
if IDtype == None: IDtype = manufacturer
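### Default ICGS/clustering parameters; any matching flags parsed in the loop below override these values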
row_method = 'weighted'
column_method = 'average'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'yellow_black_blue'
contrast = 3
vendor = manufacturer
GeneSelection = ''
PathwaySelection = ''
GeneSetSelection = 'None Selected'
excludeCellCycle = True
rho_cutoff = 0.4
restrictBy = 'protein_coding'
featurestoEvaluate = 'Genes'
ExpressionCutoff = 1
CountsCutoff = 1
FoldDiff = 2
SamplesDiffering = 3
JustShowTheseIDs = ''
removeOutliers = False
PathwaySelection = []
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--row_method':
row_method = arg
if row_method == 'None': row_method = None
elif opt == '--column_method':
column_method = arg
if column_method == 'None': column_method = None
elif opt == '--row_metric': row_metric = arg
elif opt == '--column_metric': column_metric = arg
elif opt == '--color_gradient': color_gradient = arg
elif opt == '--GeneSetSelection': GeneSetSelection = arg
elif opt == '--PathwaySelection': PathwaySelection.append(arg)
elif opt == '--genes': GeneSelection = arg
elif opt == '--ExpressionCutoff': ExpressionCutoff = arg
elif opt == '--normalization': normalization = arg
elif opt == '--justShowTheseIDs': justShowTheseIDs = arg
elif opt == '--rho': rho_cutoff = float(arg)
elif opt == '--clusterGOElite': clusterGOElite = float(arg)
elif opt == '--CountsCutoff': CountsCutoff = int(float(arg))
elif opt == '--FoldDiff': FoldDiff = int(float(arg))
elif opt == '--SamplesDiffering':
SamplesDiffering = int(float(arg))
elif opt == '--removeOutliers':
removeOutliers = arg
elif opt == '--featurestoEvaluate':
featurestoEvaluate = arg
elif opt == '--restrictBy':
restrictBy = arg
elif opt == '--excludeCellCycle':
excludeCellCycle = arg
if excludeCellCycle == 'False' or excludeCellCycle == 'no':
excludeCellCycle = False
elif excludeCellCycle == 'True' or excludeCellCycle == 'yes' or excludeCellCycle == 'conservative':
excludeCellCycle = True
elif opt == '--contrast':
try:
contrast = float(arg)
except Exception:
print '--contrast not a valid float'
sys.exit()
elif opt == '--vendor':
vendor = arg
elif opt == '--display':
if arg == 'yes':
display = True
elif arg == 'True':
display = True
else:
display = False
if len(PathwaySelection) == 0: PathwaySelection = ''
if len(GeneSetSelection) > 0 or GeneSelection != '':
gsp = UI.GeneSelectionParameters(species, array_type, vendor)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setJustShowTheseIDs(justShowTheseIDs)
gsp.setNormalize('median')
gsp.setSampleDiscoveryParameters(
ExpressionCutoff, CountsCutoff, FoldDiff, SamplesDiffering,
removeOutliers, featurestoEvaluate, restrictBy,
excludeCellCycle, column_metric, column_method, rho_cutoff)
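### gsp now bundles the gene-set selection, expression filters and correlation cutoff consumed by the ICGS sample-discovery workflow below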
import RNASeq
mlp_instance = mlp
if cel_file_dir != '':
expFile = cel_file_dir + '/ExpressionInput/' + 'exp.' + exp_name + '.txt'
elif input_exp_file != '':
if 'ExpressionInput' in input_exp_file:
expFile = input_exp_file
else:
### Copy over expression file to ExpressionInput
expdir2 = string.replace(input_exp_file, 'exp.', '')
root_dir = export.findParentDir(input_exp_file)
expFile = root_dir + '/ExpressionInput/exp.' + export.findFilename(
expdir2)
export.copyFile(input_exp_file, expFile)
global log_file
root_dir = export.findParentDir(expFile)
root_dir = string.replace(root_dir, '/ExpressionInput', '')
time_stamp = timestamp()
log_file = filepath(root_dir + 'AltAnalyze_report-' + time_stamp +
'.log')
log_report = open(log_file, 'w')
log_report.close()
sys.stdout = Logger('')
count = verifyFileLength(expFile[:-4] + '-steady-state.txt')
if count > 1:
expFile = expFile[:-4] + '-steady-state.txt'
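### e.g., exp.myoblast.txt -> exp.myoblast-steady-state.txt (the summarized steady-state expression file, when present)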
elif array_type == 'RNASeq':
### Indicates that the steady-state file doesn't exist. The exp. file may exist but could be junction-only, so re-build it from the bed files here
values = species, exp_file_location_db, dataset, mlp_instance
StatusWindow(
values, 'preProcessRNASeq'
) ### proceed to run the full discovery analysis here!!!
expFile = expFile[:-4] + '-steady-state.txt'
print[excludeCellCycle]
UI.RemotePredictSampleExpGroups(
expFile, mlp_instance, gsp, (species, array_type)
) ### proceed to run the full discovery analysis here!!!
sys.exit()
if 'WikiPathways' in image_export:
#python AltAnalyze.py --input /Users/test/input/criterion1.txt --image WikiPathways --mod Ensembl --species Hs --wpid WP536
if wpid == None:
print 'Please provide a valid WikiPathways ID (e.g., WP1234)'
sys.exit()
if species == None:
print 'Please provide a valid species ID for an installed database (to install: --update Official --species Hs --version EnsMart62Plus)'
sys.exit()
if input_file_dir == None:
print 'Please provide a valid file location for your input IDs (also needs to include system code and value column)'
sys.exit()
import WikiPathways_webservice
try:
print 'Attempting to output a WikiPathways colored image from user data'
print 'mod:', mod
print 'species_code:', species
print 'wpid:', wpid
print 'input GO-Elite ID file:', input_file_dir
graphic_link = WikiPathways_webservice.visualizePathwayAssociations(
input_file_dir, species, mod, wpid)
except Exception, e:
if 'force_no_matching_error' in traceback.format_exc():
print '\nUnable to run!!! None of the input IDs mapped to this pathway\n'
elif 'IndexError' in traceback.format_exc():
print '\nUnable to run!!! Input ID file does not have at least 3 columns, with the second column being system code\n'
elif 'ValueError' in traceback.format_exc():
print '\nUnable to run!!! Input ID file error. Please check that you do not have extra rows with no data\n'
elif 'source_data' in traceback.format_exc():
print '\nUnable to run!!! Input ID file does not contain a valid system code\n'
elif 'goelite' in traceback.format_exc():
print '\nUnable to run!!! A valid species database needs to first be installed. For example, run:'
print 'python AltAnalyze.py --update Official --species Hs --version EnsMart65\n'
else:
print traceback.format_exc()
print '\nError generating the pathway "%s"' % wpid, '\n'
try:
printout = 'Finished exporting visualized pathway to:', graphic_link[
'WP']
print printout, '\n'
except Exception:
None
sys.exit()
if 'MergeFiles' in accessoryAnalysis:
#python AltAnalyze.py --accessoryAnalysis MergeFiles --input "C:\file1.txt" --input "C:\file2.txt" --output "C:\tables"
files_to_merge = []
join_option = 'Intersection'
uniqueOnly = False
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--input': files_to_merge.append(arg)
if opt == '--join': join_option = arg
if opt == '--uniqueOnly': uniqueOnly = arg
if len(files_to_merge) < 2:
print 'Please designate two or more files to merge (--input)'
sys.exit()
UI.MergeFiles(files_to_merge, join_option, uniqueOnly, output_dir,
None)
sys.exit()
if 'IDTranslation' in accessoryAnalysis:
#python AltAnalyze.py --accessoryAnalysis IDTranslation --inputIDType Symbol --outputIDType RefSeq --input "C:\file1.txt" --species Hs
inputIDType = None
outputIDType = None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--inputIDType': inputIDType = arg
if opt == '--outputIDType': outputIDType = arg
if inputIDType == None or outputIDType == None:
print 'Please designate an input ID type and an output ID type (--inputIDType Ensembl --outputIDType Symbol)'
sys.exit()
if species == None:
print "Please enter a valide species (--species)"
sys.exit()
UI.IDconverter(input_file_dir, species, inputIDType, outputIDType,
None)
sys.exit()
if 'hierarchical' in image_export:
#python AltAnalyze.py --input "/Users/test/pluri.txt" --image hierarchical --row_method average --column_method single --row_metric cosine --column_metric euclidean --color_gradient red_white_blue --transpose False --PathwaySelection Apoptosis:WP254 --GeneSetSelection WikiPathways --species Hs --platform exon --display false
if input_file_dir == None:
print 'Please provide a valid file location for your input data matrix (must have an annotation row and an annotation column)'
sys.exit()
row_method = 'weighted'
column_method = 'average'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'red_black_sky'
contrast = 2.5
vendor = 'Affymetrix'
GeneSelection = ''
PathwaySelection = ''
GeneSetSelection = 'None Selected'
rho = None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--row_method':
row_method = arg
if row_method == 'None': row_method = None
elif opt == '--column_method':
column_method = arg
if column_method == 'None': column_method = None
elif opt == '--row_metric': row_metric = arg
elif opt == '--column_metric': column_metric = arg
elif opt == '--color_gradient': color_gradient = arg
elif opt == '--GeneSetSelection': GeneSetSelection = arg
elif opt == '--PathwaySelection': PathwaySelection = arg
elif opt == '--genes': GeneSelection = arg
elif opt == '--OntologyID': OntologyID = arg
elif opt == '--normalization': normalization = arg
elif opt == '--justShowTheseIDs': justShowTheseIDs = arg
elif opt == '--rho': rho = arg
elif opt == '--clusterGOElite': clusterGOElite = arg
elif opt == '--contrast':
try:
contrast = float(arg)
except Exception:
print '--contrast not a valid float'
sys.exit()
elif opt == '--vendor':
vendor = arg
elif opt == '--display':
if arg == 'yes':
display = True
elif arg == 'True':
display = True
else:
display = False
if len(GeneSetSelection) > 0 or GeneSelection != '':
gsp = UI.GeneSelectionParameters(species, array_type, vendor)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setOntologyID(OntologyID)
gsp.setTranspose(transpose)
gsp.setNormalize(normalization)
gsp.setJustShowTheseIDs(justShowTheseIDs)
try:
gsp.setClusterGOElite(clusterGOElite)
except Exception:
pass
if rho != None:
try:
float(rho)
gsp.setRhoCutoff(rho)
except Exception:
print 'Must enter a valid Pearson correlation cutoff (float)'
transpose = gsp ### this allows methods that don't transmit this object to also work
if row_method == 'no': row_method = None
if column_method == 'no': column_method = None
if len(GeneSetSelection) > 0:
if species == None:
print "Please enter a valide species (--species)"
sys.exit()
try:
files = unique.read_directory(input_file_dir + '/')
dir = input_file_dir
for file in files:
filename = dir + '/' + file
UI.createHeatMap(filename,
row_method,
row_metric,
column_method,
column_metric,
color_gradient,
transpose,
contrast,
None,
display=display)
except Exception:
UI.createHeatMap(input_file_dir,
row_method,
row_metric,
column_method,
column_metric,
color_gradient,
transpose,
contrast,
None,
display=display)
#import clustering; clustering.outputClusters([input_file_dir],[])
sys.exit()
if 'PCA' in image_export:
#AltAnalyze.py --input "/Users/nsalomonis/Desktop/folds.txt" --image PCA --plotType 3D --display True --labels yes
#--algorithm "t-SNE"
include_labels = 'yes'
plotType = '2D'
pca_algorithm = 'SVD'
geneSetName = None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--labels':
include_labels = arg
if include_labels == 'True':
include_labels = 'yes'
if opt == '--plotType': plotType = arg
if opt == '--algorithm': pca_algorithm = arg
if opt == '--geneSetName': geneSetName = arg
if opt == '--zscore':
if arg == 'yes' or arg == 'True' or arg == 'true':
zscore = True
else:
zscore = False
if opt == '--display':
if arg == 'yes' or arg == 'True' or arg == 'true':
display = True
if input_file_dir == None:
print 'Please provide a valid file location for your input data matrix (must have an annotation row and an annotation column)'
sys.exit()
UI.performPCA(input_file_dir,
include_labels,
pca_algorithm,
transpose,
None,
plotType=plotType,
display=display,
geneSetName=geneSetName,
species=species,
zscore=zscore)
sys.exit()
if 'VennDiagram' in image_export:
# AltAnalyze.py --image "VennDiagram" --input "C:\file1.txt" --input "C:\file2.txt" --output "C:\graphs"
files_to_merge = []
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--input': files_to_merge.append(arg)
if opt == '--display':
if arg == 'yes' or arg == 'True' or arg == 'true':
display = True
if len(files_to_merge) < 2:
print 'Please designate two or more files to compare (--input)'
sys.exit()
UI.vennDiagram(files_to_merge, output_dir, None, display=display)
sys.exit()
if 'AltExonViewer' in image_export:
#python AltAnalyze.py --image AltExonViewer --AltResultsDir "C:\CP-hESC" --genes "ANXA7 FYN TCF3 NAV2 ETS2 MYLK ATP2A2" --species Hs --platform exon --dataType "splicing-index"
genes = []
show_introns = 'no'
geneFileDir = ''
analysisType = 'plot'
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--genes': genes = arg
elif opt == '--dataType': data_type = arg
elif opt == '--showIntrons': show_introns = arg
elif opt == '--AltResultsDir': altresult_dir = arg
elif opt == '--geneFileDir': geneFileDir = arg
elif opt == '--analysisType': analysisType = arg
if altresult_dir == None:
print 'Please include the location of the AltResults directory (--AltResultsDir)'
sys.exit()
if len(genes) == 0 and len(geneFileDir) == 0:
print "Please indicate the genes (--genes) or gene file location (--geneFileDir) for AltExonViewer"
sys.exit()
if species == None:
print "Please enter a valide species (--species)"
sys.exit()
if array_type == None:
print "Please enter a valide platform (--platform)"
sys.exit()
if 'AltResults' not in altresult_dir:
altresult_dir += '/AltResults/'
if 'Sashimi' in analysisType:
altresult_dir = string.split(altresult_dir, 'AltResults')[0]
genes = geneFileDir
geneFileDir = ''
elif 'raw' in data_type: ### Switch directories if expression
altanalyze_results_folder = string.replace(
altresult_dir, 'AltResults', 'ExpressionInput')
altresult_dir = UI.getValidExpFile(altanalyze_results_folder)
if len(altresult_dir) == 0:
print 'No valid expression input file (e.g., exp.MyExperiment.txt) found in', altanalyze_results_folder
sys.exit()
else:
altanalyze_results_folder = altresult_dir + '/RawSpliceData/' + species
try:
altresult_dir = UI.getValidSplicingScoreFile(
altanalyze_results_folder)
except Exception, e:
print "No files found in: " + altanalyze_results_folder
sys.exit()
if len(geneFileDir) > 0:
try:
genes = UI.importGeneList(
geneFileDir) ### list of gene IDs or symbols
except Exception:
### Can occur if a directory of files is selected
try:
files = unique.read_directory(geneFileDir + '/')
gene_string = ''
for file in files:
if '.txt' in file:
filename = geneFileDir + '/' + file
genes = UI.importGeneList(
filename) ### list of gene IDs or symbols
gene_string = gene_string + ',' + genes
print 'Imported genes from', file, '\n'
#print [altresult_dir];sys.exit()
UI.altExonViewer(species, platform, altresult_dir,
gene_string, show_introns,
analysisType, False)
except Exception:
pass
sys.exit()
if len(genes) == 0:
print 'Please list one or more genes (--genes "ANXA7 FYN TCF3 NAV2 ETS2 MYLK ATP2A2")'
sys.exit()
try:
UI.altExonViewer(species, platform, altresult_dir, genes,
show_introns, analysisType, False)
except Exception:
print traceback.format_exc()
sys.exit()
if 'network' in image_export:
#AltAnalyze.py --image network --species Hs --output "C:\GSE9440_RAW" --PathwaySelection Apoptosis:WP254 --GeneSetSelection WikiPathways
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--update_interactions': update_interactions = arg
elif opt == '--includeExpIDs': includeExpIDs = arg
elif opt == '--degrees': degrees = arg
elif opt == '--genes':
Genes = arg
inputType = 'IDs'
elif opt == '--inputType':
inputType = arg
elif opt == '--interactionDirs':
interactionDirs.append(arg)
elif opt == '--GeneSetSelection':
GeneSetSelection = arg
elif opt == '--PathwaySelection':
PathwaySelection = arg
elif opt == '--OntologyID':
OntologyID = arg
elif opt == '--display':
display = arg
if update_interactions == 'yes': update_interactions = True
else: update_interactions = False
if input_file_dir == None: pass
elif len(input_file_dir) == 0: input_file_dir = None
if len(input_exp_file) == 0: input_exp_file = None
if len(interactionDirs) == 0: interactionDirs = ['WikiPathways']
if interactionDirs == ['all']:
interactionDirs = ['WikiPathways', 'KEGG', 'BioGRID',
'TFTargets', 'common-microRNATargets',
'all-microRNATargets', 'common-DrugBank',
'all-DrugBank']
if interactionDirs == ['main']:
interactionDirs = ['WikiPathways', 'KEGG', 'BioGRID',
'TFTargets']
if interactionDirs == ['confident']:
interactionDirs = ['WikiPathways', 'KEGG', 'TFTargets']
if len(Genes) == 0: Genes = None
if output_dir == None: pass
elif len(output_dir) == 0: output_dir = None
if GeneSetSelection == 'None Selected':
GeneSetSelection = None
if includeExpIDs == 'yes': includeExpIDs = True
else: includeExpIDs = False
gsp = UI.GeneSelectionParameters(species, array_type, manufacturer)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(Genes)
gsp.setOntologyID(OntologyID)
gsp.setIncludeExpIDs(includeExpIDs)
root = ''
if species == None:
print 'Please designate a species (--species).'
sys.exit()
if output_dir == None:
print 'Please designate an output directory (--output)'
sys.exit()
if input_file_dir != None:
if '.txt' in input_file_dir or '.sif' in input_file_dir:
UI.networkBuilder(input_file_dir, inputType, output_dir,
interactionDirs, degrees, input_exp_file,
gsp, root)
else:
parent_dir = input_file_dir
dir_list = read_directory(parent_dir)
for file in dir_list:
input_file_dir = parent_dir + '/' + file
try:
UI.networkBuilder(input_file_dir, inputType,
output_dir, interactionDirs,
degrees, input_exp_file, gsp,
root)
except Exception:
print file, 'failed to produce network'
else:
UI.networkBuilder(None, inputType, output_dir, interactionDirs,
degrees, input_exp_file, gsp, root)
sys.exit()
########## Begin database dependent AltAnalyze workflows
if ensembl_version != 'current' and 'markers' not in update_method:
dbversion = string.replace(ensembl_version, 'EnsMart', '')
UI.exportDBversion('EnsMart' + dbversion)
gene_database = unique.getCurrentGeneDatabaseVersion()
print 'Current database version:', gene_database
if array_type == None and update_dbs != 'yes' and denom_file_dir == None:
print "Please specify an array or data type (e.g., RNASeq, exon, gene, junction, AltMouse, 3'array)."
sys.exit()
if 'archive' in update_method:
###
print 'Archiving databases', ensembl_version
try:
archive_dir = 'ArchiveDBs/EnsMart' + ensembl_version + '/archive'
export.createDirPath(filepath(archive_dir))
except Exception:
null = [] ### directory already exists
dirs = unique.read_directory('/ArchiveDBs/EnsMart' + ensembl_version)
print len(dirs), dirs
import shutil
for species_dir in dirs:
try:
#print '/ArchiveDBs/EnsMart'+ensembl_version+'/'+species_dir+'/'+species_dir+'_RNASeq.zip'
src = filepath('ArchiveDBs/EnsMart' + ensembl_version + '/' +
species_dir + '/' + species_dir + '_RNASeq.zip')
dstn = filepath('ArchiveDBs/EnsMart' + ensembl_version +
'/archive/' + species_dir + '_RNASeq.zip')
#export.copyFile(src, dstn)
shutil.move(src, dstn)
try:
srcj = string.replace(src, 'RNASeq.', 'junction.')
dstnj = string.replace(dstn, 'RNASeq.', 'junction.')
shutil.move(srcj, dstnj)
except Exception:
null = []
try:
src = string.replace(src, '_RNASeq.', '.')
dstn = string.replace(dstn, '_RNASeq.', '.')
shutil.move(src, dstn)
except Exception:
null = []
except Exception:
null = []
sys.exit()
if update_dbs == 'yes' and 'Official' not in update_method:
if 'cleanup' in update_method:
existing_species_dirs = unique.read_directory(
'/AltDatabase/ensembl')
print 'Deleting EnsemblSQL directory for all species, ensembl version', ensembl_version
for species in existing_species_dirs:
export.deleteFolder('AltDatabase/ensembl/' + species +
'/EnsemblSQL')
existing_species_dirs = unique.read_directory('/AltDatabase')
print 'Deleting SequenceData directory for all species, ensembl version', ensembl_version
for species in existing_species_dirs:
export.deleteFolder('AltDatabase/' + species + '/SequenceData')
print 'Finished...exiting'
sys.exit()
if 'package' not in update_method and 'markers' not in update_method:
### Example:
### python AltAnalyze.py --species all --arraytype all --update all --version 60
### tr -d \\r < AltAnalyze.py > AltAnalyze_new.py
### chmod +x AltAnalyze_new.py
### nohup ./AltAnalyze.py --update all --species Mm --arraytype gene --arraytype exon --version 60 2>&1 > nohup_v60_Mm.txt
if array_type == 'all' and (species == 'Mm' or species == 'all'):
array_type = ['AltMouse', 'exon', 'gene', 'junction', 'RNASeq']
elif array_type == 'all' and (species == 'Hs' or species == 'Rn'):
array_type = ['exon', 'gene', 'junction', 'RNASeq']
else:
array_type = [array_type] + additional_array_types
if species == 'all' and 'RNASeq' not in array_type:
species = selected_species ### just analyze the species for which multiple platforms are supported
if species == 'selected':
species = selected_species ### just analyze the species for which multiple platforms are supported
elif species == 'all':
all_supported_names = {}
all_species_names = {}
species_names = UI.getSpeciesInfo()
for species in species_names:
all_supported_names[species_names[species]] = species
import EnsemblSQL
child_dirs, ensembl_species, ensembl_versions = EnsemblSQL.getCurrentEnsemblSpecies(
'release-' + ensembl_version)
for ens_species in ensembl_species:
ens_species = string.replace(ens_species, '_', ' ')
if ens_species in all_supported_names:
all_species_names[all_supported_names[
ens_species]] = []
del all_species_names['Hs']
del all_species_names['Mm']
del all_species_names['Rn']
"""
del all_species_names['Go']
del all_species_names['Bt']
del all_species_names['Sc']
del all_species_names['Ss']
del all_species_names['Pv']
del all_species_names['Pt']
del all_species_names['La']
del all_species_names['Tt']
del all_species_names['Tr']
del all_species_names['Ts']
del all_species_names['Pb']
del all_species_names['Pc']
del all_species_names['Ec']
del all_species_names['Tb']
del all_species_names['Tg']
del all_species_names['Dn']
del all_species_names['Do']
del all_species_names['Tn']
del all_species_names['Dm']
del all_species_names['Oc']
del all_species_names['Og']
del all_species_names['Fc']
del all_species_names['Dr']
del all_species_names['Me']
del all_species_names['Cp']
del all_species_names['Tt']
del all_species_names['La']
del all_species_names['Tr']
del all_species_names['Ts']
del all_species_names['Et'] ### No alternative isoforms?
del all_species_names['Pc']
del all_species_names['Tb']
del all_species_names['Fc']
del all_species_names['Sc']
del all_species_names['Do']
del all_species_names['Dn']
del all_species_names['Og']
del all_species_names['Ga']
del all_species_names['Me']
del all_species_names['Ml']
del all_species_names['Mi']
del all_species_names['St']
del all_species_names['Sa']
del all_species_names['Cs']
del all_species_names['Vp']
del all_species_names['Ch']
del all_species_names['Ee']
del all_species_names['Ac']"""
sx = []
all_species_names2 = [
] ### Ensure that the core selected species are run first
for species in selected_species:
if species in all_species_names: sx.append(species)
for species in all_species_names:
if species not in selected_species:
all_species_names2.append(species)
all_species_names = sx + all_species_names2
species = all_species_names
else:
species = [species]
update_uniprot = 'no'
update_ensembl = 'no'
update_probeset_to_ensembl = 'no'
update_domain = 'no'
update_miRs = 'no'
genomic_build = 'new'
update_miR_seq = 'yes'
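### Flags controlling which AltDatabase build steps run; '--update all' enables every step below, otherwise individual steps are switched on by name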
if 'all' in update_method:
update_uniprot = 'yes'
update_ensembl = 'yes'
update_probeset_to_ensembl = 'yes'
update_domain = 'yes'
update_miRs = 'yes'
if 'UniProt' in update_method: update_uniprot = 'yes'
if 'Ensembl' in update_method: update_ensembl = 'yes'
if 'Probeset' in update_method or 'ExonAnnotations' in update_method:
update_probeset_to_ensembl = 'yes'
if 'Domain' in update_method:
update_domain = 'yes'
try:
from Bio import Entrez #test this
except Exception:
print 'The dependent module Bio is not installed or not accessible through the default python interpreter. Exiting AltAnalyze.'
sys.exit()
if 'miRBs' in update_method or 'miRBS' in update_method:
update_miRs = 'yes'
if 'NewGenomeBuild' in update_method: genomic_build = 'new'
if 'current' in ensembl_version:
print "Please specify an Ensembl version number (e.g., 60) before proceeding with the update."
sys.exit()
try:
force = force ### Variable is not declared otherwise
except Exception:
force = 'yes'
print 'force:', force
existing_species_dirs = {}
update_all = 'no' ### We don't pass this as yes, in order to skip certain steps when multiple array types are analyzed (others are specified above)
try:
print "Updating AltDatabase the following array_types", string.join(
array_type), "for the species", string.join(species)
except Exception:
print 'Please designate a valid platform/array_type (e.g., exon) and species code (e.g., Mm).'
for specific_species in species:
for platform_name in array_type:
if platform_name == 'AltMouse' and specific_species == 'Mm':
proceed = 'yes'
elif platform_name == 'exon' or platform_name == 'gene':
import ExonArrayEnsemblRules
#### Check to see if the probeset.csv file is present
#try: probeset_transcript_file = ExonArrayEnsemblRules.getDirectoryFiles('/AltDatabase/'+specific_species+'/'+platform_name)
#except Exception: print "Affymetrix probeset.csv anotation file is not found. You must save this to",'/AltDatabase/'+specific_species+'/'+platform_name,'before updating (unzipped).'; sys.exit()
proceed = 'yes'
elif platform_name == 'junction' and (
specific_species == 'Hs' or
specific_species == 'Mm'):
proceed = 'yes'
elif platform_name == 'RNASeq':
proceed = 'yes'
else:
proceed = 'no'
if proceed == 'yes':
print "Analyzing", specific_species, platform_name
if (platform_name !=
array_type[0]) and len(species) == 1:
update_uniprot = 'no'
update_ensembl = 'no'
update_miR_seq = 'no' ### Don't need to do this twice in a row
print 'Skipping ensembl, uniprot and mir-sequence file import updates since already completed for this species', array_type, platform_name
if ignore_built_species == 'yes': ### Useful for when building all species for a new database build
existing_species_dirs = unique.read_directory(
'/AltDatabase/ensembl'
) ### call this here to update with every species - if running multiple instances
if specific_array_type != None and specific_array_type != platform_name:
platform_name += '|' + specific_array_type ### For the hGlue vs. JAY arrays
if specific_species not in existing_species_dirs: ### Useful when running multiple instances of AltAnalyze to build all species
print 'update_ensembl', update_ensembl
print 'update_uniprot', update_uniprot
print 'update_probeset_to_ensembl', update_probeset_to_ensembl
print 'update_domain', update_domain
print 'update_miRs', update_miRs
update.executeParameters(
specific_species, platform_name, force,
genomic_build, update_uniprot, update_ensembl,
update_probeset_to_ensembl, update_domain,
update_miRs, update_all, update_miR_seq,
ensembl_version)
else:
print 'ignoring', specific_species
sys.exit()
if 'package' in update_method:
### Example: python AltAnalyze.py --update package --species all --platform all --version 65
if ensembl_version == 'current':
print '\nPlease specify version of the database to package (e.g., --version 60).'
sys.exit()
ensembl_version = 'EnsMart' + ensembl_version
### Get all possible species
species_names = UI.getSpeciesInfo()
possible_species = {}
possible_species = species_names
possible_arrays = ['exon', 'gene', 'junction', 'AltMouse', 'RNASeq']
try:
if species == 'all': possible_species = possible_species
elif species == 'selected': possible_species = selected_species
else: possible_species = [species]
except Exception:
species = possible_species
if array_type == None or array_type == 'all':
possible_arrays = possible_arrays
else:
possible_arrays = [array_type] + additional_array_types
species_to_package = {}
dirs = unique.read_directory('/AltDatabase/' + ensembl_version)
#print possible_arrays, possible_species; sys.exit()
for species_code in dirs:
if species_code in possible_species:
array_types = unique.read_directory(
'/AltDatabase/' + ensembl_version + '/' + species_code)
for arraytype in array_types:
if arraytype in possible_arrays:
if species_code in possible_species:
array_types = unique.read_directory(
'/AltDatabase/' + ensembl_version + '/' +
species_code)
try:
species_to_package[species_code].append(
arraytype)
except Exception:
species_to_package[species_code] = [arraytype]
species_to_package = eliminate_redundant_dict_values(
species_to_package)
for species in species_to_package:
files_to_copy = [species + '_Ensembl_domain_aligning_probesets.txt'
]
files_to_copy += [
species + '_Ensembl_indirect_domain_aligning_probesets.txt'
]
files_to_copy += [species + '_Ensembl_probesets.txt']
files_to_copy += [species + '_Ensembl_exons.txt']
#files_to_copy+=[species+'_Ensembl_junctions.txt']
files_to_copy += [species + '_exon_core.mps']
files_to_copy += [species + '_exon_extended.mps']
files_to_copy += [species + '_exon_full.mps']
files_to_copy += [species + '_gene_core.mps']
files_to_copy += [species + '_gene_extended.mps']
files_to_copy += [species + '_gene_full.mps']
files_to_copy += [species + '_gene-exon_probesets.txt']
files_to_copy += [species + '_probes_to_remove.txt']
files_to_copy += [species + '_probeset-probes.txt']
files_to_copy += [species + '_probeset_microRNAs_any.txt']
files_to_copy += [species + '_probeset_microRNAs_multiple.txt']
files_to_copy += ['probeset-domain-annotations-exoncomp.txt']
files_to_copy += ['probeset-protein-annotations-exoncomp.txt']
#files_to_copy+=['probeset-protein-dbase_exoncomp.txt']
files_to_copy += ['SEQUENCE-protein-dbase_exoncomp.txt']
files_to_copy += [species + '_Ensembl_junction_probesets.txt']
files_to_copy += [species + '_Ensembl_AltMouse_probesets.txt']
files_to_copy += [species + '_RNASeq-exon_probesets.txt']
files_to_copy += [species + '_junction-exon_probesets.txt']
files_to_copy += [species + '_junction_all.mps']
files_to_copy += [
'platform.txt'
] ### Indicates the specific platform for an array type (e.g., HJAY for junction or hGlue for junction)
files_to_copy += [species + '_junction_comps_updated.txt']
files_to_copy += ['MASTER-probeset-transcript.txt']
files_to_copy += ['AltMouse-Ensembl.txt']
files_to_copy += ['AltMouse_junction-comparisons.txt']
files_to_copy += ['AltMouse_gene_annotations.txt']
files_to_copy += ['AltMouse_annotations.txt']
common_to_copy = ['uniprot/' + species + '/custom_annotations.txt']
common_to_copy += ['ensembl/' + species + '/' + species +
'_Ensembl-annotations_simple.txt']
common_to_copy += ['ensembl/' + species + '/' + species +
'_Ensembl-annotations.txt']
common_to_copy += ['ensembl/' + species + '/' + species +
'_microRNA-Ensembl.txt']
common_to_copy += ['ensembl/' + species + '/' + species +
'_Ensembl_transcript-biotypes.txt']
common_to_copy += ['ensembl/' + species + '/' + species +
'_Ensembl_transcript-annotations.txt']
common_to_copy += searchDirectory(
"AltDatabase/ensembl/" + species + "/", 'Ensembl_Protein')
common_to_copy += searchDirectory(
"AltDatabase/ensembl/" + species + "/", 'ProteinFeatures')
common_to_copy += searchDirectory(
"AltDatabase/ensembl/" + species + "/", 'ProteinCoordinates')
supported_arrays_present = 'no'
for arraytype in selected_platforms:
if arraytype in species_to_package[species]:
supported_arrays_present = 'yes' #Hence a non-RNASeq platform is present
if supported_arrays_present == 'yes':
for file in common_to_copy:
ir = 'AltDatabase/' + ensembl_version + '/'
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version + '/'
export.copyFile(ir + file, er + file)
if 'RNASeq' in species_to_package[species]:
common_to_copy += ['ensembl/' + species + '/' + species +
'_Ensembl_junction.txt']
common_to_copy += ['ensembl/' + species + '/' + species +
'_Ensembl_exon.txt']
for file in common_to_copy:
ir = 'AltDatabase/' + ensembl_version + '/'
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version + '/'
if species in selected_species:
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/RNASeq/' + ensembl_version + '/' ### This allows us to build the package archive in a separate directory for selected species, so separate but overlapping content can be packaged
export.copyFile(ir + file, er + file)
for array_type in species_to_package[species]:
ir = 'AltDatabase/' + ensembl_version + '/' + species + '/' + array_type + '/'
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version + '/' + species + '/' + array_type + '/'
if array_type == 'junction':
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + array_type + '/'
if array_type == 'RNASeq' and species in selected_species:
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/RNASeq/' + ensembl_version + '/' + species + '/' + array_type + '/'
for file in files_to_copy:
if array_type == 'RNASeq':
file = string.replace(file, '_updated.txt', '.txt')
filt_file = string.replace(file, '.txt', '-filtered.txt')
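### Prefer the '-filtered' version of each database file; fall back to the unfiltered copy if no filtered version exists in the source directory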
try:
export.copyFile(ir + filt_file, er + filt_file)
export_path = er + filt_file
except Exception:
try:
export.copyFile(ir + file, er + file)
export_path = er + file
except Exception:
null = [] ### File not found in directory
if len(export_path) > 0:
if 'AltMouse' in export_path or 'probes_' in export_path:
export.cleanFile(export_path)
if array_type == 'junction':
subdir = '/exon/'
ir = 'AltDatabase/' + ensembl_version + '/' + species + '/' + array_type + subdir
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + array_type + subdir
for file in files_to_copy:
export_path = []
filt_file = string.replace(file, '.txt',
'-filtered.txt')
try:
export.copyFile(ir + filt_file, er + filt_file)
export_path = er + filt_file
except Exception:
try:
export.copyFile(ir + file, er + file)
export_path = er + file
except Exception:
null = [] ### File not found in directory
if array_type == 'RNASeq':
subdir = '/junction/'
ir = 'AltDatabase/' + ensembl_version + '/' + species + '/' + array_type + subdir
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version + '/' + species + '/' + array_type + subdir
if species in selected_species:
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/RNASeq/' + ensembl_version + '/' + species + '/' + array_type + subdir
for file in files_to_copy:
if 'SEQUENCE-protein-dbase' not in file and 'domain_aligning' not in file: ### This data is now combined into the main file
export_path = []
filt_file = string.replace(file, '.txt',
'-filtered.txt')
try:
export.copyFile(ir + filt_file, er + filt_file)
export_path = er + filt_file
except Exception:
try:
export.copyFile(ir + file, er + file)
export_path = er + file
except Exception:
null = [] ### File not found in directory
if 'RNASeq' in species_to_package[species]:
src = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version
dst = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + species + '_RNASeq.zip'
if species in selected_species:
src = 'ArchiveDBs/' + ensembl_version + '/' + species + '/RNASeq/' + ensembl_version
update.zipDirectory(src)
print 'Zipping', species, array_type, dst
os.rename(src + '.zip', dst)
if supported_arrays_present == 'yes':
src = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version
dst = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + species + '.zip'
update.zipDirectory(src)
print 'Zipping', species, array_type, dst
os.rename(src + '.zip', dst)
if 'junction' in species_to_package[species]:
src = 'ArchiveDBs/' + ensembl_version + '/' + species + '/junction'
dst = string.replace(src, 'junction',
species + '_junction.zip')
update.zipDirectory(src)
print 'Zipping', species + '_junction'
os.rename(src + '.zip', dst)
sys.exit()
if 'markers' in update_method:
if species == None or platform == None:
print "WARNING! A species and platform (e.g., exon, junction, 3'array or RNASeq) must be defined to identify markers."
sys.exit()
elif input_exp_file == '':
print "WARNING! A input expression file must be supplied (e.g., ExpressionOutput/DATASET.YourExperimentName.txt) for this analysis."
sys.exit()
else:
#python AltAnalyze.py --update markers --platform gene --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/Mm_Gene-TissueAtlas/ExpressionInput/exp.meta.txt"
#python AltAnalyze.py --update markers --platform gene --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/Mm_Gene-TissueAtlas/AltResults/RawSpliceData/Mm/splicing-index/meta.txt"
#python AltAnalyze.py --update markers --platform "3'array" --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/U133/ExpressionOutput/DATASET-meta.txt"
#python AltAnalyze.py --update markers --compendiumType ncRNA --platform "exon" --expdir "/home/socr/c/users2/salomoni/conklin/nsalomonis/normalization/Hs_Exon-TissueAtlas/ExpressionOutput/DATASET-meta.txt"
#python AltAnalyze.py --update markers --platform RNASeq --species Mm --geneRPKM 1 --expdir /Users/saljh8/Desktop/Grimes/MergedRSEM/DN-Analysis/ExpressionInput/exp.DN.txt --genesToReport 200
"""The markerFinder module:
1) takes an input ExpressionOutput file (DATASET.YourExperimentName.txt)
2) extracts group average expression and saves to AVERAGE.YourExperimentName.txt to the ExpressionOutput directory
3) re-imports AVERAGE.YourExperimentName.txt
4) correlates the average expression of each gene to an idealized profile to derive a Pearson correlation coefficient
5) identifies optimal markers based on these correlations for each tissue
6) exports an expression file with just these marker genes and tissues
This module can perform these analyses on protein coding or ncRNAs and can segregate the cell/tissue groups into clusters
when a group notation is present in the sample name (e.g., 0~Heart, 0~Brain, 1~Stem Cell)"""
import markerFinder
if 'AltResults' in input_exp_file and 'Clustering' not in input_exp_file:
### This applies to a file composed of exon-level normalized intensities (calculate average group expression)
markerFinder.getAverageExonExpression(species, platform,
input_exp_file)
if 'Raw' in input_exp_file:
group_exp_file = string.replace(input_exp_file, 'Raw',
'AVERAGE')
else:
group_exp_file = string.replace(
input_exp_file, 'FullDatasets', 'AVERAGE-FullDatasets')
altexon_correlation_file = markerFinder.analyzeData(
group_exp_file,
species,
platform,
compendiumType,
geneToReport=genesToReport,
correlateAll=correlateAll,
AdditionalParameters=fl)
markerFinder.getExprValsForNICorrelations(
platform, altexon_correlation_file, group_exp_file)
else:
### This applies to an ExpressionOutput DATASET file composed of gene expression values (averages already present)
import collections
try:
test_ordereddict = collections.OrderedDict()
except Exception:
try:
import ordereddict
except Exception:
### This is needed to re-order the average file so that the groups are sequentially ordered when analyzing clustered groups (0~)
print 'Warning!!!! To run markerFinder correctly call python version 2.7x or greater (python 3.x not supported)'
print 'Requires ordereddict (also can install the library ordereddict). To call 2.7: /usr/bin/python2.7'
sys.exit()
try:
output_dir = markerFinder.getAverageExpressionValues(
input_exp_file, platform
) ### Either way, make an average annotated file from the DATASET file
if 'DATASET' in input_exp_file:
group_exp_file = string.replace(input_exp_file,
'DATASET', 'AVERAGE')
else:
group_exp_file = (
input_exp_file, output_dir
) ### still analyze the primary sample
except Exception:
### Work around when performing this analysis on an alternative exon input cluster file
group_exp_file = input_exp_file
fl = UI.ExpressionFileLocationData(input_exp_file, '', '', '')
fl.setOutputDir(export.findParentDir(export.findParentDir(
input_exp_file)[:-1]))
if platform == 'RNASeq':
try:
rpkm_threshold = float(rpkm_threshold)
except Exception:
rpkm_threshold = 1.0
fl.setRPKMThreshold(rpkm_threshold)
try:
correlationDirection = direction ### correlate to a positive or inverse negative in silico artificial pattern
except Exception:
correlationDirection = 'up'
fl.setCorrelationDirection(correlationDirection)
if expression_data_format == 'non-log': logTransform = True
else: logTransform = False
if 'topSplice' in input_exp_file:
markerFinder.filterRNASeqSpliceEvents(species, platform,
fl, input_exp_file)
sys.exit()
if 'stats.' in input_exp_file:
markerFinder.filterDetectionPvalues(species, platform, fl,
input_exp_file)
sys.exit()
else:
markerFinder.analyzeData(group_exp_file,
species,
platform,
compendiumType,
geneToReport=genesToReport,
correlateAll=correlateAll,
AdditionalParameters=fl,
logTransform=logTransform)
try:
fl.setVendor(manufacturer)
except Exception:
print '--vendor not indicated by user... assuming Affymetrix'
fl.setVendor('Affymetrix')
try:
markerFinder.generateMarkerHeatMaps(
fl,
array_type,
convertNonLogToLog=logTransform)
except Exception:
print traceback.format_exc()
print 'Cell/Tissue marker classification analysis finished'
sys.exit()
if 'EnsMart' in ensembl_version:
UI.exportDBversion(ensembl_version)
annotation_found = verifyFile(input_annotation_file)
proceed = 'no'
if 'Official' not in update_method and denom_file_dir == None: ### If running GO-Elite independent of AltAnalyze (see below GO_Elite call)
try:
time_stamp = timestamp()
if len(cel_file_dir) > 0:
if output_dir == None:
output_dir = cel_file_dir
print "Setting output directory to the input path:", output_dir
if output_dir == None and len(input_filtered_dir) > 0:
output_dir = input_filtered_dir
if '/' == output_dir[-1] or '\\' in output_dir[-2]: null = []
else: output_dir += '/'
log_file = filepath(output_dir + 'AltAnalyze_report-' + time_stamp
+ '.log')
log_report = open(log_file, 'w')
log_report.close()
sys.stdout = Logger('')
except Exception, e:
print e
print 'Please designate an output directory before proceeding (e.g., --output "C:\RNASeq")'
sys.exit()
if mappedExonAnalysis:
array_type = 'RNASeq' ### Although this is not the actual platform, the resulting data will be treated as RNA-Seq with parameters most suitable for arrays
if len(external_annotation_dir) > 0:
run_from_scratch = 'Annotate External Results'
if channel_to_extract != None:
run_from_scratch = 'Process Feature Extraction files' ### Agilent Feature Extraction files as input for normalization
manufacturer = 'Agilent'
constitutive_source = 'Agilent'
expression_threshold = 'NA'
perform_alt_analysis = 'NA'
if len(input_filtered_dir) > 0:
run_from_scratch = 'Process AltAnalyze filtered'
proceed = 'yes'
if len(input_exp_file) > 0:
run_from_scratch = 'Process Expression file'
proceed = 'yes'
input_exp_file = string.replace(
input_exp_file, '\\', '/'
) ### Windows convention is \ rather than /, but works with /
ief_list = string.split(input_exp_file, '/')
if len(output_dir) > 0: parent_dir = output_dir
else: parent_dir = string.join(ief_list[:-1], '/')
exp_name = ief_list[-1]
if len(cel_file_dir) > 0 or runKallisto == True:
# python AltAnalyze.py --species Mm --platform RNASeq --runKallisto yes --expname test
if exp_name == None:
print "No experiment name defined. Please sumbit a name (e.g., --expname CancerComp) before proceeding."
sys.exit()
else:
dataset_name = 'exp.' + exp_name + '.txt'
exp_file_dir = filepath(output_dir + '/ExpressionInput/' +
dataset_name)
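### e.g., --output /data/run1 --expname CancerComp  ->  /data/run1/ExpressionInput/exp.CancerComp.txt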
if runKallisto:
run_from_scratch = 'Process RNA-seq reads'
elif run_from_scratch != 'Process Feature Extraction files':
run_from_scratch = 'Process CEL files'
proceed = 'yes'
if array_type == 'RNASeq': file_ext = '.BED'
else: file_ext = '.CEL'
try:
cel_files, cel_files_fn = UI.identifyCELfiles(
cel_file_dir, array_type, manufacturer)
except Exception, e:
print e
if mappedExonAnalysis: pass
else:
print "No", file_ext, "files found in the directory:", cel_file_dir
sys.exit()
if array_type != 'RNASeq':
cel_file_list_dir = UI.exportCELFileList(cel_files_fn,
cel_file_dir)
if groups_file != None and comps_file != None:
try:
export.copyFile(groups_file, string.replace(exp_file_dir,
'exp.', 'groups.'))
except Exception:
print 'Groups file already present in target location OR bad input path.'
try:
export.copyFile(comps_file, string.replace(exp_file_dir,
'exp.', 'comps.'))
except Exception:
print 'Comparison file already present in target location OR bad input path.'
groups_file = string.replace(exp_file_dir, 'exp.', 'groups.')
comps_file = string.replace(exp_file_dir, 'exp.', 'comps.')
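### e.g., exp.CancerComp.txt -> groups.CancerComp.txt and comps.CancerComp.txt in the same ExpressionInput directory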
if verifyGroupFileFormat(groups_file) == False:
print "\nWarning! The format of your groups file is not correct. For details, see:\nhttp://code.google.com/p/altanalyze/wiki/ManualGroupsCompsCreation\n"
sys.exit()
if array_type != 'RNASeq' and manufacturer != 'Agilent':
"""Determine if Library and Annotations for the array exist, if not, download or prompt for selection"""
try:
### For the HGLUE and HJAY arrays, this step is critical in order to have the command-line AltAnalyze download the appropriate junction database (determined from specific_array_type)
specific_array_types, specific_array_type = UI.identifyArrayType(
cel_files_fn)
num_array_types = len(specific_array_types)
except Exception:
null = []
num_array_types = 1
specific_array_type = None
if array_type == 'exon':
if species == 'Hs': specific_array_type = 'HuEx-1_0-st-v2'
if species == 'Mm': specific_array_type = 'MoEx-1_0-st-v2'
if species == 'Rn': specific_array_type = 'RaEx-1_0-st-v2'
elif array_type == 'gene':
if species == 'Hs':
specific_array_type = 'HuGene-1_0-st-v1'
if species == 'Mm':
specific_array_type = 'MoGene-1_0-st-v1'
if species == 'Rn':
specific_array_type = 'RaGene-1_0-st-v1'
elif array_type == 'AltMouse':
specific_array_type = 'altMouseA'
"""
elif array_type == 'junction':
if species == 'Mm': specific_array_type = 'MJAY'
if species == 'Hs': specific_array_type = 'HJAY'
"""
supproted_array_db = UI.importSupportedArrayInfo()
if specific_array_type in supproted_array_db and input_cdf_file == None and input_annotation_file == None:
sa = supproted_array_db[specific_array_type]
species = sa.Species()
array_type = sa.ArrayType()
input_cdf_file, input_annotation_file, bgp_file, clf_file = UI.getAffyFilesRemote(
specific_array_type, array_type, species)
else:
array_type = "3'array"
cdf_found = verifyFile(input_cdf_file)
annotation_found = verifyFile(input_annotation_file)
if input_cdf_file == None:
print[
specific_array_type
], 'not currently supported... Please provide CDF to AltAnalyze (commandline or GUI) or manually add to AltDatabase/affymetrix/LibraryFiles'
sys.exit()
if cdf_found != "found":
### Copy valid Library files to a local AltAnalyze database directory
input_cdf_file_lower = string.lower(input_cdf_file)
if array_type == "3'array":
if '.cdf' in input_cdf_file_lower:
clf_file = ''
bgp_file = ''
assinged = 'yes'
###Thus the CDF or PGF file was confirmed, so copy it over to AltDatabase
icf_list = string.split(input_cdf_file, '/')
cdf_short = icf_list[-1]
destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
destination_parent = osfilepath(destination_parent +
cdf_short)
info_list = input_cdf_file, destination_parent
UI.StatusWindow(info_list, 'copy')
else:
print "Valid CDF file not found. Exiting program."
sys.exit()
else:
if '.pgf' in input_cdf_file_lower:
###Check to see if the clf and bgp files are present in this directory
icf_list = string.split(input_cdf_file, '/')
parent_dir = string.join(icf_list[:-1], '/')
cdf_short = icf_list[-1]
clf_short = string.replace(cdf_short, '.pgf', '.clf')
kil_short = string.replace(
cdf_short, '.pgf', '.kil'
) ### Only applies to the Glue array
if array_type == 'exon' or array_type == 'junction':
bgp_short = string.replace(cdf_short, '.pgf',
'.antigenomic.bgp')
else:
bgp_short = string.replace(cdf_short, '.pgf',
'.bgp')
dir_list = read_directory(parent_dir)
if clf_short in dir_list and bgp_short in dir_list:
pgf_file = input_cdf_file
clf_file = string.replace(pgf_file, '.pgf', '.clf')
kil_file = string.replace(
pgf_file, '.pgf', '.kil'
) ### Only applies to the Glue array
if array_type == 'exon' or array_type == 'junction':
bgp_file = string.replace(pgf_file, '.pgf',
'.antigenomic.bgp')
else:
bgp_file = string.replace(pgf_file, '.pgf',
'.bgp')
assinged = 'yes'
###Thus the CDF or PGF file was confirmed, so copy it over to AltDatabase
destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
info_list = input_cdf_file, osfilepath(
destination_parent + cdf_short)
UI.StatusWindow(info_list, 'copy')
info_list = clf_file, osfilepath(destination_parent
+ clf_short)
UI.StatusWindow(info_list, 'copy')
info_list = bgp_file, osfilepath(destination_parent
+ bgp_short)
UI.StatusWindow(info_list, 'copy')
if 'Glue' in pgf_file:
info_list = kil_file, osfilepath(
destination_parent + kil_short)
UI.StatusWindow(info_list, 'copy')
if annotation_found != "found" and update_dbs == 'no' and array_type != 'RNASeq' and denom_file_dir == None and manufacturer != 'Agilent':
### Copy valid Annotation files to a local AltAnalyze database directory
try:
input_annotation_lower = string.lower(input_annotation_file)
if '.csv' in input_annotation_lower:
assinged = 'yes'
###Thus the CSV annotation file was confirmed, so copy it over to AltDatabase
icf_list = string.split(input_annotation_file, '/')
csv_short = icf_list[-1]
destination_parent = 'AltDatabase/affymetrix/' + species + '/'
info_list = input_annotation_file, filepath(destination_parent
+ csv_short)
UI.StatusWindow(info_list, 'copy')
except Exception:
print "No Affymetrix annotation file provided. AltAnalyze will use any .csv annotations files in AltDatabase/Affymetrix/" + species
if 'Official' in update_method and species != None:
proceed = 'yes'
elif array_type != None and species != None:
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults(
array_type, species)
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, goelite_permutations, mod, returnPathways, NA = goelite_defaults
use_direct_domain_alignments_only, microRNA_prediction_method = functional_analysis_defaults
analysis_method, additional_algorithms, filter_probeset_types, analyze_all_conditions, p_threshold, alt_exon_fold_variable, additional_score, permute_p_threshold, gene_expression_cutoff, remove_intronic_junctions, perform_permutation_analysis, export_NI_values, run_MiDAS, calculate_normIntensity_p, filter_for_AS = alt_exon_defaults
dabg_p, rpkm_threshold, gene_exp_threshold, exon_exp_threshold, exon_rpkm_threshold, expression_threshold, perform_alt_analysis, analyze_as_groups, expression_data_format, normalize_feature_exp, normalize_gene_data, avg_all_for_ss, include_raw_data, probability_statistic, FDR_statistic, batch_effects, marker_finder, visualize_qc_results, run_lineage_profiler, null = expr_defaults
elif denom_file_dir != None and species != None:
proceed = 'yes' ### Only run GO-Elite
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults(
'RNASeq', species) ### platform not relevant
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, goelite_permutations, mod, returnPathways, NA = goelite_defaults
else:
print 'No species defined. Please include the species code (e.g., "--species Hs") and array type (e.g., "--arraytype exon") before proceeding.'
print '\nAlso check the printed arguments above to see if there are formatting errors, such as bad quotes.'
sys.exit()
array_type_original = array_type
#if array_type == 'gene': array_type = "3'array"
for opt, arg in options:
if opt == '--runGOElite': run_GOElite = arg
elif opt == '--outputQCPlots': visualize_qc_results = arg
elif opt == '--runLineageProfiler': run_lineage_profiler = arg
elif opt == '--elitepermut': goelite_permutations = arg
elif opt == '--method': filter_method = arg
elif opt == '--zscore': z_threshold = arg
elif opt == '--elitepval': p_val_threshold = arg
elif opt == '--num': change_threshold = arg
elif opt == '--dataToAnalyze': resources_to_analyze = arg
elif opt == '--GEelitepval': ge_pvalue_cutoffs = arg
elif opt == '--GEelitefold': ge_fold_cutoffs = arg
elif opt == '--GEeliteptype': ge_ptype = arg
elif opt == '--ORAstat': ORA_algorithm = arg
elif opt == '--returnPathways': returnPathways = arg
elif opt == '--FDR': FDR_statistic = arg
elif opt == '--dabgp': dabg_p = arg
elif opt == '--rawexp': expression_threshold = arg
elif opt == '--geneRPKM': rpkm_threshold = arg
elif opt == '--exonRPKM': exon_rpkm_threshold = arg
elif opt == '--geneExp': gene_exp_threshold = arg
elif opt == '--exonExp': exon_exp_threshold = arg
elif opt == '--groupStat': probability_statistic = arg
elif opt == '--avgallss': avg_all_for_ss = arg
elif opt == '--logexp': expression_data_format = arg
elif opt == '--inclraw': include_raw_data = arg
elif opt == '--combat': batch_effects = arg
elif opt == '--runalt': perform_alt_analysis = arg
elif opt == '--altmethod': analysis_method = arg
elif opt == '--altp': p_threshold = arg
elif opt == '--probetype': filter_probeset_types = arg
elif opt == '--altscore': alt_exon_fold_variable = arg
elif opt == '--GEcutoff': gene_expression_cutoff = arg
elif opt == '--removeIntronOnlyJunctions':
remove_intronic_junctions = arg
elif opt == '--normCounts':
normalize_feature_exp = arg
elif opt == '--normMatrix':
normalize_gene_data = arg
elif opt == '--altpermutep':
permute_p_threshold = arg
elif opt == '--altpermute':
perform_permutation_analysis = arg
elif opt == '--exportnormexp':
export_NI_values = arg
elif opt == '--buildExonExportFile':
build_exon_bedfile = 'yes'
elif opt == '--runMarkerFinder':
marker_finder = arg
elif opt == '--calcNIp':
calculate_normIntensity_p = arg
elif opt == '--runMiDAS':
run_MiDAS = arg
elif opt == '--analyzeAllGroups':
analyze_all_conditions = arg
if analyze_all_conditions == 'yes':
analyze_all_conditions = 'all groups'
elif opt == '--GEcutoff':
use_direct_domain_alignments_only = arg
elif opt == '--mirmethod':
microRNA_prediction_method = arg
elif opt == '--ASfilter':
filter_for_AS = arg
elif opt == '--noxhyb':
xhyb_remove = arg
elif opt == '--returnAll':
return_all = arg
elif opt == '--annotatedir':
external_annotation_dir = arg
elif opt == '--additionalScore':
additional_score = arg
elif opt == '--additionalAlgorithm':
additional_algorithms = arg
elif opt == '--modelSize':
modelSize = arg
try:
modelSize = int(modelSize)
except Exception:
modelSize = None
elif opt == '--geneModel':
geneModel = arg # file location
                if geneModel == 'no' or 'alse' in geneModel:  ### 'alse' matches both 'False' and 'false'
geneModel = False
elif opt == '--reference':
custom_reference = arg
if run_from_scratch == 'Process Feature Extraction files': ### Agilent Feature Extraction files as input for normalization
normalize_gene_data = 'quantile' ### required for Agilent
proceed = 'yes'
if returnPathways == 'no' or returnPathways == 'None':
returnPathways = None
if pipelineAnalysis == False:
proceed = 'yes'
if proceed == 'yes':
species_codes = UI.remoteSpeciesInfo()
### Update Ensembl Databases
if 'Official' in update_method:
file_location_defaults = UI.importDefaultFileLocations()
db_versions_vendors, db_versions = UI.remoteOnlineDatabaseVersions(
)
array_codes = UI.remoteArrayInfo()
UI.getOnlineDBConfig(file_location_defaults, '')
if len(species) == 2:
species_names = UI.getSpeciesInfo()
species_full = species_names[species]
else:
species_full = species
print 'Species name to update:', species_full
db_version_list = []
for version in db_versions:
db_version_list.append(version)
db_version_list.sort()
db_version_list.reverse()
select_version = db_version_list[0]
db_versions[select_version].sort()
print 'Ensembl version', ensembl_version
if ensembl_version != 'current':
if len(ensembl_version) < 4:
ensembl_version = 'EnsMart' + ensembl_version
if ensembl_version not in db_versions:
try:
UI.getOnlineEliteDatabase(file_location_defaults,
ensembl_version, [species],
'no', '')
sys.exit()
except Exception:
                        ### This is only for databases that aren't officially released yet (prototyping)
print ensembl_version, 'is not a valid version of Ensembl, while', select_version, 'is.'
sys.exit()
else:
select_version = ensembl_version
### Export basic species information
sc = species
db_version = ensembl_version
if sc != None:
for ad in db_versions_vendors[db_version]:
if ad.SpeciesCodes() == species_full:
for array_system in array_codes:
ac = array_codes[array_system]
compatible_species = ac.SpeciesCodes()
if ac.Manufacturer() in ad.Manufacturer() and (
'expression' in ac.ArrayName() or
'RNASeq' in ac.ArrayName() or
'RNA-seq' in ac.ArrayName()):
if sc not in compatible_species:
compatible_species.append(sc)
ac.setSpeciesCodes(compatible_species)
UI.exportArrayInfo(array_codes)
if species_full not in db_versions[select_version]:
print db_versions[select_version]
print species_full, ': This species is not available for this version %s of the Official database.' % select_version
else:
update_goelite_resources = 'no' ### This is handled separately below
UI.getOnlineEliteDatabase(file_location_defaults,
ensembl_version, [species],
update_goelite_resources, '')
### Attempt to download additional Ontologies and GeneSets
if additional_resources[
                    0] != None: ### Indicates that the user requested the download of additional GO-Elite resources
try:
import GeneSetDownloader
print 'Adding supplemental GeneSet and Ontology Collections'
if 'all' in additional_resources:
additionalResources = UI.importResourceList(
                        ) ### Gets all additional possible resources
else:
additionalResources = additional_resources
GeneSetDownloader.buildAccessoryPathwayDatabases(
[species], additionalResources, 'yes')
print 'Finished adding additional analysis resources.'
except Exception:
print 'Download error encountered for additional Ontologies and GeneSets...\nplease try again later.'
status = UI.verifyLineageProfilerDatabases(species,
'command-line')
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
if array_type == 'junction' or array_type == 'RNASeq': ### Download junction databases
try:
UI.checkForLocalArraySupport(species, array_type,
specific_array_type,
'command-line')
except Exception:
print 'Please install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart65'
sys.exit()
status = UI.verifyLineageProfilerDatabases(species,
'command-line')
print "Finished adding database"
sys.exit()
try:
#print ge_fold_cutoffs,ge_pvalue_cutoffs, change_threshold, resources_to_analyze, goelite_permutations, p_val_threshold, z_threshold
change_threshold = int(change_threshold) - 1
goelite_permutations = int(goelite_permutations)
p_val_threshold = float(p_val_threshold)
z_threshold = float(z_threshold)
if ORA_algorithm == 'Fisher Exact Test':
goelite_permutations = 'FisherExactTest'
except Exception, e:
print e
            print 'One of the GO-Elite input values is inappropriate. Please review and correct.'
sys.exit()
if run_GOElite == None or run_GOElite == 'no':
            goelite_permutations = 'NA' ### This halts GO-Elite from running
else:
if output_dir == None:
print "\nPlease specify an output directory using the flag --output"
sys.exit()
try:
expression_threshold = float(expression_threshold)
except Exception:
expression_threshold = 1
try:
dabg_p = float(dabg_p)
except Exception:
dabg_p = 1 ### Occurs for RNASeq
if microRNA_prediction_method == 'two or more':
microRNA_prediction_method = 'multiple'
else:
microRNA_prediction_method = 'any'
### Run GO-Elite directly from user supplied input and denominator ID folders (outside of the normal workflows)
if run_GOElite == 'yes' and pipelineAnalysis == False and '--runGOElite' in arguments: # and denom_file_dir != None:
#python AltAnalyze.py --input "/Users/nsalomonis/Desktop/Mm_sample/input_list_small" --runGOElite yes --denom "/Users/nsalomonis/Desktop/Mm_sample/denominator" --mod Ensembl --species Mm
"""if denom_file_dir == None:
print 'Please include a folder containing a valid denominator ID list for the input ID sets.'; sys.exit()"""
try:
if output_dir == None:
### Set output to the same directory or parent if none selected
i = -1 ### 1 directory up
output_dir = string.join(
string.split(input_file_dir, '/')[:i], '/')
file_dirs = input_file_dir, denom_file_dir, output_dir
import GO_Elite
if ORA_algorithm == 'Fisher Exact Test':
goelite_permutations = 'FisherExactTest'
goelite_var = species, mod, goelite_permutations, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, returnPathways, file_dirs, ''
GO_Elite.remoteAnalysis(goelite_var, 'non-UI', Multi=mlp)
sys.exit()
except Exception:
print traceback.format_exc()
print "Unexpected error encountered. Please see log file."
sys.exit()
if run_lineage_profiler == 'yes':
status = UI.verifyLineageProfilerDatabases(species, 'command-line')
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
if run_lineage_profiler == 'yes' and input_file_dir != None and pipelineAnalysis == False and '--runLineageProfiler' in arguments:
#python AltAnalyze.py --input "/Users/arrays/test.txt" --runLineageProfiler yes --vendor Affymetrix --platform "3'array" --species Mm --output "/Users/nsalomonis/Merrill"
#python AltAnalyze.py --input "/Users/qPCR/samples.txt" --runLineageProfiler yes --geneModel "/Users/qPCR/models.txt"
if array_type == None:
print "Please include a platform name (e.g., --platform RNASeq)"
sys.exit()
if species == None:
print "Please include a species name (e.g., --species Hs)"
sys.exit()
try:
status = UI.verifyLineageProfilerDatabases(species,
'command-line')
except ValueError:
### Occurs due to if int(gene_database[-2:]) < 65: - ValueError: invalid literal for int() with base 10: ''
print '\nPlease install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart65\n'
sys.exit()
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
sys.exit()
try:
fl = UI.ExpressionFileLocationData('', '', '', '')
fl.setSpecies(species)
fl.setVendor(manufacturer)
fl.setPlatformType(array_type)
fl.setCompendiumType('protein_coding')
#fl.setCompendiumType('AltExon')
fl.setCompendiumPlatform(array_type)
try:
expr_input_dir
except Exception:
expr_input_dir = input_file_dir
UI.remoteLP(fl,
expr_input_dir,
manufacturer,
custom_reference,
geneModel,
None,
modelSize=modelSize)
#graphic_links = ExpressionBuilder.remoteLineageProfiler(fl,input_file_dir,array_type,species,manufacturer)
print_out = 'Lineage profiles and images saved to the folder "DataPlots" in the input file folder.'
print print_out
except Exception:
print traceback.format_exc()
                print_out = 'Analysis error occurred...\nplease see warning printouts.'
print print_out
sys.exit()
if array_type == 'junction' or array_type == 'RNASeq': ### Download junction databases
try:
UI.checkForLocalArraySupport(
species, array_type, specific_array_type, 'command-line')
except Exception:
print 'Please install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart65'
sys.exit()
probeset_types = ['full', 'core', 'extended']
if return_all == 'yes': ### Perform no alternative exon filtering when annotating existing FIRMA or MADS results
dabg_p = 1
expression_threshold = 1
p_threshold = 1
alt_exon_fold_variable = 1
gene_expression_cutoff = 10000
filter_probeset_types = 'full'
exon_exp_threshold = 1
rpkm_threshold = 0
gene_exp_threshold = 1
exon_rpkm_threshold = 0
if array_type == 'RNASeq':
gene_exp_threshold = 0
else:
if array_type != "3'array":
try:
p_threshold = float(p_threshold)
alt_exon_fold_variable = float(alt_exon_fold_variable)
expression_threshold = float(expression_threshold)
gene_expression_cutoff = float(gene_expression_cutoff)
dabg_p = float(dabg_p)
additional_score = float(additional_score)
gene_expression_cutoff = float(gene_expression_cutoff)
except Exception:
try:
gene_expression_cutoff = float(gene_expression_cutoff)
except Exception:
gene_expression_cutoff = 0
try:
rpkm_threshold = float(rpkm_threshold)
except Exception:
rpkm_threshold = -1
try:
exon_exp_threshold = float(exon_exp_threshold)
except Exception:
exon_exp_threshold = 0
try:
gene_exp_threshold = float(gene_exp_threshold)
except Exception:
gene_exp_threshold = 0
try:
exon_rpkm_threshold = float(exon_rpkm_threshold)
except Exception:
exon_rpkm_threshold = 0
if filter_probeset_types not in probeset_types and array_type == 'exon':
print "Invalid probeset-type entered:", filter_probeset_types, '. Must be "full", "extended" or "core"'
sys.exit()
elif array_type == 'gene' and filter_probeset_types == 'NA':
filter_probeset_types = 'core'
if dabg_p > 1 or dabg_p <= 0:
print "Invalid DABG p-value entered:", dabg_p, '. Must be > 0 and <= 1'
sys.exit()
if expression_threshold < 1:
print "Invalid expression threshold entered:", expression_threshold, '. Must be > 1'
sys.exit()
if p_threshold > 1 or p_threshold <= 0:
print "Invalid alternative exon p-value entered:", p_threshold, '. Must be > 0 and <= 1'
sys.exit()
if alt_exon_fold_variable < 1 and analysis_method != 'ASPIRE':
print "Invalid alternative exon threshold entered:", alt_exon_fold_variable, '. Must be > 1'
sys.exit()
if gene_expression_cutoff < 1:
print "Invalid gene expression threshold entered:", gene_expression_cutoff, '. Must be > 1'
sys.exit()
if additional_score < 1:
print "Invalid additional score threshold entered:", additional_score, '. Must be > 1'
sys.exit()
if array_type == 'RNASeq':
if rpkm_threshold < 0:
print "Invalid gene RPKM threshold entered:", rpkm_threshold, '. Must be >= 0'
sys.exit()
if exon_exp_threshold < 1:
print "Invalid exon expression threshold entered:", exon_exp_threshold, '. Must be > 1'
sys.exit()
if exon_rpkm_threshold < 0:
print "Invalid exon RPKM threshold entered:", exon_rpkm_threshold, '. Must be >= 0'
sys.exit()
if gene_exp_threshold < 1:
print "Invalid gene expression threshold entered:", gene_exp_threshold, '. Must be > 1'
sys.exit()
if 'FIRMA' in additional_algorithms and array_type == 'RNASeq':
print 'FIRMA is not an available option for RNASeq... Changing this to splicing-index.'
additional_algorithms = 'splicing-index'
additional_algorithms = UI.AdditionalAlgorithms(additional_algorithms)
additional_algorithms.setScore(additional_score)
if array_type == 'RNASeq':
manufacturer = 'RNASeq'
if 'CEL' in run_from_scratch:
run_from_scratch = 'Process RNA-seq reads'
if build_exon_bedfile == 'yes':
run_from_scratch = 'buildExonExportFiles'
if run_from_scratch == 'Process AltAnalyze filtered':
expression_data_format = 'log' ### This is switched to log no matter what, after initial import and analysis of CEL or BED files
### These variables are modified from the defaults in the module UI as below
excludeNonExpExons = True
if avg_all_for_ss == 'yes': avg_all_for_ss = 'yes'
elif 'all exon aligning' in avg_all_for_ss or 'known exons' in avg_all_for_ss or 'expressed exons' in avg_all_for_ss:
if 'known exons' in avg_all_for_ss and array_type == 'RNASeq':
excludeNonExpExons = False
avg_all_for_ss = 'yes'
else:
avg_all_for_ss = 'no'
if run_MiDAS == 'NA': run_MiDAS = 'no'
if perform_alt_analysis == 'yes': perform_alt_analysis = 'yes'
elif perform_alt_analysis == 'expression':
perform_alt_analysis = 'expression'
elif perform_alt_analysis == 'just expression':
perform_alt_analysis = 'expression'
elif perform_alt_analysis == 'no':
perform_alt_analysis = 'expression'
elif platform != "3'array":
perform_alt_analysis = 'both'
if systemToUse != None: array_type = systemToUse
try:
permute_p_threshold = float(permute_p_threshold)
except Exception:
permute_p_threshold = permute_p_threshold
### Store variables for AltAnalyzeMain
expr_var = species, array_type, manufacturer, constitutive_source, dabg_p, expression_threshold, avg_all_for_ss, expression_data_format, include_raw_data, run_from_scratch, perform_alt_analysis
alt_var = analysis_method, p_threshold, filter_probeset_types, alt_exon_fold_variable, gene_expression_cutoff, remove_intronic_junctions, permute_p_threshold, perform_permutation_analysis, export_NI_values, analyze_all_conditions
additional_var = calculate_normIntensity_p, run_MiDAS, use_direct_domain_alignments_only, microRNA_prediction_method, filter_for_AS, additional_algorithms
goelite_var = ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, goelite_permutations, mod, returnPathways
if run_from_scratch == 'buildExonExportFiles':
fl = UI.ExpressionFileLocationData('', '', '', '')
fl.setExonBedBuildStatus('yes')
fl.setFeatureNormalization('none')
fl.setCELFileDir(cel_file_dir)
fl.setArrayType(array_type)
fl.setOutputDir(output_dir)
fl.setMultiThreading(multiThreading)
exp_file_location_db = {}
exp_file_location_db[dataset_name] = fl
parent_dir = output_dir
perform_alt_analysis = 'expression'
if run_from_scratch == 'Process Expression file':
if len(input_exp_file) > 0:
if groups_file != None and comps_file != None:
if 'exp.' in input_exp_file: new_exp_file = input_exp_file
else:
new_exp_file = export.findParentDir(
input_exp_file) + 'exp.' + export.findFilename(
input_exp_file)
if 'ExpressionInput' not in new_exp_file:
### This expression file is not currently used (could make it the default after copying to this location)
if output_dir[-1] != '/' and output_dir[-1] != '\\':
output_dir += '/'
new_exp_file = output_dir + 'ExpressionInput/' + export.findFilename(
new_exp_file)
try:
export.copyFile(input_exp_file, new_exp_file)
except Exception:
print 'Expression file already present in target location.'
try:
export.copyFile(groups_file, string.replace(
new_exp_file, 'exp.', 'groups.'))
except Exception:
print 'Groups file already present in target location OR bad input path.'
try:
export.copyFile(comps_file, string.replace(
new_exp_file, 'exp.', 'comps.'))
except Exception:
print 'Comparison file already present in target location OR bad input path.'
groups_file = string.replace(new_exp_file, 'exp.',
'groups.')
comps_file = string.replace(new_exp_file, 'exp.', 'comps.')
input_exp_file = new_exp_file
if verifyGroupFileFormat(groups_file) == False:
print "\nWarning! The format of your groups file is not correct. For details, see:\nhttp://code.google.com/p/altanalyze/wiki/ManualGroupsCompsCreation\n"
sys.exit()
try:
cel_files, array_linker_db = ExpressionBuilder.getArrayHeaders(
input_exp_file)
if len(input_stats_file) > 1: ###Make sure the files have the same arrays and order first
cel_files2, array_linker_db2 = ExpressionBuilder.getArrayHeaders(
input_stats_file)
if cel_files2 != cel_files:
print "The probe set p-value file:\n" + input_stats_file + "\ndoes not have the same array order as the\nexpression file. Correct before proceeding."
sys.exit()
except Exception:
print '\nWARNING...Expression file not found: "' + input_exp_file + '"\n\n'
sys.exit()
exp_name = string.replace(exp_name, 'exp.', '')
dataset_name = exp_name
exp_name = string.replace(exp_name, '.txt', '')
groups_name = 'ExpressionInput/groups.' + dataset_name
comps_name = 'ExpressionInput/comps.' + dataset_name
groups_file_dir = output_dir + '/' + groups_name
comps_file_dir = output_dir + '/' + comps_name
groups_found = verifyFile(groups_file_dir)
comps_found = verifyFile(comps_file_dir)
if ((groups_found != 'found' or comps_found != 'found') and
analyze_all_conditions != 'all groups') or (
analyze_all_conditions == 'all groups' and
groups_found != 'found'):
files_exported = UI.predictGroupsAndComps(cel_files,
output_dir, exp_name)
if files_exported == 'yes':
print "AltAnalyze inferred a groups and comps file from the CEL file names."
elif run_lineage_profiler == 'yes' and input_file_dir != None and pipelineAnalysis == False and '--runLineageProfiler' in arguments:
pass
else:
print '...groups and comps files not found. Create before running AltAnalyze in command line mode.'
sys.exit()
fl = UI.ExpressionFileLocationData(
input_exp_file, input_stats_file, groups_file_dir,
comps_file_dir)
dataset_name = exp_name
if analyze_all_conditions == "all groups":
try:
array_group_list, group_db = UI.importArrayGroupsSimple(
groups_file_dir, cel_files)
except Exception:
print '...groups and comps files not found. Create before running AltAnalyze in command line mode.'
sys.exit()
print len(group_db), 'groups found'
if len(group_db) == 2: analyze_all_conditions = 'pairwise'
exp_file_location_db = {}
exp_file_location_db[exp_name] = fl
elif run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or run_from_scratch == 'Process Feature Extraction files':
if groups_file != None and comps_file != None:
try:
shutil.copyfile(groups_file, string.replace(
exp_file_dir, 'exp.', 'groups.'))
except Exception:
print 'Groups file already present in target location OR bad input path.'
try:
shutil.copyfile(comps_file, string.replace(
exp_file_dir, 'exp.', 'comps.'))
except Exception:
print 'Comparison file already present in target location OR bad input path.'
stats_file_dir = string.replace(exp_file_dir, 'exp.', 'stats.')
groups_file_dir = string.replace(exp_file_dir, 'exp.', 'groups.')
comps_file_dir = string.replace(exp_file_dir, 'exp.', 'comps.')
groups_found = verifyFile(groups_file_dir)
comps_found = verifyFile(comps_file_dir)
if ((groups_found != 'found' or comps_found != 'found') and
analyze_all_conditions != 'all groups') or (
analyze_all_conditions == 'all groups' and
groups_found != 'found'):
if mappedExonAnalysis: pass
else:
files_exported = UI.predictGroupsAndComps(
cel_files, output_dir, exp_name)
if files_exported == 'yes':
print "AltAnalyze inferred a groups and comps file from the CEL file names."
#else: print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
fl = UI.ExpressionFileLocationData(exp_file_dir, stats_file_dir,
groups_file_dir, comps_file_dir)
exp_file_location_db = {}
exp_file_location_db[dataset_name] = fl
            parent_dir = output_dir ### interchangeable terms (parent_dir used with expression file import)
if analyze_all_conditions == "all groups":
array_group_list, group_db = UI.importArrayGroupsSimple(
groups_file_dir, cel_files)
UI.exportGroups(exp_file_location_db, array_group_list)
print len(group_db), 'groups found'
if len(group_db) == 2: analyze_all_conditions = 'pairwise'
try:
fl.setRunKallisto(input_fastq_dir)
except Exception:
pass
elif run_from_scratch == 'Process AltAnalyze filtered':
if '.txt' in input_filtered_dir: ### Occurs if the user tries to load a specific file
dirs = string.split(input_filtered_dir, '/')
input_filtered_dir = string.join(dirs[:-1], '/')
fl = UI.ExpressionFileLocationData('', '', '', '')
dataset_name = 'filtered-exp_dir'
dirs = string.split(input_filtered_dir, 'AltExpression')
parent_dir = dirs[0]
exp_file_location_db = {}
exp_file_location_db[dataset_name] = fl
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset_name]
file_location_defaults = UI.importDefaultFileLocations()
apt_location = UI.getAPTLocations(file_location_defaults,
run_from_scratch, run_MiDAS)
fl.setAPTLocation(apt_location)
if run_from_scratch == 'Process CEL files':
if xhyb_remove == 'yes' and (array_type == 'gene' or
array_type == 'junction'):
xhyb_remove = 'no' ### This is set when the user mistakenly selects exon array, initially
fl.setInputCDFFile(input_cdf_file)
fl.setCLFFile(clf_file)
fl.setBGPFile(bgp_file)
fl.setXHybRemoval(xhyb_remove)
fl.setCELFileDir(cel_file_dir)
fl.setArrayType(array_type_original)
fl.setOutputDir(output_dir)
elif run_from_scratch == 'Process RNA-seq reads':
fl.setCELFileDir(cel_file_dir)
fl.setOutputDir(output_dir)
elif run_from_scratch == 'Process Feature Extraction files':
fl.setCELFileDir(cel_file_dir)
fl.setOutputDir(output_dir)
fl = exp_file_location_db[dataset]
fl.setRootDir(parent_dir)
apt_location = fl.APTLocation()
root_dir = fl.RootDir()
fl.setExonBedBuildStatus(build_exon_bedfile)
fl.setMarkerFinder(marker_finder)
fl.setFeatureNormalization(normalize_feature_exp)
fl.setNormMatrix(normalize_gene_data)
fl.setProbabilityStatistic(probability_statistic)
fl.setProducePlots(visualize_qc_results)
fl.setPerformLineageProfiler(run_lineage_profiler)
fl.setCompendiumType(compendiumType)
fl.setCompendiumPlatform(compendiumPlatform)
fl.setVendor(manufacturer)
try:
fl.setFDRStatistic(FDR_statistic)
except Exception:
pass
fl.setAnalysisMode('commandline')
fl.setBatchEffectRemoval(batch_effects)
fl.setChannelToExtract(channel_to_extract)
fl.setMultiThreading(multiThreading)
try:
fl.setExcludeLowExpressionExons(excludeNonExpExons)
except Exception:
fl.setExcludeLowExpressionExons(True)
if 'other' in manufacturer or 'Other' in manufacturer:
### For data without a primary array ID key
manufacturer = "other:3'array"
fl.setVendor(manufacturer)
if array_type == 'RNASeq': ### Post version 2.0, add variables in fl rather than below
fl.setRPKMThreshold(rpkm_threshold)
fl.setExonExpThreshold(exon_exp_threshold)
fl.setGeneExpThreshold(gene_exp_threshold)
fl.setExonRPKMThreshold(exon_rpkm_threshold)
fl.setJunctionExpThreshold(expression_threshold)
fl.setExonMapFile(exonMapFile)
fl.setPlatformType(platformType)
### Verify database presence
try:
dirs = unique.read_directory('/AltDatabase')
except Exception:
dirs = []
if species not in dirs:
print '\n' + species, 'species not yet installed. Please install before proceeding (e.g., "python AltAnalyze.py --update Official --species', species, '--version EnsMart65").'
global commandLineMode
commandLineMode = 'yes'
AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var,
exp_file_location_db, None)
else:
print 'Insufficient Flags entered (requires --species and --output)'
def cleanUpCommandArguments():
### Needed on PC
command_args = string.join(sys.argv, ' ')
arguments = string.split(command_args, ' --')
for argument in arguments:
"""
argument_list = string.split(argument,' ')
if len(argument_list)>2:
filename = string.join(argument_list[1:],' ')
argument = argument_list[0]+' '+string.replace(filename,' ','$$$')
"""
argument_list = string.split(argument, ' ')
#argument = string.join(re.findall(r"\w",argument),'')
if ':' in argument: ### Windows OS
z = string.find(argument_list[1], ':')
if z != -1 and z != 1: ### Hence, it is in the argument but not at the second position
print 'Illegal parentheses found. Please re-type these and re-run.'
sys.exit()
def runCommandLineVersion():
### This code had to be moved to a separate function to prevent iterative runs upon AltAnalyze.py re-import
command_args = string.join(sys.argv, ' ')
#try: cleanUpCommandArguments()
#except Exception: null=[]
#print [command_args];sys.exit()
if len(sys.argv[1:]) > 0 and '--' in command_args:
if '--GUI' in command_args:
AltAnalyzeSetup(
'no'
            ) ### a trick to get back to the main page of the GUI (if AltAnalyze has a Tkinter conflict)
try:
commandLineRun()
except Exception:
print traceback.format_exc()
###### Determine Command Line versus GUI Control ######
command_args = string.join(sys.argv, ' ')
if len(sys.argv[1:]) > 1 and '-' in command_args: null = []
else:
try:
import Tkinter
from Tkinter import *
import PmwFreeze
import tkFileDialog
from tkFont import Font
use_Tkinter = 'yes'
except ImportError:
use_Tkinter = 'yes'
print "\nPmw or Tkinter not found... Tkinter print out not available"
def testResultsPanel():
import QC
file = "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/3'Array/Merrill/ExpressionInput/exp.test.txt"
#QC.outputArrayQC(file)
global root
root = Tk()
global pathway_permutations
pathway_permutations = 'NA'
global log_file
log_file = 'null.txt'
global array_type
global explicit_data_type
global run_GOElite
run_GOElite = 'run-immediately'
explicit_data_type = 'exon-only'
array_type = 'RNASeq'
fl = UI.ExpressionFileLocationData('', '', '', '')
graphic_links = []
graphic_links.append(['PCA', 'PCA.png'])
graphic_links.append(['HC', 'HC.png'])
graphic_links.append(['PCA1', 'PCA.png'])
graphic_links.append(['HC1', 'HC.png'])
graphic_links.append(['PCA2', 'PCA.png'])
graphic_links.append(['HC2', 'HC.png'])
graphic_links.append(['PCA3', 'PCA.png'])
graphic_links.append(['HC3', 'HC.png'])
graphic_links.append(['PCA4', 'PCA.png'])
graphic_links.append(['HC4', 'HC.png'])
summary_db = {}
summary_db['QC'] = graphic_links
#summary_db={}
fl.setGraphicLinks(graphic_links)
summary_db['gene_assayed'] = 1
summary_db['denominator_exp_genes'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_genes'] = 1
summary_db['direct_domain_genes'] = 1
summary_db['miRNA_gene_hits'] = 1
#summary_db={}
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
dataset = 'test'
results_dir = ''
print "Analysis Complete\n"
if root != '' and root != None:
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel()
SummaryResultsWindow(tl, 'GE', results_dir, dataset, 'parent',
summary_db)
print 'here'
#sys.exit()
class Logger(object):
def __init__(self, null):
self.terminal = sys.stdout
self.log = open(log_file, "w")
def write(self, message):
self.terminal.write(message)
self.log = open(log_file, "a")
self.log.write(message)
self.log.close()
def flush(self):
pass
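# Descriptive note on the Logger class above: it tees stdout so that every print
# statement is shown on the console and also appended to the file named by the
# module-level global 'log_file'. A minimal, hypothetical use (the log path below is
# assumed, not taken from the original workflow) would be:
#
#   log_file = 'AltAnalyze_report.log'   # assumed path; normally set by the workflow
#   sys.stdout = Logger('')              # subsequent print output is mirrored to log_file
#   print 'analysis started'             # written to the screen and appended to the log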
if __name__ == '__main__':
try:
mlp.freeze_support()
except Exception:
pass
#testResultsPanel()
skip_intro = 'yes'
#sys.exit()
#skip_intro = 'remoteViewer'
runCommandLineVersion()
if use_Tkinter == 'yes': AltAnalyzeSetup(skip_intro)
""" To do list:
    0) (done) Integrate new network visualization functionality in clustering
1) RNA-Seq and LineageProfiler: threshold based RPKM expression filtering for binary absent present gene and exon calls
2) (demo) Splicing graph/isoform visualization
3) SQLite for gene-set databases prior to clustering and network visualization
4) (done) Gene-level correlation queries for clustering
5) (explored - not good) Optional algorithm type of PCA
6) (done) Optional normalization of expression data for clustering
7) (partially) Integrate splicing factor enrichment analysis (separate module?)
8) (done) Venn diagram option
9) (done) Additional Analyses: (A) combine lists, (B) annotate ID list, (C) run marker finder directly, (D) any graph from table option, (E) network from SIF, (F) inference networks from gene-lists (protein-protein, protein-DNA, protein-splicing)
10) Optional denominator option for GO-Elite (create from input and ID system IDs)
11) Update fields in summary combined alt.exon files (key by probeset)
12) Check field names for junction, exon, RNA-Seq in summary alt.exon report
13) (done) Support additional ID types for initial import (ID select option and pulldown - Other)
14) Proper FDR p-value for alt.exon analyses (include all computed p-values)
15) Add all major clustering and LineageProfiler options to UI along with stats filtering by default
16) (done) Make GO-Elite analysis the default
17) Support R check (and response that they need it) along with GUI gcrma, agilent array, hopach, combat
18) Probe-level annotations from Ensembl (partial code in place) and probe-level RMA in R (or possibly APT) - google pgf for U133 array
19) (done) Include various gene databases for LineageProfiler in download and allow for custom databases to be used (markerFinder based)
20) (done) Quantile normalization option for any non-Affy, non-RNASeq data (check box)
21) (done) Import agilent from Feature extraction files (pull-down option)
22) Update the software from the software
Advantages of this tool kit:
0) Easiest to use, hands down
1) Established and novel functionality for transcriptome/proteomics analysis built in
2) Independent and cooperative options for RNA-Seq and array analysis (splicing and gene expression)
3) Superior functional analyses (TF-target, splicing-factor target, lineage markers, WikiPathway visualization)
    4) Options for different levels of users with different integration options (multiple statistical method options, optional R support)
5) Built in secondary analysis options for already processed data (graphing, clustering, biomarker discovery, pathway analysis, network visualization)
6) Incorporates highly validated alternative exon identification methods, independent and jointly
Primary Engineer Work:
    0) C-library calls and/or multithreading where applicable to improve performance.
1) MySQL or equivalent transition for all large database queries (e.g., HuEx 2.1 on-the-fly coordinate mapping).
2) Splicing-domain visualization (matplotlib).
3) Isoform-domain network visualization and WP overlays.
4) Webservice calls to in silico protein translation, domain prediction, splicing factor regulation.
5) Stand-alone integration with bedtools, QC tools, TopHat, Cufflinks, Miso (optional).
### 2.0.9
    Monocle integration
generic and cell classification machine learning
PCR primer design (gene centric after file selection)
BAM->BED (local SAMTools)
updated APT
"""
|
wuxue/altanalyze
|
AltAnalyze_new.py
|
Python
|
apache-2.0
| 558,055
|
[
"Cytoscape"
] |
6616a48e50116b6c24510c5567e26ec3244aff51248975e41bd08866fbe8acd4
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Popularity Contest (popcontest) parser."""
import unittest
# pylint: disable-msg=unused-import
from plaso.formatters import popcontest as popcontest_formatter
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import timelib_test
from plaso.parsers import popcontest
from plaso.parsers import test_lib
__author__ = 'Francesco Picasso (francesco.picasso@gmail.com)'
class PopularityContestUnitTest(test_lib.ParserTestCase):
"""Tests for the popcontest parser."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
pre_obj = event.PreprocessObject()
self._parser = popcontest.PopularityContestParser(pre_obj, None)
def testParse(self):
"""Tests the Parse function."""
test_file = self._GetTestFilePath(['popcontest1.log'])
event_generator = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjects(event_generator)
self.assertEquals(len(event_objects), 13)
self.assertEquals(
event_objects[0].timestamp,
timelib_test.CopyStringToTimestamp('2010-06-22 05:41:41.000'))
expected_string = (
u'Session 0 start '
u'ID 12345678901234567890123456789012 [ARCH:i386 POPCONVER:1.38]')
expected_short_string = u'Session 0 start'
self._TestGetMessageStrings(
event_objects[0], expected_string, expected_short_string)
self.assertEquals(
event_objects[1].timestamp,
timelib_test.CopyStringToTimestamp('2010-06-22 07:34:42.000'))
expected_string = (u'mru [/usr/sbin/atd] package [at]')
expected_short_string = u'/usr/sbin/atd'
self._TestGetMessageStrings(
event_objects[1], expected_string, expected_short_string)
self.assertEquals(
event_objects[2].timestamp,
timelib_test.CopyStringToTimestamp('2010-06-22 07:34:43.000'))
expected_string = (
u'mru [/usr/lib/python2.5/lib-dynload/_struct.so] '
u'package [python2.5-minimal]')
expected_short_string = u'/usr/lib/python2.5/lib-dynload/_struct.so'
self._TestGetMessageStrings(
event_objects[2], expected_string, expected_short_string)
self.assertEquals(
event_objects[3].timestamp,
timelib_test.CopyStringToTimestamp('2010-05-30 05:26:20.000'))
expected_string = (
u'mru [/usr/bin/empathy] package [empathy] tag [RECENT-CTIME]')
expected_short_string = u'/usr/bin/empathy'
self._TestGetMessageStrings(
event_objects[3], expected_string, expected_short_string)
self.assertEquals(
event_objects[6].timestamp,
timelib_test.CopyStringToTimestamp('2010-05-12 07:58:33.000'))
expected_string = (u'mru [/usr/bin/orca] package [gnome-orca] tag [OLD]')
expected_short_string = u'/usr/bin/orca'
self._TestGetMessageStrings(
event_objects[6], expected_string, expected_short_string)
self.assertEquals(
event_objects[7].timestamp,
timelib_test.CopyStringToTimestamp('2010-06-22 05:41:41.000'))
expected_string = u'Session 0 end'
expected_short_string = expected_string
self._TestGetMessageStrings(
event_objects[7], expected_string, expected_short_string)
self.assertEquals(
event_objects[8].timestamp,
timelib_test.CopyStringToTimestamp('2010-06-22 05:41:41.000'))
expected_string = (
u'Session 1 start '
u'ID 12345678901234567890123456789012 [ARCH:i386 POPCONVER:1.38]')
expected_short_string = u'Session 1 start'
self._TestGetMessageStrings(
event_objects[8], expected_string, expected_short_string)
self.assertEquals(
event_objects[9].timestamp,
timelib_test.CopyStringToTimestamp('2010-06-22 07:34:42.000'))
expected_string = (u'mru [/super/cool/plasuz] package [plaso]')
expected_short_string = u'/super/cool/plasuz'
self._TestGetMessageStrings(
event_objects[9], expected_string, expected_short_string)
self.assertEquals(
event_objects[10].timestamp,
timelib_test.CopyStringToTimestamp('2010-04-06 12:25:42.000'))
expected_string = (u'mru [/super/cool/plasuz] package [miss_ctime]')
expected_short_string = u'/super/cool/plasuz'
self._TestGetMessageStrings(
event_objects[10], expected_string, expected_short_string)
self.assertEquals(
event_objects[11].timestamp,
timelib_test.CopyStringToTimestamp('2010-05-12 07:58:33.000'))
expected_string = (u'mru [/super/cool] package [plaso] tag [WRONG_TAG]')
expected_short_string = u'/super/cool'
self._TestGetMessageStrings(
event_objects[11], expected_string, expected_short_string)
self.assertEquals(
event_objects[12].timestamp,
timelib_test.CopyStringToTimestamp('2010-06-22 05:41:41.000'))
expected_string = u'Session 1 end'
expected_short_string = expected_string
self._TestGetMessageStrings(
event_objects[12], expected_string, expected_short_string)
if __name__ == '__main__':
unittest.main()
|
iwm911/plaso
|
plaso/parsers/popcontest_test.py
|
Python
|
apache-2.0
| 5,715
|
[
"ORCA"
] |
bf4783015849541b82d6cf2e83b8eec2b579a5e943443123e9ca902e08a48049
|
#
# cmdline.py
#
# Copyright (C) 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Red Hat Author(s): Brian C. Lane <bcl@redhat.com>
import os
import sys
import argparse
def lorax_parser():
""" Return the ArgumentParser for lorax"""
# get lorax version
try:
from pylorax import version
vernum = version.num
except ImportError:
vernum = "devel"
version = "{0}-{1}".format(os.path.basename(sys.argv[0]), vernum)
parser = argparse.ArgumentParser(description="Create the Anaconda boot.iso")
# required arguments for image creation
required = parser.add_argument_group("required arguments")
required.add_argument("-p", "--product", help="product name", required=True, metavar="PRODUCT")
required.add_argument("-v", "--version", help="version identifier", required=True, metavar="VERSION")
required.add_argument("-r", "--release", help="release information", required=True, metavar="RELEASE")
required.add_argument("-s", "--source", help="source repository (may be listed multiple times)",
metavar="REPOSITORY", action="append", default=[])
required.add_argument("--repo", help="source dnf repository file", type=os.path.abspath,
dest="repos", metavar="REPOSITORY", action="append", default=[])
# optional arguments
optional = parser.add_argument_group("optional arguments")
optional.add_argument("-m", "--mirrorlist",
help="mirrorlist repository (may be listed multiple times)",
metavar="REPOSITORY", action="append", default=[])
optional.add_argument("-t", "--variant",
help="variant name", metavar="VARIANT")
optional.add_argument("-b", "--bugurl",
help="bug reporting URL for the product", metavar="URL",
default="your distribution provided bug reporting tool")
optional.add_argument("--isfinal", help="",
action="store_true", default=False, dest="isfinal")
optional.add_argument("-c", "--config", default="/etc/lorax/lorax.conf",
help="config file", metavar="CONFIGFILE")
optional.add_argument("--proxy", default=None,
help="repo proxy url:port", metavar="HOST")
optional.add_argument("-i", "--installpkgs", default=[],
action="append", metavar="PACKAGE",
help="package glob to install before runtime-install.tmpl runs. (may be listed multiple times)")
optional.add_argument("--buildarch", default=None,
help="build architecture", metavar="ARCH")
optional.add_argument("--volid", default=None,
help="volume id", metavar="VOLID")
optional.add_argument("--macboot", help="",
action="store_true", default=True, dest="domacboot")
optional.add_argument("--nomacboot", help="",
action="store_false", dest="domacboot")
optional.add_argument("--noupgrade", help="",
action="store_false", default=True, dest="doupgrade")
optional.add_argument("--logfile", default="./lorax.log", type=os.path.abspath,
help="Path to logfile")
optional.add_argument("--tmp", default="/var/tmp",
help="Top level temporary directory" )
optional.add_argument("--cachedir", default=None, type=os.path.abspath,
help="DNF cache directory. Default is a temporary dir.")
optional.add_argument("--workdir", default=None, type=os.path.abspath,
help="Work directory, overrides --tmp. Default is a temporary dir under /var/tmp")
optional.add_argument("--force", default=False, action="store_true",
help="Run even when the destination directory exists")
optional.add_argument("--add-template", dest="add_templates",
action="append", help="Additional template for runtime image",
default=[])
optional.add_argument("--add-template-var", dest="add_template_vars",
action="append", help="Set variable for runtime image template",
default=[])
optional.add_argument("--add-arch-template", dest="add_arch_templates",
action="append", help="Additional template for architecture-specific image",
default=[])
optional.add_argument("--add-arch-template-var", dest="add_arch_template_vars",
action="append", help="Set variable for architecture-specific image",
default=[])
optional.add_argument("--noverify", action="store_false", default=True, dest="verify",
help="Do not verify the install root")
optional.add_argument("--sharedir", metavar="SHAREDIR", type=os.path.abspath,
help="Directory containing all the templates. Overrides config file sharedir")
optional.add_argument("--enablerepo", action="append", default=[], dest="enablerepos",
metavar="[repo]", help="Names of repos to enable")
optional.add_argument("--disablerepo", action="append", default=[], dest="disablerepos",
metavar="[repo]", help="Names of repos to disable")
# add the show version option
parser.add_argument("-V", help="show program's version number and exit",
action="version", version=version)
parser.add_argument("outputdir", help="Output directory", metavar="OUTPUTDIR", type=os.path.abspath)
return parser
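# Illustrative usage sketch for the parser above; all argument values here are
# hypothetical and only show how the required options and the positional OUTPUTDIR fit
# together:
#
#   parser = lorax_parser()
#   opts = parser.parse_args(["-p", "Fedora", "-v", "25", "-r", "25",
#                             "-s", "http://example.com/repo/", "/tmp/lorax-output"])
#   print(opts.product, opts.version, opts.outputdir)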
def lmc_parser(dracut_default=""):
""" Return a ArgumentParser object for live-media-creator."""
parser = argparse.ArgumentParser(description="Create Live Install Media",
fromfile_prefix_chars="@")
# These are mutually exclusive, one is required
action = parser.add_mutually_exclusive_group(required=True)
action.add_argument("--make-iso", action="store_true",
help="Build a live iso")
action.add_argument("--make-disk", action="store_true",
help="Build a partitioned disk image")
action.add_argument("--make-fsimage", action="store_true",
help="Build a filesystem image")
action.add_argument("--make-appliance", action="store_true",
help="Build an appliance image and XML description")
action.add_argument("--make-ami", action="store_true",
help="Build an ami image")
action.add_argument("--make-tar", action="store_true",
help="Build a tar of the root filesystem")
action.add_argument("--make-pxe-live", action="store_true",
help="Build a live pxe boot squashfs image")
action.add_argument("--make-ostree-live", action="store_true",
help="Build a live pxe boot squashfs image of Atomic Host")
action.add_argument("--make-oci", action="store_true",
help="Build an Open Container Initiative image")
action.add_argument("--make-vagrant", action="store_true",
help="Build a Vagrant Box image")
parser.add_argument("--iso", type=os.path.abspath,
help="Anaconda installation .iso path to use for qemu")
parser.add_argument("--iso-only", action="store_true",
help="Remove all iso creation artifacts except the boot.iso, "
"combine with --iso-name to rename the boot.iso")
parser.add_argument("--iso-name", default=None,
help="Name of output iso file for --iso-only. Default is boot.iso")
parser.add_argument("--ks", action="append", type=os.path.abspath,
help="Kickstart file defining the install.")
parser.add_argument("--image-only", action="store_true",
help="Exit after creating fs/disk image.")
parser.add_argument("--no-virt", action="store_true",
help="Run anaconda directly on host instead of using qemu")
parser.add_argument("--proxy",
help="proxy URL to use for the install")
parser.add_argument("--anaconda-arg", action="append", dest="anaconda_args",
help="Additional argument to pass to anaconda (no-virt "
"mode). Pass once for each argument")
parser.add_argument("--armplatform",
help="the platform to use when creating images for ARM, "
"i.e., highbank, mvebu, omap, tegra, etc.")
parser.add_argument("--location", default=None, type=os.path.abspath,
help="location of iso directory tree with initrd.img "
"and vmlinuz. Used to run qemu with a newer initrd "
"than the iso.")
parser.add_argument("--logfile", default="./livemedia.log",
type=os.path.abspath,
help="Name and path for primary logfile, other logs will "
"be created in the same directory.")
parser.add_argument("--lorax-templates", default=None,
type=os.path.abspath,
help="Path to mako templates for lorax")
parser.add_argument("--tmp", default="/var/tmp", type=os.path.abspath,
help="Top level temporary directory")
parser.add_argument("--resultdir", default=None, dest="result_dir",
type=os.path.abspath,
help="Directory to copy the resulting images and iso into. "
"Defaults to the temporary working directory")
parser.add_argument("--macboot", action="store_true", default=True,
dest="domacboot")
parser.add_argument("--nomacboot", action="store_false",
dest="domacboot")
image_group = parser.add_argument_group("disk/fs image arguments")
image_group.add_argument("--disk-image", type=os.path.abspath,
help="Path to existing disk image to use for creating final image.")
image_group.add_argument("--keep-image", action="store_true",
help="Keep raw disk image after .iso creation")
image_group.add_argument("--fs-image", type=os.path.abspath,
help="Path to existing filesystem image to use for creating final image.")
image_group.add_argument("--image-name", default=None,
help="Name of output file to create. Used for tar, fs and disk image. Default is a random name.")
image_group.add_argument("--fs-label", default="Anaconda",
help="Label to set on fsimage, default is 'Anaconda'")
image_group.add_argument("--image-type", default=None,
help="Create an image with qemu-img. See qemu-img --help for supported formats.")
image_group.add_argument("--qemu-arg", action="append", dest="qemu_args", default=[],
help="Arguments to pass to qemu-img. Pass once for each argument, they will be used for ALL calls to qemu-img.")
image_group.add_argument("--qcow2", action="store_true",
help="Create qcow2 image instead of raw sparse image when making disk images.")
image_group.add_argument("--qcow2-arg", action="append", dest="qemu_args", default=[],
help="Arguments to pass to qemu-img. Pass once for each argument, they will be used for ALL calls to qemu-img.")
image_group.add_argument("--compression", default="xz",
help="Compression binary for make-tar. xz, lzma, gzip, and bzip2 are supported. xz is the default.")
image_group.add_argument("--compress-arg", action="append", dest="compress_args", default=[],
help="Arguments to pass to compression. Pass once for each argument")
# Group of arguments for appliance creation
app_group = parser.add_argument_group("appliance arguments")
app_group.add_argument("--app-name", default=None,
help="Name of appliance to pass to template")
app_group.add_argument("--app-template", default=None,
help="Path to template to use for appliance data.")
app_group.add_argument("--app-file", default="appliance.xml",
help="Appliance template results file.")
# Group of arguments to pass to qemu
virt_group = parser.add_argument_group("qemu arguments")
virt_group.add_argument("--ram", metavar="MEMORY", type=int, default=1024,
help="Memory to allocate for installer in megabytes.")
virt_group.add_argument("--vcpus", type=int, default=None,
help="Passed to qemu -smp command")
virt_group.add_argument("--vnc",
help="Passed to qemu -display command. eg. vnc=127.0.0.1:5, default is to "
"choose the first unused vnc port.")
virt_group.add_argument("--arch", default=None,
help="System arch to build for. Used to select qemu-system-* command. "
"Defaults to qemu-system-<arch>")
virt_group.add_argument("--kernel-args",
help="Additional argument to pass to the installation kernel")
virt_group.add_argument("--ovmf-path", default="/usr/share/OVMF/",
help="Path to OVMF firmware")
virt_group.add_argument("--virt-uefi", action="store_true", default=False,
help="Use OVMF firmware to boot the VM in UEFI mode")
virt_group.add_argument("--no-kvm", action="store_true", default=False,
help="Skip using kvm with qemu even if it is available.")
# dracut arguments
dracut_group = parser.add_argument_group("dracut arguments")
dracut_group.add_argument("--dracut-arg", action="append", dest="dracut_args",
help="Argument to pass to dracut when "
"rebuilding the initramfs. Pass this "
"once for each argument. NOTE: this "
"overrides the default. (default: %s)" % dracut_default)
# pxe to live arguments
pxelive_group = parser.add_argument_group("pxe to live arguments")
pxelive_group.add_argument("--live-rootfs-size", type=int, default=0,
help="Size of root filesystem of live image in GiB")
pxelive_group.add_argument("--live-rootfs-keep-size", action="store_true",
help="Keep the original size of root filesystem in live image")
# OCI specific commands
oci_group = parser.add_argument_group("OCI arguments")
oci_group.add_argument("--oci-config",
help="config.json OCI configuration file")
oci_group.add_argument("--oci-runtime",
help="runtime.json OCI configuration file")
# Vagrant specific commands
vagrant_group = parser.add_argument_group("Vagrant arguments")
vagrant_group.add_argument("--vagrant-metadata",
help="optional metadata.json file")
vagrant_group.add_argument("--vagrantfile",
help="optional vagrantfile")
parser.add_argument("--title", default="Linux Live Media",
help="Substituted for @TITLE@ in bootloader config files")
parser.add_argument("--project", default="Linux",
help="substituted for @PROJECT@ in bootloader config files")
parser.add_argument("--releasever", default="25",
help="substituted for @VERSION@ in bootloader config files")
parser.add_argument("--volid", default=None, help="volume id")
parser.add_argument("--squashfs_args",
help="additional squashfs args")
parser.add_argument("--timeout", default=None, type=int,
help="Cancel installer after X minutes")
return parser
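# Illustrative usage sketch for the live-media-creator parser above; the dracut default
# string and the file paths are hypothetical:
#
#   parser = lmc_parser(dracut_default="--xz --install /.buildstamp")
#   opts = parser.parse_args(["--make-iso", "--iso", "/tmp/boot.iso",
#                             "--ks", "/tmp/livemedia.ks"])
#   print(opts.make_iso, opts.iso, opts.ks)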
|
Be-ing/lorax
|
src/pylorax/cmdline.py
|
Python
|
gpl-2.0
| 17,016
|
[
"Brian"
] |
94708b418bb7c32ff9b1b6e43afe1ef39560f298f808a627faa8538e233bfae0
|
__all__ = ["create_settings_from_template"]
from scm.plams import Molecule
from qmflows.settings import Settings
from os.path import join
import os
import json
import pkg_resources as pkg
import yaml
path_valence_electrons = pkg.resource_filename(
"nac", "basis/valence_electrons.json")
path_aux_fit = pkg.resource_filename("nac", "basis/aux_fit.json")
with open(path_valence_electrons, 'r') as f1, open(path_aux_fit, 'r') as f2:
valence_electrons = json.load(f1)
aux_fit = json.load(f2)
def generate_auxiliar_basis(sett: Settings, auxiliar_basis: str, quality: str) -> Settings:
"""
    Generate the auxiliary basis (`auxiliar_basis`) for all the atoms in `sett`,
    using the `quality` of the auxiliary fit requested by the user.
"""
quality_to_number = {"low": 0, "medium": 1,
"good": 2, "verygood": 3, "excellent": 4}
kind = sett.cp2k.force_eval.subsys.kind
for atom in kind.keys():
index = quality_to_number[quality.lower()]
cfit = aux_fit[atom][index]
kind[atom]["BASIS_SET"] = "AUX_FIT " + f"CFIT{cfit}"
return sett
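# Illustrative sketch of generate_auxiliar_basis on a minimal Settings object that
# declares a single carbon kind; the exact CFIT index written into BASIS_SET depends on
# the contents of basis/aux_fit.json, so the result is shown only schematically:
#
#   s = Settings()
#   s.cp2k.force_eval.subsys.kind["C"]["BASIS_SET"] = "DZVP-MOLOPT-SR-GTH-q4"
#   s = generate_auxiliar_basis(s, "DZVP-MOLOPT-SR-GTH", "verygood")
#   # s.cp2k.force_eval.subsys.kind["C"]["BASIS_SET"] is now "AUX_FIT CFIT<n>"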
cp2k_pbe_guess = Settings(yaml.load("""
cp2k:
force_eval:
subsys:
cell:
periodic: "None"
dft:
xc:
xc_functional pbe: {}
scf:
eps_scf: 1e-6
added_mos: 0
scf_guess: "restart"
ot:
minimizer: "DIIS"
n_diis: 7
preconditioner: "FULL_SINGLE_INVERSE"
""", Loader=yaml.FullLoader))
cp2k_pbe_main = Settings(yaml.load("""
cp2k:
force_eval:
subsys:
cell:
periodic: "None"
dft:
xc:
xc_functional pbe: {}
scf:
eps_scf: 5e-4
max_scf: 200
""", Loader=yaml.FullLoader))
cp2k_pbe0_guess = Settings(yaml.load("""
cp2k:
force_eval:
subsys:
cell:
periodic: "None"
dft:
auxiliary_density_matrix_method:
method: "basis_projection"
admm_purification_method: "none"
qs:
method: "gpw"
eps_pgf_orb: 1E-8
xc:
xc_functional:
pbe:
scale_x: 0.75
scale_c: 1.00
hf:
fraction: 0.25
screening:
eps_schwarz: 1.0E-6
screen_on_initial_p: "True"
interaction_potential:
potential_type: "truncated"
cutoff_radius: 2.5
memory:
max_memory: 5000
eps_storage_scaling: "0.1"
scf:
eps_scf: 1e-6
added_mos: 0
scf_guess: "restart"
ot:
minimizer: "DIIS"
n_diis: 7
preconditioner: "FULL_SINGLE_INVERSE"
""", Loader=yaml.FullLoader))
cp2k_pbe0_main = Settings(yaml.load("""
cp2k:
force_eval:
subsys:
cell:
periodic: "None"
dft:
auxiliary_density_matrix_method:
method: "basis_projection"
admm_purification_method: "none"
qs:
method: "gpw"
eps_pgf_orb: "1.0E-8"
xc:
xc_functional:
pbe:
scale_x: "0.75"
scale_c: "1.00"
hf:
fraction: "0.25"
screening:
eps_schwarz: 1.0E-6
screen_on_initial_p: "True"
interaction_potential:
potential_type: "truncated"
cutoff_radius: 2.5
memory:
max_memory: "5000"
eps_storage_scaling: "0.1"
scf:
eps_scf: 5e-4
max_scf: 200
""", Loader=yaml.FullLoader))
cp2k_hse06_guess = Settings(yaml.load("""
cp2k:
force_eval:
subsys:
cell:
periodic: "None"
dft:
auxiliary_density_matrix_method:
method: "basis_projection"
admm_purification_method: "none"
qs:
method: "gpw"
eps_pgf_orb: 1E-8
xc:
xc_functional:
pbe:
scale_x: 0.00
scale_c: 1.00
xwpbe:
scale_x: -0.25
scale_x0: 1.00
omega: 0.11
hf:
fraction: 0.25
screening:
eps_schwarz: 1.0E-6
screen_on_initial_p: "True"
interaction_potential:
potential_type: "shortrange"
omega: 0.11
memory:
max_memory: 5000
eps_storage_scaling: "0.1"
scf:
eps_scf: 1e-6
added_mos: 0
scf_guess: "restart"
ot:
minimizer: "DIIS"
n_diis: 7
preconditioner: "FULL_SINGLE_INVERSE"
""", Loader=yaml.FullLoader))
cp2k_hse06_main = Settings(yaml.load("""
cp2k:
force_eval:
subsys:
cell:
periodic: "None"
dft:
auxiliary_density_matrix_method:
method: "basis_projection"
admm_purification_method: "none"
qs:
method: "gpw"
eps_pgf_orb: "1.0E-8"
xc:
xc_functional:
pbe:
scale_x: 0.00
scale_c: 1.00
xwpbe:
scale_x: -0.25
scale_x0: 1.00
omega: 0.11
hf:
fraction: 0.25
screening:
eps_schwarz: 1.0E-6
screen_on_initial_p: "True"
interaction_potential:
potential_type: "shortrange"
omega: 0.11
memory:
max_memory: 5000
eps_storage_scaling: "0.1"
scf:
eps_scf: 1e-6
max_scf: 200
""", Loader=yaml.FullLoader))
kinds_template = Settings(yaml.load("""
cp2k:
force_eval:
subsys:
kind:
C:
basis_set: DZVP-MOLOPT-SR-GTH-q4
potential: GTH-PBE-q4
""", Loader=yaml.FullLoader))
def generate_kinds(elements: list, basis: str, potential: str) -> Settings:
"""
    Generate the CP2K KIND section (basis set and pseudopotential) for each element.
"""
s = Settings()
subsys = s.cp2k.force_eval.subsys
for e in elements:
q = valence_electrons['-'.join((e, basis))]
subsys.kind[e]['basis_set'] = f"{basis}-q{q}"
subsys.kind[e]['potential'] = f"{potential}-q{q}"
return s
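# Illustrative note (not part of the original module): for the hypothetical call
# generate_kinds(["Cd", "Se"], "DZVP-MOLOPT-SR-GTH", "GTH-PBE"), and assuming the
# packaged JSON table contains the entry "Cd-DZVP-MOLOPT-SR-GTH": 12, the returned
# Settings would contain, for cadmium:
#   s.cp2k.force_eval.subsys.kind.Cd.basis_set == "DZVP-MOLOPT-SR-GTH-q12"
#   s.cp2k.force_eval.subsys.kind.Cd.potential == "GTH-PBE-q12"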
# available templates
templates_dict = {
"pbe_guess": cp2k_pbe_guess, "pbe_main": cp2k_pbe_main,
"pbe0_guess": cp2k_pbe0_guess, "pbe0_main": cp2k_pbe0_main,
"hse06_guess": cp2k_hse06_guess, "hse06_main": cp2k_hse06_main}
def create_settings_from_template(
general: dict, template_name: str, path_traj_xyz: str) -> Settings:
"""
    Create a job Settings object from the template whose name is provided by the user.
"""
setts = templates_dict[template_name]
elements = read_unique_atomic_labels(path_traj_xyz)
kinds = generate_kinds(elements, general['basis'], general['potential'])
if 'pbe0' in template_name:
s = Settings()
s.cp2k.force_eval.dft.xc.hf.interaction_potential.t_c_g_data = os.path.abspath(
join(general['path_basis'], "t_c_g.dat"))
return generate_auxiliar_basis(setts + s + kinds, general['basis'], general['aux_fit'])
elif 'hse06' in template_name:
return generate_auxiliar_basis(setts + kinds, general['basis'], general['aux_fit'])
else:
return setts + kinds
def read_unique_atomic_labels(path_traj_xyz: str) -> set:
"""
    Return the set of unique atomic labels found in the trajectory file.
"""
mol = Molecule(path_traj_xyz, 'xyz')
return set(at.symbol for at in mol.atoms)
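# --- Illustrative usage sketch (not part of the original module) --------------
# A minimal example of building a CP2K job Settings from one of the templates.
# The dictionary values and file paths below are hypothetical placeholders and
# assume the packaged JSON tables contain the corresponding entries.
if __name__ == "__main__":
    general = {"basis": "DZVP-MOLOPT-SR-GTH",
               "potential": "GTH-PBE",
               "aux_fit": "verygood",
               "path_basis": "/path/to/cp2k/basis"}
    # build the settings for the hypothetical trajectory file "geometry.xyz"
    settings = create_settings_from_template(general, "pbe0_main", "geometry.xyz")
    print(settings.cp2k.force_eval.subsys.kind)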
|
felipeZ/nonAdiabaticCoupling
|
nac/workflows/templates.py
|
Python
|
mit
| 7,349
|
[
"CP2K"
] |
7a5cc974582c193842f4968d30845ba5dc6c71df98a522c62687c3c8c93c1bff
|
# Audio Tools, a module and set of tools for manipulating audio data
# Copyright (C) 2007-2016 Brian Langenberger
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import sys
from audiotools import (AudioFile, MetaData)
# takes a pair of integers (or None) for the current and total values
# returns a unicode string of their combined pair
# for example, __number_pair__(2,3) returns u"2/3"
# whereas __number_pair__(4,0) returns u"4"
def __number_pair__(current, total):
def empty(i):
return i is None
unslashed_format = u"{:d}"
slashed_format = u"{:d}/{:d}"
if empty(current) and empty(total):
return unslashed_format.format(0)
elif (not empty(current)) and empty(total):
return unslashed_format.format(current)
elif empty(current) and (not empty(total)):
return slashed_format.format(0, total)
else:
        # neither current nor total is empty
return slashed_format.format(current, total)
def limited_transfer_data(from_function, to_function, max_bytes):
"""transfers up to max_bytes from from_function to to_function
or as many bytes as from_function generates as strings"""
BUFFER_SIZE = 0x100000
s = from_function(BUFFER_SIZE)
while (len(s) > 0) and (max_bytes > 0):
if len(s) > max_bytes:
s = s[0:max_bytes]
to_function(s)
max_bytes -= len(s)
s = from_function(BUFFER_SIZE)
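# Illustrative usage sketch (not part of the original module); the file names
# below are hypothetical. Copies at most the first 1000 bytes of one file into
# another:
#
#     with open("input.bin", "rb") as src, open("output.bin", "wb") as dst:
#         limited_transfer_data(src.read, dst.write, 1000)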
class ApeTagItem(object):
"""a single item in the ApeTag, typically a unicode value"""
FORMAT = "32u 1u 2u 29p"
def __init__(self, item_type, read_only, key, data):
"""fields are as follows:
item_type is 0 = UTF-8, 1 = binary, 2 = external, 3 = reserved
read_only is 1 if the item is read only
key is a bytes object of the item's key
data is a bytes object of the data itself
"""
self.type = item_type
self.read_only = read_only
assert(isinstance(key, bytes))
self.key = key
assert(isinstance(data, bytes))
self.data = data
def __eq__(self, item):
for attr in ["type", "read_only", "key", "data"]:
if ((not hasattr(item, attr)) or (getattr(self, attr) !=
getattr(item, attr))):
return False
else:
return True
def total_size(self):
"""returns total size of item in bytes"""
return 4 + 4 + len(self.key) + 1 + len(self.data)
def copy(self):
"""returns a duplicate ApeTagItem"""
return ApeTagItem(self.type,
self.read_only,
self.key,
self.data)
def __repr__(self):
return "ApeTagItem({!r},{!r},{!r},{!r})".format(self.type,
self.read_only,
self.key,
self.data)
def raw_info_pair(self):
"""returns a human-readable key/value pair of item data"""
if self.type == 0: # text
if self.read_only:
return (self.key.decode('ascii'),
u"(read only) {}".format(self.data.decode('utf-8')))
else:
return (self.key.decode('ascii'), self.data.decode('utf-8'))
elif self.type == 1: # binary
return (self.key.decode('ascii'),
u"(binary) {:d} bytes".format(len(self.data)))
elif self.type == 2: # external
return (self.key.decode('ascii'),
u"(external) {:d} bytes".format(len(self.data)))
else: # reserved
return (self.key.decode('ascii'),
u"(reserved) {:d} bytes".format(len(self.data)))
if sys.version_info[0] >= 3:
def __str__(self):
return self.__unicode__()
else:
def __str__(self):
return self.data
def __unicode__(self):
return self.data.rstrip(b"\x00").decode('utf-8', 'replace')
def number(self):
"""returns the track/album_number portion of a slashed number pair"""
import re
unicode_value = self.__unicode__()
int_string = re.search(r'\d+', unicode_value)
if int_string is None:
return None
int_value = int(int_string.group(0))
if (int_value == 0) and (u"/" in unicode_value):
total_value = re.search(r'\d+',
unicode_value.split(u"/")[1])
if total_value is not None:
# don't return placeholder 0 value
# when a _total value is present
# but _number value is 0
return None
else:
return int_value
else:
return int_value
def total(self):
"""returns the track/album_total portion of a slashed number pair"""
import re
unicode_value = self.__unicode__()
if u"/" not in unicode_value:
return None
int_string = re.search(r'\d+', unicode_value.split(u"/")[1])
if int_string is not None:
return int(int_string.group(0))
else:
return None
@classmethod
def parse(cls, reader):
"""returns an ApeTagItem parsed from the given BitstreamReader"""
(item_value_length,
read_only,
encoding) = reader.parse(cls.FORMAT)
key = []
c = reader.read_bytes(1)
while c != b"\x00":
key.append(c)
c = reader.read_bytes(1)
value = reader.read_bytes(item_value_length)
return cls(encoding, read_only, b"".join(key), value)
def build(self, writer):
"""writes the ApeTagItem values to the given BitstreamWriter"""
writer.build("{} {:d}b 8u {:d}b".format(self.FORMAT,
len(self.key),
len(self.data)),
(len(self.data),
self.read_only,
self.type,
self.key, 0, self.data))
@classmethod
def binary(cls, key, data):
"""returns an ApeTagItem of binary data
key is an ASCII string, data is a binary string"""
return cls(1, 0, key, data)
@classmethod
def external(cls, key, data):
"""returns an ApeTagItem of external data
key is an ASCII string, data is a binary string"""
return cls(2, 0, key, data)
@classmethod
def string(cls, key, data):
"""returns an ApeTagItem of text data
key is a bytes object, data is a unicode string"""
assert(isinstance(key, bytes))
assert(isinstance(data,
str if (sys.version_info[0] >= 3) else unicode))
return cls(0, 0, key, data.encode('utf-8', 'replace'))
class ApeTag(MetaData):
"""a complete APEv2 tag"""
HEADER_FORMAT = "8b 32u 32u 32u 1u 2u 26p 1u 1u 1u 64p"
ITEM = ApeTagItem
ATTRIBUTE_MAP = {'track_name': b'Title',
'track_number': b'Track',
'track_total': b'Track',
'album_number': b'Media',
'album_total': b'Media',
'album_name': b'Album',
'artist_name': b'Artist',
'performer_name': b'Performer',
'composer_name': b'Composer',
'conductor_name': b'Conductor',
'ISRC': b'ISRC',
'catalog': b'Catalog',
'copyright': b'Copyright',
'publisher': b'Publisher',
'year': b'Year',
'date': b'Record Date',
'comment': b'Comment',
'compilation': b'Compilation'}
INTEGER_ITEMS = (b'Track', b'Media')
BOOLEAN_ITEMS = (b'Compilation',)
def __init__(self, tags, contains_header=True, contains_footer=True):
"""constructs an ApeTag from a list of ApeTagItem objects"""
for tag in tags:
assert(isinstance(tag, ApeTagItem))
MetaData.__setattr__(self, "tags", list(tags))
MetaData.__setattr__(self, "contains_header", contains_header)
MetaData.__setattr__(self, "contains_footer", contains_footer)
def __repr__(self):
return "ApeTag({!r},{!r},{!r})".format(self.tags,
self.contains_header,
self.contains_footer)
def total_size(self):
"""returns the minimum size of the total ApeTag, in bytes"""
size = 0
if self.contains_header:
size += 32
for tag in self.tags:
size += tag.total_size()
if self.contains_footer:
size += 32
return size
def __eq__(self, metadata):
if isinstance(metadata, ApeTag):
if set(self.keys()) != set(metadata.keys()):
return False
for tag in self.tags:
try:
if tag.data != metadata[tag.key].data:
return False
except KeyError:
return False
else:
return True
elif isinstance(metadata, MetaData):
return MetaData.__eq__(self, metadata)
else:
return False
def keys(self):
return [tag.key for tag in self.tags]
def __contains__(self, key):
for tag in self.tags:
if tag.key == key:
return True
else:
return False
def __getitem__(self, key):
assert(isinstance(key, bytes))
for tag in self.tags:
if tag.key == key:
return tag
else:
raise KeyError(key)
def get(self, key, default):
assert(isinstance(key, bytes))
try:
return self[key]
except KeyError:
return default
def __setitem__(self, key, value):
assert(isinstance(key, bytes))
for i in range(len(self.tags)):
if self.tags[i].key == key:
self.tags[i] = value
return
else:
self.tags.append(value)
def index(self, key):
assert(isinstance(key, bytes))
for (i, tag) in enumerate(self.tags):
if tag.key == key:
return i
else:
raise ValueError(key)
def __delitem__(self, key):
assert(isinstance(key, bytes))
new_tags = [tag for tag in self.tags if tag.key != key]
if len(new_tags) < len(self.tags):
self.tags = new_tags
else:
raise KeyError(key)
def __getattr__(self, attr):
if attr in self.ATTRIBUTE_MAP:
try:
if attr in {'track_number', 'album_number'}:
return self[self.ATTRIBUTE_MAP[attr]].number()
elif attr in {'track_total', 'album_total'}:
return self[self.ATTRIBUTE_MAP[attr]].total()
elif attr == 'compilation':
return self[self.ATTRIBUTE_MAP[attr]].__unicode__() == u"1"
else:
return self[self.ATTRIBUTE_MAP[attr]].__unicode__()
except KeyError:
return None
elif attr in MetaData.FIELDS:
return None
else:
return MetaData.__getattribute__(self, attr)
# if an attribute is updated (e.g. self.track_name)
# make sure to update the corresponding dict pair
def __setattr__(self, attr, value):
def swap_number(unicode_value, new_number):
import re
return re.sub(r'\d+', u"{:d}".format(new_number), unicode_value, 1)
def swap_slashed_number(unicode_value, new_number):
if u"/" in unicode_value:
(first, second) = unicode_value.split(u"/", 1)
return u"/".join([first, swap_number(second, new_number)])
else:
return u"/".join([unicode_value, u"{:d}".format(new_number)])
if attr in self.ATTRIBUTE_MAP:
key = self.ATTRIBUTE_MAP[attr]
if value is not None:
if attr in {'track_number', 'album_number'}:
try:
current_value = self[key].__unicode__()
self[key] = self.ITEM.string(
key, swap_number(current_value, value))
except KeyError:
self[key] = self.ITEM.string(
key, __number_pair__(value, None))
elif attr in {'track_total', 'album_total'}:
try:
current_value = self[key].__unicode__()
self[key] = self.ITEM.string(
key, swap_slashed_number(current_value, value))
except KeyError:
self[key] = self.ITEM.string(
key, __number_pair__(None, value))
elif attr == 'compilation':
self[key] = self.ITEM.string(
key, u"{:d}".format(1 if value else 0))
else:
self[key] = self.ITEM.string(key, value)
else:
delattr(self, attr)
else:
MetaData.__setattr__(self, attr, value)
def __delattr__(self, attr):
import re
def zero_number(unicode_value):
return re.sub(r'\d+', u"0", unicode_value, 1)
if attr in self.ATTRIBUTE_MAP:
key = self.ATTRIBUTE_MAP[attr]
if attr in {'track_number', 'album_number'}:
try:
tag = self[key]
if tag.total() is None:
# if no slashed _total field, delete entire tag
del(self[key])
else:
# otherwise replace initial portion with 0
self[key] = self.ITEM.string(
key, zero_number(tag.__unicode__()))
except KeyError:
# no tag to delete
pass
elif attr in {'track_total', 'album_total'}:
try:
tag = self[key]
if tag.total() is not None:
if tag.number() is not None:
self[key] = self.ITEM.string(
key,
tag.__unicode__().split(u"/", 1)[0].rstrip())
else:
del(self[key])
else:
# no total portion, so nothing to do
pass
except KeyError:
# no tag to delete portion of
pass
else:
try:
del(self[key])
except KeyError:
pass
elif attr in MetaData.FIELDS:
pass
else:
MetaData.__delattr__(self, attr)
@classmethod
def converted(cls, metadata):
"""converts a MetaData object to an ApeTag object"""
if metadata is None:
return None
elif isinstance(metadata, ApeTag):
return ApeTag([tag.copy() for tag in metadata.tags],
contains_header=metadata.contains_header,
contains_footer=metadata.contains_footer)
else:
tags = cls([])
for (field, value) in metadata.filled_fields():
if field in cls.ATTRIBUTE_MAP.keys():
setattr(tags, field, value)
for image in metadata.images():
tags.add_image(image)
return tags
def raw_info(self):
"""returns the ApeTag as a human-readable unicode string"""
from os import linesep
from audiotools import output_table
# align tag values on the "=" sign
table = output_table()
for tag in self.tags:
row = table.row()
(key, value) = tag.raw_info_pair()
row.add_column(key, "right")
row.add_column(u" = ")
row.add_column(value)
return (u"APEv2:" + linesep + linesep.join(table.format()))
@classmethod
def supports_images(cls):
"""returns True"""
return True
def __parse_image__(self, key, type):
from audiotools import Image
from io import BytesIO
data = BytesIO(self[key].data)
description = []
c = data.read(1)
while c != b'\x00':
description.append(c)
c = data.read(1)
return Image.new(data.read(),
b"".join(description).decode('utf-8', 'replace'),
type)
def add_image(self, image):
"""embeds an Image object in this metadata"""
from audiotools import FRONT_COVER, BACK_COVER
if image.type == FRONT_COVER:
self[b'Cover Art (front)'] = self.ITEM.binary(
b'Cover Art (front)',
image.description.encode('utf-8', 'replace') +
b"\x00" +
image.data)
elif image.type == BACK_COVER:
self[b'Cover Art (back)'] = self.ITEM.binary(
b'Cover Art (back)',
image.description.encode('utf-8', 'replace') +
b"\x00" +
image.data)
def delete_image(self, image):
"""deletes an Image object from this metadata"""
if (image.type == 0) and b'Cover Art (front)' in self.keys():
del(self[b'Cover Art (front)'])
elif (image.type == 1) and b'Cover Art (back)' in self.keys():
del(self[b'Cover Art (back)'])
def images(self):
"""returns a list of embedded Image objects"""
from audiotools import FRONT_COVER, BACK_COVER
# APEv2 supports only one value per key
# so a single front and back cover are all that is possible
img = []
if b'Cover Art (front)' in self.keys():
img.append(self.__parse_image__(b'Cover Art (front)',
FRONT_COVER))
if b'Cover Art (back)' in self.keys():
img.append(self.__parse_image__(b'Cover Art (back)',
BACK_COVER))
return img
@classmethod
def read(cls, apefile):
"""returns an ApeTag object from an APEv2 tagged file object
may return None if the file object has no tag"""
from audiotools.bitstream import BitstreamReader, parse
apefile.seek(-32, 2)
tag_footer = apefile.read(32)
if len(tag_footer) < 32:
# not enough bytes for an ApeV2 tag
return None
(preamble,
version,
tag_size,
item_count,
read_only,
item_encoding,
is_header,
no_footer,
has_header) = parse(cls.HEADER_FORMAT, True, tag_footer)
if (preamble != b"APETAGEX") or (version != 2000):
return None
apefile.seek(-tag_size, 2)
reader = BitstreamReader(apefile, True)
return cls([ApeTagItem.parse(reader) for i in range(item_count)],
contains_header=has_header,
contains_footer=True)
def build(self, writer):
"""outputs an APEv2 tag to BitstreamWriter"""
tag_size = sum(tag.total_size() for tag in self.tags) + 32
if self.contains_header:
writer.build(ApeTag.HEADER_FORMAT,
(b"APETAGEX", # preamble
2000, # version
tag_size, # tag size
len(self.tags), # item count
0, # read only
0, # encoding
1, # is header
not self.contains_footer, # no footer
self.contains_header)) # has header
for tag in self.tags:
tag.build(writer)
if self.contains_footer:
writer.build(ApeTag.HEADER_FORMAT,
(b"APETAGEX", # preamble
2000, # version
tag_size, # tag size
len(self.tags), # item count
0, # read only
0, # encoding
0, # is header
not self.contains_footer, # no footer
self.contains_header)) # has header
def clean(self):
import re
from audiotools.text import (CLEAN_REMOVE_DUPLICATE_TAG,
CLEAN_REMOVE_TRAILING_WHITESPACE,
CLEAN_REMOVE_LEADING_WHITESPACE,
CLEAN_FIX_TAG_FORMATTING,
CLEAN_REMOVE_EMPTY_TAG)
fixes_performed = []
used_tags = set()
tag_items = []
for tag in self.tags:
if tag.key.upper() in used_tags:
fixes_performed.append(
CLEAN_REMOVE_DUPLICATE_TAG.format(tag.key.decode('ascii')))
elif tag.type == 0:
used_tags.add(tag.key.upper())
text = tag.__unicode__()
# check trailing whitespace
fix1 = text.rstrip()
if fix1 != text:
fixes_performed.append(
CLEAN_REMOVE_TRAILING_WHITESPACE.format(
tag.key.decode('ascii')))
# check leading whitespace
fix2 = fix1.lstrip()
if fix2 != fix1:
fixes_performed.append(
CLEAN_REMOVE_LEADING_WHITESPACE.format(
tag.key.decode('ascii')))
if tag.key in self.INTEGER_ITEMS:
if u"/" in fix2:
# item is a slashed field of some sort
(current, total) = fix2.split(u"/", 1)
current_int = re.search(r'\d+', current)
total_int = re.search(r'\d+', total)
if (current_int is None) and (total_int is None):
# neither side contains an integer value
# so ignore it altogether
fix3 = fix2
elif ((current_int is not None) and
(total_int is None)):
fix3 = u"{:d}".format(int(current_int.group(0)))
elif ((current_int is None) and
(total_int is not None)):
fix3 = u"{:d}/{:d}".format(
0, int(total_int.group(0)))
else:
# both sides contain an int
fix3 = u"{:d}/{:d}".format(
int(current_int.group(0)),
int(total_int.group(0)))
else:
# item contains no slash
current_int = re.search(r'\d+', fix2)
if current_int is not None:
# item contains an integer
fix3 = u"{:d}".format(int(current_int.group(0)))
else:
# item contains no integer value so ignore it
# (although 'Track' should only contain
# integers, 'Media' may contain strings
# so it may be best to simply ignore that case)
fix3 = fix2
if fix3 != fix2:
fixes_performed.append(
CLEAN_FIX_TAG_FORMATTING.format(
tag.key.decode('ascii')))
else:
fix3 = fix2
if len(fix3) > 0:
tag_items.append(ApeTagItem.string(tag.key, fix3))
else:
fixes_performed.append(
CLEAN_REMOVE_EMPTY_TAG.format(tag.key.decode('ascii')))
else:
used_tags.add(tag.key.upper())
tag_items.append(tag)
return (self.__class__(tag_items,
self.contains_header,
self.contains_footer),
fixes_performed)
def intersection(self, metadata):
"""given a MetaData-compatible object,
returns a new MetaData object which contains
all the matching fields and images of this object and 'metadata'
"""
if type(metadata) is ApeTag:
matching_keys = {key for key in
set(self.keys()) & set(metadata.keys())
if self[key] == metadata[key]}
return ApeTag(
[tag.copy() for tag in self.tags
if tag.key in matching_keys],
contains_header=
self.contains_header or metadata.contains_header,
contains_footer=
self.contains_footer or metadata.contains_footer)
else:
return MetaData.intersection(self, metadata)
class ApeTaggedAudio(object):
"""a class for handling audio formats with APEv2 tags
this class presumes there will be a filename attribute which
can be opened and checked for tags, or written if necessary"""
@classmethod
def supports_metadata(cls):
"""returns True if this audio type supports MetaData"""
return True
def get_metadata(self):
"""returns an ApeTag object, or None
raises IOError if unable to read the file"""
with open(self.filename, "rb") as f:
return ApeTag.read(f)
def update_metadata(self, metadata):
"""takes this track's current MetaData object
as returned by get_metadata() and sets this track's metadata
with any fields updated in that object
raises IOError if unable to write the file
"""
from audiotools.bitstream import (parse,
BitstreamWriter,
BitstreamReader)
from audiotools import transfer_data
if metadata is None:
return
elif not isinstance(metadata, ApeTag):
from audiotools.text import ERR_FOREIGN_METADATA
raise ValueError(ERR_FOREIGN_METADATA)
elif len(metadata.keys()) == 0:
# wipe out entire block of metadata
from os import access, R_OK, W_OK
if not access(self.filename, R_OK | W_OK):
raise IOError(self.filename)
with open(self.filename, "rb") as f:
f.seek(-32, 2)
(preamble,
version,
tag_size,
item_count,
read_only,
item_encoding,
is_header,
no_footer,
has_header) = BitstreamReader(f, True).parse(
ApeTag.HEADER_FORMAT)
if (preamble == b'APETAGEX') and (version == 2000):
from audiotools import TemporaryFile, transfer_data
from os.path import getsize
# there's existing metadata to delete
# so rewrite file without trailing metadata tag
if has_header:
old_tag_size = 32 + tag_size
else:
old_tag_size = tag_size
# copy everything but the last "old_tag_size" bytes
# from existing file to rewritten file
new_apev2 = TemporaryFile(self.filename)
old_apev2 = open(self.filename, "rb")
limited_transfer_data(
old_apev2.read,
new_apev2.write,
getsize(self.filename) - old_tag_size)
old_apev2.close()
new_apev2.close()
else:
# re-set metadata block at end of file
f = open(self.filename, "r+b")
f.seek(-32, 2)
tag_footer = f.read(32)
if len(tag_footer) < 32:
# no existing ApeTag can fit, so append fresh tag
f.close()
with BitstreamWriter(open(self.filename, "ab"),
True) as writer:
metadata.build(writer)
return
(preamble,
version,
tag_size,
item_count,
read_only,
item_encoding,
is_header,
no_footer,
has_header) = parse(ApeTag.HEADER_FORMAT, True, tag_footer)
if (preamble == b'APETAGEX') and (version == 2000):
if has_header:
old_tag_size = 32 + tag_size
else:
old_tag_size = tag_size
if metadata.total_size() >= old_tag_size:
# metadata has grown
# so append it to existing file
f.seek(-old_tag_size, 2)
writer = BitstreamWriter(f, True)
metadata.build(writer)
writer.close()
else:
f.close()
# metadata has shrunk
# so rewrite file with smaller metadata
from audiotools import TemporaryFile
from os.path import getsize
# copy everything but the last "old_tag_size" bytes
# from existing file to rewritten file
new_apev2 = TemporaryFile(self.filename)
with open(self.filename, "rb") as old_apev2:
limited_transfer_data(
old_apev2.read,
new_apev2.write,
getsize(self.filename) - old_tag_size)
# append new tag to rewritten file
with BitstreamWriter(new_apev2, True) as writer:
metadata.build(writer)
# closing writer closes new_apev2 also
else:
# no existing metadata, so simply append a fresh tag
f.close()
with BitstreamWriter(open(self.filename, "ab"),
True) as writer:
metadata.build(writer)
def set_metadata(self, metadata):
"""takes a MetaData object and sets this track's metadata
raises IOError if unable to write the file"""
from audiotools.bitstream import BitstreamWriter
if metadata is None:
return self.delete_metadata()
new_metadata = ApeTag.converted(metadata)
old_metadata = self.get_metadata()
if old_metadata is not None:
# transfer ReplayGain tags from old metadata to new metadata
for tag in [b"replaygain_track_gain",
b"replaygain_track_peak",
b"replaygain_album_gain",
b"replaygain_album_peak"]:
try:
# if old_metadata has tag, shift it over
new_metadata[tag] = old_metadata[tag]
except KeyError:
try:
# otherwise, if new_metadata has tag, delete it
del(new_metadata[tag])
except KeyError:
# if neither has tag, ignore it
continue
# transfer Cuesheet from old metadata to new metadata
if b"Cuesheet" in old_metadata:
new_metadata[b"Cuesheet"] = old_metadata[b"Cuesheet"]
elif b"Cuesheet" in new_metadata:
del(new_metadata[b"Cuesheet"])
self.update_metadata(new_metadata)
else:
# delete ReplayGain tags from new metadata
for tag in [b"replaygain_track_gain",
b"replaygain_track_peak",
b"replaygain_album_gain",
b"replaygain_album_peak"]:
try:
del(new_metadata[tag])
except KeyError:
continue
# delete Cuesheet from new metadata
if b"Cuesheet" in new_metadata:
del(new_metadata[b"Cuesheet"])
if len(new_metadata.keys()) > 0:
# no existing metadata, so simply append a fresh tag
with BitstreamWriter(open(self.filename, "ab"),
True) as writer:
new_metadata.build(writer)
def delete_metadata(self):
"""deletes the track's MetaData
raises IOError if unable to write the file"""
if ((self.get_replay_gain() is not None) or
(self.get_cuesheet() is not None)):
# non-textual metadata is present and needs preserving
self.set_metadata(MetaData())
else:
# no non-textual metadata, so wipe out the entire block
from os import access, R_OK, W_OK
from audiotools.bitstream import BitstreamReader
from audiotools import transfer_data
if not access(self.filename, R_OK | W_OK):
raise IOError(self.filename)
with open(self.filename, "rb") as f:
f.seek(-32, 2)
(preamble,
version,
tag_size,
item_count,
read_only,
item_encoding,
is_header,
no_footer,
has_header) = BitstreamReader(f, True).parse(
ApeTag.HEADER_FORMAT)
if (preamble == b'APETAGEX') and (version == 2000):
from audiotools import TemporaryFile
from os.path import getsize
# there's existing metadata to delete
# so rewrite file without trailing metadata tag
if has_header:
old_tag_size = 32 + tag_size
else:
old_tag_size = tag_size
# copy everything but the last "old_tag_size" bytes
# from existing file to rewritten file
new_apev2 = TemporaryFile(self.filename)
old_apev2 = open(self.filename, "rb")
limited_transfer_data(
old_apev2.read,
new_apev2.write,
getsize(self.filename) - old_tag_size)
old_apev2.close()
new_apev2.close()
class ApeGainedAudio(object):
@classmethod
def supports_replay_gain(cls):
"""returns True if this class supports ReplayGain"""
return True
def get_replay_gain(self):
"""returns a ReplayGain object of our ReplayGain values
returns None if we have no values"""
from audiotools import ReplayGain
metadata = self.get_metadata()
if metadata is None:
return None
if ({b'replaygain_track_gain', b'replaygain_track_peak',
b'replaygain_album_gain', b'replaygain_album_peak'}.issubset(
metadata.keys())): # we have ReplayGain data
try:
return ReplayGain(
metadata[
b'replaygain_track_gain'].__unicode__()[0:-len(" dB")],
metadata[
b'replaygain_track_peak'].__unicode__(),
metadata[
b'replaygain_album_gain'].__unicode__()[0:-len(" dB")],
metadata[
b'replaygain_album_peak'].__unicode__())
except ValueError:
return None
else:
return None
def set_replay_gain(self, replaygain):
"""given a ReplayGain object, sets the track's gain to those values
may raise IOError if unable to read or write the file"""
if replaygain is None:
return self.delete_replay_gain()
metadata = self.get_metadata()
if metadata is None:
metadata = ApeTag([])
metadata[b"replaygain_track_gain"] = ApeTagItem.string(
b"replaygain_track_gain",
u"{:+.2f} dB".format(replaygain.track_gain))
metadata[b"replaygain_track_peak"] = ApeTagItem.string(
b"replaygain_track_peak",
u"{:.6f}".format(replaygain.track_peak))
metadata[b"replaygain_album_gain"] = ApeTagItem.string(
b"replaygain_album_gain",
u"{:+.2f} dB".format(replaygain.album_gain))
metadata[b"replaygain_album_peak"] = ApeTagItem.string(
b"replaygain_album_peak",
u"{:.6f}".format(replaygain.album_peak))
self.update_metadata(metadata)
def delete_replay_gain(self):
"""removes ReplayGain values from file, if any
may raise IOError if unable to modify the file"""
metadata = self.get_metadata()
if metadata is not None:
for field in [b"replaygain_track_gain",
b"replaygain_track_peak",
b"replaygain_album_gain",
b"replaygain_album_peak"]:
try:
del(metadata[field])
except KeyError:
pass
self.update_metadata(metadata)
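# --- Illustrative usage sketch (not part of the original module) --------------
# A minimal example of reading an APEv2 tag directly from a file object; the
# path "song.ape" is a hypothetical placeholder.
if __name__ == "__main__":
    with open("song.ape", "rb") as f:
        tag = ApeTag.read(f)
    if tag is None:
        print("no APEv2 tag found")
    else:
        print(tag.raw_info())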
|
tuffy/python-audio-tools
|
audiotools/ape.py
|
Python
|
gpl-2.0
| 39,279
|
[
"Brian"
] |
1381100cf4fa8343f4ce3e551fc38e96c51a12d27bb4652d84480807187a49ae
|
"""
Copyright (C) 2014, Jaguar Land Rover
This program is licensed under the terms and conditions of the
Mozilla Public License, version 2.0. The full text of the
Mozilla Public License is at https://www.mozilla.org/MPL/2.0/
Maintainer: Rudolf Streif (rstreif@jaguarlandrover.com)
"""
import datetime, pytz
from django.utils import timezone
from django.db import models
from django.db.models import signals
from django.core.exceptions import ValidationError
from django.utils.dateparse import parse_datetime
from vehicles.models import Vehicle
class Location(models.Model):
"""
Location description
Tracks Vehicle locations
"""
loc_vehicle = models.ForeignKey(Vehicle, verbose_name='Vehicle')
loc_time = models.DateTimeField('Time')
loc_latitude = models.FloatField('Latitude [deg]')
loc_longitude = models.FloatField('Longitude [deg]')
loc_altitude = models.FloatField('Altitude [m]', default=0)
loc_speed = models.FloatField('Speed [m/s]', default=0)
loc_climb = models.FloatField('Climb [m/s]', default=0)
loc_track = models.FloatField('Track [deg]', default=0)
loc_odometer = models.FloatField('Odometer [km]', default=0)
@property
def geom(self):
geom = {'type': 'Point', 'coordinates': [self.loc_longitude,self.loc_latitude]}
return (geom)
def __unicode__(self):
"""
Returns the Location string.
"""
return self.to_string()
def to_string(self):
"""
Returns the Location string composed of
        <vehicle> on <time> at (<longitude>, <latitude>) going <speed> km/h.
"""
return (self.loc_vehicle.get_name() +
" on " +
unicode(self.loc_time) +
" at (" +
str(self.loc_longitude) + ", " + str(self.loc_latitude) +
")"
" going " +
str(self.loc_speed * 3.6) + " km/h"
)
def to_json(self):
"""
Returns the Location JSON formatted string.
"""
return ("{\"vin\":\"" + self.loc_vehicle.get_vin() +
"\",\"timestamp\":\"" + unicode(self.loc_time) +
"\",\"data\":[{\"channel\":\"location\",\"value\":{" +
"\"lon\":\"" + str(self.loc_longitude) +
"\",\"lat\":\"" + str(self.loc_latitude) +
"\"}},{\"channel\":\"speed\",\"value\":\"" + str(self.loc_speed * 3.6) +
"\"}]}"
)
def get_vehicle_name(self):
"""
Returns the name of the vehicle associated with this Location
"""
return self.loc_vehicle.get_name()
class Waypoints(models.Model):
"""
Waypoints description
This class dynamically creates a GEOJson field containing all the locations of the
vehicle. This is then used to draw the waypoints line on the map.
"""
wp_vehicle = models.OneToOneField(Vehicle, primary_key=True, verbose_name='Vehicle')
from_time = datetime.datetime(1970,1,1,0,0,0,tzinfo=pytz.UTC)
to_time = datetime.datetime(9999,12,31,23,59,59,tzinfo=pytz.UTC)
locations = None
@classmethod
def set_time_utc(cls, from_time, to_time):
cls.from_time = pytz.utc.localize(parse_datetime(from_time))
cls.to_time = pytz.utc.localize(parse_datetime(to_time))
@property
def geom(self):
"""
Returns the waypoints as LineString object for direct use with GeoJSON.
"""
geom = {'type': 'LineString', 'coordinates': [list(e) for e in self.select_locations().values_list('loc_longitude','loc_latitude','loc_altitude','loc_time','loc_speed','loc_climb','loc_odometer')]}
return (geom)
@property
def start_location(self):
"""
Returns the start location or first waypoint of the list.
Note: We need to flip latitude and longitude as Leaflet expects latitude first.
"""
locations = self.select_locations()
if len(locations) > 0:
return list(locations.values_list('loc_latitude','loc_longitude','loc_time','loc_speed'))[0]
@property
def end_location(self):
"""
        Returns the end location or last waypoint of the list.
Note: We need to flip latitude and longitude as Leaflet expects latitude first.
"""
locations = self.select_locations()
if len(locations) > 0:
return list(locations.values_list('loc_latitude','loc_longitude','loc_time','loc_speed'))[-1]
@property
def location_info(self):
"""
Returns additional information time, speed, odometer for each waypoint
"""
return list(self.select_locations().values_list('loc_time', 'loc_speed', 'loc_odometer'))
@property
def vehicle_info(self):
return [self.wp_vehicle.veh_name, self.wp_vehicle.get_picture()]
def select_locations(self):
"""
        Returns a QuerySet with all Location records of this vehicle within the
        selected time window, ordered by time.
        Note: GeoJSON expects [long, lat] while Leaflet uses [lat, long]
"""
if not self.locations:
self.locations = Location.objects.filter(loc_vehicle=self.wp_vehicle, loc_time__range=(Waypoints.from_time, Waypoints.to_time)).order_by('loc_time')
return self.locations
class Position(models.Model):
"""
Position description
This class dynamically creates a GEOJson field containing the position of a vehicle
according to at_time. The date/time of the actual position may be earlier than at_time
but never later.
"""
wp_vehicle = models.OneToOneField(Vehicle, primary_key=True, verbose_name='Vehicle')
at_time = datetime.datetime(9999,12,31,23,59,59,tzinfo=pytz.UTC)
location = None
@classmethod
def set_time_utc(cls, at_time):
cls.at_time = pytz.utc.localize(parse_datetime(at_time))
@property
def geom(self):
"""
Returns the position as a Point object for direct use with GeoJSON.
"""
position = self.select_location()
if position:
geom = {'type': 'Point', 'coordinates': [position[0].loc_longitude,position[0].loc_latitude]}
else:
geom = {'type': 'Point', 'coordinates': [0,51.48]}
#geom = {'type': 'Point', 'coordinates': []}
return (geom)
@property
def vehicle_info(self):
position = self.select_location()
print 'position: ', position
if position:
print self.wp_vehicle.veh_name, position[0].loc_time, position[0].loc_speed
return [self.wp_vehicle.veh_name, self.wp_vehicle.get_picture(), position[0].loc_time, position[0].loc_speed]
else:
return [self.wp_vehicle.veh_name, self.wp_vehicle.get_picture()]
def select_location(self):
"""
        Returns a QuerySet with the Location records of this vehicle recorded
        before Position.at_time, ordered from newest to oldest (the first entry
        is the position closest to, but not later than, at_time).
"""
if not self.location:
self.location = Location.objects.filter(loc_vehicle=self.wp_vehicle, loc_time__lt=(Position.at_time)).order_by('-loc_time')
return self.location
def createVehicleDependentRecords(sender, instance, created, **kwargs):
"""
Automatically create a record in the database for all dependent one-to-one tables
when a Vehicle record is created.
"""
if created:
Waypoints.objects.create(wp_vehicle=instance)
Position.objects.create(wp_vehicle=instance)
signals.post_save.connect(createVehicleDependentRecords, sender=Vehicle, weak=False, dispatch_uid='models.createVehicleDependentRecords')
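# Illustrative usage sketch (not part of the original module): how the Waypoints
# GeoJSON geometry is typically consumed, assuming a Vehicle instance 'vehicle'
# already exists and has Location records in the chosen window (all values below
# are hypothetical placeholders).
#
#     Waypoints.set_time_utc("2014-01-01T00:00:00", "2014-12-31T23:59:59")
#     waypoints = Waypoints.objects.get(wp_vehicle=vehicle)
#     feature = {"type": "Feature", "geometry": waypoints.geom,
#                "properties": {"vehicle": waypoints.vehicle_info[0]}}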
|
klooer/rvi_backend
|
web/tracking/models.py
|
Python
|
mpl-2.0
| 7,824
|
[
"Jaguar"
] |
59a35cb2b6369925ceb6a31247ceaca6a4e177e284d7a0376ac433584e245fbe
|
# Copyright 2008-2010 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Multiple sequence alignment input/output as alignment objects.
The Bio.AlignIO interface is deliberately very similar to Bio.SeqIO, and in
fact the two are connected internally. Both modules use the same set of file
format names (lower case strings). From the user's perspective, you can read
in a PHYLIP file containing one or more alignments using Bio.AlignIO, or you
can read in the sequences within these alignments using Bio.SeqIO.
Bio.AlignIO is also documented at U{http://biopython.org/wiki/AlignIO} and by
a whole chapter in our tutorial:
- U{http://biopython.org/DIST/docs/tutorial/Tutorial.html}
- U{http://biopython.org/DIST/docs/tutorial/Tutorial.pdf}
Input
=====
For the typical special case when your file or handle contains one and only
one alignment, use the function Bio.AlignIO.read(). This takes an input file
handle (or in recent versions of Biopython a filename as a string), format
string and optional number of sequences per alignment. It will return a single
MultipleSeqAlignment object (or raise an exception if there isn't just one
alignment):
>>> from Bio import AlignIO
>>> align = AlignIO.read("Phylip/interlaced.phy", "phylip")
>>> print align
SingleLetterAlphabet() alignment with 3 rows and 384 columns
-----MKVILLFVLAVFTVFVSS---------------RGIPPE...I-- CYS1_DICDI
MAHARVLLLALAVLATAAVAVASSSSFADSNPIRPVTDRAASTL...VAA ALEU_HORVU
------MWATLPLLCAGAWLLGV--------PVCGAAELSVNSL...PLV CATH_HUMAN
For the general case, when the handle could contain any number of alignments,
use the function Bio.AlignIO.parse(...) which takes the same arguments, but
returns an iterator giving MultipleSeqAlignment objects (typically used in a
for loop). If you want random access to the alignments by number, turn this
into a list:
>>> from Bio import AlignIO
>>> alignments = list(AlignIO.parse("Emboss/needle.txt", "emboss"))
>>> print alignments[2]
SingleLetterAlphabet() alignment with 2 rows and 120 columns
-KILIVDDQYGIRILLNEVFNKEGYQTFQAANGLQALDIVTKER...--- ref_rec
LHIVVVDDDPGTCVYIESVFAELGHTCKSFVRPEAAEEYILTHP...HKE gi|94967506|receiver
Most alignment file formats can be concatenated so as to hold as many
different multiple sequence alignments as possible. One common example
is the output of the tool seqboot in the PHYLIP suite. Sometimes there
can be a file header and footer, as seen in the EMBOSS alignment output.
Output
======
Use the function Bio.AlignIO.write(...), which takes a complete set of
Alignment objects (either as a list, or an iterator), an output file handle
(or filename in recent versions of Biopython) and of course the file format::
from Bio import AlignIO
alignments = ...
    count = AlignIO.write(alignments, "example.faa", "fasta")
If using a handle make sure to close it to flush the data to the disk::
from Bio import AlignIO
alignments = ...
handle = open("example.faa", "w")
    count = AlignIO.write(alignments, handle, "fasta")
handle.close()
In general, you are expected to call this function once (with all your
alignments) and then close the file handle. However, for file formats
like PHYLIP where multiple alignments are stored sequentially (with no file
header and footer), then multiple calls to the write function should work as
expected when using handles.
If you are using a filename, the repeated calls to the write functions will
overwrite the existing file each time.
Conversion
==========
The Bio.AlignIO.convert(...) function allows an easy interface for simple
alignment file format conversions. Additionally, it may use file format
specific optimisations so this should be the fastest way too.
In general however, you can combine the Bio.AlignIO.parse(...) function with
the Bio.AlignIO.write(...) function for sequence file conversion. Using
generator expressions provides a memory efficient way to perform filtering or
other extra operations as part of the process.
File Formats
============
When specifying the file format, use lowercase strings. The same format
names are also used in Bio.SeqIO and include the following:
 - clustal - Output from Clustal W or X, see also the module Bio.Clustalw
which can be used to run the command line tool from Biopython.
- emboss - EMBOSS tools' "pairs" and "simple" alignment formats.
- fasta - The generic sequence file format where each record starts with
an identifier line starting with a ">" character, followed by
lines of sequence.
 - fasta-m10 - For the pairwise alignments output by Bill Pearson's FASTA
tools when used with the -m 10 command line option for machine
readable output.
- ig - The IntelliGenetics file format, apparently the same as the
MASE alignment format.
- nexus - Output from NEXUS, see also the module Bio.Nexus which can also
read any phylogenetic trees in these files.
 - phylip - Used by the PHYLIP tools.
- stockholm - A richly annotated alignment file format used by PFAM.
Note that while Bio.AlignIO can read all the above file formats, it cannot
write to all of them.
You can also use any file format supported by Bio.SeqIO, such as "fasta" or
"ig" (which are listed above), PROVIDED the sequences in your file are all the
same length.
"""
__docformat__ = "epytext en" #not just plaintext
#TODO
# - define policy on reading aligned sequences with gaps in
# (e.g. - and . characters) including how the alphabet interacts
#
# - Can we build the to_alignment(...) functionality
# into the generic Alignment class instead?
#
# - How best to handle unique/non unique record.id when writing.
# For most file formats reading such files is fine; The stockholm
# parser would fail.
#
# - MSF multiple alignment format, aka GCG, aka PileUp format (*.msf)
# http://www.bioperl.org/wiki/MSF_multiple_alignment_format
#from cStringIO import StringIO
from StringIO import StringIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
from Bio.Align.Generic import Alignment
from Bio.Alphabet import Alphabet, AlphabetEncoder, _get_base_alphabet
import FastaIO
#Convention for format names is "mainname-subtype" in lower case.
#Please use the same names as BioPerl and EMBOSS where possible.
_FormatToIterator = {#"fasta" is done via Bio.SeqIO
"fasta-m10" : FastaIO.FastaM10Iterator,
}
_FormatToWriter = {#"fasta" is done via Bio.SeqIO
#"emboss" : EmbossIO.EmbossWriter, (unfinished)
}
def write(alignments, handle, format):
"""Write complete set of alignments to a file.
Arguments:
- alignments - A list (or iterator) of Alignment objects (ideally the
new MultipleSeqAlignment objects), or (if using Biopython
1.54 or later) a single alignment object.
- handle - File handle object to write to, or filename as string
(note older versions of Biopython only took a handle).
- format - lower case string describing the file format to write.
You should close the handle after calling this function.
Returns the number of alignments written (as an integer).
"""
from Bio import SeqIO
#Try and give helpful error messages:
if not isinstance(format, basestring):
raise TypeError("Need a string for the file format (lower case)")
if not format:
raise ValueError("Format required (lower case string)")
if format != format.lower():
raise ValueError("Format string '%s' should be lower case" % format)
if isinstance(alignments, Alignment):
        #This raised an exception in older versions of Biopython
alignments = [alignments]
if isinstance(handle, basestring):
handle = open(handle, "w")
handle_close = True
else:
handle_close = False
#Map the file format to a writer class
    if format in _FormatToWriter:
writer_class = _FormatToWriter[format]
count = writer_class(handle).write_file(alignments)
elif format in SeqIO._FormatToWriter:
#Exploit the existing SeqIO parser to the dirty work!
#TODO - Can we make one call to SeqIO.write() and count the alignments?
count = 0
for alignment in alignments:
if not isinstance(alignment, Alignment):
raise TypeError(\
"Expect a list or iterator of Alignment objects.")
SeqIO.write(alignment, handle, format)
count += 1
elif format in _FormatToIterator or format in SeqIO._FormatToIterator:
raise ValueError("Reading format '%s' is supported, but not writing" \
% format)
else:
raise ValueError("Unknown format '%s'" % format)
assert isinstance(count, int), "Internal error - the underlying %s " \
"writer should have returned the alignment count, not %s" \
% (format, repr(count))
if handle_close:
handle.close()
return count
#This is a generator function!
def _SeqIO_to_alignment_iterator(handle, format, alphabet=None, seq_count=None):
"""Uses Bio.SeqIO to create an MultipleSeqAlignment iterator (PRIVATE).
Arguments:
- handle - handle to the file.
- format - string describing the file format.
- alphabet - optional Alphabet object, useful when the sequence type
cannot be automatically inferred from the file itself
(e.g. fasta, phylip, clustal)
- seq_count - Optional integer, number of sequences expected in each
alignment. Recommended for fasta format files.
If count is omitted (default) then all the sequences in the file are
combined into a single MultipleSeqAlignment.
"""
from Bio import SeqIO
assert format in SeqIO._FormatToIterator
if seq_count:
#Use the count to split the records into batches.
seq_record_iterator = SeqIO.parse(handle, format, alphabet)
records = []
for record in seq_record_iterator:
records.append(record)
if len(records) == seq_count:
yield MultipleSeqAlignment(records, alphabet)
records = []
if len(records) > 0:
raise ValueError("Check seq_count argument, not enough sequences?")
else:
#Must assume that there is a single alignment using all
#the SeqRecord objects:
records = list(SeqIO.parse(handle, format, alphabet))
if records:
yield MultipleSeqAlignment(records, alphabet)
raise StopIteration
def _force_alphabet(alignment_iterator, alphabet):
"""Iterate over alignments, over-riding the alphabet (PRIVATE)."""
#Assume the alphabet argument has been pre-validated
given_base_class = _get_base_alphabet(alphabet).__class__
for align in alignment_iterator:
if not isinstance(_get_base_alphabet(align._alphabet),
given_base_class):
raise ValueError("Specified alphabet %s clashes with "\
"that determined from the file, %s" \
% (repr(alphabet), repr(align._alphabet)))
for record in align:
if not isinstance(_get_base_alphabet(record.seq.alphabet),
given_base_class):
raise ValueError("Specified alphabet %s clashes with "\
"that determined from the file, %s" \
% (repr(alphabet), repr(record.seq.alphabet)))
record.seq.alphabet = alphabet
align._alphabet = alphabet
yield align
def parse(handle, format, seq_count=None, alphabet=None):
"""Iterate over an alignment file as MultipleSeqAlignment objects.
Arguments:
- handle - handle to the file, or the filename as a string
      (note older versions of Biopython only took a handle).
- format - string describing the file format.
- alphabet - optional Alphabet object, useful when the sequence type
cannot be automatically inferred from the file itself
(e.g. fasta, phylip, clustal)
- seq_count - Optional integer, number of sequences expected in each
alignment. Recommended for fasta format files.
If you have the file name in a string 'filename', use:
>>> from Bio import AlignIO
>>> filename = "Emboss/needle.txt"
>>> format = "emboss"
>>> for alignment in AlignIO.parse(filename, format):
... print "Alignment of length", alignment.get_alignment_length()
Alignment of length 124
Alignment of length 119
Alignment of length 120
Alignment of length 118
Alignment of length 125
If you have a string 'data' containing the file contents, use:
from Bio import AlignIO
from StringIO import StringIO
my_iterator = AlignIO.parse(StringIO(data), format)
Use the Bio.AlignIO.read() function when you expect a single record only.
"""
from Bio import SeqIO
handle_close = False
if isinstance(handle, basestring):
handle = open(handle, "rU")
#TODO - On Python 2.5+ use with statement to close handle
handle_close = True
#Try and give helpful error messages:
if not isinstance(format, basestring):
raise TypeError("Need a string for the file format (lower case)")
if not format:
raise ValueError("Format required (lower case string)")
if format != format.lower():
raise ValueError("Format string '%s' should be lower case" % format)
if alphabet is not None and not (isinstance(alphabet, Alphabet) or \
isinstance(alphabet, AlphabetEncoder)):
raise ValueError("Invalid alphabet, %s" % repr(alphabet))
if seq_count is not None and not isinstance(seq_count, int):
raise TypeError("Need integer for seq_count (sequences per alignment)")
#Map the file format to a sequence iterator:
if format in _FormatToIterator:
iterator_generator = _FormatToIterator[format]
if alphabet is None :
i = iterator_generator(handle, seq_count)
else:
try:
#Initially assume the optional alphabet argument is supported
i = iterator_generator(handle, seq_count, alphabet=alphabet)
except TypeError:
#It isn't supported.
i = _force_alphabet(iterator_generator(handle, seq_count),
alphabet)
elif format in SeqIO._FormatToIterator:
#Exploit the existing SeqIO parser to the dirty work!
i = _SeqIO_to_alignment_iterator(handle, format,
alphabet=alphabet,
seq_count=seq_count)
else:
raise ValueError("Unknown format '%s'" % format)
#This imposes some overhead... wait until we drop Python 2.4 to fix it
for a in i:
yield a
if handle_close:
handle.close()
def read(handle, format, seq_count=None, alphabet=None):
"""Turns an alignment file into a single MultipleSeqAlignment object.
Arguments:
- handle - handle to the file, or the filename as a string
      (note older versions of Biopython only took a handle).
- format - string describing the file format.
- alphabet - optional Alphabet object, useful when the sequence type
cannot be automatically inferred from the file itself
(e.g. fasta, phylip, clustal)
- seq_count - Optional integer, number of sequences expected in each
alignment. Recommended for fasta format files.
If the handle contains no alignments, or more than one alignment,
an exception is raised. For example, using a PFAM/Stockholm file
containing one alignment:
>>> from Bio import AlignIO
>>> filename = "Clustalw/protein.aln"
>>> format = "clustal"
>>> alignment = AlignIO.read(filename, format)
>>> print "Alignment of length", alignment.get_alignment_length()
Alignment of length 411
If however you want the first alignment from a file containing
multiple alignments this function would raise an exception.
>>> from Bio import AlignIO
>>> filename = "Emboss/needle.txt"
>>> format = "emboss"
>>> alignment = AlignIO.read(filename, format)
Traceback (most recent call last):
...
ValueError: More than one record found in handle
Instead use:
>>> from Bio import AlignIO
>>> filename = "Emboss/needle.txt"
>>> format = "emboss"
>>> alignment = AlignIO.parse(filename, format).next()
>>> print "First alignment has length", alignment.get_alignment_length()
First alignment has length 124
You must use the Bio.AlignIO.parse() function if you want to read multiple
records from the handle.
"""
iterator = parse(handle, format, seq_count, alphabet)
try:
first = iterator.next()
except StopIteration:
first = None
if first is None:
raise ValueError("No records found in handle")
try:
second = iterator.next()
except StopIteration:
second = None
if second is not None:
raise ValueError("More than one record found in handle")
if seq_count:
assert len(first)==seq_count
return first
def convert(in_file, in_format, out_file, out_format, alphabet=None):
"""Convert between two alignment files, returns number of alignments.
- in_file - an input handle or filename
- in_format - input file format, lower case string
     - out_file - an output handle or filename
     - out_format - output file format, lower case string
- alphabet - optional alphabet to assume
NOTE - If you provide an output filename, it will be opened which will
    overwrite any existing file without warning. This may happen even if the
conversion is aborted (e.g. an invalid out_format name is given).
"""
#TODO - Add optimised versions of important conversions
#For now just off load the work to SeqIO parse/write
if isinstance(in_file, basestring):
in_handle = open(in_file, "rU")
in_close = True
else:
in_handle = in_file
in_close = False
#This will check the arguments and issue error messages,
alignments = parse(in_handle, in_format, None, alphabet)
#Don't open the output file until we've checked the input is OK:
if isinstance(out_file, basestring):
out_handle = open(out_file, "w")
out_close = True
else:
out_handle = out_file
out_close = False
#This will check the arguments and issue error messages,
#after we have opened the file which is a shame.
count = write(alignments, out_handle, out_format)
#Must now close any handles we opened
if in_close:
in_handle.close()
if out_close:
out_handle.close()
return count
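#Illustrative usage sketch (not part of the original module); the file names
#below are hypothetical placeholders. Convert a Clustal alignment to FASTA:
#
#    from Bio import AlignIO
#    count = AlignIO.convert("example.aln", "clustal", "example.faa", "fasta")
#    print "Converted %i alignments" % count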
def _test():
"""Run the Bio.AlignIO module's doctests.
This will try and locate the unit tests directory, and run the doctests
from there in order that the relative paths used in the examples work.
"""
import doctest
import os
if os.path.isdir(os.path.join("..", "..", "Tests")):
print "Runing doctests..."
cur_dir = os.path.abspath(os.curdir)
os.chdir(os.path.join("..", "..", "Tests"))
doctest.testmod()
os.chdir(cur_dir)
del cur_dir
print "Done"
elif os.path.isdir(os.path.join("Tests", "Fasta")):
print "Runing doctests..."
cur_dir = os.path.abspath(os.curdir)
os.chdir(os.path.join("Tests"))
doctest.testmod()
os.chdir(cur_dir)
del cur_dir
print "Done"
if __name__ == "__main__":
_test()
|
ktokolwiek/Genetic-Stability-Tool
|
Bio/AlignIO/__init__.py
|
Python
|
gpl-3.0
| 20,217
|
[
"BioPerl",
"Biopython"
] |
e61cc3540fdd2d25453899264adc9d08a41e22c7b10b1e0f568f46b0d7be479f
|
from django.test import SimpleTestCase
from corehq.apps.app_manager.tests.app_factory import AppFactory
from corehq.apps.app_manager.tests.util import TestXmlMixin
from corehq.util.test_utils import flag_enabled
@flag_enabled('GRID_MENUS')
class GridMenuSuiteTests(SimpleTestCase, TestXmlMixin):
def test_that_grid_style_is_added(self):
"""
Confirms that style="grid" is added to the root menu
"""
factory = AppFactory(build_version='2.24')
factory.app.use_grid_menus = True
factory.new_basic_module('registration', 'patient registration')
factory.app.get_module(0).put_in_root = True
factory.new_basic_module('visit', 'patient visit')
factory.app.get_module(1).put_in_root = True
suite = factory.app.create_suite()
root_xpath = './menu[@id="root"]'
self.assertXmlHasXpath(suite, root_xpath)
self.assertXmlPartialEqual(
"""
<partial>
<menu id="root" style="grid">
<text><locale id="modules.m0"/></text>
<command id="m0-f0"/>
</menu>
<menu id="root" style="grid">
<text><locale id="modules.m1"/></text>
<command id="m1-f0"/>
</menu>
</partial>
""",
suite,
root_xpath
)
def test_that_root_menu_added(self):
"""
Confirms that a menu is added with id="root" and style="grid"
when the app normally wouldn't have a menu with id="root".
"""
factory = AppFactory(build_version='2.24')
factory.app.use_grid_menus = True
factory.new_basic_module('registration', 'patient')
suite = factory.app.create_suite()
root_xpath = './menu[@id="root"]'
self.assertXmlHasXpath(suite, root_xpath)
self.assertXmlPartialEqual(
'<partial><menu id="root" style="grid"><text/></menu></partial>',
suite,
root_xpath
)
def test_use_grid_menus_is_false(self):
"""
Confirms that style="grid" is not added to any menus when use_grid_menus is False.
"""
factory = AppFactory(build_version='2.24')
factory.app.use_grid_menus = False
factory.new_basic_module('registration', 'patient')
suite = factory.app.create_suite()
style_xpath = './menu[@style="grid"]'
self.assertXmlDoesNotHaveXpath(suite, style_xpath)
def test_grid_menu_for_none(self):
factory = AppFactory(build_version='2.24.3')
self.assertTrue(factory.app.grid_menu_toggle_enabled())
factory.app.create_profile()
factory.app.grid_form_menus = 'none'
factory.new_basic_module('registration', 'patient')
factory.app.get_module(0).display_style = 'grid'
root_xpath = './menu[@id="root"]'
m0_xpath = './menu[@id="m0"]'
        # with the Modules Menu set to list, the root menu should not be rendered and the module is rendered w/o style=grid
factory.app.use_grid_menus = False
suite = factory.app.create_suite()
self.assertXmlDoesNotHaveXpath(suite, root_xpath)
self.assertXmlPartialEqual(
'<partial><menu id="m0"><text><locale id="modules.m0"/></text><command id="m0-f0"/></menu></partial>',
suite,
m0_xpath
)
        # with the Modules Menu set to grid, the root menu should be rendered w/ style=grid and the module w/o style=grid
factory.app.use_grid_menus = True
suite = factory.app.create_suite()
self.assertXmlPartialEqual(
'<partial><menu id="root" style="grid"><text/></menu></partial>',
suite,
root_xpath
)
self.assertXmlPartialEqual(
'<partial><menu id="m0"><text><locale id="modules.m0"/></text><command id="m0-f0"/></menu></partial>',
suite,
m0_xpath
)
def test_grid_menu_for_some(self):
factory = AppFactory(build_version='2.24.3')
self.assertTrue(factory.app.grid_menu_toggle_enabled())
factory.app.create_profile()
factory.app.grid_form_menus = 'some'
factory.new_basic_module('registration', 'patient')
factory.new_basic_module('visit', 'patient visit')
factory.app.get_module(1).display_style = 'grid'
root_xpath = './menu[@id="root"]'
grid_module_xpath = './menu[@id="m1"]'
        # with the Modules Menu set to list, the root menu should not be rendered and the module is rendered w/ style=grid
factory.app.use_grid_menus = False
suite = factory.app.create_suite()
self.assertXmlDoesNotHaveXpath(suite, root_xpath)
self.assertXmlHasXpath(suite, grid_module_xpath)
self.assertXmlPartialEqual(
'<partial><menu id="m1" style="grid"><text><locale id="modules.m1"/></text>\
<command id="m1-f0"/></menu></partial>',
suite,
grid_module_xpath
)
# with the Modules Menu set to grid, both the root menu and the module should render with style=grid
factory.app.use_grid_menus = True
suite = factory.app.create_suite()
self.assertXmlHasXpath(suite, root_xpath)
self.assertXmlPartialEqual(
'<partial><menu id="root" style="grid"><text/></menu></partial>',
suite,
root_xpath
)
self.assertXmlPartialEqual(
'<partial><menu id="m1" style="grid"><text><locale id="modules.m1"/></text>\
<command id="m1-f0"/></menu></partial>',
suite,
grid_module_xpath
)
# with the module itself as the root, the root menu should render with style=grid and contain the module's content
factory.app.get_module(1).put_in_root = True
suite = factory.app.create_suite()
self.assertXmlPartialEqual(
'<partial><menu id="root" style="grid"><text><locale id="modules.m1"/></text>\
<command id="m1-f0"/></menu></partial>',
suite,
root_xpath
)
def test_grid_menu_for_all(self):
factory = AppFactory(build_version='2.24.3')
self.assertTrue(factory.app.grid_menu_toggle_enabled())
factory.app.create_profile()
factory.app.grid_form_menus = 'all'
factory.new_basic_module('registration', 'patient')
suite = factory.app.create_suite()
root_xpath = './menu[@id="root"]'
grid_module_xpath = './menu[@id="m0"]'
# with the Modules Menu set to list, the root menu should not be rendered and the module should render with style=grid
factory.app.use_grid_menus = False
self.assertXmlDoesNotHaveXpath(suite, root_xpath)
self.assertXmlPartialEqual(
'<partial><menu id="m0" style="grid"><text><locale id="modules.m0"/></text>\
<command id="m0-f0"/></menu></partial>',
suite,
grid_module_xpath
)
# with the Modules Menu set to grid, the root menu and the module should render with style=grid
factory.app.use_grid_menus = True
suite = factory.app.create_suite()
self.assertXmlPartialEqual(
'<partial><menu id="root" style="grid"><text/></menu></partial>',
suite,
root_xpath
)
self.assertXmlPartialEqual(
'<partial><menu id="m0" style="grid"><text><locale id="modules.m0"/></text>\
<command id="m0-f0"/></menu></partial>',
suite,
grid_module_xpath
)
# with the Modules Menu set to list and the module itself as the root, the root menu should render without
# style=grid and contain the module's content
factory.app.use_grid_menus = False
factory.app.get_module(0).put_in_root = True
suite = factory.app.create_suite()
self.assertXmlPartialEqual(
'<partial><menu id="root"><text><locale id="modules.m0"/></text>\
<command id="m0-f0"/></menu></partial>',
suite,
root_xpath
)
# with the Modules Menu set to grid and the module itself as the root, the root menu should render with
# style=grid and contain the module's content
factory.app.get_module(0).put_in_root = True
factory.app.use_grid_menus = True
suite = factory.app.create_suite()
self.assertXmlPartialEqual(
'<partial><menu id="root" style="grid"><text><locale id="modules.m0"/></text>\
<command id="m0-f0"/></menu></partial>',
suite,
root_xpath
)
|
qedsoftware/commcare-hq
|
corehq/apps/app_manager/tests/test_grid_menus.py
|
Python
|
bsd-3-clause
| 8,516
|
[
"VisIt"
] |
7803445be5efe3aac5ef3e9c3363290878311f5150b2524857a98d94f391e7cf
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
####################################################################################
### Copyright (C) 2015-2019 by ABLIFE
####################################################################################
####################################################################################
####################################################################################
# Date Version Author ChangeLog
#
#
#####################################################################################
"""
Program functions:
1. Compute gene expression levels
2. randCheck_gene
3. randCheck_mRNA
Design approach:
Statistics are computed with the gffutils and HTSeq packages
"""
import re, os, sys, logging, time, datetime
from optparse import OptionParser, OptionGroup
reload(sys)
sys.setdefaultencoding('utf-8')
import subprocess
import threading
import gffutils
import HTSeq
import numpy
import multiprocessing
import signal
from matplotlib import pyplot
sys.path.insert(1, os.path.split(os.path.realpath(__file__))[0] + "/../")
# print(sys.path)
from ablib.utils.tools import *
_version = 'v0.1'
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def configOpt():
"""Init for option
"""
usage = 'Usage: %prog [-f] [other option] [-h]'
p = OptionParser(usage)
##basic options
p.add_option('-g', '--gff', dest='gff', action='store', type='string', help='gff file; not required if the gff database already exists')
p.add_option('-d', '--db', dest='db', default='gffdb', action='store', type='string', help='the gff database file to create or use')
p.add_option('-b', '--bamorbed', dest='bamorbed', action='store', type='string', help='bam or bed file, Important: the bamfile\'s suffix must be ".bam"')
p.add_option('-o', '--outfile', dest='outfile', default='Mapping_distribution.txt', action='store', type='string', help='gene expression file')
p.add_option('-n', '--samplename', dest='samplename', default='', action='store', type='string', help='sample name,default is ""')
p.add_option('-m', '--mapinfo', dest='mapinfo', default='', action='store', type='string', help='output which region peak is located on')
p.add_option('-u', '--unstrand', dest='unstrand', default=False, action='store_true', help='unstranded library; antisense reads will not be considered.')
group = OptionGroup(p, "Preset options")
##preset options
group.add_option('-O', '--outDir', dest='outDir', default='./', action='store', type='string', help='output directory', metavar="DIR")
group.add_option('-L', '--logDir', dest='logDir', default='', action='store', type='string', help='log dir ,default is same as outDir')
group.add_option('-P', '--logPrefix', dest='logPrefix', default='', action='store', type='string', help='log file prefix')
group.add_option('-E', '--email', dest='email', default='none', action='store', type='string', help='email address; if provided, an email is sent when this job finishes (default: no email)', metavar="EMAIL")
group.add_option('-Q', '--quiet', dest='quiet', default=False, action='store_true', help='do not print messages to stdout')
group.add_option('-K', '--keepTemp', dest='keepTemp', default=False, action='store_true', help='keep temp dir')
group.add_option('-T', '--test', dest='isTest', default=False, action='store_true', help='run this program for test')
p.add_option_group(group)
if len(sys.argv) == 1:
p.print_help()
sys.exit(1)
opt, args = p.parse_args()
return (p, opt, args)
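# Example invocation (illustrative sketch; the file names below are hypothetical,
# only the option flags come from configOpt() above):
#   python mapping_distribution_statics_v2.py -g genes.gff -d gffdb \
#       -b sample.bam -n sampleA -O ./out -m sampleA_peak_regions.txt
# -g/-d select or build the gffutils database, -b supplies the BAM or BED input,
# -n prefixes the output files, -O sets the output directory and -m writes the
# per-peak region annotation table.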
def listToString(x):
"""获得完整的命令
"""
rVal = ''
for a in x:
rVal += a + ' '
return rVal
opt_parser, opt, args = configOpt()
if opt.logDir == "":
opt.logDir = opt.outDir + '/log/'
sample = ""
if opt.samplename != "":
sample = opt.samplename + '_'
if opt.outfile == 'Mapping_distribution.txt':
opt.outfile = sample + opt.outfile
intype = "bam"
match = re.search(r'\.bam$', opt.bamorbed)
if not match:
intype = "bed"
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
scriptPath = os.path.abspath(os.path.dirname(__file__)) # absolute script path
binPath = "/".join(scriptPath.split("/")[0:-2]) # absolute bin path
outPath = os.path.abspath(opt.outDir) # absolute output path
os.mkdir(outPath) if not os.path.isdir(outPath) else None
logPath = os.path.abspath(opt.logDir)
os.mkdir(logPath) if not os.path.isdir(logPath) else None
tempPath = outPath + '/temp/'  # absolute temp path
# os.mkdir(tempPath) if not os.path.isdir(tempPath) else None
resultPath = outPath + '/result/'
# os.mkdir(resultPath) if not os.path.isdir(resultPath) else None
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def initLogging(logFilename):
"""Init for logging
"""
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s : %(levelname)s] %(message)s', datefmt='%y-%m-%d %H:%M', filename=logFilename, filemode='w')
if not opt.quiet:
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('[%(asctime)s : %(levelname)s] %(message)s', datefmt='%y-%m-%d %H:%M')
# tell the handler to use this format
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
dt = datetime.datetime.now()
logFile = logPath + '/' + opt.logPrefix + 'log.' + str(dt.strftime('%Y%m%d.%H%M%S.%f')) + '.txt'
initLogging(logFile)
logging.debug(sys.modules[__name__].__doc__)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug('Program version: %s' % _version)
logging.debug('Start the program with [%s]\n', listToString(sys.argv))
startTime = datetime.datetime.now()
logging.debug("计时器:Program start at %s" % startTime)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### S
# -----------------------------------------------------------------------------------
# def invert_strand(iv):
# """
# :param iv: HTSeq.GenomicInterval object
# :return: HTSeq.GenomicInterval - strand is reversed
# """
# iv2 = iv.copy()
# if iv2.strand == "+":
# iv2.strand = "-"
# elif iv2.strand == "-":
# iv2.strand = "+"
# else:
# raise ValueError, "Illegal strand"
# return iv2
def getTotalBase(iv, coverage):
totalbases = 0
for iv2, value2 in coverage[iv].steps():
if value2 > 0:
totalbases += value2 * iv2.length
return totalbases
# @profile
def readChrwithBam(chr, reads):
print(chr)
reads_dict = {}
anti_dict = {}
db = gffutils.FeatureDB(opt.db)
bamfile = HTSeq.BAM_Reader(opt.bamorbed)
usedreads = {}
forward_end = 0
i = 0
## mapping statistics
genes = ('gene','lincRNA_gene','miRNA_gene','mt_gene','processed_pseudogene','pseudogene','rRNA_gene','snoRNA_gene','snRNA_gene')
trans = ('mRNA', 'miRNA', 'mRNA_TE_gene', 'ncRNA', 'rRNA', 'snoRNA', 'snRNA', 'tRNA', 'transcript')
exons = ('three_prime_UTR', 'five_prime_UTR', 'CDS', 'exon')
for gene in db.features_of_type(genes, seqid=chr, order_by='start'):
# print(gene)
## gene info
# if not gene.seqid == chr:
# continue
gene_id = gene.id
gene_strand = gene.strand
gene_start = gene.start
gene_end = gene.end
# if gene_start-forward_end>2500000:
# usedreads.clear()
forward_end = gene_end
reads_dict[gene_id] = {}
for e in exons:
reads_dict[gene_id][e] = 0
reads_dict[gene_id]['intron'] = 0
reads_dict[gene_id]['sense'] = 0
reads_dict[gene_id]['antisense'] = 0
reads_dict[gene_id]['noncoding_exon'] = 0
# gene_iv = HTSeq.GenomicInterval(chr, gene.start - 1, gene.end, gene.strand)
# for r in bamfile[gene_iv]:
# r_name = r.read.name
# if not r.aligned:
# continue
#
# if not opt.unstrand:
# if r.iv.strand == gene_strand:
# if usedreads.has_key(r_name):
# if anti_dict.has_key(r_name):
# gid = anti_dict[r_name]
# reads_dict[gid]['antisense'] -= 1
# anti_dict.pop(r_name)
# usedreads.pop(r_name)
# else:
# continue
# reads_dict[gene_id]['sense'] += 1
# else:
# if usedreads.has_key(r_name):
# continue
# reads_dict[gene_id]['antisense'] += 1
# anti_dict[r_name] = gene_id
# usedreads[r_name] = ""
# else:
# if usedreads.has_key(r_name):
# continue
# reads_dict[gene_id]['sense'] += 1
for isoform in db.children(gene_id, level=1):
gas = HTSeq.GenomicArrayOfSets([chr], stranded=False)
isoform_iv = HTSeq.GenomicInterval(chr, isoform.start - 1, isoform.end, isoform.strand)
for gu in db.children(isoform.id, level=1, featuretype=exons):
gu_type = gu.featuretype
gu_start = gu.start
gu_end = gu.end
gu_strand = gu.strand
gu_iv = HTSeq.GenomicInterval(chr, gu_start - 1, gu_end, gu_strand)
gas[gu_iv] += gu_type
for r in bamfile[isoform_iv]:
r_name = r.read.name
if not r.aligned:
continue
if not opt.unstrand:
if r.iv.strand == gene_strand:
if usedreads.has_key(r_name):
if anti_dict.has_key(r_name):
gid = anti_dict[r_name]
reads_dict[gid]['antisense'] -= 1
anti_dict.pop(r_name)
usedreads.pop(r_name)
else:
continue
reads_dict[gene_id]['sense'] += 1
else:
if usedreads.has_key(r_name):
continue
reads_dict[gene_id]['antisense'] += 1
anti_dict[r_name] = gene_id
usedreads[r_name] = ""
else:
if usedreads.has_key(r_name):
continue
reads_dict[gene_id]['sense'] += 1
if usedreads.has_key(r_name):
continue
else:
usedreads[r.read.name] = ""
r_len = len(r.read)
iv_seq = (co.ref_iv for co in r.cigar if co.type == "M" and co.size > 0)
for iv in iv_seq:
# print(iv)
for iv2, fs in gas[iv].steps():
iv_len = iv2.length
if len(fs) == 0:
reads_dict[gene_id]['intron'] += float(iv_len) / r_len
elif len(fs) == 1 and list(fs)[0] == "exon":
reads_dict[gene_id]['noncoding_exon'] += float(iv_len) / r_len
elif len(fs) >= 1 and "CDS" in list(fs):
reads_dict[gene_id]['CDS'] += float(iv_len) / r_len
elif len(fs) >= 1:
for s in list(fs):
if s == "exon":
continue
else:
reads_dict[gene_id][s] += float(iv_len) / r_len
i += 1
if i > 0 and i % 1000 == 0:
sys.stderr.write("%s : %d genes processed.\n" % (chr, i))
# if i==400:
# break
reads[chr] = reads_dict.copy()
del reads_dict
logging.info("done %s" % chr)
def readChrwithBed(chr, reads, peaks):
print(chr)
reads_dict = {}
peaks_dict = {}
anti_dict = {}
genes = ('gene','lincRNA_gene','miRNA_gene','mt_gene','processed_pseudogene','pseudogene','rRNA_gene','snoRNA_gene','snRNA_gene')
trans = ('mRNA', 'miRNA', 'mRNA_TE_gene', 'ncRNA', 'rRNA', 'snoRNA', 'snRNA', 'tRNA', 'transcript')
exons = ('three_prime_UTR', 'five_prime_UTR', 'CDS', 'exon')
db = gffutils.FeatureDB(opt.db)
bedfile = HTSeq.BED_Reader(opt.bamorbed)
bedga = HTSeq.GenomicArrayOfSets([chr], stranded=False)
n = 0
bed_dict = {}
for r in bedfile:
if r.iv.chrom != chr:
continue
# if r.name == "--":
# r.name = r.iv.chrom + "\t" + str(r.iv.start) + "\t" + str(
# r.iv.end) + "\t" + r.name + "\t" + str(r.score) + "\t" + r.iv.strand
# r.name = r.iv.chrom + "\t" + str(r.iv.start) + "\t" + str(
# r.iv.end) + "\t" + r.name + "\t" + str(r.score) + "\t" + r.iv.strand
r.name = r.line
# print(r.line)
n += 1
bed_dict[str(n)] = r
bedga[r.iv] += str(n)
peaks_dict[r.name] = {}
for e in exons:
peaks_dict[r.name][e] = 0
peaks_dict[r.name]['intron'] = 0
peaks_dict[r.name]['sense'] = 0
peaks_dict[r.name]['antisense'] = 0
peaks_dict[r.name]['noncoding_exon'] = 0
peaks_dict[r.name]['gene'] = "--"
usedreads = {}
forward_end = 0
i = 0
for gene in db.features_of_type(genes, seqid=chr, order_by='start'):
# print(gene)
## gene info
# if not gene.seqid == chr:
# continue
gene_id = gene.id
gene_strand = gene.strand
gene_start = gene.start
gene_end = gene.end
# if gene_start-forward_end>2500000:
# usedreads.clear()
forward_end = gene_end
## mapping statistics
reads_dict[gene_id] = {}
for e in exons:
reads_dict[gene_id][e] = 0
reads_dict[gene_id]['intron'] = 0
reads_dict[gene_id]['sense'] = 0
reads_dict[gene_id]['antisense'] = 0
reads_dict[gene_id]['noncoding_exon'] = 0
gene_iv = HTSeq.GenomicInterval(chr, gene.start - 1, gene.end, gene.strand)
bfs = set()
for biv, fs in bedga[gene_iv].steps():
bfs = bfs.union(fs)
# for n in bfs:
# bed = bed_dict[n]
# bedname = bed.name
# bediv = bed.iv
# if not opt.unstrand:
# if bediv.strand == gene_strand:
# if usedreads.has_key(bedname):
# if anti_dict.has_key(bedname):
# gid = anti_dict[bedname]
# reads_dict[gid]['antisense'] -= 1
# peaks_dict[bedname]['antisense'] = 0
# anti_dict.pop(bedname)
# usedreads.pop(bedname)
# else:
# continue
# peaks_dict[bedname]['gene'] = gene_id
# reads_dict[gene_id]['sense'] += 1
# else:
# if usedreads.has_key(bedname):
# continue
# reads_dict[gene_id]['antisense'] += 1
# peaks_dict[bedname]['antisense'] = 1
# anti_dict[bedname] = gene_id
# peaks_dict[bedname]['gene'] = gene_id
# usedreads[bedname] = ""
# else:
# if usedreads.has_key(bedname):
# continue
# peaks_dict[bedname]['gene'] = gene_id
# reads_dict[gene_id]['sense'] += 1
for isoform in db.children(gene_id, level=1):
gas = HTSeq.GenomicArrayOfSets([chr], stranded=False)
isoform_iv = HTSeq.GenomicInterval(chr, isoform.start - 1, isoform.end, isoform.strand)
for gu in db.children(isoform.id, level=1, featuretype=exons):
gu_type = gu.featuretype
gu_start = gu.start
gu_end = gu.end
gu_strand = gu.strand
gu_iv = HTSeq.GenomicInterval(chr, gu_start - 1, gu_end, gu_strand)
gas[gu_iv] += gu_type
bfs = set()
for biv, fs in bedga[isoform_iv].steps():
bfs = bfs.union(fs)
for n in bfs:
bed = bed_dict[n]
bedname = bed.name
bediv = bed.iv
if not opt.unstrand:
if bediv.strand == gene_strand:
if usedreads.has_key(bedname):
if anti_dict.has_key(bedname):
gid = anti_dict[bedname]
reads_dict[gid]['antisense'] -= 1
peaks_dict[bedname]['antisense'] = 0
anti_dict.pop(bedname)
usedreads.pop(bedname)
else:
continue
peaks_dict[bedname]['gene'] = gene_id
reads_dict[gene_id]['sense'] += 1
else:
if usedreads.has_key(bedname):
continue
reads_dict[gene_id]['antisense'] += 1
peaks_dict[bedname]['antisense'] = 1
anti_dict[bedname] = gene_id
peaks_dict[bedname]['gene'] = gene_id
usedreads[bedname] = ""
else:
if usedreads.has_key(bedname):
continue
peaks_dict[bedname]['gene'] = gene_id
reads_dict[gene_id]['sense'] += 1
if usedreads.has_key(bedname):
continue
else:
usedreads[bedname] = ""
r_len = bediv.length
for iv, fs in gas[bediv].steps():
iv_len = iv.length
if len(fs) == 0:
reads_dict[gene_id]['intron'] += float(iv_len) / r_len
peaks_dict[bedname]['intron'] += float(iv_len) / r_len
elif len(fs) == 1 and list(fs)[0] == "exon":
reads_dict[gene_id]['noncoding_exon'] += float(iv_len) / r_len
peaks_dict[bedname]['noncoding_exon'] += float(iv_len) / r_len
elif len(fs) >= 1 and "CDS" in list(fs):
reads_dict[gene_id]['CDS'] += float(iv_len) / r_len
peaks_dict[bedname]['CDS'] += float(iv_len) / r_len
elif len(fs) >= 1:
for s in list(fs):
if s == "exon":
continue
else:
reads_dict[gene_id][s] += float(iv_len) / r_len
peaks_dict[bedname][s] += float(iv_len) / r_len
i += 1
if i > 0 and i % 1000 == 0:
sys.stderr.write("%s : %d genes processed.\n" % (chr, i))
# if i==400:
# break
reads[chr] = reads_dict.copy()
peaks[chr] = peaks_dict.copy()
del reads_dict
del peaks_dict
logging.info("done %s" % chr)
# -----------------------------------------------------------------------------------
### E
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def main():
print("Main procedure start...")
if opt.gff:
db = gffutils.create_db(opt.gff, opt.db, merge_strategy="create_unique", verbose=False, force=True)
db = gffutils.FeatureDB(opt.db)
TMR = 0
bedtitle = ""
if intype == "bam":
if not os.path.isfile(opt.bamorbed + '.bai'):
os.system("samtools index " + opt.bamorbed)
TMR = getBamReadsNumber(opt.bamorbed)
print(TMR)
else:
bedtitle = getTitle(opt.bamorbed)
for line in open(opt.bamorbed):
if line.startswith("#") or line.startswith("track") or line.startswith("\n"):
continue
TMR += 1
print(TMR)
Watcher()
pool = multiprocessing.Pool(processes=15)
server = multiprocessing.Manager()
reads = server.dict()
peaks = server.dict()
if intype == "bam":
chr_dict = readBamHeader(opt.bamorbed)
for chr in db.seqids():
if not chr in chr_dict:
continue
# print(chr)
reads[chr] = {}
# runjobs(readChrwithBam,arglist,10)
pool.apply_async(readChrwithBam, args=(chr, reads))
# pool.apply_async(func, (chr,))
else:
for chr in db.seqids():
# print(chr)
reads[chr] = {}
pool.apply_async(readChrwithBed, args=(chr, reads, peaks))
pool.close()
pool.join()
d = dict(reads).copy()
p = dict(peaks).copy()
server.shutdown()
types = ('three_prime_UTR', 'five_prime_UTR', 'CDS', 'noncoding_exon', 'intron')
ori = ('sense', 'antisense')
total_dict = {}
for k in types:
total_dict[k] = 0
for k in ori:
total_dict[k] = 0
total_dict["intergenic"] = 0
for chr in d:
# print(chr)
for gene in d[chr]:
# print(gene)
for t in types:
total_dict[t] += d[chr][gene][t]
for o in ori:
total_dict[o] += d[chr][gene][o]
print(total_dict["sense"])
# total_dict["intergenic"] = TMR - total_dict["sense"] - total_dict["antisense"]
total_dict["intergenic"] = TMR - total_dict["three_prime_UTR"] - total_dict["five_prime_UTR"] - total_dict["CDS"] - total_dict["noncoding_exon"] - total_dict["intron"] - total_dict["antisense"]
if total_dict["intergenic"] < 0:
total_dict["intergenic"] = 0
os.chdir(opt.outDir)
fout = open(opt.outfile, 'w')
fout.writelines("+Type\tReads\n")
for k, v in total_dict.items():
if k == "sense":
continue
if opt.unstrand and k == "antisense":
continue
fout.writelines("%s\t%s\n" % (k, int(v)))
fout.close()
cmd = "cd " + outPath + "&& Rscript " + scriptPath + "/../plot/Bar_single_Mapping_distribution.r -f " + opt.outfile + " -t " + sample + "Mapping_distribution -n " + sample + "Mapping_distribution -o ./ \n"
os.system(cmd)
if opt.mapinfo != "":
w = open(opt.mapinfo, 'w')
w.writelines(bedtitle + "\tregionInfo\tGeneID\n")
for chr in p:
for peak in p[chr]:
w.writelines(peak + "\t")
pflag = 0
for t in types:
if p[chr][peak][t] > 0:
w.writelines(t + ":" + str(round(p[chr][peak][t], 2)) + ";")
pflag = 1
if p[chr][peak]["antisense"] > 0:
# w.writelines("antisense:" + str(round(p[chr][peak]["antisense"], 2)) + ";")
w.writelines("antisense;")
pflag = 1
if pflag == 0:
w.writelines("intergenic;")
w.writelines("\t" + p[chr][peak]['gene'] + "\n")
w.close()
if __name__ == '__main__':
main()
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
if not opt.keepTemp:
os.system('rm -rf ' + tempPath)
logging.debug("Temp folder is deleted..")
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug("Program ended")
currentTime = datetime.datetime.now()
runningTime = (currentTime - startTime).seconds # in seconds
logging.debug("计时器:Program start at %s" % startTime)
logging.debug("计时器:Program end at %s" % currentTime)
logging.debug("计时器:Program ran %.2d:%.2d:%.2d" % (runningTime / 3600, (runningTime % 3600) / 60, runningTime % 60))
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
if opt.email != "none":
run_cmd = listToString(sys.argv)
sendEmail(opt.email, str(startTime), str(currentTime), run_cmd, outPath)
logging.info("发送邮件通知到 %s" % opt.email)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
|
ablifedev/ABLIRC
|
ABLIRC/bin/public/mapping_distribution_statics_v2.py
|
Python
|
mit
| 27,283
|
[
"HTSeq"
] |
d135816e276f6a955ef087661f4e21f0ee5ce27ebbd07b1cd2223b2b7fbef25d
|
# Copyright (C) 2016
# Jakub Krajniak (jkrajniak at gmail.com)
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***************************************
espressopp.interaction.TabulatedAngular
***************************************
.. function:: espressopp.interaction.TabulatedAngular(itype, filename)
:param itype: The interpolation type: 1 - linear, 2 - akima spline, 3 - cubic spline
:param filename: The tabulated potential filename.
:type itype: int
:type filename: str
.. function:: espressopp.interaction.FixedTripleListTabulatedAngular(system, ftl, potential)
:param system: The Espresso++ system object.
:param ftl: The FixedTripleList.
:param potential: The potential.
:type system: espressopp.System
:type ftl: espressopp.FixedTripleList
:type potential: espressopp.interaction.Potential
.. function:: espressopp.interaction.FixedTripleListTabulatedAngular.setPotential(potential)
:param potential: The potential object.
:type potential: espressopp.interaction.Potential
.. function:: espressopp.interaction.FixedTripleListTypesTabulatedAngular(system, ftl)
:param system: The Espresso++ system object.
:type system: espressopp.System
:param ftl: The FixedTriple list.
:type ftl: espressopp.FixedTripleList
.. function:: espressopp.interaction.FixedTripleListTypesTabulatedAngular.setPotential(type1, type2, type3, potential)
Defines angular potential for interaction between particles of types type1-type2-type3.
:param type1: Type of particle 1.
:type type1: int
:param type2: Type of particle 2.
:type type2: int
:param type3: Type of particle 3.
:type type3: int
:param potential: The potential to set up.
:type potential: espressopp.interaction.AngularPotential
"""
from espressopp import pmi
from espressopp.esutil import *
from espressopp.interaction.AngularPotential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_TabulatedAngular, \
interaction_FixedTripleListTabulatedAngular, \
interaction_FixedTripleListTypesTabulatedAngular
class TabulatedAngularLocal(AngularPotentialLocal, interaction_TabulatedAngular):
def __init__(self, itype, filename):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_TabulatedAngular, itype, filename)
class FixedTripleListTabulatedAngularLocal(InteractionLocal, interaction_FixedTripleListTabulatedAngular):
def __init__(self, system, ftl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedTripleListTabulatedAngular, system, ftl, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
class FixedTripleListTypesTabulatedAngularLocal(InteractionLocal, interaction_FixedTripleListTypesTabulatedAngular):
def __init__(self, system, ftl):
if pmi.workerIsActive():
cxxinit(self, interaction_FixedTripleListTypesTabulatedAngular, system, ftl)
def setPotential(self, type1, type2, type3, potential):
if pmi.workerIsActive():
self.cxxclass.setPotential(self, type1, type2, type3, potential)
def getPotential(self, type1, type2, type3):
if pmi.workerIsActive():
return self.cxxclass.getPotential(self, type1, type2, type3)
def setFixedTripleList(self, ftl):
if pmi.workerIsActive():
self.cxxclass.setFixedTripleList(self, ftl)
def getFixedTripleList(self):
if pmi.workerIsActive():
return self.cxxclass.getFixedTripleList(self)
if pmi.isController:
class TabulatedAngular(AngularPotential):
'The TabulatedAngular potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.TabulatedAngularLocal',
pmiproperty = ['itype', 'filename']
)
class FixedTripleListTabulatedAngular(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedTripleListTabulatedAngularLocal',
pmicall = ['setPotential', 'getFixedTripleList']
)
class FixedTripleListTypesTabulatedAngular(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedTripleListTypesTabulatedAngularLocal',
pmicall = ['setPotential','getPotential', 'setFixedTripleList', 'getFixedTripleList']
)
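# Minimal usage sketch (commented out): `system` and `ftl` stand for an already
# configured espressopp.System and espressopp.FixedTripleList, and 'angular.tab'
# is a placeholder file name; none of these are defined in this module.
#   pot = espressopp.interaction.TabulatedAngular(itype=2, filename='angular.tab')
#   interaction = espressopp.interaction.FixedTripleListTabulatedAngular(system, ftl, pot)
#   system.addInteraction(interaction)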
|
fedepad/espressopp
|
src/interaction/TabulatedAngular.py
|
Python
|
gpl-3.0
| 5,631
|
[
"ESPResSo"
] |
07163143a3560beff7371c3b505b822938e2f93b255a901c98db6140db927888
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Provides functions for creating a python classes from the cp2k_input.xml
file.
"""
import xml.etree.cElementTree as cElementTree
import utilities
import textwrap
#===============================================================================
def validify_section(string):
"""Modifies the section name so that it can be used as a valid attribute
name in a python class.
"""
original = string
changed = False
if "-" in string:
changed = True
string = string.replace("-", "_")
if "+" in string:
changed = True
string = string.replace("+", "PLUS")
if string[0].isdigit():
changed = True
string = "NUM" + string
if changed:
print(" Section {} replaced with {}".format(original, string))
return string
#===============================================================================
def validify_keyword(string):
"""Modifies the keyword name so that it can be used as a valid attribute
name in a python class.
"""
original = string
changed = False
if "-" in string:
changed = True
string = string.replace("-", "_")
if "+" in string:
changed = True
string = string.replace("+", "PLUS")
if string[0].isdigit():
changed = True
string = "NUM" + string
if changed:
print(" Keyword {} replaced with {}".format(original, string))
string = string.capitalize()
return string
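# Illustrative examples of the mangling rules above (the section/keyword names are
# made up, not taken from cp2k_input.xml):
#   validify_section("FORCE-EVAL") -> "FORCE_EVAL"
#   validify_keyword("U+") -> "Uplus"  (replaced, then capitalized)
#   validify_keyword("2D") -> "Num2d"  (digit prefix gets "NUM", then capitalized)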
#===============================================================================
def create_docstring(item):
description = item.find("DESCRIPTION")
default_value = item.find("DEFAULT_VALUE")
default_unit = item.find("DEFAULT_UNIT")
# Description
output = " \"\"\"\n"
if description is not None:
if description.text is not None:
for line in textwrap.wrap(description.text):
output += " " + line + "\n"
# If the values are enumerated, document the possible values
data_type = item.find("DATA_TYPE")
if data_type.get("kind") == "keyword":
output += "\n Available values:\n"
enumerations = data_type.find("ENUMERATION")
for enum in enumerations.findall("ITEM"):
output += " " + enum.find("NAME").text + "\n"
enum_description = enum.find("DESCRIPTION").text
if enum_description is not None:
for line in textwrap.wrap(enum_description):
output += " " + line + "\n"
# Default value
if default_value is not None:
if default_value.text is not None:
output += "\n Default value:\n"
for line in textwrap.wrap(default_value.text):
output += " " + line + "\n"
# Default unit
if default_unit is not None:
output += "\n Default unit:\n"
if default_unit.text is not None:
for line in textwrap.wrap(default_unit.text):
output += " " + line + "\n"
output += " \"\"\"\n"
return output
#===============================================================================
def recursive_class_creation(section, level, class_dictionary, version_dictionary):
"""Recursively goes throught the .xml file created by cp2k --xml command
and creates a python class for each section. Keywords, default keywords,
section parameters and subsections are stored as attributes.
"""
default_keywords = {}
repeated_default_keywords = {}
keywords = {}
repeated_keywords = {}
subsections = {}
repeated_subsections = {}
repeated_aliases = {}
aliases = {}
attributes = []
inp_name = ""
# Initial string for each section of the class
imports = ["from pycp2k.inputsection import InputSection"]
docstring = ""
properties = ""
setters = ""
public = (
" def __init__(self):\n"
" InputSection.__init__(self)\n"
)
private = ""
class_subsections = ""
functions = "\n"
# The root with tag CP2K_INPUT doesn't have a name. It is hardcoded here.
sectionname = section.find("NAME")
if sectionname is None:
class_name = "_CP2K_INPUT"
inp_name = "CP2K_INPUT"
else:
class_name = sectionname.text
inp_name = class_name
class_name = validify_section(class_name)
class_name = "_" + class_name.lower()
# Start writing class body
section_description = section.find("DESCRIPTION")
# if section_description is not None and section_description.text is not None:
# docstring += " \"\"\"\n"
# for line in textwrap.wrap(section_description.text):
# docstring += " " + line + "\n"
# docstring += " \"\"\"\n"
# else:
# docstring += " \"\"\"\"\"\"\n"
#---------------------------------------------------------------------------
# Create attribute for section parameter
section_parameters = section.find("SECTION_PARAMETERS")
if section_parameters is not None:
attributes.append("Section_parameters")
public += " self.Section_parameters = None\n"
# Write the description for the section parameter
# public += create_docstring(section_parameters)
#---------------------------------------------------------------------------
# Create attribute for all the keywords
for keyword in section.findall("KEYWORD"):
# First find out the default name and whether the attribute is visible or not
default_name = ""
visible = True
for keyname in keyword.findall("NAME"):
keytype = keyname.get("type")
name = keyname.text
newname = validify_keyword(name)
if keytype == "default":
default_name = newname
if name.startswith("__"):
visible = False
# Now store the keywords as class attributes
if visible:
for keyname in keyword.findall("NAME"):
name = keyname.text
newname = validify_keyword(name)
# Create original attribute for the default keyname
if newname == default_name:
# Special case for repeateable keywords.
if keyword.get("repeats") == "yes":
public += " self." + newname + " = []\n"
# public += create_docstring(keyword)
repeated_keywords[newname] = name
else:
public += " self." + newname + " = None\n"
# public += create_docstring(keyword)
keywords[newname] = name
# Create properties for aliases
else:
if keyword.get("repeats") == "yes":
public += " self." + newname + " = self." + default_name + "\n"
repeated_aliases[newname] = default_name
else:
aliases[newname] = default_name
properties += ("\n @property\n"
" def " + newname + "(self):\n"
" \"\"\"\n"
" See documentation for " + default_name + "\n"
" \"\"\"\n"
" return self." + default_name + "\n")
setters += ("\n @" + newname + ".setter\n"
" def " + newname + "(self, value):\n"
" self." + default_name + " = value\n")
#---------------------------------------------------------------------------
# Create a class attribute for all DEFAULT_KEYWORDS
default_keyword = section.find("DEFAULT_KEYWORD")
if default_keyword is not None:
attributes.append("Default_keyword")
# Special case for repeateable default_keywords. Create a dictionary of the
# keyword and add a function for creating them.
name = default_keyword.find("NAME").text
newname = validify_keyword(name)
if default_keyword.get("repeats") == "yes":
public += " self." + newname + " = []\n"
# public += create_docstring(default_keyword)
repeated_default_keywords[newname] = name
else:
public += " self." + newname + " = None\n"
# public += create_docstring(default_keyword)
default_keywords[newname] = name
#---------------------------------------------------------------------------
# Create attribute for each subsection
for subsection in section.findall("SECTION"):
member_class_name = recursive_class_creation(subsection, level+1, class_dictionary, version_dictionary)
member_name = subsection.find("NAME").text
member_name = member_name.replace("-", "_")
member_name = member_name.replace("+", "PLUS")
if member_name[0].isdigit():
member_name = "_" + member_name
imports.append("from .{0} import {0}".format(member_class_name))
# Special case for repeateable sections. Create a dictionary of the
# subsections and add a function for creating them.
if subsection.get("repeats") == "yes":
class_subsections += " self." + member_name + "_list = []\n"
repeated_subsections[member_name] = member_class_name
attributes.append(member_name + "_list")
else:
class_subsections += " self." + member_name + " = " + member_class_name + "()\n"
subsections[member_name] = subsection.find("NAME").text
#---------------------------------------------------------------------------
# Write a list of the stored variable names
private += " self._name = \"" + str(inp_name) + "\"\n"
if len(keywords) != 0:
private += " self._keywords = " + str(keywords) + "\n"
if len(repeated_keywords) != 0:
private += " self._repeated_keywords = " + str(repeated_keywords) + "\n"
if len(default_keywords) != 0:
private += " self._default_keywords = " + str(default_keywords) + "\n"
if len(repeated_default_keywords) != 0:
private += " self._repeated_default_keywords = " + str(repeated_default_keywords) + "\n"
if len(subsections) != 0:
private += " self._subsections = " + str(subsections) + "\n"
if len(repeated_subsections) != 0:
private += " self._repeated_subsections = " + str(repeated_subsections) + "\n"
if len(aliases) != 0:
private += " self._aliases = " + str(aliases) + "\n"
if len(repeated_aliases) != 0:
private += " self._repeated_aliases = " + str(repeated_aliases) + "\n"
if len(attributes) != 0:
private += " self._attributes = " + str(attributes) + "\n"
#---------------------------------------------------------------------------
# Write a function for adding repeateable sections
for repeated in repeated_subsections.items():
attribute_name = repeated[0]
attribute_class_name = repeated[1]
functions += (" def " + attribute_name + "_add(self, section_parameters=None):\n"
" new_section = " + attribute_class_name + "()\n"
" if section_parameters is not None:\n"
" if hasattr(new_section, 'Section_parameters'):\n"
" new_section.Section_parameters = section_parameters\n"
" self." + attribute_name + "_list.append(new_section)\n"
" return new_section\n\n")
#---------------------------------------------------------------------------
# The class names are not unique. Use numbering to identify classes.
exists = False
import_string = "\n".join(imports)
class_string = docstring + public + class_subsections + private + functions + properties + setters
version_number = version_dictionary.get(class_name)
if version_number is None:
version_dictionary[class_name] = 1
class_dictionary[class_name+str(1)] = (import_string, class_string)
return class_name+str(1)
for version in range(version_number):
old_class_body = class_dictionary[class_name+str(version + 1)]
if old_class_body == class_string:
exists = True
version_number = version + 1
break
if not exists:
version_dictionary[class_name] = version_number + 1
class_dictionary[class_name+str(version_number + 1)] = (import_string, class_string)
return class_name + str(version_number + 1)
else:
return class_name + str(version_number)
#===============================================================================
def main(xml_path):
"""Parses the classes and saves them to the package directory as
parsedclasses.py.
"""
# Start parsing here
utilities.print_subtitle("CREATING INPUT STRUCTURE...")
tree = cElementTree.parse(xml_path)
root = tree.getroot()
# Extract the cp2k version and revision
version = root.find("CP2K_VERSION").text
revision = root.find("COMPILE_REVISION").text
class_dictionary = {}
version_dictionary = {}
recursive_class_creation(root, 0, class_dictionary, version_dictionary)
# Put each class into its own module. This produces many small files, but
# this way it is easier for autocompletion to handle everything
for class_name, class_body in class_dictionary.items():
with open('pycp2k/classes/{}.py'.format(class_name), 'w') as file:
file.write(class_body[0] + "\n\n\n")
class_body_header = (
"class " + class_name + "(InputSection):\n"
)
file.write(class_body_header)
file.write(class_body[1])
return (version, revision)
# Run main function by default; main() needs the path to the cp2k_input.xml
# file produced by `cp2k --xml`, taken here from the first command-line argument
if __name__ == "__main__":
import sys
main(sys.argv[1])
|
SINGROUP/pycp2k
|
inputparser.py
|
Python
|
lgpl-3.0
| 14,369
|
[
"CP2K"
] |
9d98f7f80251c8c63798ae6b97c7a2f3efea8101278abbb1586f35421d84e8de
|
#!/usr/bin/env python
"""
Find regions of first bed file that overlap regions in a second bed file
usage: %prog bed_file_1 bed_file_2 out_file
-1, --cols1=N,N,N,N: Columns for start, end, strand in first file
-2, --cols2=N,N,N,N: Columns for start, end, strand in second file
-m, --mincols=N: Require this much overlap (default 1bp)
-p, --pieces: just print pieces of second set (after padding)
"""
from galaxy import eggs
import pkg_resources
pkg_resources.require( "bx-python" )
import sys, traceback, fileinput
from warnings import warn
from bx.intervals import *
from bx.intervals.io import *
from bx.intervals.operations.intersect import *
from bx.cookbook import doc_optparse
from galaxy.tools.util.galaxyops import *
assert sys.version_info[:2] >= ( 2, 4 )
def main():
mincols = 1
upstream_pad = 0
downstream_pad = 0
options, args = doc_optparse.parse( __doc__ )
try:
chr_col_1, start_col_1, end_col_1, strand_col_1 = parse_cols_arg( options.cols1 )
chr_col_2, start_col_2, end_col_2, strand_col_2 = parse_cols_arg( options.cols2 )
if options.mincols: mincols = int( options.mincols )
pieces = bool( options.pieces )
in_fname, in2_fname, out_fname = args
except:
doc_optparse.exception()
g1 = NiceReaderWrapper( fileinput.FileInput( in_fname ),
chrom_col=chr_col_1,
start_col=start_col_1,
end_col=end_col_1,
strand_col=strand_col_1,
fix_strand=True )
g2 = NiceReaderWrapper( fileinput.FileInput( in2_fname ),
chrom_col=chr_col_2,
start_col=start_col_2,
end_col=end_col_2,
strand_col=strand_col_2,
fix_strand=True )
out_file = open( out_fname, "w" )
try:
for line in intersect( [g1,g2], pieces=pieces, mincols=mincols ):
if type( line ) == GenomicInterval:
out_file.write( "%s\n" % "\t".join( line.fields ) )
else:
out_file.write( "%s\n" % line )
except ParseError, e:
out_file.close()
fail( "Invalid file format: %s" % str( e ) )
out_file.close()
if g1.skipped > 0:
print skipped( g1, filedesc=" of 1st dataset" )
if g2.skipped > 0:
print skipped( g2, filedesc=" of 2nd dataset" )
if __name__ == "__main__":
main()
|
volpino/Yeps-EURAC
|
tools/new_operations/gops_intersect.py
|
Python
|
mit
| 2,542
|
[
"Galaxy"
] |
871d07310a63187a2476231f8d71bf7d591018c69902470626969fd2379056ba
|
#!/usr/bin/env python
import click
import os
import requests
import sys
import leagueids
import teamnames
import writers
BASE_URL = 'http://api.football-data.org/alpha/'
LIVE_URL = 'http://soccer-cli.appspot.com/'
LEAGUE_IDS = leagueids.LEAGUE_IDS
TEAM_NAMES = teamnames.team_names
try:
api_token = os.environ['SOCCER_CLI_API_TOKEN']
except KeyError:
from config import config
api_token = config.get('SOCCER_CLI_API_TOKEN')
if not api_token:
print ('No API Token detected. Please visit {0} and get an API Token, '
'which will be used by the Soccer CLI to get access to the data'
.format(BASE_URL))
sys.exit(1)
headers = {
'X-Auth-Token': api_token
}
def get_live_scores(writer):
"""Gets the live scores"""
req = requests.get(LIVE_URL)
if req.status_code == requests.codes.ok:
scores = req.json()
if len(scores["games"]) == 0:
click.secho("No live action currently", fg="red", bold=True)
return
writer.live_scores(scores)
else:
click.secho("There was problem getting live scores", fg="red", bold=True)
def get_team_scores(team, time, writer):
""" Queries the API and gets the particular team scores """
team_id = TEAM_NAMES.get(team, None)
if team_id:
req = requests.get('{base_url}teams/{team_id}/fixtures?timeFrame=p{time}'.format(
base_url=BASE_URL, team_id=team_id, time=time), headers=headers)
if req.status_code == requests.codes.ok:
team_scores = req.json()
if len(team_scores["fixtures"]) == 0:
click.secho("No action during past week. Change the time "
"parameter to get more fixtures.", fg="red", bold=True)
else:
writer.team_scores(team_scores, time)
else:
click.secho("No data for the team. Please check the team code.",
fg="red", bold=True)
else:
click.secho("No data for the team. Please check the team code.",
fg="red", bold=True)
def get_standings(league, writer):
""" Queries the API and gets the standings for a particular league """
if not league:
click.secho("Please specify a league. Example --standings --league=EPL",
fg="red", bold=True)
return
league_id = LEAGUE_IDS[league]
req = requests.get('{base_url}soccerseasons/{id}/leagueTable'.format(
base_url=BASE_URL, id=league_id), headers=headers)
if req.status_code == requests.codes.ok:
writer.standings(req.json(), league)
else:
click.secho("No standings availble for {league}.".format(league=league),
fg="red", bold=True)
def get_league_scores(league, time, writer):
"""Queries the API and fetches the scores for fixtures
based upon the league and time parameter"""
if league:
league_id = LEAGUE_IDS[league]
req = requests.get('{base_url}soccerseasons/{id}/fixtures?timeFrame=p{time}'.format(
base_url=BASE_URL, id=league_id, time=str(time)), headers=headers)
if req.status_code == requests.codes.ok:
fixtures_results = req.json()
# no fixtures in the past week; display a help message and return
if len(fixtures_results["fixtures"]) == 0:
click.secho("No {league} matches in the past week.".format(league=league),
fg="red", bold=True)
else:
writer.league_scores(fixtures_results, time)
else:
click.secho("No data for the given league",
fg="red", bold=True)
return
req = requests.get('{base_url}fixtures?timeFrame=p{time}'.format(
base_url=BASE_URL, time=str(time)), headers=headers)
if req.status_code == requests.codes.ok:
fixtures_results = req.json()
writer.league_scores(fixtures_results, time)
@click.command()
@click.option('--live', is_flag=True, help="Shows live scores from various leagues")
@click.option('--standings', is_flag=True, help="Standings for a particular league")
@click.option('--league', '-league', type=click.Choice(LEAGUE_IDS.keys()),
help=("Choose the league whose fixtures you want to see. "
"Bundesliga(BL), Premier League(EPL), La Liga(LLIGA), "
"Serie A(SA), Ligue 1(FL), Eredivisie(DED), "
"Primeira Liga(PPL), Champions League(CL))"))
@click.option('--team', type=click.Choice(TEAM_NAMES.keys()),
help=("Choose the team whose fixtures you want to see. "
"See the various team codes listed on README')"))
@click.option('--time', default=6,
help="The number of days in the past for which you want to see the scores")
@click.option('-o', '--output', type=click.Choice(['stdout', 'csv', 'json']),
default='stdout',
help="Print output in stdout, CSV or JSON format")
def main(league, time, standings, team, live, output):
""" A CLI for live and past football scores from various football leagues """
writer = writers.get_writer(output)
if live:
get_live_scores(writer)
return
if standings:
get_standings(league, writer)
return
if team:
get_team_scores(team, time, writer)
return
get_league_scores(league, time, writer)
if __name__ == '__main__':
main()
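# Example CLI usage (sketch; the installed command name is assumed here, you could
# also run this module directly with `python main.py ...`):
#   soccer --live
#   soccer --standings --league=EPL
#   soccer --team=<team code> --time=10 -o json
# The --standings/--league form mirrors the hint printed by get_standings() above;
# team codes come from teamnames.team_names.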
|
nare469/soccer-cli
|
soccer/main.py
|
Python
|
mit
| 5,464
|
[
"VisIt"
] |
54200b07c71fdd0182d4c7d3b21b8089d657d70c4b33cfdc508066e43fe0706b
|
from argparse import ArgumentParser
from singlecell import barcode, align, count, cluster
from singlecell.barcode import barcodes_to_plate_well
import os
import glob
from collections import OrderedDict
def get_sample(line, sample_map_filename):
keys = ["sample_id", "subsample_id", "r1_path", "r2_path"]
sample_id, subsample_id, r1_filename, r2_filename = line.split()
r1_path = os.path.join(os.path.dirname(sample_map_filename), r1_filename)
r2_path = os.path.join(os.path.dirname(sample_map_filename), r2_filename)
return dict(zip(keys, [sample_id, subsample_id, r1_path, r2_path]))
def get_samples_to_process(sample_file):
with open(sample_file) as in_handle:
return [get_sample(x, sample_file) for x in in_handle]
def get_r2_prepped_outfile(sample, alignment_dir):
return os.path.join(alignment_dir,
".".join([sample["sample_id"], sample["subsample_id"]]))
def get_bwa_outfile(fastq_file):
return fastq_file + ".sam"
def get_star_prefix(fastq_file):
base, _ = os.path.splitext(fastq_file)
return base
def get_cleaned_outfile(align_file):
base, ext = os.path.splitext(align_file)
return base + ".cleaned" + ext
def get_barcodes(barcode_file):  # reconstructed: the original `def` line is missing; this name is a guess inferred from the body
barcodes = OrderedDict()
with open(barcode_file) as in_handle:
for line in in_handle:
tokens = line.split()
barcodes[tokens[2]] = "_".join(tokens[0:2])
return barcodes
if __name__ == "__main__":
parser = ArgumentParser(description="Run a single cell analysis.")
parser.add_argument("--multimappers", action="store_true",
default=False, help="Keep multimappers")
parser.add_argument("--sample-map", required=True, help="Sample map file.")
parser.add_argument("--aligner", default="bwa",
choices=["bwa", "star"], help="Aligner to use.")
parser.add_argument("--aligner-index", help="Path to aligner index.")
parser.add_argument("--alignment-dir", help="Output directory")
parser.add_argument("--gtf-file", required=True, help="GTF file")
parser.add_argument("--plate-file", required=True, help="Plate file")
parser.add_argument("--num-jobs", type=int,
default=1, help="Number of concurrent jobs to process.")
parser.add_argument("--cores-per-job", type=int,
default=1, help="Number of cores to use.")
parser.add_argument("--memory-per-job", default=2, help="Memory in GB to reserve per job.")
parser.add_argument("--timeout", default=15, help="Time to wait before giving up starting.")
parser.add_argument("--scheduler", default=None, help="Type of scheduler to use.",
choices=["lsf", "slurm", "torque", "sge"])
parser.add_argument("--resources", default=None, help="Extra scheduler resource flags.")
parser.add_argument("--queue", default=None, help="Queue to submit jobs to.")
parser.add_argument("--local", action="store_true", default=False,
help="Run in parallel on a local machine.")
args = parser.parse_args()
samples = get_samples_to_process(args.sample_map)
prepped = []
print "Starting IPython cluster. This may take a while."
with cluster.get_cluster_view(args) as view:
print "IPython cluster is up."
print "Beginning barcode preparation."
for sample in samples:
fq1 = sample["r1_path"]
fq2 = sample["r2_path"]
out_file = get_r2_prepped_outfile(sample, args.alignment_dir)
print ("barcode-prepping %s and %s to %s." % (fq1, fq2, out_file))
prepped.append(view.apply_async(barcode.prep_r2_with_barcode,
sample["r1_path"],
sample["r2_path"],
get_r2_prepped_outfile(sample, args.alignment_dir)))
prepped = cluster.wait_until_complete(prepped)
print "Finshed barcode preparation."
print "Beginning alignment."
aligned = []
for prep in prepped:
print ("aligning %s to %s with %s." % (prep, args.aligner_index,
args.aligner))
if args.aligner == "bwa":
aligned.append(view.apply_async(align.bwa_align, prep, args.aligner_index,
get_bwa_outfile(prep), args.cores_per_job))
elif args.aligner == "star":
aligned.append(view.apply_async(align.star_align, prep, args.aligner_index,
get_star_prefix(prep), args.cores_per_job))
aligned = cluster.wait_until_complete(aligned)
print "Finished alignment."
print "Begin cleaning of poorly mapped reads."
cleaned = []
for sam_file in aligned:
print ("Cleaning %s, removing poorly mapped reads." % sam_file)
cleaned.append(view.apply_async(align.clean_align, sam_file, get_cleaned_outfile(sam_file)))
cleaned = cluster.wait_until_complete(cleaned)
print "Finished cleaning."
print "Reading barcode to well mapping."
barcode_to_well = barcodes_to_plate_well(args.plate_file)
print "Finished reading feature names and barcodes."
print "Counting unique UMI mapping to features."
counted = []
for sam_file in cleaned:
counted.append(view.apply_async(count.count_umi, sam_file, args.gtf_file, barcode_to_well))
counted = cluster.wait_until_complete(counted)
print "Finished counting UMI."
|
roryk/singlecell
|
scripts/run_analysis.py
|
Python
|
mit
| 5,635
|
[
"BWA"
] |
15219eabbb39d435a3a1e3830be29b1a30913a38b490db72fdbf5951b5ad9889
|
def itemTemplate():
return ['object/tangible/item/quest/force_sensitive/shared_fs_crystal_force.iff'] # needs correct iff still
def customItemName():
return "Synapse Focus Crystal"
def requiredLevelForEffect():
return 80
def biolink():
return 1
def lootDescriptor():
return 'rarebuffitem'
def itemStats():
stats =['proc_name','forceCrystalForce','forceCrystalForce']
stats +=['effectname','Extended Action','Extended Action']
stats +=['duration','10800','10800']
stats +=['cooldown','86400','86400']
return stats
|
agry/NGECore2
|
scripts/loot/lootItems/legendarylootchest/synapse_focus_crystal.py
|
Python
|
lgpl-3.0
| 544
|
[
"CRYSTAL"
] |
54dce3bd20e3a07081f04d92708c78d53f52268ad92274cc58c2f6936166bbdd
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 30 20:56:07 2015
@author: agiovann
Updated on Fri Aug 19 17:30:11 2016
@author: deep-introspection
"""
import cv2
import os
import sys
import scipy.ndimage
import scipy
import sklearn
import warnings
import numpy as np
import scipy as sp
from sklearn.decomposition import NMF, IncrementalPCA, FastICA
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import euclidean_distances
import pylab as plt
import h5py
import cPickle as cpk
from scipy.io import loadmat
from matplotlib import animation
import pylab as pl
from skimage.external.tifffile import imread
from tqdm import tqdm
import timeseries
# from ca_source_extraction.utilities import save_memmap,load_memmap
try:
plt.ion()
except:
pass
from skimage.transform import warp, AffineTransform
from skimage.feature import match_template
from skimage import data
import timeseries as ts
from traces import trace
from utils import display_animation
class movie(ts.timeseries):
"""
Class representing a movie. This class subclasses timeseries,
that in turn subclasses ndarray
movie(input_arr, fr=None,start_time=0,file_name=None, meta_data=None)
Example of usage
----------
input_arr = 3d ndarray
fr=33; # 33 Hz
start_time=0
m=movie(input_arr, start_time=0,fr=33);
Parameters
----------
input_arr: np.ndarray, 3D, (time,height,width)
fr: frame rate
start_time: time beginning movie, if None it is assumed 0
meta_data: dictionary including any custom meta data
file_name: name associated with the file (e.g. path to the original file)
"""
# def __new__(cls, input_arr, fr=None, start_time=0,
# file_name=None, meta_data=None, **kwargs):
def __new__(cls, input_arr, **kwargs):
if (type(input_arr) is np.ndarray) or (type(input_arr) is h5py._hl.dataset.Dataset):
# kwargs['start_time']=start_time;
# kwargs['file_name']=file_name;
# kwargs['meta_data']=meta_data;
# kwargs['fr']=fr;
return super(movie, cls).__new__(cls, input_arr, **kwargs)
else:
raise Exception('Input must be an ndarray, use load instead!')
def motion_correct(self,
max_shift_w=5,
max_shift_h=5,
num_frames_template=None,
template=None,
method='opencv',
remove_blanks=False):
'''
Extract shifts and motion corrected movie automatically,
for more control consider the functions extract_shifts and apply_shifts
Disclaimer, it might change the object itself.
Parameters
----------
max_shift_w,max_shift_h: maximum pixel shifts allowed when correcting
in the width and height direction
template: if a good template for frame by frame correlation exists
it can be passed. If None it is automatically computed
method: depends on what is installed 'opencv' or 'skimage'. 'skimage'
is an order of magnitude slower
num_frames_template: if only a subset of the movies needs to be loaded
for efficiency/speed reasons
Returns
-------
self: motion corrected movie, it might change the object itself
shifts : tuple, contains x & y shifts and correlation with template
xcorrs: cross correlation of the movies with the template
template= the computed template
'''
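# Typical call (sketch), matching the signature and return value above:
#   m, shifts, xcorrs, template = m.motion_correct(max_shift_w=5, max_shift_h=5)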
# adjust the movie so that values are non-negative
# min_val = np.percentile(self, 8)
# if min_val>0:
# print "removing 8 percentile"
# self = self-min_val
# else:
# min_val=0
if template is None: # if template is not provided it is created
if num_frames_template is None:
num_frames_template = 10e7/(512*512)
frames_to_skip = int(np.maximum(1, self.shape[0]/num_frames_template))
# sometimes it is convenient to only consider a subset of the
# movie when computing the median
# idx = np.random.randint(0, high=self.shape[0], size=(num_frames_template,))
submov = self[::frames_to_skip, :].copy()
templ = submov.bin_median() # create template with portion of movie
shifts,xcorrs=submov.extract_shifts(max_shift_w=max_shift_w, max_shift_h=max_shift_h, template=templ, method=method) #
submov.apply_shifts(shifts,interpolation='cubic',method=method)
template=submov.bin_median()
del submov
m=self.copy()
shifts,xcorrs=m.extract_shifts(max_shift_w=max_shift_w, max_shift_h=max_shift_h, template=template, method=method) #
m=m.apply_shifts(shifts,interpolation='cubic',method=method)
template=(m.bin_median())
del m
else:
template=template-np.percentile(template,8)
# now use the good template to correct
shifts,xcorrs=self.extract_shifts(max_shift_w=max_shift_w, max_shift_h=max_shift_h, template=template, method=method) #
self=self.apply_shifts(shifts,interpolation='cubic',method=method)
# self=self+min_val
if remove_blanks:
max_h,max_w= np.max(shifts,axis=0)
min_h,min_w= np.min(shifts,axis=0)
self=self.crop(crop_top=max_h,crop_bottom=-min_h+1,crop_left=max_w,crop_right=-min_w,crop_begin=0,crop_end=0)
return self,shifts,xcorrs,template
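# Illustrative usage sketch for motion_correct (the file name 'example.tif' and the
# frame rate are assumptions, not from the original examples):
#
#     m = load('example.tif', fr=30)
#     m, shifts, xcorrs, template = m.motion_correct(max_shift_w=5, max_shift_h=5,
#                                                     method='opencv')
#     # shifts[i] holds the estimated displacement applied to frame i,
#     # xcorrs[i] the mean correlation of frame i against the template.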
def bin_median(self,window=10):
T,d1,d2=np.shape(self)
num_windows=np.int(T/window)
num_frames=num_windows*window
return np.median(np.mean(np.reshape(self[:num_frames],(window,num_windows,d1,d2)),axis=0),axis=0)
def extract_shifts(self, max_shift_w=5,max_shift_h=5, template=None, method='opencv'):
"""
Performs motion correction using the opencv matchTemplate function. At every iteration a template is built by taking the median of all frames and then used to align the other frames.
Parameters
----------
max_shift_w,max_shift_h: maximum pixel shifts allowed when correcting in the width and height direction
template: if a good template for frame by frame correlation is available it can be passed. If None it is automatically computed
method: depends on what is installed 'opencv' or 'skimage'. 'skimage' is an order of magnitude slower
Returns
-------
shifts : tuple, contains shifts in x and y and correlation with template
xcorrs: cross correlation of the movies with the template
"""
min_val=np.percentile(self, 1)
if min_val < - 0.1:
print min_val
warnings.warn('** Pixels averages are too negative. Removing 1 percentile. **')
self=self-min_val
else:
min_val=0
if type(self[0, 0, 0]) is not np.float32:
warnings.warn('Casting the array to float 32')
self = np.asanyarray(self, dtype=np.float32)
n_frames_, h_i, w_i = self.shape
ms_w = max_shift_w
ms_h = max_shift_h
if template is None:
template = np.median(self, axis=0)
else:
if np.percentile(template, 8) < - 0.1:
warnings.warn('Pixels averages are too negative for template. Removing 1 percentile.')
template=template-np.percentile(template,1)
template=template[ms_h:h_i-ms_h,ms_w:w_i-ms_w].astype(np.float32)
h, w = template.shape # template width and height
#% run algorithm, press q to stop it
shifts=[]; # store the amount of shift in each frame
xcorrs=[];
for i,frame in enumerate(self):
if i%100==99:
print "Frame %i"%(i+1);
if method == 'opencv':
res = cv2.matchTemplate(frame,template,cv2.TM_CCORR_NORMED)
top_left = cv2.minMaxLoc(res)[3]
elif method == 'skimage':
res = match_template(frame,template)
top_left = np.unravel_index(np.argmax(res),res.shape);
top_left=top_left[::-1]
else:
raise Exception('Unknown motion correction method!')
avg_corr=np.mean(res);
sh_y,sh_x = top_left
bottom_right = (top_left[0] + w, top_left[1] + h)
if (0 < top_left[1] < 2 * ms_h-1) & (0 < top_left[0] < 2 * ms_w-1):
# if max is internal, check for subpixel shift using gaussian
# peak registration
log_xm1_y = np.log(res[sh_x-1,sh_y]);
log_xp1_y = np.log(res[sh_x+1,sh_y]);
log_x_ym1 = np.log(res[sh_x,sh_y-1]);
log_x_yp1 = np.log(res[sh_x,sh_y+1]);
four_log_xy = 4*np.log(res[sh_x,sh_y]);
sh_x_n = -(sh_x - ms_h + (log_xm1_y - log_xp1_y) / (2 * log_xm1_y - four_log_xy + 2 * log_xp1_y))
sh_y_n = -(sh_y - ms_w + (log_x_ym1 - log_x_yp1) / (2 * log_x_ym1 - four_log_xy + 2 * log_x_yp1))
else:
sh_x_n = -(sh_x - ms_h)
sh_y_n = -(sh_y - ms_w)
# if not only_shifts:
# if method == 'opencv':
# M = np.float32([[1,0,sh_y_n],[0,1,sh_x_n]])
# self[i] = cv2.warpAffine(frame,M,(w_i,h_i),flags=interpolation)
# elif method == 'skimage':
# tform = AffineTransform(translation=(-sh_y_n,-sh_x_n))
# self[i] = warp(frame, tform,preserve_range=True,order=3)
# if show_movie:
# fr = cv2.resize(self[i],None,fx=2, fy=2, interpolation = cv2.INTER_CUBIC)
# cv2.imshow('frame',fr/255.0)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# cv2.destroyAllWindows()
# break
shifts.append([sh_x_n,sh_y_n])
xcorrs.append([avg_corr])
self=self+min_val
return (shifts,xcorrs)
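# Illustrative sketch of the subpixel refinement used above: a parabola is fit to the
# log of the correlation values around the integer peak, separately in x and y.
# Standalone 1D version with hypothetical correlation values:
#
#     import numpy as np
#     c = np.array([0.2, 0.8, 0.9, 0.7, 0.1])     # correlation profile, integer peak at index 2
#     p = int(np.argmax(c))
#     lm, l0, lp = np.log(c[p-1]), np.log(c[p]), np.log(c[p+1])
#     peak = p + (lm - lp) / (2*lm - 4*l0 + 2*lp)  # ~1.82, pulled toward the larger neighbour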
def apply_shifts(self, shifts,interpolation='linear',method='opencv',remove_blanks=False):
"""
Apply precomputed shifts to a movie, using subpixels adjustment (cv2.INTER_CUBIC function)
Parameters
------------
shifts: array of tuples representing x and y shifts for each frame
interpolation: 'linear', 'cubic', 'nearest' or cvs.INTER_XXX
"""
if type(self[0, 0, 0]) is not np.float32:
warnings.warn('Casting the array to float 32')
self = np.asanyarray(self, dtype=np.float32)
if interpolation == 'cubic':
if method == 'opencv':
interpolation=cv2.INTER_CUBIC
else:
interpolation=3
print 'cubic interpolation'
elif interpolation == 'nearest':
if method == 'opencv':
interpolation=cv2.INTER_NEAREST
else:
interpolation=0
print 'nearest interpolation'
elif interpolation == 'linear':
if method=='opencv':
interpolation=cv2.INTER_LINEAR
else:
interpolation=1
print 'linear interpolation'
elif interpolation == 'area':
if method=='opencv':
interpolation=cv2.INTER_AREA
else:
raise Exception('Method not defined')
print 'area interpolation'
elif interpolation == 'lanczos4':
if method=='opencv':
interpolation=cv2.INTER_LANCZOS4
else:
interpolation=4
print 'lanczos/biquartic interpolation'
else:
raise Exception('Interpolation method not available')
t,h,w=self.shape
for i,frame in enumerate(self):
if i%100==99:
print "Frame %i"%(i+1);
sh_x_n, sh_y_n = shifts[i]
if method == 'opencv':
M = np.float32([[1,0,sh_y_n],[0,1,sh_x_n]])
self[i] = cv2.warpAffine(frame,M,(w,h),flags=interpolation)
elif method == 'skimage':
tform = AffineTransform(translation=(-sh_y_n,-sh_x_n))
self[i] = warp(frame, tform,preserve_range=True,order=interpolation)
else:
raise Exception('Unknown shift application method')
if remove_blanks:
max_h,max_w= np.max(shifts,axis=0)
min_h,min_w= np.min(shifts,axis=0)
self=self.crop(crop_top=max_h,crop_bottom=-min_h+1,crop_left=max_w,crop_right=-min_w,crop_begin=0,crop_end=0)
return self
def debleach(self):
""" Debleach by fiting a model to the median intensity.
"""
if type(self[0, 0, 0]) is not np.float32:
warnings.warn('Casting the array to float 32')
self = np.asanyarray(self, dtype=np.float32)
t, h, w = self.shape
x = np.arange(t)
y = np.median(self.reshape(t, -1), axis=1)
def expf(x, a, b, c):
return a*np.exp(-b*x)+c
def linf(x, a, b):
return a*x+b
try:
p0 = (y[0]-y[-1], 1e-6, y[-1])
popt, pcov = sp.optimize.curve_fit(expf, x, y, p0=p0)
y_fit = expf(x, *popt)
except:
p0 = (float(y[-1]-y[0])/float(x[-1]-x[0]), y[0])
popt, pcov = sp.optimize.curve_fit(linf, x, y, p0=p0)
y_fit = linf(x, *popt)
norm = y_fit - np.median(y[:])
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(x, y)
# plt.plot(x, y_fit, 'r--')
# plt.legend({'Original', 'Fit'})
# plt.show()
for frame in range(t):
self[frame, :, :] = self[frame, :, :] - norm[frame]
return self
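# Illustrative standalone sketch of the debleaching model above (synthetic trace,
# assumed constants), fitting a decaying exponential to the per-frame median:
#
#     import numpy as np
#     from scipy.optimize import curve_fit
#     t = np.arange(500.)
#     y = 100.*np.exp(-t/200.) + 50.                       # synthetic bleaching baseline
#     popt, _ = curve_fit(lambda x, a, b, c: a*np.exp(-b*x)+c, t, y,
#                         p0=(y[0]-y[-1], 1e-6, y[-1]))
#     fit = popt[0]*np.exp(-popt[1]*t) + popt[2]
#     corrected = y - (fit - np.median(y))                 # same normalisation as above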
def crop(self,crop_top=0,crop_bottom=0,crop_left=0,crop_right=0,crop_begin=0,crop_end=0):
""" Crop movie
"""
t,h,w=self.shape
return self[crop_begin:t-crop_end,crop_top:h-crop_bottom,crop_left:w-crop_right]
def computeDFF(self,secsWindow=5,quantilMin=8,method='only_baseline',order='C'):
"""
compute the DFF of the movie or remove baseline
In order to compute the baseline frames are binned according to the window length parameter
and then the intermediate values are interpolated.
Parameters
----------
secsWindow: length of the windows used to compute the quantile
quantilMin : value of the quantile
method='only_baseline','delta_f_over_f','delta_f_over_sqrt_f'
Returns
-----------
self: DF or DF/F or DF/sqrt(F) movies
movBL=baseline movie
"""
print "computing minimum ..."; sys.stdout.flush()
minmov=np.min(self)
if np.min(self)<=0 and method != 'only_baseline':
raise ValueError("All pixels must be positive")
numFrames,linePerFrame,pixPerLine=np.shape(self)
downsampfact=int(secsWindow*self.fr);
elm_missing=int(np.ceil(numFrames*1.0/downsampfact)*downsampfact-numFrames)
padbefore=int(np.floor(elm_missing/2.0))
padafter=int(np.ceil(elm_missing/2.0))
print 'Initial Size Image:' + np.str(np.shape(self)); sys.stdout.flush()
self=movie(np.pad(self,((padbefore,padafter),(0,0),(0,0)),mode='reflect'),**self.__dict__)
numFramesNew,linePerFrame,pixPerLine=np.shape(self)
#% compute baseline quickly
print "binning data ..."; sys.stdout.flush()
# import pdb
# pdb.set_trace()
movBL=np.reshape(self,(downsampfact,int(numFramesNew/downsampfact),linePerFrame,pixPerLine),order=order);
movBL=np.percentile(movBL,quantilMin,axis=0);
print "interpolating data ..."; sys.stdout.flush()
print movBL.shape
movBL=scipy.ndimage.zoom(np.array(movBL,dtype=np.float32),[downsampfact ,1, 1],order=0, mode='constant', cval=0.0, prefilter=False)
#% compute DF/F
if method == 'delta_f_over_sqrt_f':
self=(self-movBL)/np.sqrt(movBL)
elif method == 'delta_f_over_f':
self=(self-movBL)/movBL
elif method =='only_baseline':
self=(self-movBL)
else:
raise Exception('Unknown method')
self=self[padbefore:len(movBL)-padafter,:,:];
print 'Final Size Movie:' + np.str(self.shape)
return self,movie(movBL,fr=self.fr,start_time=self.start_time,meta_data=self.meta_data,file_name=self.file_name)
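# Illustrative usage sketch for computeDFF (hypothetical movie 'm' loaded beforehand):
#
#     m = m - np.min(m) + 1                      # DF/F requires strictly positive pixels
#     dff, baseline = m.computeDFF(secsWindow=5, quantilMin=8, method='delta_f_over_f')
#     # dff is (F - F0)/F0 per pixel; baseline is the interpolated quantile movie F0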
def NonnegativeMatrixFactorization(self,n_components=30, init='nndsvd', beta=1,tol=5e-7, sparseness='components',**kwargs):
'''
See documentation for scikit-learn NMF
'''
minmov=np.min(self)
if np.min(self)<0:
raise ValueError("All values must be positive")
T,h,w=self.shape
Y=np.reshape(self,(T,h*w))
Y=Y-np.percentile(Y,1)
Y=np.clip(Y,0,np.Inf)
estimator=NMF(n_components=n_components, init=init, beta=beta,tol=tol, sparseness=sparseness,**kwargs)
time_components=estimator.fit_transform(Y)
components_ = estimator.components_
space_components=np.reshape(components_,(n_components,h,w))
return space_components,time_components
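# Illustrative usage sketch (the component count is an assumption):
#
#     space_comps, time_comps = m.NonnegativeMatrixFactorization(n_components=20)
#     # space_comps: (20, h, w) spatial footprints; time_comps: (T, 20) time courses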
def online_NMF(self,n_components=30,method='nnsc',lambda1=100,iterations=-5,batchsize=512,model=None,**kwargs):
""" Method performing online matrix factorization and using the spams (http://spams-devel.gforge.inria.fr/doc-python/html/index.html) package from Inria.
Implements both the nmf and nnsc methods
Parameters
----------
n_components: int
method: 'nnsc' or 'nmf' (see http://spams-devel.gforge.inria.fr/doc-python/html/index.html)
lambda1: see http://spams-devel.gforge.inria.fr/doc-python/html/index.html
iterations: see http://spams-devel.gforge.inria.fr/doc-python/html/index.html
batchsize: see http://spams-devel.gforge.inria.fr/doc-python/html/index.html
model: see http://spams-devel.gforge.inria.fr/doc-python/html/index.html
**kwargs: more arguments to be passed to nmf or nnsc
Return:
-------
time_comps
space_comps
"""
try:
import spams
except:
print "You need to install the SPAMS package"
raise
T,d1,d2=np.shape(self)
d=d1*d2
X=np.asfortranarray(np.reshape(self,[T,d],order='F'))
if method == 'nmf':
(time_comps,V) = spams.nmf(X,return_lasso= True ,K = n_components,numThreads=4,iter = iterations,**kwargs)
elif method == 'nnsc':
(time_comps,V) = spams.nnsc(X,return_lasso=True,K=n_components, lambda1 = lambda1,iter = iterations, model = model, **kwargs)
else:
raise Exception('Method unknown')
space_comps=[]
for idx,mm in enumerate(V):
space_comps.append(np.reshape(mm.todense(),(d1,d2),order='F'))
return time_comps,np.array(space_comps)
# pl.figure()
# for idx,mm in enumerate(V):
# pl.subplot(6,5,idx+1)
# pl.imshow(np.reshape(mm.todense(),(d1,d2),order='F'),cmap=pl.cm.gray)
def IPCA(self, components = 50, batch =1000):
'''
Iterative Principal Component analysis, see sklearn.decomposition.incremental_pca
Parameters:
------------
components (default 50) = number of independent components to return
batch (default 1000) = number of pixels to load into memory simultaneously in IPCA. More requires more memory but leads to better fit
Returns
-------
eigenseries: principal components (pixel time series) and associated singular values
eigenframes: eigenframes are obtained by multiplying the projected frame matrix by the projected movie (whitened frames?)
proj_frame_vectors:the reduced version of the movie vectors using only the principal component projection
'''
# vectorize the images
num_frames, h, w = np.shape(self);
frame_size = h * w;
frame_samples = np.reshape(self, (num_frames, frame_size)).T
# run IPCA to approximate the SVD
ipca_f = IncrementalPCA(n_components=components, batch_size=batch)
ipca_f.fit(frame_samples)
# construct the reduced version of the movie vectors using only the
# principal component projection
proj_frame_vectors = ipca_f.inverse_transform(ipca_f.transform(frame_samples))
# get the temporal principal components (pixel time series) and
# associated singular values
eigenseries = ipca_f.components_.T
# the rows of eigenseries are approximately orthogonal
# so we can approximately obtain eigenframes by multiplying the
# projected frame matrix by this transpose on the right
eigenframes = np.dot(proj_frame_vectors, eigenseries)
return eigenseries, eigenframes, proj_frame_vectors
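# Illustrative usage sketch (component and batch sizes are assumptions):
#
#     eigenseries, eigenframes, proj = m.IPCA(components=50, batch=1000)
#     # eigenseries: (T, 50) temporal components; eigenframes: (h*w, 50) spatial modes;
#     # proj: (h*w, T) movie projected back from the 50-component subspace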
def IPCA_stICA(self, componentsPCA=50,componentsICA = 40, batch = 1000, mu = 1, ICAfun = 'logcosh', **kwargs):
'''
Compute PCA + ICA a la Mukamel 2009.
Parameters:
components (default 50) = number of independent components to return
batch (default 1000) = number of pixels to load into memory simultaneously in IPCA. More requires more memory but leads to better fit
mu (default 0.05) = parameter in range [0,1] for spatiotemporal ICA, higher mu puts more weight on spatial information
ICAFun (default = 'logcosh') = cdf to use for ICA entropy maximization
Plus all parameters from sklearn.decomposition.FastICA
Returns:
ind_frames [components, height, width] = array of independent component "eigenframes"
'''
eigenseries, eigenframes,_proj = self.IPCA(componentsPCA, batch)
# normalize the series
frame_scale = mu / np.max(eigenframes)
frame_mean = np.mean(eigenframes, axis = 0)
n_eigenframes = frame_scale * (eigenframes - frame_mean)
series_scale = (1-mu) / np.max(eigenframes)
series_mean = np.mean(eigenseries, axis = 0)
n_eigenseries = series_scale * (eigenseries - series_mean)
# build new features from the space/time data
# and compute ICA on them
eigenstuff = np.concatenate([n_eigenframes, n_eigenseries])
ica = FastICA(n_components=componentsICA, fun=ICAfun,**kwargs)
joint_ics = ica.fit_transform(eigenstuff)
# extract the independent frames
num_frames, h, w = np.shape(self);
frame_size = h * w;
ind_frames = joint_ics[:frame_size, :]
ind_frames = np.reshape(ind_frames.T, (componentsICA, h, w))
return ind_frames
def IPCA_denoise(self, components = 50, batch = 1000):
'''
Create a denoise version of the movie only using the first 'components' components
'''
_, _, clean_vectors = self.IPCA(components, batch)
self = self.__class__(np.reshape(clean_vectors.T, np.shape(self)),**self.__dict__)
return self
def IPCA_io(self, n_components=50, fun='logcosh', max_iter=1000, tol=1e-20):
''' DO NOT USE STILL UNDER DEVELOPMENT
'''
pca_comp=n_components;
[T,d1,d2]=self.shape
M=np.reshape(self,(T,d1*d2))
[U,S,V] = scipy.sparse.linalg.svds(M,pca_comp)
S=np.diag(S);
# whiteningMatrix = np.dot(scipy.linalg.inv(np.sqrt(S)),U.T)
# dewhiteningMatrix = np.dot(U,np.sqrt(S))
whiteningMatrix = np.dot(scipy.linalg.inv(S),U.T)
dewhiteningMatrix = np.dot(U,S)
whitesig = np.dot(whiteningMatrix,M)
wsigmask=np.reshape(whitesig.T,(d1,d2,pca_comp));
f_ica=sklearn.decomposition.FastICA(whiten=False, fun=fun, max_iter=max_iter, tol=tol)
S_ = f_ica.fit_transform(whitesig.T)
A_ = f_ica.mixing_
A=np.dot(A_,whitesig)
mask=np.reshape(A.T,(d1,d2,pca_comp))
return mask
def local_correlations(self,eight_neighbours=False):
'''
Compute local correlations.
Parameters:
-----------
if eight_neighbours=True it will take the diagonal neighbours too
Returns
-------
rho M x N matrix, cross-correlation with adjacent pixel
'''
rho = np.zeros(np.shape(self)[1:3])
w_mov = (self - np.mean(self, axis = 0))/np.std(self, axis = 0)
rho_h = np.mean(np.multiply(w_mov[:,:-1,:], w_mov[:,1:,:]), axis = 0)
rho_w = np.mean(np.multiply(w_mov[:,:,:-1], w_mov[:,:,1:,]), axis = 0)
if True:
rho_d1 = np.mean(np.multiply(w_mov[:,1:,:-1], w_mov[:,:-1,1:,]), axis = 0)
rho_d2 = np.mean(np.multiply(w_mov[:,:-1,:-1], w_mov[:,1:,1:,]), axis = 0)
rho[:-1,:] = rho[:-1,:] + rho_h
rho[1:,:] = rho[1:,:] + rho_h
rho[:,:-1] = rho[:,:-1] + rho_w
rho[:,1:] = rho[:,1:] + rho_w
if eight_neighbours:
rho[:-1,:-1] = rho[:-1,:-1] + rho_d2
rho[1:,1:] = rho[1:,1:] + rho_d1
rho[1:,:-1] = rho[1:,:-1] + rho_d1
rho[:-1,1:] = rho[:-1,1:] + rho_d2
if eight_neighbours:
neighbors = 8 * np.ones(np.shape(self)[1:3])
neighbors[0,:] = neighbors[0,:] - 3;
neighbors[-1,:] = neighbors[-1,:] - 3;
neighbors[:,0] = neighbors[:,0] - 3;
neighbors[:,-1] = neighbors[:,-1] - 3;
neighbors[0,0] = neighbors[0,0] + 1;
neighbors[-1,-1] = neighbors[-1,-1] + 1;
neighbors[-1,0] = neighbors[-1,0] + 1;
neighbors[0,-1] = neighbors[0,-1] + 1;
else:
neighbors = 4 * np.ones(np.shape(self)[1:3])
neighbors[0,:] = neighbors[0,:] - 1;
neighbors[-1,:] = neighbors[-1,:] - 1;
neighbors[:,0] = neighbors[:,0] - 1;
neighbors[:,-1] = neighbors[:,-1] - 1;
rho = np.divide(rho, neighbors)
return rho
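# Illustrative usage sketch: the correlation image highlights pixels whose neighbours
# co-fluctuate, which is a quick way to spot candidate cells:
#
#     ci = m.local_correlations(eight_neighbours=True)
#     pl.imshow(ci, cmap=pl.cm.gray)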
def partition_FOV_KMeans(self,tradeoff_weight=.5,fx=.25,fy=.25,n_clusters=4,max_iter=500):
"""
Partition the FOV in clusters that are grouping pixels close in space and in mutual correlation
Parameters
------------------------------
tradeoff_weight:between 0 and 1 will weight the contributions of distance and correlation in the overall metric
fx,fy: downsampling factor to apply to the movie
n_clusters,max_iter: KMeans algorithm parameters
Outputs
-------------------------------
fovs:array 2D encoding the partitions of the FOV
mcoef: matrix of pairwise correlation coefficients
distanceMatrix: matrix of pixel distances
Example
"""
_,h1,w1=self.shape
self.resize(fx,fy)
T,h,w=self.shape
Y=np.reshape(self,(T,h*w))
mcoef=np.corrcoef(Y.T)
idxA,idxB = np.meshgrid(range(w),range(h));
coordmat=np.vstack((idxA.flatten(),idxB.flatten()))
distanceMatrix=euclidean_distances(coordmat.T);
distanceMatrix=distanceMatrix/np.max(distanceMatrix)
estim=KMeans(n_clusters=n_clusters,max_iter=max_iter);
kk=estim.fit(tradeoff_weight*mcoef-(1-tradeoff_weight)*distanceMatrix)
labs=kk.labels_
fovs=np.reshape(labs,(h,w))
fovs=cv2.resize(np.uint8(fovs),(w1,h1),1./fx,1./fy,interpolation=cv2.INTER_NEAREST)
return np.uint8(fovs), mcoef, distanceMatrix
def extract_traces_from_masks(self,masks):
"""
Parameters
----------------------
masks: array, 3D with each 2D slice being a mask (integer or fractional)
Outputs
----------------------
traces: array, 2D of fluorescence traces
"""
T,h,w=self.shape
Y=np.reshape(self,(T,h*w))
nA,_,_=masks.shape
A=np.reshape(masks,(nA,h*w))
pixelsA=np.sum(A,axis=1)
A=A/pixelsA[:,None] # obtain average over ROI
traces=trace(np.dot(A,np.transpose(Y)).T,**self.__dict__)
return traces
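# Illustrative usage sketch with two hypothetical square ROIs:
#
#     T, h, w = m.shape
#     masks = np.zeros((2, h, w))
#     masks[0, 10:20, 10:20] = 1
#     masks[1, 30:40, 30:40] = 1
#     trs = m.extract_traces_from_masks(masks)    # (T, 2) mean fluorescence per ROI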
def resize(self,fx=1,fy=1,fz=1,interpolation=cv2.INTER_AREA):
"""
resize movies along axis and interpolate or lowpass when necessary
it will not work without opencv
Parameters
-------------------
fx,fy,fz:fraction/multiple of dimension (.5 means the image will be half the size)
interpolation=cv2.INTER_AREA. Set to none if you do not want interpolation or lowpass
"""
T,d1,d2 =self.shape
d=d1*d2
elm=d*T
max_els=2**31-1
if elm > max_els:
chunk_size=(max_els)/d
new_m=[]
print('Resizing in chunks because of opencv bug')
for chunk in range(0,T,chunk_size):
print([chunk,np.minimum(chunk+chunk_size,T)])
m_tmp=self[chunk:np.minimum(chunk+chunk_size,T)].copy()
m_tmp=m_tmp.resize(fx=fx,fy=fy,fz=fz,interpolation=interpolation)
if len(new_m) == 0:
new_m=m_tmp
else:
new_m=timeseries.concatenate([new_m,m_tmp],axis=0)
return new_m
else:
if fx!=1 or fy!=1:
print "reshaping along x and y"
t,h,w=self.shape
newshape=(int(w*fy),int(h*fx))
mov=[];
print(newshape)
for frame in self:
mov.append(cv2.resize(frame,newshape,fx=fx,fy=fy,interpolation=interpolation))
self=movie(np.asarray(mov),**self.__dict__)
if fz!=1:
print "reshaping along z"
t,h,w=self.shape
self=np.reshape(self,(t,h*w))
mov=cv2.resize(self,(h*w,int(fz*t)),fx=1,fy=fz,interpolation=interpolation)
# self=cv2.resize(self,(h*w,int(fz*t)),fx=1,fy=fz,interpolation=interpolation)
mov=np.reshape(mov,(np.maximum(1,int(fz*t)),h,w))
self=movie(mov,**self.__dict__)
self.fr=self.fr*fz
return self
def guided_filter_blur_2D(self,guide_filter,radius=5, eps=0):
"""
performs guided filtering on each frame. See opencv documentation of cv2.ximgproc.guidedFilter
"""
for idx,fr in enumerate(self):
if idx%1000==0:
print idx
self[idx] = cv2.ximgproc.guidedFilter(guide_filter,fr,radius=radius,eps=eps)
return self
def bilateral_blur_2D(self,diameter=5,sigmaColor=10000,sigmaSpace=0):
"""
performs bilateral filtering on each frame. See opencv documentation of cv2.bilateralFilter
"""
if type(self[0,0,0]) is not np.float32:
warnings.warn('Casting the array to float 32')
self=np.asanyarray(self,dtype=np.float32)
for idx,fr in enumerate(self):
if idx%1000==0:
print idx
self[idx] = cv2.bilateralFilter(fr,diameter,sigmaColor,sigmaSpace)
return self
def gaussian_blur_2D(self,kernel_size_x=5,kernel_size_y=5,kernel_std_x=1,kernel_std_y=1,borderType=cv2.BORDER_REPLICATE):
"""
Compute gaussian blur in 2D. Might be useful when motion correcting
Parameters
----------
kernel_size: double
see opencv documentation of GaussianBlur
kernel_std_: double
see opencv documentation of GaussianBlur
borderType: int
see opencv documentation of GaussianBlur
Returns
--------
self: ndarray
blurred movie
"""
for idx,fr in enumerate(self):
print idx
self[idx] = cv2.GaussianBlur(fr,ksize=(kernel_size_x,kernel_size_y),sigmaX=kernel_std_x,sigmaY=kernel_std_y,borderType=borderType)
return self
def median_blur_2D(self,kernel_size=3):
"""
Compute median blur in 2D. Might be useful when motion correcting
Parameters
----------
kernel_size: int
see opencv documentation of medianBlur
Returns
--------
self: ndarray
blurred movie
"""
for idx,fr in enumerate(self):
print idx
self[idx] = cv2.medianBlur(fr,ksize=kernel_size)
return self
def resample(self,new_time_vect):
print 1
def to_2D(self,order='F'):
[T,d1,d2]=self.shape
d=d1*d2
return np.reshape(self,(T,d),order=order)
def zproject(self,method='mean',cmap=pl.cm.gray,aspect='auto',**kwargs):
"""
Compute and plot projection across time:
method: String
'mean','median','std'
**kwargs: dict
arguments to imagesc
"""
if method == 'mean':
zp=np.mean(self,axis=0)
elif method == 'median':
zp=np.median(self,axis=0)
elif method == 'std':
zp=np.std(self,axis=0)
else:
raise Exception('Method not implemented')
pl.imshow(zp,cmap=cmap,aspect=aspect,**kwargs)
return zp
def local_correlations_movie(self,window=10):
[T,d1,d2]=self.shape
return movie(np.concatenate([self[j:j+window,:,:].local_correlations(eight_neighbours=True)[np.newaxis,:,:] for j in range(T-window)],axis=0),fr=self.fr)
def play(self,gain=1,fr=None,magnification=1,offset=0,interpolation=cv2.INTER_LINEAR,backend='pylab',do_loop=False):
"""
Play the movie using opencv
Parameters
----------
gain: adjust movie brightness
fr: playing frame rate (frames per second) if different from the original
backend: 'pylab' or 'opencv', the latter much faster
"""
if backend == 'pylab':
print '*** WARNING *** SPEED MIGHT BE LOW. USE opencv backend if available'
gain*=1.
maxmov=np.max(self)
if backend == 'pylab':
plt.ion()
fig = plt.figure( 1 )
ax = fig.add_subplot( 111 )
ax.set_title("Play Movie")
im = ax.imshow( (offset+self[0])*gain/maxmov ,cmap=plt.cm.gray,vmin=0,vmax=1,interpolation='none') # Blank starting image
fig.show()
im.axes.figure.canvas.draw()
plt.pause(1)
if backend == 'notebook':
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
im = plt.imshow(self[0],interpolation='None',cmap=plt.cm.gray)
plt.axis('off')
def animate(i):
im.set_data(self[i])
return im,
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate,
frames=self.shape[0], interval=1, blit=True)
# call our new function to display the animation
return display_animation(anim, fps=fr)
if fr is None:
fr=self.fr
looping=True
terminated=False
while looping:
for iddxx,frame in enumerate(self):
if backend == 'opencv':
if magnification != 1:
frame = cv2.resize(frame,None,fx=magnification, fy=magnification, interpolation = interpolation)
cv2.imshow('frame',(offset+frame)*gain/maxmov)
if cv2.waitKey(int(1./fr*1000)) & 0xFF == ord('q'):
# cv2.destroyAllWindows()
looping=False
terminated=True
break
elif backend == 'pylab':
im.set_data((offset+frame)*gain/maxmov)
ax.set_title( str( iddxx ) )
plt.axis('off')
fig.canvas.draw()
plt.pause(1./fr*.5)
ev=plt.waitforbuttonpress(1./fr*.5)
if ev is not None:
plt.close()
break
elif backend == 'notebook':
print 'Animated via MP4'
break
else:
raise Exception('Unknown backend!')
if terminated:
break
if do_loop:
looping=True
if backend == 'opencv':
cv2.waitKey(100)
cv2.destroyAllWindows()
for i in range(10):
cv2.waitKey(100)
def load(file_name,fr=None,start_time=0,meta_data=None,subindices=None,shape=None,num_frames_sub_idx=np.inf):
'''
load movie from file.
Parameters
-----------
file_name: string
name of file. Possible extensions are tif, avi, npy, (npz and hdf5 are usable only if saved by calblitz)
fr: float
frame rate
start_time: float
initial time for frame 1
meta_data: dict
same as for calblitz.movie
subindices: iterable indexes
for loading only portion of the movie
shape: tuple of two values
dimension of the movie along x and y if loading from a two dimensional numpy array
Returns
-------
mov: calblitz.movie
'''
# case we load movie from file
if os.path.exists(file_name):
name, extension = os.path.splitext(file_name)[:2]
if extension == '.tif' or extension == '.tiff': # load tif file
if subindices is not None:
input_arr = imread(file_name)[subindices, :, :]
else:
input_arr = imread(file_name)
input_arr = np.squeeze(input_arr)
# with pims.open(file_name) as f:
# if len(f.frame_shape)==3:
# for ext_fr in f:
# if subindices is None:
# input_arr = np.array(ext_fr)
# else:
# input_arr = np.array([ext_fr[j] for j in subindices])
# elif len(f.frame_shape)==2:
# if subindices is None:
# input_arr = np.array(f)
# else:
# input_arr = np.array([f[j] for j in subindices])
# else:
# raise Exception('The input file has an unknown numberof dimensions')
# necessary for the way pims work with tiffs
# input_arr = input_arr[:,::-1,:]
elif extension == '.avi': # load avi file
#raise Exception('Use sintax mov=cb.load(filename)')
if subindices is not None:
raise Exception('Subindices not implemented')
cap = cv2.VideoCapture(file_name)
try:
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
except:
print 'Rolling back to opencv 2'
length = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
input_arr=np.zeros((length, height,width),dtype=np.uint8)
counter=0
ret=True
while True:
# Capture frame-by-frame
ret, frame = cap.read()
if not ret:
break
input_arr[counter]=frame[:,:,0]
counter=counter+1
if not counter%100:
print counter
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
elif extension == '.npy': # load npy file
if subindices is not None:
input_arr=np.load(file_name)[subindices]
else:
input_arr=np.load(file_name)
if input_arr.ndim==2:
if shape is not None:
d,T=np.shape(input_arr)
d1,d2=shape
input_arr=np.transpose(np.reshape(input_arr,(d1,d2,T),order='F'),(2,0,1))
else:
raise Exception('Loaded vector is 2D , you need to provide the shape parameter')
elif extension == '.mat': # load mat file
input_arr=loadmat(file_name)['data']
input_arr=np.rollaxis(input_arr,2,-3)
if subindices is not None:
input_arr=input_arr[subindices]
elif extension == '.npz': # load movie from saved file
if subindices is not None:
raise Exception('Subindices not implemented')
with np.load(file_name) as f:
return movie(**f)
elif extension== '.hdf5':
with h5py.File(file_name, "r") as f:
attrs=dict(f['mov'].attrs)
#print attrs
if meta_data in attrs:
attrs['meta_data']=cpk.loads(attrs['meta_data'])
if subindices is None:
# fr=f['fr'],start_time=f['start_time'],file_name=f['file_name']
return movie(f['mov'],**attrs)
else:
return movie(f['mov'][subindices],**attrs)
elif extension == '.mmap':
filename=os.path.split(file_name)[-1]
fpart=filename.split('_')[1:-1]
d1,d2,d3,T,order=int(fpart[-9]),int(fpart[-7]),int(fpart[-5]),int(fpart[-1]),fpart[-3]
Yr=np.memmap(file_name,mode='r',shape=(d1*d2,T),dtype=np.float32,order=order)
print 'mmap'
return movie(to_3D(np.array(Yr).T,(T,d1,d2),order=order),fr=fr)
elif extension == '.sbx':
print 'sbx'
return movie(sbxread(file_name[:-4],num_frames_sub_idx).transpose([0,3,2,1]),fr=fr)
else:
raise Exception('Unknown file type')
else:
raise Exception('File not found!')
return movie(input_arr,fr=fr,start_time=start_time,file_name=os.path.split(file_name)[-1], meta_data=meta_data)
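# Illustrative usage sketches for load (file names, frame rate and indices are assumptions):
#
#     m = load('example.tif', fr=30)                                  # whole movie
#     m_sub = load('example.tif', fr=30, subindices=range(100))       # first 100 frames
#     m_npy = load('flat.npy', fr=30, shape=(512, 512))               # 2D array + shape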
def load_movie_chain(file_list, fr=None, start_time=0,
meta_data=None, subindices=None,
bottom=0, top=0, left=0, right=0):
''' load movies from list of file names
file_list: list of file names in string format
other parameters as in load except
bottom, top, left, right to load only portion of the field of view
'''
mov = []
for f in tqdm(file_list):
m = load(f, fr=fr, start_time=start_time,
meta_data=meta_data, subindices=subindices)
if m.ndim == 2:
m = m[np.newaxis, :, :]
tm, h, w = np.shape(m)
m = m[:, top:h-bottom, left:w-right]
mov.append(m)
return ts.concatenate(mov, axis=0)
def loadmat_sbx(filename):
'''
this function should be called instead of direct spio.loadmat
as it cures the problem of not properly recovering python dictionaries
from mat files. It calls the function check keys to cure all entries
which are still mat-objects
'''
data_ = loadmat(filename, struct_as_record=False, squeeze_me=True)
return _check_keys(data_)
def _check_keys(dict):
'''
checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries
'''
for key in dict:
if isinstance(dict[key], scipy.io.matlab.mio5_params.mat_struct):
dict[key] = _todict(dict[key])
return dict
def _todict(matobj):
'''
A recursive function which constructs from matobjects nested dictionaries
'''
dict = {}
for strg in matobj._fieldnames:
elem = matobj.__dict__[strg]
if isinstance(elem, scipy.io.matlab.mio5_params.mat_struct):
dict[strg] = _todict(elem)
else:
dict[strg] = elem
return dict
def sbxread(filename,n_frames=np.inf):
'''
Input: filename should be full path excluding .sbx
'''
# Check if contains .sbx and if so just truncate
if '.sbx' in filename:
filename = filename[:-4]
# Load info
info = loadmat_sbx(filename + '.mat')['info']
#print info.keys()
# Defining number of channels/size factor
if info['channels'] == 1:
info['nChan'] = 2; factor = 1
elif info['channels'] == 2:
info['nChan'] = 1; factor = 2
elif info['channels'] == 3:
info['nChan'] = 1; factor = 2
# Determine number of frames in whole file
max_idx = os.path.getsize(filename + '.sbx')/info['recordsPerBuffer']/info['sz'][1]*factor/4-1
# Parameters
k = 0; #First frame
N = max_idx; #Last frame
N = np.minimum(max_idx,n_frames)
nSamples = info['sz'][1] * info['recordsPerBuffer'] * 2 * info['nChan']
# Open File
fo = open(filename + '.sbx')
# Note: There is a weird inversion that happens thus I am adding the negative sign....
fo.seek(k*nSamples, 0)
x = np.fromfile(fo, dtype = 'uint16',count = nSamples/2*N)
x = -x.reshape((info['nChan'], info['sz'][1], info['recordsPerBuffer'], N), order = 'F')
return x
def to_3D(mov2D,shape,order='F'):
"""
transform to 3D a vectorized movie
"""
return np.reshape(mov2D,shape,order=order)
if __name__ == "__main__":
print 1
# mov=movie('/Users/agiovann/Dropbox/Preanalyzed Data/ExamplesDataAnalysis/Andrea/PC1/M_FLUO.tif',fr=15.62,start_time=0,meta_data={'zoom':2,'location':[100, 200, 300]})
# mov1=movie('/Users/agiovann/Dropbox/Preanalyzed Data/ExamplesDataAnalysis/Andrea/PC1/M_FLUO.tif',fr=15.62,start_time=0,meta_data={'zoom':2,'location':[100, 200, 300]})
## newmov=ts.concatenate([mov,mov1])
## mov.save('./test.npz')
## mov=movie.load('test.npz')
# max_shift=5;
# mov,template,shifts,xcorrs=mov.motion_correct(max_shift_h=max_shift,max_shift_w=max_shift,show_movie=0)
# max_shift=5;
# mov1,template1,shifts1,xcorrs1=mov1.motion_correct(max_shift_h=max_shift,max_shift_w=max_shift,show_movie=0,method='skimage')
# mov=mov.apply_shifts(shifts)
# mov=mov.crop(crop_top=max_shift,crop_bottom=max_shift,crop_left=max_shift,crop_right=max_shift)
# mov=mov.resize(fx=.25,fy=.25,fz=.2)
# mov=mov.computeDFF()
# mov=mov-np.min(mov)
# space_components,time_components=mov.NonnegativeMatrixFactorization();
# trs=mov.extract_traces_from_masks(1.*(space_components>0.4))
# trs=trs.computeDFF()
|
agiovann/CalBlitz
|
calblitz/movies.py
|
Python
|
gpl-3.0
| 47,510
|
[
"Gaussian"
] |
72c2e502e5e49788a9b86e2b608e8557b742307bf41bb45aa4da67c55ba65e42
|
# proxy module
from __future__ import absolute_import
from mayavi.preferences.bindings import *
|
enthought/etsproxy
|
enthought/mayavi/preferences/bindings.py
|
Python
|
bsd-3-clause
| 96
|
[
"Mayavi"
] |
526125a7ca190292a5858243f657d0d8b07b0cfdf2b9cacb5534cfa0a5a4df2b
|
"""
********************************************************************************
* Name: app_base.py
* Author: Nathan Swain and Scott Christensen
* Created On: August 19, 2013
* Copyright: (c) Brigham Young University 2013
* License: BSD 2-Clause
********************************************************************************
"""
import logging
import os
import sys
import traceback
import warnings
import uuid
from django.db.utils import ProgrammingError
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.http import HttpRequest
from django.utils.functional import SimpleLazyObject
from django.conf.urls import url
from tethys_apps.base.testing.environment import is_testing_environment, get_test_db_name, TESTING_DB_FLAG
from tethys_apps.base import permissions
from .handoff import HandoffManager
from .workspace import TethysWorkspace
from .mixins import TethysBaseMixin
from ..exceptions import TethysAppSettingDoesNotExist, TethysAppSettingNotAssigned
from bokeh.server.django.consumers import WSConsumer
from bokeh.server.django import autoload
from bokeh.server.django.consumers import AutoloadJsConsumer
tethys_log = logging.getLogger('tethys.app_base')
class TethysBase(TethysBaseMixin):
"""
Abstract base class of app and extension classes.
"""
name = ''
package = ''
root_url = ''
description = ''
def __init__(self):
self._url_patterns = None
self._handler_patterns = None
self._namespace = None
@staticmethod
def _resolve_ref_function(ref, ref_type, is_extension):
"""
This method retrieves a controller or handler function.
Args:
ref: The function of dot-formatted string path to the function
ref_type: Handler or controller
is_extension: Boolean. True if working with a Tethys Extension
Returns:
func: If the reference is a string returns the attribute value of the function.
If the reference is a function returns the referenced function itself.
Example:
controller_function = self._resolve_ref_function(url_map.controller, 'controller', is_extension)
"""
if isinstance(ref, str):
root_controller_path = 'tethysext' if is_extension else 'tethysapp'
full_controller_path = '.'.join([root_controller_path, ref])
controller_parts = full_controller_path.split('.')
module_name = '.'.join(controller_parts[:-1])
function_name = controller_parts[-1]
try:
module = __import__(module_name, fromlist=[function_name])
except Exception as e:
error_msg = f'The following error occurred while trying to import the {ref_type} function ' \
f'"{ref}":\n {traceback.format_exc(2)}'
tethys_log.error(error_msg)
raise e
try:
ref_function = getattr(module, function_name)
except AttributeError as e:
error_msg = f'The following error occurred while trying to access the {ref_type} function ' \
f'"{ref}":\n {traceback.format_exc(2)}'
tethys_log.error(error_msg)
raise e
else:
ref_function = ref
return ref_function
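# Illustrative standalone equivalent of the string branch above (import-by-dotted-path via
# __import__ plus getattr); the controller path below is a hypothetical example:
#
#     path = 'tethysapp.my_first_app.controllers.home'
#     module_name, function_name = path.rsplit('.', 1)
#     module = __import__(module_name, fromlist=[function_name])
#     controller = getattr(module, function_name)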
def _resolve_bokeh_handler(self, namespace, url_map, handler_function, handler_patterns):
"""
Create and add url patterns for bokeh handler
Args:
namespace: App name
url_map: Mapping containing url name, controller, and handler information
handler_function: The function returned by _resolve_ref_function
handler_patterns: dictionary to add http and websocket patterns to
Returns:
None
Example:
self._resolve_bokeh_handler(namespace, url_map, handler_function, handler_patterns)
"""
if url_map.url in [r'', r'/', r'^$', r'^/$']:
app_endpoint = '/'.join(['apps', self.root_url])
else:
stripped_url = url_map.url.replace("^", "").replace("$", "")
if stripped_url.endswith('/'):
stripped_url = stripped_url[:-1]
app_endpoint = '/'.join(['apps', self.root_url, stripped_url])
bokeh_app = autoload(app_endpoint, handler_function)
kwargs = dict(app_context=bokeh_app.app_context)
def urlpattern(suffix=""):
url_pattern = bokeh_app.url + suffix
return f'^{url_pattern}$'
http_url = url(urlpattern('/autoload.js'), AutoloadJsConsumer,
name=f'{url_map.name}_bokeh_autoload', kwargs=kwargs)
ws_url = url(urlpattern('/ws'), WSConsumer, name=f'{url_map.name}_bokeh_ws', kwargs=kwargs)
# Append to namespace list
handler_patterns['http'][namespace].append(http_url)
handler_patterns['websocket'][namespace].append(ws_url)
def url_maps(self):
"""
Override this method to define the URL Maps for your app. Your ``UrlMap`` objects must be created from a ``UrlMap`` class that is bound to the ``root_url`` of your app. Use the ``url_map_maker()`` function to create the bound ``UrlMap`` class. If you generate your app project from the scaffold, this will be done automatically.
Starting in Tethys 3.0, the ``WebSocket`` protocol is supported along with the ``HTTP`` protocol. To create a ``WebSocket UrlMap``, follow the same pattern used for the ``HTTP`` protocol. In addition, provide a ``Consumer`` path in the controllers parameter as well as a ``WebSocket`` string value for the new protocol parameter of the ``WebSocket UrlMap``.
Alternatively, Bokeh Server can also be integrated into Tethys using ``Django Channels`` and ``Websockets``. Tethys will automatically set these up for you if ``handler`` and ``handler_type`` parameters are provided as part of the ``UrlMap``.
Returns:
iterable: A list or tuple of ``UrlMap`` objects.
**Example:**
::
from tethys_sdk.base import url_map_maker
class MyFirstApp(TethysAppBase):
def url_maps(self):
\"""
Example url_maps method.
\"""
# Create UrlMap class that is bound to the root url.
UrlMap = url_map_maker(self.root_url)
url_maps = (
UrlMap(name='home',
url='my-first-app',
controller='my_first_app.controllers.home',
),
UrlMap(name='home_ws',
url='my-first-ws',
controller='my_first_app.controllers.HomeConsumer',
protocol='websocket'
),
UrlMap(name='bokeh_handler',
url='my-first-app/bokeh-example',
controller='my_first_app.controllers.bokeh_example',
handler='my_first_app.controllers.bokeh_example_handler',
handler_type='bokeh'
),
)
return url_maps
""" # noqa: E501
return []
@property
def url_patterns(self):
"""
Generate the url pattern lists for app and namespace them accordingly.
"""
if self._url_patterns is None:
is_extension = isinstance(self, TethysExtensionBase)
url_patterns = {'http': dict(), 'websocket': dict()}
if hasattr(self, 'url_maps'):
url_maps = self.url_maps()
for url_map in url_maps:
namespace = self.namespace
if namespace not in url_patterns[url_map.protocol]:
url_patterns[url_map.protocol][namespace] = []
# Create django url object
controller_function = self._resolve_ref_function(url_map.controller, 'controller', is_extension)
django_url = url(url_map.url, controller_function, name=url_map.name)
# Append to namespace list
url_patterns[url_map.protocol][namespace].append(django_url)
self._url_patterns = url_patterns
return self._url_patterns
@property
def handler_patterns(self):
"""
Generate the url pattern lists for app and namespace them accordingly.
"""
if self._handler_patterns is None:
is_extension = isinstance(self, TethysExtensionBase)
handler_patterns = {'http': dict(), 'websocket': dict()}
if hasattr(self, 'url_maps'):
url_maps = self.url_maps()
for url_map in url_maps:
if url_map.handler:
namespace = self.namespace
if namespace not in handler_patterns['http']:
handler_patterns['http'][namespace] = []
if namespace not in handler_patterns['websocket']:
handler_patterns['websocket'][namespace] = []
# Create django url routing objects
handler_function = self._resolve_ref_function(url_map.handler, 'handler', is_extension)
if url_map.handler_type == 'bokeh':
self._resolve_bokeh_handler(namespace, url_map, handler_function, handler_patterns)
self._handler_patterns = handler_patterns
return self._handler_patterns
def sync_with_tethys_db(self):
"""
Sync installed apps with database.
"""
raise NotImplementedError
def remove_from_db(self):
"""
Remove the instance from the db.
"""
raise NotImplementedError
class TethysExtensionBase(TethysBase):
"""
Base class used to define the extension class for Tethys extensions.
"""
def __str__(self):
"""
String representation
"""
return '<TethysApp: {0}>'.format(self.name)
def __repr__(self):
"""
String representation
"""
return '<TethysApp: {0}>'.format(self.name)
def url_maps(self):
"""
Override this method to define the URL Maps for your app. Your ``UrlMap`` objects must be created from a ``UrlMap`` class that is bound to the ``root_url`` of your app. Use the ``url_map_maker()`` function to create the bound ``UrlMap`` class. If you generate your app project from the scaffold, this will be done automatically.
Returns:
iterable: A list or tuple of ``UrlMap`` objects.
**Example:**
::
from tethys_sdk.base import url_map_maker
class MyFirstApp(TethysAppBase):
def url_maps(self):
\"""
Example url_maps method.
\"""
# Create UrlMap class that is bound to the root url.
UrlMap = url_map_maker(self.root_url)
url_maps = (UrlMap(name='home',
url='my-first-app',
controller='my_first_app.controllers.home',
),
)
return url_maps
""" # noqa: E501
return []
def sync_with_tethys_db(self):
"""
Sync installed apps with database.
"""
from django.conf import settings
from tethys_apps.models import TethysExtension
try:
# Query to see if installed extension is in the database
db_extensions = TethysExtension.objects. \
filter(package__exact=self.package). \
all()
# If the extension is not in the database, then add it
if len(db_extensions) == 0:
extension = TethysExtension(
name=self.name,
package=self.package,
description=self.description,
root_url=self.root_url,
)
extension.save()
# If the extension is in the database, update developer-first attributes
elif len(db_extensions) == 1:
db_extension = db_extensions[0]
db_extension.root_url = self.root_url
db_extension.save()
if hasattr(settings, 'DEBUG') and settings.DEBUG:
db_extension.name = self.name
db_extension.description = self.description
db_extension.save()
except ProgrammingError:
tethys_log.warning("Unable to sync extension with database. tethys_apps_tethysextension "
"table does not exist")
except Exception as e:
tethys_log.error(e)
class TethysAppBase(TethysBase):
"""
Base class used to define the app class for Tethys apps.
Attributes:
name (string): Name of the app.
index (string): Lookup term for the index URL of the app.
icon (string): Location of the image to use for the app icon.
package (string): Name of the app package.
root_url (string): Root URL of the app.
color (string): App theme color as RGB hexadecimal.
description (string): Description of the app.
tag (string): A string for filtering apps.
enable_feedback (boolean): Shows feedback button on all app pages.
feedback_emails (list): A list of emails corresponding to where submitted feedback forms are sent.
"""
index = ''
icon = ''
color = ''
tags = ''
enable_feedback = False
feedback_emails = []
def __str__(self):
"""
String representation
"""
return '<TethysApp: {0}>'.format(self.name)
def __repr__(self):
"""
String representation
"""
return '<TethysApp: {0}>'.format(self.name)
def custom_settings(self):
"""
Override this method to define custom settings for use in your app.
Returns:
iterable: A list or tuple of ``CustomSetting`` objects.
**Example:**
::
from tethys_sdk.app_settings import CustomSetting
class MyFirstApp(TethysAppBase):
def custom_settings(self):
\"""
Example custom_settings method.
\"""
custom_settings = (
CustomSetting(
name='default_name',
type=CustomSetting.TYPE_STRING,
description='Default model name.',
required=True
),
CustomSetting(
name='max_count',
type=CustomSetting.TYPE_INTEGER,
description='Maximum allowed count in a method.',
required=False
),
CustomSetting(
name='change_factor',
type=CustomSetting.TYPE_FLOAT,
description='Change factor that is applied to some process.',
required=True
),
CustomSetting(
name='enable_feature',
type=CustomSetting.TYPE_BOOLEAN,
description='Enable this feature when True.',
required=True
)
)
return custom_settings
"""
return None
def persistent_store_settings(self):
"""
Override this method to define a persistent store service connections and databases for your app.
Returns:
iterable: A list or tuple of ``PersistentStoreDatabaseSetting`` or ``PersistentStoreConnectionSetting`` objects.
**Example:**
::
from tethys_sdk.app_settings import PersistentStoreDatabaseSetting, PersistentStoreConnectionSetting
class MyFirstApp(TethysAppBase):
def persistent_store_settings(self):
\"""
Example persistent_store_settings method.
\"""
ps_settings = (
# Connection only, no database
PersistentStoreConnectionSetting(
name='primary',
description='Connection with superuser role needed.',
required=True
),
# Connection only, no database
PersistentStoreConnectionSetting(
name='creator',
description='Create database role only.',
required=False
),
# Spatial database
PersistentStoreDatabaseSetting(
name='spatial_db',
description='for storing important spatial stuff',
required=True,
initializer='appsettings.model.init_spatial_db',
spatial=True,
),
# Non-spatial database
PersistentStoreDatabaseSetting(
name='temp_db',
description='for storing temporary stuff',
required=False,
initializer='appsettings.model.init_temp_db',
spatial=False,
)
)
return ps_settings
""" # noqa: E501
return None
def dataset_service_settings(self):
"""
Override this method to define dataset service connections for use in your app.
Returns:
iterable: A list or tuple of ``DatasetServiceSetting`` objects.
**Example:**
::
from tethys_sdk.app_settings import DatasetServiceSetting
class MyFirstApp(TethysAppBase):
def dataset_service_settings(self):
\"""
Example dataset_service_settings method.
\"""
ds_settings = (
DatasetServiceSetting(
name='primary_ckan',
description='Primary CKAN service for app to use.',
engine=DatasetServiceSetting.CKAN,
required=True,
),
DatasetServiceSetting(
name='hydroshare',
description='HydroShare service for app to use.',
engine=DatasetServiceSetting.HYDROSHARE,
required=False
)
)
return ds_settings
"""
return None
def spatial_dataset_service_settings(self):
"""
Override this method to define spatial dataset service connections for use in your app.
Returns:
iterable: A list or tuple of ``SpatialDatasetServiceSetting`` objects.
**Example:**
::
from tethys_sdk.app_settings import SpatialDatasetServiceSetting
class MyFirstApp(TethysAppBase):
def spatial_dataset_service_settings(self):
\"""
Example spatial_dataset_service_settings method.
\"""
sds_settings = (
SpatialDatasetServiceSetting(
name='primary_geoserver',
description='spatial dataset service for app to use',
engine=SpatialDatasetServiceSetting.GEOSERVER,
required=True,
),
)
return sds_settings
"""
return None
def web_processing_service_settings(self):
"""
Override this method to define web processing service connections for use in your app.
Returns:
iterable: A list or tuple of ``WebProcessingServiceSetting`` objects.
**Example:**
::
from tethys_sdk.app_settings import WebProcessingServiceSetting
class MyFirstApp(TethysAppBase):
def web_processing_service_settings(self):
\"""
Example wps_services method.
\"""
wps_services = (
WebProcessingServiceSetting(
name='primary_52n',
description='WPS service for app to use',
required=True,
),
)
return wps_services
"""
return None
def handoff_handlers(self):
"""
Override this method to define handoff handlers for use in your app.
Returns:
iterable: A list or tuple of ``HandoffHandler`` objects.
**Example:**
::
from tethys_sdk.handoff import HandoffHandler
class MyFirstApp(TethysAppBase):
def handoff_handlers(self):
\"""
Example handoff_handlers method.
\"""
handoff_handlers = (
HandoffHandler(
name='example',
handler='my_first_app.controllers.my_handler'
),
)
return handoff_handlers
"""
return None
def permissions(self):
"""
Override this method to define permissions for your app.
Returns:
iterable: A list or tuple of ``Permission`` or ``PermissionGroup`` objects.
**Example:**
::
from tethys_sdk.permissions import Permission, PermissionGroup
class MyFirstApp(TethysAppBase):
def permissions(self):
\"""
Example permissions method.
\"""
# Viewer Permissions
view_map = Permission(
name='view_map',
description='View map'
)
delete_projects = Permission(
name='delete_projects',
description='Delete projects'
)
create_projects = Permission(
name='create_projects',
description='Create projects'
)
admin = PermissionGroup(
name='admin',
permissions=(delete_projects, create_projects)
)
permissions = (admin, view_map)
return permissions
"""
return None
def register_app_permissions(self):
"""
Register and sync the app permissions.
"""
from guardian.shortcuts import assign_perm, remove_perm, get_perms
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission, Group
from tethys_apps.models import TethysApp
perms = self.permissions()
# add default access_app permission
app_permissions = {f'{self.package}:access_app': f'{self.package} | Can access app'}
app_groups = dict()
# Name spaced prefix for app permissions
# e.g. my_first_app:view_things
# e.g. my_first_app | View things
perm_codename_prefix = self.package + ':'
perm_name_prefix = self.package + ' | '
if perms is not None:
# Thing is either a Permission or a PermissionGroup object
for thing in perms:
# Permission Case
if isinstance(thing, permissions.Permission):
# Name space the permissions and add it to the list
permission_codename = perm_codename_prefix + thing.name
permission_name = perm_name_prefix + thing.description
app_permissions[permission_codename] = permission_name
# PermissionGroup Case
elif isinstance(thing, permissions.PermissionGroup):
# Record in dict of groups
group_permissions = []
group_name = perm_codename_prefix + thing.name
for perm in thing.permissions:
# Name space the permissions and add it to the list
permission_codename = perm_codename_prefix + perm.name
permission_name = perm_name_prefix + perm.description
app_permissions[permission_codename] = permission_name
group_permissions.append(permission_codename)
# Store all groups for all apps
app_groups[group_name] = {'permissions': group_permissions, 'app_package': self.package}
# Get the TethysApp content type
tethys_content_type = ContentType.objects.get(
app_label='tethys_apps',
model='tethysapp'
)
# Remove any permissions that no longer exist
db_app_permissions = Permission.objects.\
filter(content_type=tethys_content_type).\
filter(codename__icontains=perm_codename_prefix).\
all()
for db_app_permission in db_app_permissions:
# Delete the permission if the permission is no longer required by this app
if db_app_permission.codename not in app_permissions:
db_app_permission.delete()
# Create permissions that need to be created
for perm in app_permissions:
# Create permission if it doesn't exist
try:
# If permission exists, update it
p = Permission.objects.get(codename=perm)
p.name = app_permissions[perm]
p.content_type = tethys_content_type
p.save()
except Permission.DoesNotExist:
p = Permission(
name=app_permissions[perm],
codename=perm,
content_type=tethys_content_type
)
p.save()
# Remove any groups that no longer exist
db_groups = Group.objects.filter(name__icontains=perm_codename_prefix).all()
db_apps = TethysApp.objects.all()
db_app_names = [db_app.package for db_app in db_apps]
for db_group in db_groups:
db_group_name_parts = db_group.name.split(':')
# Only perform maintenance on groups that belong to Tethys Apps
if (len(db_group_name_parts) > 1) and (db_group_name_parts[0] in db_app_names):
# Delete groups that is no longer required by this app
if db_group.name not in app_groups:
db_group.delete()
# Create groups that need to be created
for group in app_groups:
# Look up the app
db_app = TethysApp.objects.get(
package=app_groups[group]['app_package'])
# Create group if it doesn't exist
try:
# If it exists, update the permissions assigned to it
g = Group.objects.get(name=group)
# Get the permissions for the group and remove all of them
perms = get_perms(g, db_app)
for p in perms:
remove_perm(p, g, db_app)
# Assign the permission to the group and the app instance
for p in app_groups[group]['permissions']:
assign_perm(p, g, db_app)
except Group.DoesNotExist:
# Create a new group
g = Group(name=group)
g.save()
# Assign the permission to the group and the app instance
for p in app_groups[group]['permissions']:
assign_perm(p, g, db_app)
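# Illustrative note on the namespacing applied above (the app package 'my_first_app' and the
# permission 'view_map' are assumed examples):
#
#     permission_codename = 'my_first_app' + ':' + 'view_map'    # -> 'my_first_app:view_map'
#     permission_name = 'my_first_app' + ' | ' + 'View map'      # -> 'my_first_app | View map'
#     # a PermissionGroup named 'admin' is stored as the Django Group 'my_first_app:admin'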
@classmethod
def get_handoff_manager(cls):
"""
Get the HandoffManager for the app.
"""
app = cls()
handoff_manager = HandoffManager(app)
return handoff_manager
@classmethod
def get_job_manager(cls):
"""
Get the JobManager for the app.
"""
from tethys_compute.job_manager import JobManager
app = cls()
job_manager = JobManager(app)
return job_manager
@classmethod
def get_user_workspace(cls, user):
"""
Get the file workspace (directory) for the given User.
Args:
user(User or HttpRequest): User or request object.
Returns:
tethys_apps.base.TethysWorkspace: An object representing the workspace.
**Example:**
::
import os
from my_first_app.app import MyFirstApp as app
def a_controller(request):
\"""
Example controller that uses get_user_workspace() method.
\"""
# Retrieve the workspace
user_workspace = app.get_user_workspace(request.user)
new_file_path = os.path.join(user_workspace.path, 'new_file.txt')
with open(new_file_path, 'w') as a_file:
a_file.write('...')
context = {}
return render(request, 'my_first_app/template.html', context)
"""
warnings.warn('@user_workspace decorator is now the preferred method for getting user workspace', DeprecationWarning) # noqa: E501
username = ''
from django.contrib.auth.models import User
if isinstance(user, User) or isinstance(user, SimpleLazyObject):
username = user.username
elif isinstance(user, HttpRequest):
username = user.user.username
elif user is None:
pass
else:
raise ValueError("Invalid type for argument 'user': must be either an User or HttpRequest object.")
if not username:
username = 'anonymous_user'
project_directory = os.path.dirname(sys.modules[cls.__module__].__file__)
workspace_directory = os.path.join(project_directory, 'workspaces', 'user_workspaces', username)
return TethysWorkspace(workspace_directory)
@classmethod
def get_app_workspace(cls):
"""
Get the file workspace (directory) for the app.
Returns:
tethys_apps.base.TethysWorkspace: An object representing the workspace.
**Example:**
::
import os
from my_first_app.app import MyFirstApp as app
def a_controller(request):
\"""
Example controller that uses get_app_workspace() method.
\"""
# Retrieve the workspace
app_workspace = app.get_app_workspace()
new_file_path = os.path.join(app_workspace.path, 'new_file.txt')
with open(new_file_path, 'w') as a_file:
a_file.write('...')
context = {}
return render(request, 'my_first_app/template.html', context)
"""
warnings.warn('@app_workspace decorator is now the preferred method for getting app workspace', DeprecationWarning) # noqa: E501
# Find the path to the app project directory
# Hint: cls is a child class of this class.
# Credits: http://stackoverflow.com/questions/4006102/ is-possible-to-know-the-_path-of-the-file-of-a-subclass-in-python # noqa: E501
project_directory = os.path.dirname(sys.modules[cls.__module__].__file__)
workspace_directory = os.path.join(project_directory, 'workspaces', 'app_workspace')
return TethysWorkspace(workspace_directory)
@classmethod
def get_custom_setting(cls, name):
"""
Retrieves the value of a CustomSetting for the app.
Args:
name(str): The name of the CustomSetting as defined in the app.py.
Returns:
variable: Value of the CustomSetting or None if no value assigned.
**Example:**
::
from my_first_app.app import MyFirstApp as app
max_count = app.get_custom_setting('max_count')
"""
from tethys_apps.models import TethysApp
db_app = TethysApp.objects.get(package=cls.package)
custom_settings = db_app.custom_settings
try:
custom_setting = custom_settings.get(name=name)
return custom_setting.get_value()
except ObjectDoesNotExist:
raise TethysAppSettingDoesNotExist('CustomTethysAppSetting', name, cls.name)
@classmethod
def set_custom_setting(cls, name, value):
"""
Assign the value of a CustomSetting for the app.
Args:
name(str): The name of the CustomSetting as defined in the app.py.
value(str/int/float/boolean/uuid.UUID): the value of the CustomSetting.
**Example:**
::
from my_first_app.app import MyFirstApp as app
app.set_custom_setting('max_count', 5)
"""
from tethys_apps.models import TethysApp
db_app = TethysApp.objects.get(package=cls.package)
custom_settings = db_app.custom_settings
try:
custom_setting = custom_settings.get(name=name)
except ObjectDoesNotExist:
raise TethysAppSettingDoesNotExist('CustomTethysAppSetting', name, cls.name)
type_matches = False
if custom_setting.type == 'STRING':
type_matches = isinstance(value, str)
elif custom_setting.type == 'INTEGER':
type_matches = isinstance(value, int)
elif custom_setting.type == 'FLOAT':
type_matches = isinstance(value, float)
elif custom_setting.type == 'BOOLEAN':
type_matches = str(value).lower() in ['true', 'false', 'yes', 'no', 't', 'f', 'y', 'n', '1', '0']
elif custom_setting.type == 'UUID':
try:
type_matches = bool(uuid.UUID(str(value)))
except ValueError:
pass
if type_matches:
custom_setting.value = value
custom_setting.save()
else:
raise ValidationError(f'Value must be of type {custom_setting.type}.')
@classmethod
def get_dataset_service(cls, name, as_public_endpoint=False, as_endpoint=False,
as_engine=False):
"""
Retrieves dataset service engine assigned to named DatasetServiceSetting for the app.
Args:
name(str): name of the DatasetServiceSetting as defined in the app.py.
as_endpoint(bool): Returns endpoint url string if True, Defaults to False.
as_public_endpoint(bool): Returns public endpoint url string if True. Defaults to False.
as_engine(bool): Returns tethys_dataset_services.engine of appropriate type if True. Defaults to False.
Returns:
DatasetService: DatasetService assigned to setting if no other options are specified.
**Example:**
::
from my_first_app.app import MyFirstApp as app
ckan_engine = app.get_dataset_service('primary_ckan', as_engine=True)
"""
from tethys_apps.models import TethysApp
app = cls()
db_app = TethysApp.objects.get(package=app.package)
dataset_services_settings = db_app.dataset_services_settings
try:
dataset_services_setting = dataset_services_settings.get(name=name)
return dataset_services_setting.get_value(as_public_endpoint=as_public_endpoint, as_endpoint=as_endpoint, as_engine=as_engine)
except ObjectDoesNotExist:
raise TethysAppSettingDoesNotExist('DatasetServiceSetting', name, cls.name)
@classmethod
def get_spatial_dataset_service(cls, name, as_public_endpoint=False, as_endpoint=False, as_wms=False,
as_wfs=False, as_engine=False):
"""
Retrieves spatial dataset service engine assigned to named SpatialDatasetServiceSetting for the app.
Args:
name(str): name of the SpatialDatasetServiceSetting as defined in the app.py.
as_endpoint(bool): Returns endpoint url string if True, Defaults to False.
as_public_endpoint(bool): Returns public endpoint url string if True. Defaults to False.
as_wfs(bool): Returns OGC-WFS endpoint url for spatial dataset service if True. Defaults to False.
as_wms(bool): Returns OGC-WMS endpoint url for spatial dataset service if True. Defaults to False.
as_engine(bool): Returns tethys_dataset_services.engine of appropriate type if True. Defaults to False.
Returns:
SpatialDatasetService: SpatialDatasetService assigned to setting if no other options are specified.
**Example:**
::
from my_first_app.app import MyFirstApp as app
geoserver_engine = app.get_spatial_dataset_service('primary_geoserver', as_engine=True)
"""
from tethys_apps.models import TethysApp
app = cls()
db_app = TethysApp.objects.get(package=app.package)
spatial_dataset_service_settings = db_app.spatial_dataset_service_settings
try:
spatial_dataset_service_setting = spatial_dataset_service_settings.get(
name=name)
return spatial_dataset_service_setting.get_value(
as_public_endpoint=as_public_endpoint,
as_endpoint=as_endpoint,
as_wms=as_wms, as_wfs=as_wfs,
as_engine=as_engine
)
except ObjectDoesNotExist:
raise TethysAppSettingDoesNotExist('SpatialDatasetServiceSetting', name, cls.name)
@classmethod
def get_web_processing_service(cls, name, as_public_endpoint=False, as_endpoint=False, as_engine=False):
"""
Retrieves web processing service engine assigned to named WebProcessingServiceSetting for the app.
Args:
name(str): name of the WebProcessingServiceSetting as defined in the app.py.
as_endpoint(bool): Returns endpoint url string if True, Defaults to False.
as_public_endpoint(bool): Returns public endpoint url string if True. Defaults to False.
as_engine(bool): Returns owslib.wps.WebProcessingService engine if True. Defaults to False.
Returns:
WpsService: WpsService assigned to setting if no other options are specified.
**Example:**
::
from my_first_app.app import MyFirstApp as app
wps_engine = app.get_web_processing_service('primary_52n')
"""
from tethys_apps.models import TethysApp
db_app = TethysApp.objects.get(package=cls.package)
wps_services_settings = db_app.wps_services_settings
try:
wps_service_setting = wps_services_settings.get(name=name)
return wps_service_setting.get_value(as_public_endpoint=as_public_endpoint,
as_endpoint=as_endpoint, as_engine=as_engine)
except ObjectDoesNotExist:
raise TethysAppSettingDoesNotExist('WebProcessingServiceSetting', name, cls.name)
@classmethod
def get_persistent_store_connection(cls, name, as_url=False, as_sessionmaker=False):
"""
Gets an SQLAlchemy Engine or URL object for the named persistent store connection.
Args:
name(string): Name of the PersistentStoreConnectionSetting as defined in app.py.
as_url(bool): Return SQLAlchemy URL object instead of engine object if True. Defaults to False.
as_sessionmaker(bool): Returns SessionMaker class bound to the engine if True. Defaults to False.
Returns:
sqlalchemy.Engine or sqlalchemy.URL: An SQLAlchemy Engine or URL object for the persistent store requested.
**Example:**
::
from my_first_app.app import MyFirstApp as app
conn_engine = app.get_persistent_store_connection('primary')
conn_url = app.get_persistent_store_connection('primary', as_url=True)
SessionMaker = app.get_persistent_store_database('primary', as_sessionmaker=True)
session = SessionMaker()
"""
from tethys_apps.models import TethysApp
db_app = TethysApp.objects.get(package=cls.package)
ps_connection_settings = db_app.persistent_store_connection_settings
try:
# Return as_engine if the other two are False
as_engine = not as_sessionmaker and not as_url
ps_connection_setting = ps_connection_settings.get(name=name)
return ps_connection_setting.get_value(as_url=as_url, as_sessionmaker=as_sessionmaker, as_engine=as_engine)
except ObjectDoesNotExist:
raise TethysAppSettingDoesNotExist('PersistentStoreConnectionSetting', name, cls.name)
except TethysAppSettingNotAssigned:
cls._log_tethys_app_setting_not_assigned_error('PersistentStoreConnectionSetting', name)
@classmethod
def get_persistent_store_database(cls, name, as_url=False, as_sessionmaker=False):
"""
Gets an SQLAlchemy Engine or URL object for the named persistent store database given.
Args:
name(string): Name of the PersistentStoreConnectionSetting as defined in app.py.
as_url(bool): Return SQLAlchemy URL object instead of engine object if True. Defaults to False.
as_sessionmaker(bool): Returns SessionMaker class bound to the engine if True. Defaults to False.
Returns:
sqlalchemy.Engine or sqlalchemy.URL: An SQLAlchemy Engine or URL object for the persistent store requested.
**Example:**
::
from my_first_app.app import MyFirstApp as app
db_engine = app.get_persistent_store_database('example_db')
db_url = app.get_persistent_store_database('example_db', as_url=True)
SessionMaker = app.get_persistent_store_database('example_db', as_sessionmaker=True)
session = SessionMaker()
"""
from tethys_apps.models import TethysApp
db_app = TethysApp.objects.get(package=cls.package)
ps_database_settings = db_app.persistent_store_database_settings
verified_name = name if not is_testing_environment() else get_test_db_name(name)
try:
# Return as_engine if the other two are False
as_engine = not as_sessionmaker and not as_url
ps_database_setting = ps_database_settings.get(name=verified_name)
return ps_database_setting.get_value(with_db=True, as_url=as_url, as_sessionmaker=as_sessionmaker,
as_engine=as_engine)
except ObjectDoesNotExist:
raise TethysAppSettingDoesNotExist('PersistentStoreDatabaseSetting', verified_name, cls.name)
except TethysAppSettingNotAssigned:
cls._log_tethys_app_setting_not_assigned_error('PersistentStoreDatabaseSetting', verified_name)
@classmethod
def create_persistent_store(cls, db_name, connection_name, spatial=False, initializer='', refresh=False,
force_first_time=False):
"""
Creates a new persistent store database for the app. This method is idempotent.
Args:
db_name(string): Name of the persistent store that will be created.
connection_name(string|None): Name of persistent store connection or None if creating a test copy of an existing persistent store (only while in the testing environment)
spatial(bool): Enable spatial extension on the database being created when True. Connection must have superuser role. Defaults to False.
initializer(string): Dot-notation path to initializer function (e.g.: 'my_first_app.models.init_db').
refresh(bool): Drop database if it exists and create again when True. Defaults to False.
force_first_time(bool): Call initializer function with "first_time" parameter forced to True, even if this is not the first time initializing the persistent store database. Defaults to False.
Returns:
bool: True if successful.
**Example:**
::
from my_first_app.app import MyFirstApp as app
result = app.create_persistent_store('example_db', 'primary')
if result:
engine = app.get_persistent_store_engine('example_db')
""" # noqa: E501
# Get named persistent store service connection
from tethys_apps.models import TethysApp
from tethys_apps.models import PersistentStoreDatabaseSetting
db_app = TethysApp.objects.get(package=cls.package)
# Get connection service
ps_connection_settings = db_app.persistent_store_connection_settings
if is_testing_environment():
verified_db_name = get_test_db_name(db_name)
else:
verified_db_name = db_name
if connection_name is None:
raise ValueError('The connection_name cannot be None unless running in the testing environment.')
try:
if connection_name is None:
ps_database_settings = db_app.persistent_store_database_settings
ps_setting = ps_database_settings.get(name=db_name)
else:
ps_setting = ps_connection_settings.get(name=connection_name)
except ObjectDoesNotExist:
if connection_name is None:
raise TethysAppSettingDoesNotExist(
'PersistentStoreDatabaseSetting named "{0}" does not exist.'.format(db_name),
connection_name, cls.name)
else:
raise TethysAppSettingDoesNotExist('PersistentStoreConnectionSetting ', connection_name, cls.name)
ps_service = ps_setting.persistent_store_service
# Check if persistent store database setting already exists before creating it
try:
db_setting = db_app.persistent_store_database_settings.get(name=verified_db_name)
db_setting.persistent_store_service = ps_service
db_setting.initializer = initializer
db_setting.save()
except ObjectDoesNotExist:
# Create new PersistentStoreDatabaseSetting
db_setting = PersistentStoreDatabaseSetting(
name=verified_db_name,
description='',
required=False,
initializer=initializer,
spatial=spatial,
dynamic=True
)
# Assign the connection service
db_setting.persistent_store_service = ps_service
db_app.add_settings((db_setting,))
# Save database entry
db_app.save()
# Create the new database
db_setting.create_persistent_store_database(refresh=refresh, force_first_time=force_first_time)
return True
@classmethod
def drop_persistent_store(cls, name):
"""
Drop a persistent store database for the app. This method is idempotent.
Args:
name(string): Name of the persistent store to be dropped.
Returns:
bool: True if successful.
**Example:**
::
from my_first_app.app import MyFirstApp as app
result = app.drop_persistent_store('example_db')
if result:
# App database 'example_db' was successfully destroyed and no longer exists
pass
"""
# Get the setting
from tethys_apps.models import TethysApp
db_app = TethysApp.objects.get(package=cls.package)
ps_database_settings = db_app.persistent_store_database_settings
verified_name = name if not is_testing_environment() else get_test_db_name(name)
try:
ps_database_setting = ps_database_settings.get(name=verified_name)
except ObjectDoesNotExist:
return True
# Drop the persistent store
ps_database_setting.drop_persistent_store_database()
# Remove the database setting
ps_database_setting.delete()
return True
@classmethod
def list_persistent_store_databases(cls, dynamic_only=False, static_only=False):
"""
Returns a list of existing persistent store databases for the app.
Args:
dynamic_only(bool): only include persistent stores created dynamically if True. Defaults to False.
static_only(bool): only include static persistent stores if True. Defaults to False.
Returns:
list: A list of all persistent store database names for the app.
**Example:**
::
from my_first_app.app import MyFirstApp as app
ps_databases = app.list_persistent_store_databases()
"""
from tethys_apps.models import TethysApp
db_app = TethysApp.objects.get(package=cls.package)
ps_database_settings = db_app.persistent_store_database_settings
if dynamic_only:
ps_database_settings = ps_database_settings.filter(persistentstoredatabasesetting__dynamic=True)
elif static_only:
ps_database_settings = ps_database_settings.filter(persistentstoredatabasesetting__dynamic=False)
return [ps_database_setting.name for ps_database_setting in ps_database_settings
if TESTING_DB_FLAG not in ps_database_setting.name]
@classmethod
def list_persistent_store_connections(cls):
"""
Returns a list of existing persistent store connections for this app.
Returns:
list: A list of persistent store connection names.
**Example:**
::
from my_first_app.app import MyFirstApp as app
ps_connections = app.list_persistent_store_connections()
"""
from tethys_apps.models import TethysApp
db_app = TethysApp.objects.get(package=cls.package)
ps_connection_settings = db_app.persistent_store_connection_settings
return [ps_connection_setting.name for ps_connection_setting in ps_connection_settings
if TESTING_DB_FLAG not in ps_connection_setting.name]
@classmethod
def persistent_store_exists(cls, name):
"""
Returns True if a persistent store with the given name exists for the app.
Args:
name(string): Name of the persistent store database to check.
Returns:
bool: True if persistent store exists.
**Example:**
::
from my_first_app.app import MyFirstApp as app
result = app.persistent_store_exists('example_db')
if result:
engine = app.get_persistent_store_engine('example_db')
"""
from tethys_apps.models import TethysApp
db_app = TethysApp.objects.get(package=cls.package)
ps_database_settings = db_app.persistent_store_database_settings
verified_name = name if not is_testing_environment() else get_test_db_name(name)
try:
# If it exists return True
ps_database_setting = ps_database_settings.get(name=verified_name)
except ObjectDoesNotExist:
# Else return False
return False
# Check whether the database itself exists
return ps_database_setting.persistent_store_database_exists()
def sync_with_tethys_db(self):
"""
Sync installed apps with database.
"""
from django.conf import settings
from tethys_apps.models import TethysApp
try:
# Make pass to add apps to db that are newly installed
# Query to see if installed app is in the database
db_apps = TethysApp.objects.\
filter(package__exact=self.package).all()
# If the app is not in the database, then add it
if len(db_apps) == 0:
db_app = TethysApp(
name=self.name,
package=self.package,
description=self.description,
enable_feedback=self.enable_feedback,
feedback_emails=self.feedback_emails,
index=self.index,
icon=self.icon,
root_url=self.root_url,
color=self.color,
tags=self.tags
)
db_app.save()
# custom settings
db_app.add_settings(self.custom_settings())
# dataset services settings
db_app.add_settings(self.dataset_service_settings())
# spatial dataset services settings
db_app.add_settings(self.spatial_dataset_service_settings())
# wps settings
db_app.add_settings(self.web_processing_service_settings())
# persistent store settings
db_app.add_settings(self.persistent_store_settings())
db_app.save()
# If the app is in the database, update developer priority attributes
elif len(db_apps) == 1:
db_app = db_apps[0]
db_app.index = self.index
db_app.root_url = self.root_url
# custom settings
db_app.add_settings(self.custom_settings())
# dataset services settings
db_app.add_settings(self.dataset_service_settings())
# spatial dataset services settings
db_app.add_settings(self.spatial_dataset_service_settings())
# wps settings
db_app.add_settings(self.web_processing_service_settings())
# persistent store settings
db_app.add_settings(self.persistent_store_settings())
db_app.save()
# In debug mode, update all fields, not just developer priority attributes
if hasattr(settings, 'DEBUG') and settings.DEBUG:
db_app.name = self.name
db_app.description = self.description
db_app.icon = self.icon
db_app.color = self.color
db_app.tags = self.tags
db_app.enable_feedback = self.enable_feedback
db_app.feedback_emails = self.feedback_emails
db_app.save()
# More than one instance of the app in db... (what to do here?)
elif len(db_apps) >= 2:
pass
except ProgrammingError:
tethys_log.warning("Unable to sync app with database. tethys_apps_tethysapp "
"table does not exist")
except Exception as e:
tethys_log.error(e)
def remove_from_db(self):
"""
Remove the instance from the db.
"""
from tethys_apps.models import TethysApp
try:
# Attempt to delete the object
TethysApp.objects.filter(package__exact=self.package).delete()
except Exception as e:
tethys_log.error(e)
@classmethod
def _log_tethys_app_setting_not_assigned_error(cls, setting_type, setting_name):
"""
Logs useful traceback and message without actually raising an exception when an attempt
to access a non-existent setting is made.
Args:
setting_type (str, required):
Name of specific settings class (e.g. CustomTethysAppSetting, PersistentStoreDatabaseSetting etc).
setting_name (str, required):
Name attribute of the setting.
"""
tethys_log.warning('Tethys app setting is not assigned.\nTraceback (most recent call last):\n{0} '
'TethysAppSettingNotAssigned: {1} named "{2}" has not been assigned. '
'Please visit the setting page for the app {3} and assign all required settings.'
.format(traceback.format_stack(limit=3)[0], setting_type, setting_name,
cls.name.encode('utf-8'))
)
@classmethod
def pre_delete_user_workspace(cls, user):
"""
Override this method to pre-process a user's workspace before it is emptied
Args:
user (User, required):
User that requested to clear their workspace
"""
@classmethod
def post_delete_user_workspace(cls, user):
"""
Override this method to post-process a user's workspace after it is emptied
Args:
user (User, required):
User that requested to clear their workspace
"""
@classmethod
def pre_delete_app_workspace(cls):
"""
Override this method to pre-process the app workspace before it is emptied
"""
@classmethod
def post_delete_app_workspace(cls):
"""
Override this method to post-process the app workspace after it is emptied
"""
|
CI-WATER/tethys
|
tethys_apps/base/app_base.py
|
Python
|
bsd-2-clause
| 58,404
|
[
"VisIt"
] |
cf1d338f37d48620479b313a3ce0679de8a9ac63c7db45a159cde01f5cd21ced
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Matcher classes to be used inside of the testtools assertThat framework."""
import pprint
import StringIO
from lxml import etree
from testtools import content
class DictKeysMismatch(object):
def __init__(self, d1only, d2only):
self.d1only = d1only
self.d2only = d2only
def describe(self):
return ('Keys in d1 and not d2: %(d1only)s.'
' Keys in d2 and not d1: %(d2only)s' %
{'d1only': self.d1only, 'd2only': self.d2only})
def get_details(self):
return {}
class DictMismatch(object):
def __init__(self, key, d1_value, d2_value):
self.key = key
self.d1_value = d1_value
self.d2_value = d2_value
def describe(self):
return ("Dictionaries do not match at %(key)s."
" d1: %(d1_value)s d2: %(d2_value)s" %
{'key': self.key, 'd1_value': self.d1_value,
'd2_value': self.d2_value})
def get_details(self):
return {}
class DictMatches(object):
def __init__(self, d1, approx_equal=False, tolerance=0.001):
self.d1 = d1
self.approx_equal = approx_equal
self.tolerance = tolerance
def __str__(self):
return 'DictMatches(%s)' % (pprint.pformat(self.d1))
# Useful assertions
def match(self, d2):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care about (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
d1keys = set(self.d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
return DictKeysMismatch(d1only, d2only)
for key in d1keys:
d1value = self.d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= self.tolerance
except (ValueError, TypeError):
# If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
matcher = DictMatches(d1value)
did_match = matcher.match(d2value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (d1value, d2value):
continue
elif self.approx_equal and within_tolerance:
continue
elif d1value != d2value:
return DictMismatch(key, d1value, d2value)
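def _example_dict_matches():
    # Illustrative only: how DictMatches is typically used, including the
    # DONTCARE wildcard described in the docstring above. Returns None when
    # the observed dict is equivalent to the expected one. With testtools,
    # the same matcher can be passed to assertThat(observed, DictMatches(expected)).
    expected = {'id': 1, 'meta': {'state': 'ACTIVE', 'host': 'DONTCARE'}}
    observed = {'id': 1, 'meta': {'state': 'ACTIVE', 'host': 'node-7'}}
    return DictMatches(expected).match(observed)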
class ListLengthMismatch(object):
def __init__(self, len1, len2):
self.len1 = len1
self.len2 = len2
def describe(self):
return ('Length mismatch: len(L1)=%(len1)d != '
'len(L2)=%(len2)d' % {'len1': self.len1, 'len2': self.len2})
def get_details(self):
return {}
class DictListMatches(object):
def __init__(self, l1, approx_equal=False, tolerance=0.001):
self.l1 = l1
self.approx_equal = approx_equal
self.tolerance = tolerance
def __str__(self):
return 'DictListMatches(%s)' % (pprint.pformat(self.l1))
# Useful assertions
def match(self, l2):
"""Assert a list of dicts are equivalent."""
l1count = len(self.l1)
l2count = len(l2)
if l1count != l2count:
return ListLengthMismatch(l1count, l2count)
for d1, d2 in zip(self.l1, l2):
matcher = DictMatches(d2,
approx_equal=self.approx_equal,
tolerance=self.tolerance)
did_match = matcher.match(d1)
if did_match:
return did_match
class SubDictMismatch(object):
def __init__(self,
key=None,
sub_value=None,
super_value=None,
keys=False):
self.key = key
self.sub_value = sub_value
self.super_value = super_value
self.keys = keys
def describe(self):
if self.keys:
return "Keys between dictionaries did not match"
else:
return("Dictionaries do not match at %s. d1: %s d2: %s"
% (self.key,
self.super_value,
self.sub_value))
def get_details(self):
return {}
class IsSubDictOf(object):
def __init__(self, super_dict):
self.super_dict = super_dict
def __str__(self):
return 'IsSubDictOf(%s)' % (self.super_dict)
def match(self, sub_dict):
"""Assert a sub_dict is subset of super_dict."""
if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())):
return SubDictMismatch(keys=True)
for k, sub_value in sub_dict.items():
super_value = self.super_dict[k]
if isinstance(sub_value, dict):
matcher = IsSubDictOf(super_value)
did_match = matcher.match(sub_value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (sub_value, super_value):
continue
else:
if sub_value != super_value:
return SubDictMismatch(k, sub_value, super_value)
class FunctionCallMatcher(object):
def __init__(self, expected_func_calls):
self.expected_func_calls = expected_func_calls
self.actual_func_calls = []
def call(self, *args, **kwargs):
func_call = {'args': args, 'kwargs': kwargs}
self.actual_func_calls.append(func_call)
def match(self):
dict_list_matcher = DictListMatches(self.expected_func_calls)
return dict_list_matcher.match(self.actual_func_calls)
class XMLMismatch(object):
"""Superclass for XML mismatch."""
def __init__(self, state):
self.path = str(state)
self.expected = state.expected
self.actual = state.actual
def describe(self):
return "%(path)s: XML does not match" % self.path
def get_details(self):
return {
'expected': content.text_content(self.expected),
'actual': content.text_content(self.actual),
}
class XMLDocInfoMismatch(XMLMismatch):
"""XML version or encoding doesn't match."""
def __init__(self, state, expected_doc_info, actual_doc_info):
super(XMLDocInfoMismatch, self).__init__(state)
self.expected_doc_info = expected_doc_info
self.actual_doc_info = actual_doc_info
def describe(self):
return ("%(path)s: XML information mismatch(version, encoding) "
"expected version %(expected_version)s, "
"expected encoding %(expected_encoding)s; "
"actual version %(actual_version)s, "
"actual encoding %(actual_encoding)s" %
{'path': self.path,
'expected_version': self.expected_doc_info['version'],
'expected_encoding': self.expected_doc_info['encoding'],
'actual_version': self.actual_doc_info['version'],
'actual_encoding': self.actual_doc_info['encoding']})
class XMLTagMismatch(XMLMismatch):
"""XML tags don't match."""
def __init__(self, state, idx, expected_tag, actual_tag):
super(XMLTagMismatch, self).__init__(state)
self.idx = idx
self.expected_tag = expected_tag
self.actual_tag = actual_tag
def describe(self):
return ("%(path)s: XML tag mismatch at index %(idx)d: "
"expected tag <%(expected_tag)s>; "
"actual tag <%(actual_tag)s>" %
{'path': self.path, 'idx': self.idx,
'expected_tag': self.expected_tag,
'actual_tag': self.actual_tag})
class XMLAttrKeysMismatch(XMLMismatch):
"""XML attribute keys don't match."""
def __init__(self, state, expected_only, actual_only):
super(XMLAttrKeysMismatch, self).__init__(state)
self.expected_only = ', '.join(sorted(expected_only))
self.actual_only = ', '.join(sorted(actual_only))
def describe(self):
return ("%(path)s: XML attributes mismatch: "
"keys only in expected: %(expected_only)s; "
"keys only in actual: %(actual_only)s" %
{'path': self.path, 'expected_only': self.expected_only,
'actual_only': self.actual_only})
class XMLAttrValueMismatch(XMLMismatch):
"""XML attribute values don't match."""
def __init__(self, state, key, expected_value, actual_value):
super(XMLAttrValueMismatch, self).__init__(state)
self.key = key
self.expected_value = expected_value
self.actual_value = actual_value
def describe(self):
return ("%(path)s: XML attribute value mismatch: "
"expected value of attribute %(key)s: %(expected_value)r; "
"actual value: %(actual_value)r" %
{'path': self.path, 'key': self.key,
'expected_value': self.expected_value,
'actual_value': self.actual_value})
class XMLTextValueMismatch(XMLMismatch):
"""XML text values don't match."""
def __init__(self, state, expected_text, actual_text):
super(XMLTextValueMismatch, self).__init__(state)
self.expected_text = expected_text
self.actual_text = actual_text
def describe(self):
return ("%(path)s: XML text value mismatch: "
"expected text value: %(expected_text)r; "
"actual value: %(actual_text)r" %
{'path': self.path, 'expected_text': self.expected_text,
'actual_text': self.actual_text})
class XMLUnexpectedChild(XMLMismatch):
"""Unexpected child present in XML."""
def __init__(self, state, tag, idx):
super(XMLUnexpectedChild, self).__init__(state)
self.tag = tag
self.idx = idx
def describe(self):
return ("%(path)s: XML unexpected child element <%(tag)s> "
"present at index %(idx)d" %
{'path': self.path, 'tag': self.tag, 'idx': self.idx})
class XMLExpectedChild(XMLMismatch):
"""Expected child not present in XML."""
def __init__(self, state, tag, idx):
super(XMLExpectedChild, self).__init__(state)
self.tag = tag
self.idx = idx
def describe(self):
return ("%(path)s: XML expected child element <%(tag)s> "
"not present at index %(idx)d" %
{'path': self.path, 'tag': self.tag, 'idx': self.idx})
class XMLMatchState(object):
"""Maintain some state for matching.
Tracks the XML node path and saves the expected and actual full
XML text, for use by the XMLMismatch subclasses.
"""
def __init__(self, expected, actual):
self.path = []
self.expected = expected
self.actual = actual
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, exc_tb):
self.path.pop()
return False
def __str__(self):
return '/' + '/'.join(self.path)
def node(self, tag, idx):
"""Adds tag and index to the path; they will be popped off when
the corresponding 'with' statement exits.
:param tag: The element tag
:param idx: If not None, the integer index of the element
within its parent. Not included in the path
element if None.
"""
if idx is not None:
self.path.append("%s[%d]" % (tag, idx))
else:
self.path.append(tag)
return self
class XMLMatches(object):
"""Compare XML strings. More complete than string comparison."""
SKIP_TAGS = (etree.Comment, etree.ProcessingInstruction)
def __init__(self, expected, allow_mixed_nodes=False,
skip_empty_text_nodes=True, skip_values=('DONTCARE',)):
self.expected_xml = expected
self.expected = etree.parse(StringIO.StringIO(expected))
self.allow_mixed_nodes = allow_mixed_nodes
self.skip_empty_text_nodes = skip_empty_text_nodes
self.skip_values = set(skip_values)
def __str__(self):
return 'XMLMatches(%r)' % self.expected_xml
def match(self, actual_xml):
actual = etree.parse(StringIO.StringIO(actual_xml))
state = XMLMatchState(self.expected_xml, actual_xml)
expected_doc_info = self._get_xml_docinfo(self.expected)
actual_doc_info = self._get_xml_docinfo(actual)
if expected_doc_info != actual_doc_info:
return XMLDocInfoMismatch(state, expected_doc_info,
actual_doc_info)
result = self._compare_node(self.expected.getroot(),
actual.getroot(), state, None)
if result is False:
return XMLMismatch(state)
elif result is not True:
return result
@staticmethod
def _get_xml_docinfo(xml_document):
return {'version': xml_document.docinfo.xml_version,
'encoding': xml_document.docinfo.encoding}
def _compare_text_nodes(self, expected, actual, state):
expected_text = [expected.text]
expected_text.extend(child.tail for child in expected)
actual_text = [actual.text]
actual_text.extend(child.tail for child in actual)
if self.skip_empty_text_nodes:
expected_text = [text for text in expected_text
if text and not text.isspace()]
actual_text = [text for text in actual_text
if text and not text.isspace()]
if self.skip_values.intersection(
expected_text + actual_text):
return
if self.allow_mixed_nodes:
# lets sort text nodes because they can be mixed
expected_text = sorted(expected_text)
actual_text = sorted(actual_text)
if expected_text != actual_text:
return XMLTextValueMismatch(state, expected_text, actual_text)
def _compare_node(self, expected, actual, state, idx):
"""Recursively compares nodes within the XML tree."""
# Start by comparing the tags
if expected.tag != actual.tag:
return XMLTagMismatch(state, idx, expected.tag, actual.tag)
with state.node(expected.tag, idx):
# Compare the attribute keys
expected_attrs = set(expected.attrib.keys())
actual_attrs = set(actual.attrib.keys())
if expected_attrs != actual_attrs:
expected_only = expected_attrs - actual_attrs
actual_only = actual_attrs - expected_attrs
return XMLAttrKeysMismatch(state, expected_only, actual_only)
# Compare the attribute values
for key in expected_attrs:
expected_value = expected.attrib[key]
actual_value = actual.attrib[key]
if self.skip_values.intersection(
[expected_value, actual_value]):
continue
elif expected_value != actual_value:
return XMLAttrValueMismatch(state, key, expected_value,
actual_value)
# Compare text nodes
text_nodes_mismatch = self._compare_text_nodes(
expected, actual, state)
if text_nodes_mismatch:
return text_nodes_mismatch
# Compare the contents of the node
matched_actual_child_idxs = set()
# first_actual_child_idx - pointer to next actual child
# used with allow_mixed_nodes=False ONLY
# prevents visiting actual child nodes twice
first_actual_child_idx = 0
for expected_child in expected:
if expected_child.tag in self.SKIP_TAGS:
continue
related_actual_child_idx = None
if self.allow_mixed_nodes:
first_actual_child_idx = 0
for actual_child_idx in range(
first_actual_child_idx, len(actual)):
if actual[actual_child_idx].tag in self.SKIP_TAGS:
first_actual_child_idx += 1
continue
if actual_child_idx in matched_actual_child_idxs:
continue
# Compare the nodes
result = self._compare_node(expected_child,
actual[actual_child_idx],
state, actual_child_idx)
first_actual_child_idx += 1
if result is not True:
if self.allow_mixed_nodes:
continue
else:
return result
else: # nodes match
related_actual_child_idx = actual_child_idx
break
if related_actual_child_idx is not None:
matched_actual_child_idxs.add(actual_child_idx)
else:
return XMLExpectedChild(state, expected_child.tag,
actual_child_idx + 1)
# Make sure we consumed all nodes in actual
for actual_child_idx, actual_child in enumerate(actual):
if (actual_child.tag not in self.SKIP_TAGS and
actual_child_idx not in matched_actual_child_idxs):
return XMLUnexpectedChild(state, actual_child.tag,
actual_child_idx)
# The nodes match
return True
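def _example_xml_matches():
    # Illustrative only: XMLMatches tolerates attribute-order differences and
    # honors the DONTCARE wildcard; match() returns None when the documents
    # are considered equivalent.
    expected = "<server id='1' status='DONTCARE'><name>vm-1</name></server>"
    actual = "<server status='BUILD' id='1'><name>vm-1</name></server>"
    return XMLMatches(expected).match(actual)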
|
Metaswitch/calico-nova
|
nova/tests/unit/matchers.py
|
Python
|
apache-2.0
| 18,977
|
[
"VisIt"
] |
c37a0740eda9409b2896784b0c08848024481c6e6346aa535bd3f66e106edcb0
|
# functions for plotting and fitting gaussians
from math import sqrt, pi, log10
import numpy as N
def g_residuals(p, x, y):
ampl, centre, sigma = p
g = gaussian(x, ampl, centre, sigma)
return y - g
def multi_g_residuals(p, n, x, y):
ampl = p[0::3]
centre = p[1::3]
sigma = p[2::3]
g = multi_gaussian(x, ampl, centre, sigma)
return y - g
def g2d_residuals(p, shape, z):
ampl, xcentre, ycentre, xsigma, ysigma = p
g = gaussian2d(shape, ampl, xcentre, ycentre, xsigma, ysigma)
r = N.ravel(z - g)
return r
def gaussian(x, ampl, centre, sigma):
# with unit area
return (ampl/(sigma*sqrt(2.0*pi)) *
N.exp(-(x-centre)**2/(2.0*sigma**2)))
def gaussian_unit_max(x, ampl, centre, sigma):
# with unit maximum
return ampl * N.exp(-(x-centre)**2/(2.0*sigma**2))
def gaussian2dx(x, y, ampl, xcentre, ycentre, xsigma, ysigma):
return (ampl/(xsigma*ysigma*2*pi) *
N.exp(-(x-xcentre)**2/(2.0*xsigma**2)) *
N.exp(-(y-ycentre)**2/(2.0*ysigma**2)))
def multi_gaussian_derivatives(x, ampl, centre, sigma, shift):
dg_dampl = []
dg_dcentre = []
dg_dsigma = []
for i in range(len(ampl)):
dg_dampl.append(1.0/(sigma[i]*sqrt(2*pi)) *
N.exp(-(x-centre[i]-shift)**2/(2.0*sigma[i]**2)))
dg_dcentre.append(ampl[i]/(sigma[i]*sqrt(2*pi)) *
N.exp(-(x-centre[i]-shift)**2/(2.0*sigma[i]**2)) *
((x-centre[i]-shift)/sigma[i]**2))
dg_dsigma.append(-ampl[i]/sqrt(2*pi) *
N.exp(-(x-centre[i]-shift)**2/(2.0*sigma[i]**2)) *
(1.0/sigma[i]**2 - (x-centre[i]-shift)**2/sigma[i]**4))
if len(dg_dcentre) > 1:
dg_dshift = N.sum(dg_dcentre, 0)
else:
dg_dshift = dg_dcentre
return dg_dampl, dg_dcentre, dg_dsigma, dg_dshift
def gaussian_derivatives(x, ampl, centre, sigma):
dg_dampl = (1.0/(sigma*sqrt(2*pi)) *
N.exp(-(x-centre)**2/(2.0*sigma**2)))
dg_dcentre = -(ampl/(sigma*sqrt(2*pi)) *
N.exp(-(x-centre)**2/(2.0*sigma**2)) *
(-(x-centre)/sigma**2))
dg_dsigma = -(ampl/sqrt(2*pi) *
N.exp(-(x-centre)**2/(2.0*sigma**2)) *
(1.0/sigma**2 - (x-centre)**2/sigma**4))
return dg_dampl, dg_dcentre, dg_dsigma
def multi_gaussian_derivatives_numerical(x, ampl, centre, sigma):
eps = 1.0e-8
g = multi_gaussian(x, ampl, centre, sigma)
dg_dampl = []
for i in range(len(ampl)):
a = N.array(ampl)
a[i] += eps
ge = multi_gaussian(x, a, centre, sigma)
dg_dampl.append((ge - g)/eps)
dg_dcentre = []
for i in range(len(centre)):
c = N.array(centre)
c[i] += eps
ge = multi_gaussian(x, ampl, c, sigma)
dg_dcentre.append((ge - g)/eps)
dg_dsigma = []
for i in range(len(sigma)):
s = N.array(sigma)
s[i] += eps
ge = multi_gaussian(x, ampl, centre, s)
dg_dsigma.append((ge - g)/eps)
return dg_dampl, dg_dcentre, dg_dsigma
def gaussian_derivatives_numerical(x, ampl, centre, sigma):
eps = 1.0e-8
g = gaussian(x, ampl, centre, sigma)
ge = gaussian(x, ampl+eps, centre, sigma)
dg_dampl = (ge - g)/eps
ge = gaussian(x, ampl, centre+eps, sigma)
dg_dcentre = (ge - g)/eps
ge = gaussian(x, ampl, centre, sigma+eps)
dg_dsigma = (ge - g)/eps
return dg_dampl, dg_dcentre, dg_dsigma
def gaussian2d(shape, ampl, xcentre, ycentre, xsigma, ysigma):
def make_dx(j, i):
return (i-float(xcentre))
def make_dy(j, i):
return (j-float(ycentre))
dx = N.fromfunction(make_dx, shape)
dy = N.fromfunction(make_dy, shape)
dx2 = dx**2
dy2 = dy**2
return (ampl/(xsigma*ysigma*2*pi) *
N.exp(-dx2/(2.0*xsigma**2)) *
N.exp(-dy2/(2.0*ysigma**2)))
def multi_gaussian(x, ampl, centre, sigma, shift=0.0):
if type(x) != type(1.0):
g = N.zeros(x.shape, N.float64)
else:
g = 0
for i in range(len(ampl)):
g = g + gaussian(x, ampl[i], centre[i]+shift, sigma[i])
return g
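# Illustrative usage sketch: fit a single gaussian to noisy data with scipy's
# leastsq, using g_residuals defined above. scipy is assumed to be available
# for this example only.
def _example_fit_gaussian():
    from scipy.optimize import leastsq
    x = N.linspace(-5.0, 5.0, 200)
    y = gaussian(x, 2.0, 0.5, 1.2) + N.random.normal(0.0, 0.01, x.shape)
    p0 = (1.0, 0.0, 1.0)  # initial guess for (ampl, centre, sigma)
    fitted_params, ier = leastsq(g_residuals, p0, args=(x, y))
    return fitted_params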
|
bamford/astrobamf
|
gaussian.py
|
Python
|
mit
| 3,797
|
[
"Gaussian"
] |
433f41aa8d24322617d6205092545dbbb5bf0ae9c27d7b4903dabf3d06d6fa10
|
from __future__ import division, print_function
import numpy as np
from dipy.denoise.denspeed import nlmeans_3d
def nlmeans(arr, sigma, mask=None, patch_radius=1, block_radius=5,
rician=True, num_threads=None):
""" Non-local means for denoising 3D and 4D images
Parameters
----------
arr : 3D or 4D ndarray
The array to be denoised
sigma : float or 3D array
    standard deviation of the noise estimated from the data
mask : 3D ndarray
    mask of the region to denoise (non-zero voxels are processed)
patch_radius : int
patch size is ``2 x patch_radius + 1``. Default is 1.
block_radius : int
block size is ``2 x block_radius + 1``. Default is 5.
rician : boolean
If True the noise is estimated as Rician, otherwise Gaussian noise
is assumed.
num_threads : int
Number of threads. If None (default) then all available threads
will be used (all CPU cores).
Returns
-------
denoised_arr : ndarray
the denoised ``arr`` which has the same shape as ``arr``.
"""
if arr.ndim == 3:
sigma = np.ones(arr.shape, dtype=np.float64) * sigma
return nlmeans_3d(arr, mask, sigma,
patch_radius, block_radius,
rician).astype(arr.dtype)
elif arr.ndim == 4:
denoised_arr = np.zeros_like(arr)
if isinstance(sigma, np.ndarray) and sigma.ndim == 3:
sigma = (np.ones(arr.shape, dtype=np.float64) *
sigma[..., np.newaxis])
else:
sigma = np.ones(arr.shape, dtype=np.float64) * sigma
for i in range(arr.shape[-1]):
denoised_arr[..., i] = nlmeans_3d(arr[..., i],
mask,
sigma[..., i],
patch_radius,
block_radius,
rician,
num_threads).astype(arr.dtype)
return denoised_arr
else:
raise ValueError("Only 3D or 4D array are supported!", arr.shape)
|
JohnGriffiths/dipy
|
dipy/denoise/nlmeans.py
|
Python
|
bsd-3-clause
| 2,157
|
[
"Gaussian"
] |
cfb04526e5a13d5d53aa6b6987e0ac9be15dd6735ee19323dbc59f7c94f63e29
|
import numpy as np
import psi4
import time
psi4.core.set_output_file("output.dat", False)
mol = psi4.geometry("""
He 0 0 1
He 0 0 -1
no_reorient
symmetry c1
""")
psi4.core.set_global_option("SCF_TYPE", "PK")
e, wfn = psi4.energy("B3LYP/6-31G", molecule=mol, return_wfn=True)
# Yank some things from the wavefunction
V = wfn.V_potential()
D = np.asarray(wfn.Da())
C = wfn.Ca()
Co = wfn.Ca_subset("AO", "OCC")
Cv = wfn.Ca_subset("AO", "VIR")
nbf = wfn.nmo()
nocc = wfn.nalpha()
nvir = nbf - nocc
epsilon = np.asarray(wfn.epsilon_a())
points_func = V.properties()
superfunc = V.functional()
superfunc.set_deriv(2)
x_omega = superfunc.x_omega()
print("N blocks: %d" % V.nblocks())
# Lets do a LDA functional
Varr = np.zeros((nbf, nbf))
for x in range(V.nblocks()):
grid = V.get_block(x)
w = np.array(grid.w())
npoints = w.shape[0]
points_func.compute_points(grid)
ret = superfunc.compute_functional(points_func.point_values(), -1)
phi = np.array(points_func.basis_values()["PHI"])[:npoints]
v_rho_a = np.array(ret["V_RHO_A"])[:npoints]
tmp = np.zeros((npoints, nbf))
# LDA
tmpv = 0.5 * np.einsum('pb,p,p,pa->ab', phi, v_rho_a, w, phi)
Varr += tmpv + tmpv.T
# Build a V
print('\nV Allclose? %s' % np.allclose(Varr, np.array(wfn.Va())))
def build_XCderiv(k):
# Lets do a LDA functional
Varr = np.zeros((nbf, nbf))
for x in range(V.nblocks()):
grid = V.get_block(x)
w = np.array(grid.w())
npoints = w.shape[0]
phi = np.array(points_func.basis_values()["PHI"])[:npoints]
phi_x = np.array(points_func.basis_values()["PHI_X"])[:npoints]
phi_y = np.array(points_func.basis_values()["PHI_Y"])[:npoints]
phi_z = np.array(points_func.basis_values()["PHI_Z"])[:npoints]
rho = np.einsum('ab,bc,ca->a', phi, k, phi.T)
inp = {
'RHO_A' : psi4.core.Vector.from_array(rho),
'RHO_B' : psi4.core.Vector.from_array(rho),
}
points_func.compute_points(grid)
ret = superfunc.compute_functional(points_func.point_values(), -1)
#ret = superfunc.compute_functional(inp, -1)
# [u'V_RHO_B_RHO_B', u'V_RHO_A', u'V_RHO_B', u'V', u'V_RHO_A_RHO_B', u'V_RHO_A_RHO_A']
# print ret.keys()
v_rho_a = np.array(ret["V_RHO_A"])[:npoints]
#v_rho_a = np.array(ret["V_RHO_A_RHO_A"])[:npoints]
#v_rho_a = np.array(ret["V_RHO_A"])[:npoints]
tmp = np.zeros((npoints, nbf))
# LDA
tmpv = 0.5 * np.einsum('pb,p,p,pa->ab', phi, v_rho_a, w, phi)
Varr += tmpv + tmpv.T
print(Varr)
print(k * 1.e6)
exit()
#print k
return Varr * k
print('\nBuild CPKS objects')
maxiter = 3
conv = 1.e-6
# Grab perturbation tensors in MO basis
nCo = np.asarray(Co)
nCv = np.asarray(Cv)
mints = psi4.core.MintsHelper(wfn.basisset())
eri = mints.ao_eri()
tmp_dipoles = mints.so_dipole()
dipoles_xyz = []
for num in range(3):
Fso = np.asarray(tmp_dipoles[num])
Fia = (nCo.T).dot(Fso).dot(nCv)
Fia *= -2
dipoles_xyz.append(Fia)
# Build initial guess, previous vectors, diis object, and C_left updates
x = []
x_old = []
diis = []
ia_denom = - epsilon[:nocc].reshape(-1, 1) + epsilon[nocc:]
for xyz in range(3):
x.append(dipoles_xyz[xyz] / ia_denom)
x_old.append(np.zeros(ia_denom.shape))
# Convert Co and Cv to numpy arrays
mCo = Co
Co = np.asarray(Co)
Cv = np.asarray(Cv)
Va = np.array(wfn.Va())
print('\nStarting CPHF iterations:')
t = time.time()
for CPHF_ITER in range(1, maxiter + 1):
# Update amplitudes
for xyz in range(3):
# Build J and K objects
Kao = -np.dot(Co, x[xyz]).dot(Cv.T)
J = np.einsum('pqrs,rs->pq', eri, Kao)
K = np.einsum('prqs,rs->pq', eri, Kao)
# Build new guess
X = dipoles_xyz[xyz].copy()
X -= x_omega * (Co.T).dot(4 * J - K.T - K).dot(Cv)
#X += (Co.T).dot(Va * Kao).dot(Cv)
X += (Co.T).dot(build_XCderiv(Kao)).dot(Cv)
#print X
X /= ia_denom
#print Fia
#exit()
# DIIS for good measure
x[xyz] = X.copy()
# Check for convergence
rms = []
for xyz in range(3):
rms.append(np.max((x[xyz] - x_old[xyz]) ** 2))
x_old[xyz] = x[xyz]
avg_RMS = sum(rms) / 3
max_RMS = max(rms)
if max_RMS < conv:
print('CPHF converged in %d iterations and %.2f seconds.' % (CPHF_ITER, time.time() - t))
break
print('CPHF Iteration %3d: Average RMS = %3.8f Maximum RMS = %3.8f' %
(CPHF_ITER, avg_RMS, max_RMS))
# Compute 3x3 polarizability tensor
polar = np.empty((3, 3))
for numx in range(3):
for numf in range(3):
polar[numx, numf] = -1 * np.einsum('ia,ia->', x[numx], dipoles_xyz[numf])
print('\nB3LYP Dipole Polarizability:')
print(np.around(polar, 5))
print("\nHF Dipole Polarizability:")
print("""[[-0.61569 -0. 0. ]
[-0. -0.61569 0. ]
[ 0. 0. -0.68648]]""")
|
rmcgibbo/psi4public
|
tests/libxc/devl/cpks.py
|
Python
|
lgpl-3.0
| 5,150
|
[
"Psi4"
] |
af75744a137eab57a96391cab58f849770e83b379a8a797a0c2c2769a5d18ad0
|
#
# Copyright (C) 2013 Stanislav Bohm
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
class TextViewWithLinks(gtk.TextView):
"""
GtkTextView with support of clickable links
"""
def __init__(self):
gtk.TextView.__init__(self)
self.id_counter = 0
self.link_callbacks = {}
self.set_editable(False)
self.connect("button-press-event", self._button_down)
self.connect("motion_notify_event", self._mouse_move)
self.buffer = self.get_buffer()
self.buffer.create_tag("normal")
self.link_tag = self.buffer.create_tag("link", underline=True)
self.link_hidden_tag = self.buffer.create_tag("link_hidden", invisible = True)
def write(self, text, tag_name="normal"):
self.buffer.insert_with_tags_by_name(self.buffer.get_end_iter(), text, tag_name)
def create_tag(self, *args, **kw):
self.buffer.create_tag(*args, **kw)
def reset(self):
self.buffer.set_text("")
self.id_counter = 0
self.link_callbacks = {}
def write_link(self, text, callback):
new_id = str(self.id_counter)
self.link_callbacks[new_id] = callback
self.id_counter += 1
self.write(text, "link")
self.write(new_id, "link_hidden")
def _iter_at_position(self, px, py):
px, py = self.window_to_buffer_coords(
gtk.TEXT_WINDOW_WIDGET, int(px), int(py))
return self.get_iter_at_location(px, py)
def _button_down(self, w, event):
i = self._iter_at_position(event.x, event.y)
if i.has_tag(self.link_tag):
i.forward_to_tag_toggle(self.link_tag)
j = i.copy()
j.forward_to_tag_toggle(self.link_hidden_tag)
self.link_callbacks[self.buffer.get_text(i, j, True)]()
return True
else:
return False
def _mouse_move(self, w, event):
i = self._iter_at_position(event.x, event.y)
if i.has_tag(self.link_tag):
cursor = gtk.gdk.Cursor(gtk.gdk.FLEUR)
else:
cursor = None
w = self.get_window(gtk.TEXT_WINDOW_TEXT)
w.set_cursor(cursor)
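# Illustrative usage sketch: show the widget in a bare PyGTK window with one
# clickable link (requires a running display; intended only as a demo).
def _example_window():
    view = TextViewWithLinks()
    view.write("Plain text followed by a ")
    view.write_link("clickable link", lambda: view.write("\nlink clicked"))
    window = gtk.Window()
    window.add(view)
    window.connect("destroy", gtk.main_quit)
    window.show_all()
    gtk.main()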
|
mec059/kaira
|
gui/textview.py
|
Python
|
gpl-3.0
| 2,807
|
[
"FLEUR"
] |
48dac36efba0d86badb465f4982669e9e0ae5f2372dec64243795906a8b4cb8f
|
# posix.py - Posix utility function implementations for Mercurial
#
# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import errno
import fcntl
import getpass
import grp
import os
import pwd
import re
import select
import stat
import sys
import tempfile
import unicodedata
from .i18n import _
from .pycompat import (
getattr,
open,
)
from . import (
encoding,
error,
policy,
pycompat,
)
osutil = policy.importmod('osutil')
normpath = os.path.normpath
samestat = os.path.samestat
abspath = os.path.abspath # re-exports
try:
oslink = os.link
except AttributeError:
# Some platforms build Python without os.link on systems that are
# vaguely unix-like but don't have hardlink support. For those
# poor souls, just say we tried and that it failed so we fall back
# to copies.
def oslink(src, dst):
raise OSError(
errno.EINVAL, b'hardlinks not supported: %s to %s' % (src, dst)
)
readlink = os.readlink
unlink = os.unlink
rename = os.rename
removedirs = os.removedirs
expandglobs = False
umask = os.umask(0)
os.umask(umask)
if not pycompat.ispy3:
def posixfile(name, mode='r', buffering=-1):
fp = open(name, mode=mode, buffering=buffering)
# The position when opening in append mode is implementation defined, so
# make it consistent by always seeking to the end.
if 'a' in mode:
fp.seek(0, os.SEEK_END)
return fp
else:
# The underlying file object seeks as required in Python 3:
# https://github.com/python/cpython/blob/v3.7.3/Modules/_io/fileio.c#L474
posixfile = open
def split(p):
"""Same as posixpath.split, but faster
>>> import posixpath
>>> for f in [b'/absolute/path/to/file',
... b'relative/path/to/file',
... b'file_alone',
... b'path/to/directory/',
... b'/multiple/path//separators',
... b'/file_at_root',
... b'///multiple_leading_separators_at_root',
... b'']:
... assert split(f) == posixpath.split(f), f
"""
ht = p.rsplit(b'/', 1)
if len(ht) == 1:
return b'', p
nh = ht[0].rstrip(b'/')
if nh:
return nh, ht[1]
return ht[0] + b'/', ht[1]
def openhardlinks():
'''return true if it is safe to hold open file handles to hardlinks'''
return True
def nlinks(name):
'''return number of hardlinks for the given file'''
return os.lstat(name).st_nlink
def parsepatchoutput(output_line):
"""parses the output produced by patch and returns the filename"""
pf = output_line[14:]
if pycompat.sysplatform == b'OpenVMS':
if pf[0] == b'`':
pf = pf[1:-1] # Remove the quotes
else:
if pf.startswith(b"'") and pf.endswith(b"'") and b" " in pf:
pf = pf[1:-1] # Remove the quotes
return pf
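# For example, with the usual GNU patch output format the filename follows the
# fixed "patching file " prefix (a sketch; the exact output can vary):
#
#     parsepatchoutput(b"patching file foo/bar.c")  # -> b'foo/bar.c'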
def sshargs(sshcmd, host, user, port):
'''Build argument list for ssh'''
args = user and (b"%s@%s" % (user, host)) or host
if b'-' in args[:1]:
raise error.Abort(
_(b'illegal ssh hostname or username starting with -: %s') % args
)
args = shellquote(args)
if port:
args = b'-p %s %s' % (shellquote(port), args)
return args
def isexec(f):
"""check whether a file is executable"""
return os.lstat(f).st_mode & 0o100 != 0
def setflags(f, l, x):
st = os.lstat(f)
s = st.st_mode
if l:
if not stat.S_ISLNK(s):
# switch file to link
with open(f, b'rb') as fp:
data = fp.read()
unlink(f)
try:
os.symlink(data, f)
except OSError:
# failed to make a link, rewrite file
with open(f, b"wb") as fp:
fp.write(data)
# no chmod needed at this point
return
if stat.S_ISLNK(s):
# switch link to file
data = os.readlink(f)
unlink(f)
with open(f, b"wb") as fp:
fp.write(data)
s = 0o666 & ~umask # avoid restatting for chmod
sx = s & 0o100
if st.st_nlink > 1 and bool(x) != bool(sx):
# the file is a hardlink, break it
with open(f, b"rb") as fp:
data = fp.read()
unlink(f)
with open(f, b"wb") as fp:
fp.write(data)
if x and not sx:
# Turn on +x for every +r bit when making a file executable
# and obey umask.
os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
elif not x and sx:
# Turn off all +x bits
os.chmod(f, s & 0o666)
def copymode(src, dst, mode=None, enforcewritable=False):
"""Copy the file mode from the file at path src to dst.
If src doesn't exist, we're using mode instead. If mode is None, we're
using umask."""
try:
st_mode = os.lstat(src).st_mode & 0o777
except OSError as inst:
if inst.errno != errno.ENOENT:
raise
st_mode = mode
if st_mode is None:
st_mode = ~umask
st_mode &= 0o666
new_mode = st_mode
if enforcewritable:
new_mode |= stat.S_IWUSR
os.chmod(dst, new_mode)
def checkexec(path):
"""
Check whether the given path is on a filesystem with UNIX-like exec flags
Requires a directory (like /foo/.hg)
"""
# VFAT on some Linux versions can flip mode but it doesn't persist
# across a FS remount. Frequently we can detect it if files are created
# with exec bit on.
try:
EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
basedir = os.path.join(path, b'.hg')
cachedir = os.path.join(basedir, b'wcache')
storedir = os.path.join(basedir, b'store')
if not os.path.exists(cachedir):
try:
# we want to create the 'cache' directory, not the '.hg' one.
# Automatically creating '.hg' directory could silently spawn
# invalid Mercurial repositories. That seems like a bad idea.
os.mkdir(cachedir)
if os.path.exists(storedir):
copymode(storedir, cachedir)
else:
copymode(basedir, cachedir)
except (IOError, OSError):
# let the other fallback logic trigger
pass
if os.path.isdir(cachedir):
checkisexec = os.path.join(cachedir, b'checkisexec')
checknoexec = os.path.join(cachedir, b'checknoexec')
try:
m = os.stat(checkisexec).st_mode
except OSError as e:
if e.errno != errno.ENOENT:
raise
# checkisexec does not exist - fall through ...
else:
# checkisexec exists, check if it actually is exec
if m & EXECFLAGS != 0:
# checkisexec is exec; ensure checknoexec exists and check it isn't exec
try:
m = os.stat(checknoexec).st_mode
except OSError as e:
if e.errno != errno.ENOENT:
raise
open(checknoexec, b'w').close() # might fail
m = os.stat(checknoexec).st_mode
if m & EXECFLAGS == 0:
# check-exec is exec and check-no-exec is not exec
return True
# checknoexec exists but is exec - delete it
unlink(checknoexec)
# checkisexec exists but is not exec - delete it
unlink(checkisexec)
# check using one file, leave it as checkisexec
checkdir = cachedir
else:
# check directly in path and don't leave checkisexec behind
checkdir = path
checkisexec = None
fh, fn = pycompat.mkstemp(dir=checkdir, prefix=b'hg-checkexec-')
try:
os.close(fh)
m = os.stat(fn).st_mode
if m & EXECFLAGS == 0:
os.chmod(fn, m & 0o777 | EXECFLAGS)
if os.stat(fn).st_mode & EXECFLAGS != 0:
if checkisexec is not None:
os.rename(fn, checkisexec)
fn = None
return True
finally:
if fn is not None:
unlink(fn)
except (IOError, OSError):
# we don't care, the user probably won't be able to commit anyway
return False
def checklink(path):
"""check whether the given path is on a symlink-capable filesystem"""
# mktemp is not racy because symlink creation will fail if the
# file already exists
while True:
cachedir = os.path.join(path, b'.hg', b'wcache')
checklink = os.path.join(cachedir, b'checklink')
# try fast path, read only
if os.path.islink(checklink):
return True
if os.path.isdir(cachedir):
checkdir = cachedir
else:
checkdir = path
cachedir = None
name = tempfile.mktemp(
dir=pycompat.fsdecode(checkdir), prefix=r'checklink-'
)
name = pycompat.fsencode(name)
try:
fd = None
if cachedir is None:
fd = pycompat.namedtempfile(
dir=checkdir, prefix=b'hg-checklink-'
)
target = os.path.basename(fd.name)
else:
# create a fixed file to link to; doesn't matter if it
# already exists.
target = b'checklink-target'
try:
fullpath = os.path.join(cachedir, target)
open(fullpath, b'w').close()
except IOError as inst:
# pytype: disable=unsupported-operands
if inst[0] == errno.EACCES:
# pytype: enable=unsupported-operands
# If we can't write to cachedir, just pretend
# that the fs is readonly and by association
# that the fs won't support symlinks. This
# seems like the least dangerous way to avoid
# data loss.
return False
raise
try:
os.symlink(target, name)
if cachedir is None:
unlink(name)
else:
try:
os.rename(name, checklink)
except OSError:
unlink(name)
return True
except OSError as inst:
# link creation might race, try again
if inst.errno == errno.EEXIST:
continue
raise
finally:
if fd is not None:
fd.close()
except AttributeError:
return False
except OSError as inst:
# sshfs might report failure while successfully creating the link
if inst.errno == errno.EIO and os.path.exists(name):
unlink(name)
return False
def checkosfilename(path):
"""Check that the base-relative path is a valid filename on this platform.
Returns None if the path is ok, or a UI string describing the problem."""
return None # on posix platforms, every path is ok
def getfsmountpoint(dirpath):
"""Get the filesystem mount point from a directory (best-effort)
Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
"""
return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath)
def getfstype(dirpath):
"""Get the filesystem type name from a directory (best-effort)
Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
"""
return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
def get_password():
return encoding.strtolocal(getpass.getpass(''))
def setbinary(fd):
pass
def pconvert(path):
return path
def localpath(path):
return path
def samefile(fpath1, fpath2):
"""Returns whether path1 and path2 refer to the same file. This is only
guaranteed to work for files, not directories."""
return os.path.samefile(fpath1, fpath2)
def samedevice(fpath1, fpath2):
"""Returns whether fpath1 and fpath2 are on the same device. This is only
guaranteed to work for files, not directories."""
st1 = os.lstat(fpath1)
st2 = os.lstat(fpath2)
return st1.st_dev == st2.st_dev
# os.path.normcase is a no-op, which doesn't help us on non-native filesystems
def normcase(path):
return path.lower()
# what normcase does to ASCII strings
normcasespec = encoding.normcasespecs.lower
# fallback normcase function for non-ASCII strings
normcasefallback = normcase
if pycompat.isdarwin:
def normcase(path):
"""
Normalize a filename for OS X-compatible comparison:
- escape-encode invalid characters
- decompose to NFD
- lowercase
- omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff]
>>> normcase(b'UPPER')
'upper'
>>> normcase(b'Caf\\xc3\\xa9')
'cafe\\xcc\\x81'
>>> normcase(b'\\xc3\\x89')
'e\\xcc\\x81'
>>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918
'%b8%ca%c3\\xca\\xbe%c8.jpg'
"""
try:
return encoding.asciilower(path) # exception for non-ASCII
except UnicodeDecodeError:
return normcasefallback(path)
normcasespec = encoding.normcasespecs.lower
def normcasefallback(path):
try:
u = path.decode('utf-8')
except UnicodeDecodeError:
# OS X percent-encodes any bytes that aren't valid utf-8
s = b''
pos = 0
l = len(path)
while pos < l:
try:
c = encoding.getutf8char(path, pos)
pos += len(c)
except ValueError:
c = b'%%%02X' % ord(path[pos : pos + 1])
pos += 1
s += c
u = s.decode('utf-8')
# Decompose then lowercase (HFS+ technote specifies lower)
enc = unicodedata.normalize('NFD', u).lower().encode('utf-8')
# drop HFS+ ignored characters
return encoding.hfsignoreclean(enc)
if pycompat.sysplatform == b'cygwin':
# workaround for cygwin, in which mount point part of path is
# treated as case sensitive, even though underlying NTFS is case
# insensitive.
# default mount points
cygwinmountpoints = sorted(
[
b"/usr/bin",
b"/usr/lib",
b"/cygdrive",
],
reverse=True,
)
# use upper-ing as normcase as same as NTFS workaround
def normcase(path):
pathlen = len(path)
if (pathlen == 0) or (path[0] != pycompat.ossep):
# treat as relative
return encoding.upper(path)
# to preserve case of mountpoint part
for mp in cygwinmountpoints:
if not path.startswith(mp):
continue
mplen = len(mp)
if mplen == pathlen: # mount point itself
return mp
if path[mplen] == pycompat.ossep:
return mp + encoding.upper(path[mplen:])
return encoding.upper(path)
normcasespec = encoding.normcasespecs.other
normcasefallback = normcase
# Cygwin translates native ACLs to POSIX permissions,
# but these translations are not supported by native
# tools, so the exec bit tends to be set erroneously.
# Therefore, disable executable bit access on Cygwin.
def checkexec(path):
return False
# Similarly, Cygwin's symlink emulation is likely to create
# problems when Mercurial is used from both Cygwin and native
# Windows, with other native tools, or on shared volumes
def checklink(path):
return False
_needsshellquote = None
def shellquote(s):
if pycompat.sysplatform == b'OpenVMS':
return b'"%s"' % s
global _needsshellquote
if _needsshellquote is None:
_needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search
if s and not _needsshellquote(s):
# "s" shouldn't have to be quoted
return s
else:
return b"'%s'" % s.replace(b"'", b"'\\''")
def shellsplit(s):
"""Parse a command string in POSIX shell way (best-effort)"""
return pycompat.shlexsplit(s, posix=True)
def testpid(pid):
'''return False if pid dead, True if running or not sure'''
if pycompat.sysplatform == b'OpenVMS':
return True
try:
os.kill(pid, 0)
return True
except OSError as inst:
return inst.errno != errno.ESRCH
def isowner(st):
"""Return True if the stat object st is from the current user."""
return st.st_uid == os.getuid()
def findexe(command):
"""Find executable for command searching like which does.
If command is a basename then PATH is searched for command.
PATH isn't searched if command is an absolute or relative path.
If command isn't found None is returned."""
if pycompat.sysplatform == b'OpenVMS':
return command
def findexisting(executable):
b'Will return executable if existing file'
if os.path.isfile(executable) and os.access(executable, os.X_OK):
return executable
return None
if pycompat.ossep in command:
return findexisting(command)
if pycompat.sysplatform == b'plan9':
return findexisting(os.path.join(b'/bin', command))
for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep):
executable = findexisting(os.path.join(path, command))
if executable is not None:
return executable
return None
def setsignalhandler():
pass
_wantedkinds = {stat.S_IFREG, stat.S_IFLNK}
def statfiles(files):
"""Stat each file in files. Yield each stat, or None if a file does not
exist or has a type we don't care about."""
lstat = os.lstat
getkind = stat.S_IFMT
for nf in files:
try:
st = lstat(nf)
if getkind(st.st_mode) not in _wantedkinds:
st = None
except OSError as err:
if err.errno not in (errno.ENOENT, errno.ENOTDIR):
raise
st = None
yield st
def getuser():
'''return name of current user'''
return pycompat.fsencode(getpass.getuser())
def username(uid=None):
"""Return the name of the user with the given uid.
If uid is None, return the name of the current user."""
if uid is None:
uid = os.getuid()
try:
return pycompat.fsencode(pwd.getpwuid(uid)[0])
except KeyError:
return b'%d' % uid
def groupname(gid=None):
"""Return the name of the group with the given gid.
If gid is None, return the name of the current group."""
if gid is None:
gid = os.getgid()
try:
return pycompat.fsencode(grp.getgrgid(gid)[0])
except KeyError:
return pycompat.bytestr(gid)
def groupmembers(name):
"""Return the list of members of the group with the given
name, KeyError if the group does not exist.
"""
name = pycompat.fsdecode(name)
return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem))
def spawndetached(args):
return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args)
def gethgcmd():
return sys.argv[:1]
def makedir(path, notindexed):
os.mkdir(path)
def lookupreg(key, name=None, scope=None):
return None
def hidewindow():
"""Hide current shell window.
Used to hide the window opened when starting asynchronous
child process under Windows, unneeded on other systems.
"""
pass
class cachestat(object):
def __init__(self, path):
self.stat = os.stat(path)
def cacheable(self):
return bool(self.stat.st_ino)
__hash__ = object.__hash__
def __eq__(self, other):
try:
# Only dev, ino, size, mtime and atime are likely to change. Out
# of these, we shouldn't compare atime but should compare the
# rest. However, one of the other fields changing indicates
# something fishy going on, so return False if anything but atime
# changes.
return (
self.stat.st_mode == other.stat.st_mode
and self.stat.st_ino == other.stat.st_ino
and self.stat.st_dev == other.stat.st_dev
and self.stat.st_nlink == other.stat.st_nlink
and self.stat.st_uid == other.stat.st_uid
and self.stat.st_gid == other.stat.st_gid
and self.stat.st_size == other.stat.st_size
and self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME]
and self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME]
)
except AttributeError:
return False
def __ne__(self, other):
return not self == other
def statislink(st):
'''check whether a stat result is a symlink'''
return st and stat.S_ISLNK(st.st_mode)
def statisexec(st):
'''check whether a stat result is an executable file'''
return st and (st.st_mode & 0o100 != 0)
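# Illustrative sketch (editor's addition, not part of upstream Mercurial):
# pairing statfiles() with the statislink()/statisexec() predicates above to
# classify a batch of paths. The path names are hypothetical.
def _example_classify_paths(paths=(b'hg', b'README')):
    results = {}
    for name, st in zip(paths, statfiles(paths)):
        # st is None for missing files or kinds we do not care about
        results[name] = (statislink(st), statisexec(st))
    return results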
def poll(fds):
"""block until something happens on any file descriptor
This is a generic helper that will check for any activity
    (read, write, exception) and return the list of touched files.
In unsupported cases, it will raise a NotImplementedError"""
try:
while True:
try:
res = select.select(fds, fds, fds)
break
except select.error as inst:
if inst.args[0] == errno.EINTR:
continue
raise
except ValueError: # out of range file descriptor
raise NotImplementedError()
return sorted(list(set(sum(res, []))))
def readpipe(pipe):
"""Read all available data from a pipe."""
# We can't fstat() a pipe because Linux will always report 0.
# So, we set the pipe to non-blocking mode and read everything
# that's available.
flags = fcntl.fcntl(pipe, fcntl.F_GETFL)
flags |= os.O_NONBLOCK
oldflags = fcntl.fcntl(pipe, fcntl.F_SETFL, flags)
try:
chunks = []
while True:
try:
s = pipe.read()
if not s:
break
chunks.append(s)
except IOError:
break
return b''.join(chunks)
finally:
fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
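# Illustrative sketch (editor's addition, not part of upstream Mercurial):
# draining whatever a finished child process left in its stdout pipe with
# readpipe(). The command shown is hypothetical.
def _example_drain_child_stdout():
    import subprocess
    proc = subprocess.Popen([b'hg', b'version'], stdout=subprocess.PIPE)
    proc.wait()
    return readpipe(proc.stdout)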
def bindunixsocket(sock, path):
"""Bind the UNIX domain socket to the specified path"""
# use relative path instead of full path at bind() if possible, since
# AF_UNIX path has very small length limit (107 chars) on common
# platforms (see sys/un.h)
dirname, basename = os.path.split(path)
bakwdfd = None
try:
if dirname:
bakwdfd = os.open(b'.', os.O_DIRECTORY)
os.chdir(dirname)
sock.bind(basename)
if bakwdfd:
os.fchdir(bakwdfd)
finally:
if bakwdfd:
os.close(bakwdfd)
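# Illustrative sketch (editor's addition, not part of upstream Mercurial):
# binding a listening UNIX domain socket through bindunixsocket(), which
# chdir()s around the short AF_UNIX path-length limit. The path is
# hypothetical.
def _example_bind_cmdserver(path=b'/tmp/hg-cmdserver.sock'):
    import socket
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    bindunixsocket(sock, path)
    sock.listen(1)
    return sock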
| smmribeiro/intellij-community | plugins/hg4idea/testData/bin/mercurial/posix.py | Python | apache-2.0 | 23,605 | ["FEFF"] | 67aaeddf372b48cfa45488aca0d1e0388b572e870b149cf6fe17ccedbcc35f77 |
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import re
import numpy
import sys
import os
try:
import __builtin__ # for Python 2
except ImportError:
import builtins as __builtin__ # for Python 3
col_types = {'^di': 3,
'^ext_f': 3,
'^ext_t': 3,
'^f': 3,
'^fix': 3,
'^i': 1,
'^ma': 1,
'^mo': 1,
'^omega': 3,
'^omega_b': 3,
'^omega_l': 3,
'^p': 3,
'^q$': 1,
'^qu': 4,
'^tbf': 3,
'^torque': 3,
'^torque_b': 3,
'^torque_l': 3,
'^ty': 1,
'^v': 3,
'^vi': 1,
'^vs': 2}
re_block_type = re.compile(r'^{([a-z_]+)\s')
re_particles = re.compile(r'{particles\s*{([a-z_\s]*)}((?:\s*{.*?})*)\s*}')
re_particle = re.compile(r'\{(.*?)\}')
re_space = re.compile(r'\s')
re_variable = re.compile(r'\{(?!variable\s)([^\s\}]+)([^\}]*)\}')
re_int_list = re.compile(r'^[0-9\s]+$')
re_float_list = re.compile(r'^[0-9\.\s]+$')
re_int = re.compile(r'[0-9]+')
re_float = re.compile(r'[0-9\.]+')
re_col_types = {}
for col_type in col_types:
re_col_types[col_type] = re.compile(col_type)
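# Illustrative sketch (editor's addition, not part of upstream ESPResSo):
# how a particle field label is mapped to its column count by the prefix
# regexes above, e.g. 'pos' matches '^p' and therefore spans 3 columns.
def _example_field_width(field='pos'):
    for pattern, width in col_types.items():
        if re_col_types[pattern].match(field) is not None:
            return width
    return None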
def load_col_types(blockfile_support_tcl):
global col_types, re_col_types
col_types, re_col_types = {}, {}
with __builtin__.open(blockfile_support_tcl) as f:
        for i in re.finditer(r'"(\^[a-z_]+\$*)"\s*{.*?; incr idx ([0-9]*)\s*}', f.read(), re.DOTALL):
var_name, var_cols = i.group(1), i.group(2)
if var_cols == '': var_cols = 1
else: var_cols = int(var_cols)
col_types[var_name] = var_cols
for col_type in col_types:
re_col_types[col_type] = re.compile(col_type)
def process(block):
"""
Processes the block and returns the block's type and the block's contents as a tuple.
"""
    match = re_block_type.match(block)
    if match is not None:
        block_type = match.group(1)
    else:
        return None
# particle data looks like this:
# {particles {id pos v q f}
# {0 7.875 9.0 0.0 0.0 0.0 0.0 -0.6 7.29706 -1.08036 -3.39398}
# {1 7.875 9.0 1.2 0.0 0.0 0.0 -0.6 1.37885 -0.320172 -2.49209}
if block_type == 'particles':
particles = re_particles.match(block)
# get the field labels and determine how many columns a field consists of (i.e. dimensionality of vector quantities)
fields = particles.group(1).split(' ')
field_lengths = [0]*len(fields)
for i,field in enumerate(fields):
for col_type in col_types:
if re_col_types[col_type].match(field) is not None:
field_lengths[i] = col_types[col_type]
break
if field_lengths[i] == 0:
raise Exception("Field '%s' is unknown. Dimensionality of this field cannot be determined." % field)
# split the particle block into the individual particles
particles = [re_space.split(x) for x in re_particle.findall(particles.group(2))]
N = len(particles)
# create one empty numpy array per field with the appropriate dimensionality
particle_data = {}
for i,field in enumerate(fields):
dtype = float
if re_col_types['^i'].match(field) is not None or re_col_types['^ty'].match(field) is not None:
# the ID and type fields are integer, everything else is a float
dtype = int
particle_data[field] = numpy.empty((N,field_lengths[i]), dtype=dtype)
# fill the numpy arrays with the particle data
for i,particle in enumerate(particles):
fieldcount = 0
for j,field in enumerate(fields):
particle_data[field][i] = particle[fieldcount:fieldcount+field_lengths[j]]
fieldcount += field_lengths[j]
return block_type, particle_data
# variables look like this:
# {variable {box_l 18.0 18.0 480.0} {test 18.0} {stringtest test} }
if block_type == 'variable':
variables = {}
# extract all variable names and values from the block
for m in re_variable.finditer(block):
try:
# convert it to an integer/float if it's a (list of) integer/float variable(s)
value = m.group(2).strip()
if re_int_list.match(value) is not None:
value = numpy.array(re_int.findall(value), dtype=int)
elif re_float_list.match(value) is not None:
value = numpy.array(re_float.findall(value), dtype=float)
variables[m.group(1)] = value
except:
# return it as a string if we can't convert it to a number
variables[m.group(1)] = m.group(2).strip()
return block_type, variables
# any other block types are returned as string
return block_type, block[len(block_type)+1:-2].strip()
class blockfile(object):
f = None
def __init__(self, path):
"""
Opens the blockfile.
"""
self.f = __builtin__.open(path)
def __iter__(self):
"""
Iterator over all the blocks in the open blockfile.
The iterator returns each block as a tuple of block type and block contents.
"""
block = ''
while True:
block += self.f.readline() # this assumes that all blocks end with a newline
if block.count('{') == block.count('}'): # we have a full block:
output = process(block)
                if output is not None:
                    yield output
block = ''
if self.f.tell() == os.fstat(self.f.fileno()).st_size:
self._rewind()
return
def __del__(self):
"""
Closes the blockfile.
"""
self.close()
def _rewind(self):
self.f.seek(0)
def close(self):
"""
Closes the blockfile.
"""
self.f.close()
if __name__ != "__main__":
def open(path):
"""
Opens the blockfile at path.
"""
return blockfile(path)
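# Illustrative sketch (editor's addition, not part of upstream ESPResSo):
# consuming the iterator defined on the blockfile class above; every
# iteration yields a (block_type, contents) tuple. The file name is
# hypothetical.
def _example_box_length(path='config.blk'):
    for block_type, contents in blockfile(path):
        if block_type == 'variable' and 'box_l' in contents:
            return contents['box_l']
    return None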
if __name__ == "__main__":
# If this script is called directly with the argument --extract, it extracts the list of column types from scripts/blockfile_support.tcl.
if len(sys.argv) > 1 and sys.argv[1] == '--extract':
        if len(sys.argv) < 3 or not os.path.exists(sys.argv[2]):
sys.stderr.write("Please specify the path to scripts/blockfile_support.tcl from Espresso.\n")
sys.exit(1)
load_col_types(sys.argv[2])
from pprint import pprint
        pprint(col_types)
sys.exit()
| Marcello-Sega/espresso | tools/blockfile.py | Python | gpl-3.0 | 7,495 | ["ESPResSo"] | 7b43b5fb56f4a9b433443364b8379ee1125a73ed3cc83c2c96ec107a257e53d0 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
An interface to the excellent spglib library by Atsushi Togo
(http://spglib.sourceforge.net/) for pymatgen.
v1.0 - Now works with both ordered and disordered structures.
v2.0 - Updated for spglib 1.6.
v3.0 - pymatgen no longer ships with spglib. Instead, spglib (the python
version) is now a dependency and the SpacegroupAnalyzer merely serves
as an interface to spglib for pymatgen Structures.
"""
import itertools
import logging
from collections import defaultdict
import copy
import math
from math import cos
from math import sin
from fractions import Fraction
import numpy as np
import spglib
from pymatgen.core.structure import Structure, Molecule
from pymatgen.symmetry.structure import SymmetrizedStructure
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import PeriodicSite
from pymatgen.core.operations import SymmOp
from pymatgen.util.coord import find_in_coord_list, pbc_diff
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "3.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "May 14, 2016"
logger = logging.getLogger(__name__)
class SpacegroupAnalyzer:
"""
Takes a pymatgen.core.structure.Structure object and a symprec.
Uses spglib to perform various symmetry finding operations.
"""
def __init__(self, structure, symprec=0.01, angle_tolerance=5.0):
"""
Args:
structure (Structure/IStructure): Structure to find symmetry
symprec (float): Tolerance for symmetry finding. Defaults to 0.01,
which is fairly strict and works well for properly refined
structures with atoms in the proper symmetry coordinates. For
structures with slight deviations from their proper atomic
positions (e.g., structures relaxed with electronic structure
codes), a looser tolerance of 0.1 (the value used in Materials
Project) is often needed.
angle_tolerance (float): Angle tolerance for symmetry finding.
"""
self._symprec = symprec
self._angle_tol = angle_tolerance
self._structure = structure
latt = structure.lattice.matrix
positions = structure.frac_coords
unique_species = []
zs = []
magmoms = []
for species, g in itertools.groupby(structure,
key=lambda s: s.species):
if species in unique_species:
ind = unique_species.index(species)
zs.extend([ind + 1] * len(tuple(g)))
else:
unique_species.append(species)
zs.extend([len(unique_species)] * len(tuple(g)))
for site in structure:
if hasattr(site, 'magmom'):
magmoms.append(site.magmom)
elif site.is_ordered and hasattr(site.specie, 'spin'):
magmoms.append(site.specie.spin)
else:
magmoms.append(0)
self._unique_species = unique_species
self._numbers = zs
        # cell tuple in the format spglib expects: (lattice, positions, numbers, magmoms)
self._cell = latt, positions, zs, magmoms
self._space_group_data = spglib.get_symmetry_dataset(
self._cell, symprec=self._symprec, angle_tolerance=angle_tolerance)
def get_space_group_symbol(self):
"""
Get the spacegroup symbol (e.g., Pnma) for structure.
Returns:
(str): Spacegroup symbol for structure.
"""
return self._space_group_data["international"]
def get_space_group_number(self):
"""
Get the international spacegroup number (e.g., 62) for structure.
Returns:
(int): International spacegroup number for structure.
"""
return int(self._space_group_data["number"])
def get_space_group_operations(self):
"""
Get the SpacegroupOperations for the Structure.
Returns:
            SpacegroupOperations object.
"""
return SpacegroupOperations(self.get_space_group_symbol(),
self.get_space_group_number(),
self.get_symmetry_operations())
def get_hall(self):
"""
Returns Hall symbol for structure.
Returns:
(str): Hall symbol
"""
return self._space_group_data["hall"]
def get_point_group_symbol(self):
"""
Get the point group associated with the structure.
Returns:
(Pointgroup): Point group for structure.
"""
rotations = self._space_group_data["rotations"]
# passing a 0-length rotations list to spglib can segfault
if len(rotations) == 0:
return '1'
return spglib.get_pointgroup(rotations)[0].strip()
def get_crystal_system(self):
"""
Get the crystal system for the structure, e.g., (triclinic,
orthorhombic, cubic, etc.).
Returns:
(str): Crystal system for structure or None if system cannot be detected.
"""
n = self._space_group_data["number"]
def f(i, j):
return i <= n <= j
cs = {"triclinic": (1, 2), "monoclinic": (3, 15),
"orthorhombic": (16, 74), "tetragonal": (75, 142),
"trigonal": (143, 167), "hexagonal": (168, 194),
"cubic": (195, 230)}
        crystal_system = None
        for k, v in cs.items():
            if f(*v):
                crystal_system = k
                break
        return crystal_system
def get_lattice_type(self):
"""
        Get the lattice type for the structure, e.g., (triclinic,
        orthorhombic, cubic, etc.). This is the same as the
        crystal system, with the exception of the hexagonal/rhombohedral
        lattices.
Returns:
(str): Lattice type for structure or None if type cannot be detected.
"""
n = self._space_group_data["number"]
system = self.get_crystal_system()
if n in [146, 148, 155, 160, 161, 166, 167]:
return "rhombohedral"
elif system == "trigonal":
return "hexagonal"
else:
return system
def get_symmetry_dataset(self):
"""
Returns the symmetry dataset as a dict.
Returns:
(dict): With the following properties:
number: International space group number
international: International symbol
hall: Hall symbol
transformation_matrix: Transformation matrix from lattice of
input cell to Bravais lattice L^bravais = L^original * Tmat
origin shift: Origin shift in the setting of "Bravais lattice"
rotations, translations: Rotation matrices and translation
vectors. Space group operations are obtained by
[(r,t) for r, t in zip(rotations, translations)]
wyckoffs: Wyckoff letters
"""
return self._space_group_data
def _get_symmetry(self):
"""
Get the symmetry operations associated with the structure.
Returns:
Symmetry operations as a tuple of two equal length sequences.
(rotations, translations). "rotations" is the numpy integer array
of the rotation matrices for scaled positions
"translations" gives the numpy float64 array of the translation
vectors in scaled positions.
"""
d = spglib.get_symmetry(self._cell, symprec=self._symprec,
angle_tolerance=self._angle_tol)
# Sometimes spglib returns small translation vectors, e.g.
# [1e-4, 2e-4, 1e-4]
# (these are in fractional coordinates, so should be small denominator
# fractions)
trans = []
for t in d["translations"]:
trans.append([float(Fraction.from_float(c).limit_denominator(1000))
for c in t])
trans = np.array(trans)
# fractional translations of 1 are more simply 0
trans[np.abs(trans) == 1] = 0
return d["rotations"], trans
def get_symmetry_operations(self, cartesian=False):
"""
Return symmetry operations as a list of SymmOp objects.
By default returns fractional coord symmops.
But cartesian can be returned too.
Returns:
([SymmOp]): List of symmetry operations.
"""
rotation, translation = self._get_symmetry()
symmops = []
mat = self._structure.lattice.matrix.T
invmat = np.linalg.inv(mat)
for rot, trans in zip(rotation, translation):
if cartesian:
rot = np.dot(mat, np.dot(rot, invmat))
trans = np.dot(trans, self._structure.lattice.matrix)
op = SymmOp.from_rotation_and_translation(rot, trans)
symmops.append(op)
return symmops
def get_point_group_operations(self, cartesian=False):
"""
Return symmetry operations as a list of SymmOp objects.
By default returns fractional coord symmops.
But cartesian can be returned too.
Args:
cartesian (bool): Whether to return SymmOps as cartesian or
direct coordinate operations.
Returns:
([SymmOp]): List of point group symmetry operations.
"""
rotation, translation = self._get_symmetry()
symmops = []
mat = self._structure.lattice.matrix.T
invmat = np.linalg.inv(mat)
for rot in rotation:
if cartesian:
rot = np.dot(mat, np.dot(rot, invmat))
op = SymmOp.from_rotation_and_translation(rot, np.array([0, 0, 0]))
symmops.append(op)
return symmops
def get_symmetrized_structure(self):
"""
Get a symmetrized structure. A symmetrized structure is one where the
sites have been grouped into symmetrically equivalent groups.
Returns:
:class:`pymatgen.symmetry.structure.SymmetrizedStructure` object.
"""
ds = self.get_symmetry_dataset()
sg = SpacegroupOperations(self.get_space_group_symbol(),
self.get_space_group_number(),
self.get_symmetry_operations())
return SymmetrizedStructure(self._structure, sg,
ds["equivalent_atoms"],
ds["wyckoffs"])
def get_refined_structure(self):
"""
Get the refined structure based on detected symmetry. The refined
structure is a *conventional* cell setting with atoms moved to the
expected symmetry positions.
Returns:
Refined structure.
"""
# Atomic positions have to be specified by scaled positions for spglib.
lattice, scaled_positions, numbers \
= spglib.refine_cell(self._cell, self._symprec, self._angle_tol)
species = [self._unique_species[i - 1] for i in numbers]
s = Structure(lattice, species, scaled_positions)
return s.get_sorted_structure()
def find_primitive(self):
"""
Find a primitive version of the unit cell.
Returns:
A primitive cell in the input cell is searched and returned
as an Structure object. If no primitive cell is found, None is
returned.
"""
lattice, scaled_positions, numbers = spglib.find_primitive(
self._cell, symprec=self._symprec)
species = [self._unique_species[i - 1] for i in numbers]
return Structure(lattice, species, scaled_positions,
to_unit_cell=True).get_reduced_structure()
def get_ir_reciprocal_mesh(self, mesh=(10, 10, 10), is_shift=(0, 0, 0)):
"""
        k-point mesh of the Brillouin zone generated taking into account
        symmetry. The method returns the irreducible kpoints of the mesh
        and their weights.
Args:
mesh (3x1 array): The number of kpoint for the mesh needed in
each direction
is_shift (3x1 array): Whether to shift the kpoint grid. (1, 1,
1) means all points are shifted by 0.5, 0.5, 0.5.
Returns:
A list of irreducible kpoints and their weights as a list of
tuples [(ir_kpoint, weight)], with ir_kpoint given
in fractional coordinates
"""
shift = np.array([1 if i else 0 for i in is_shift])
mapping, grid = spglib.get_ir_reciprocal_mesh(
np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)
results = []
for i, count in zip(*np.unique(mapping, return_counts=True)):
results.append(((grid[i] + shift * (0.5, 0.5, 0.5)) / mesh,
count))
return results
def get_conventional_to_primitive_transformation_matrix(self, international_monoclinic=True):
"""
Gives the transformation matrix to transform a conventional
        unit cell to a primitive cell according to certain standards.
        The standards are defined in Setyawan, W., & Curtarolo, S. (2010).
High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
Returns:
Transformation matrix to go from conventional to primitive cell
"""
conv = self.get_conventional_standard_structure(
international_monoclinic=international_monoclinic)
lattice = self.get_lattice_type()
if "P" in self.get_space_group_symbol() or lattice == "hexagonal":
return np.eye(3)
if lattice == "rhombohedral":
# check if the conventional representation is hexagonal or
# rhombohedral
lengths = conv.lattice.lengths
if abs(lengths[0] - lengths[2]) < 0.0001:
                transf = np.eye(3)
else:
transf = np.array([[-1, 1, 1], [2, 1, 1], [-1, -2, 1]],
                                  dtype=float) / 3
elif "I" in self.get_space_group_symbol():
transf = np.array([[-1, 1, 1], [1, -1, 1], [1, 1, -1]],
                              dtype=float) / 2
elif "F" in self.get_space_group_symbol():
transf = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]],
                              dtype=float) / 2
elif "C" in self.get_space_group_symbol() or "A" in self.get_space_group_symbol():
if self.get_crystal_system() == "monoclinic":
transf = np.array([[1, 1, 0], [-1, 1, 0], [0, 0, 2]],
                                  dtype=float) / 2
else:
transf = np.array([[1, -1, 0], [1, 1, 0], [0, 0, 2]],
                                  dtype=float) / 2
else:
transf = np.eye(3)
return transf
def get_primitive_standard_structure(self, international_monoclinic=True):
"""
        Gives a structure with a primitive cell according to certain standards.
        The standards are defined in Setyawan, W., & Curtarolo, S. (2010).
High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
Returns:
The structure in a primitive standardized cell
"""
conv = self.get_conventional_standard_structure(
international_monoclinic=international_monoclinic)
lattice = self.get_lattice_type()
if "P" in self.get_space_group_symbol() or lattice == "hexagonal":
return conv
transf = self.get_conventional_to_primitive_transformation_matrix(
international_monoclinic=international_monoclinic)
new_sites = []
latt = Lattice(np.dot(transf, conv.lattice.matrix))
for s in conv:
new_s = PeriodicSite(
s.specie, s.coords, latt,
to_unit_cell=True, coords_are_cartesian=True,
properties=s.properties)
if not any(map(new_s.is_periodic_image, new_sites)):
new_sites.append(new_s)
if lattice == "rhombohedral":
prim = Structure.from_sites(new_sites)
lengths = prim.lattice.lengths
angles = prim.lattice.angles
a = lengths[0]
alpha = math.pi * angles[0] / 180
new_matrix = [
[a * cos(alpha / 2), -a * sin(alpha / 2), 0],
[a * cos(alpha / 2), a * sin(alpha / 2), 0],
[a * cos(alpha) / cos(alpha / 2), 0,
a * math.sqrt(1 - (cos(alpha) ** 2 / (cos(alpha / 2) ** 2)))]]
new_sites = []
latt = Lattice(new_matrix)
for s in prim:
new_s = PeriodicSite(
s.specie, s.frac_coords, latt,
to_unit_cell=True, properties=s.properties)
if not any(map(new_s.is_periodic_image, new_sites)):
new_sites.append(new_s)
return Structure.from_sites(new_sites)
return Structure.from_sites(new_sites)
def get_conventional_standard_structure(
self, international_monoclinic=True):
"""
Gives a structure with a conventional cell according to certain
standards. The standards are defined in Setyawan, W., & Curtarolo,
S. (2010). High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
They basically enforce as much as possible
norm(a1)<norm(a2)<norm(a3)
Returns:
The structure in a conventional standardized cell
"""
tol = 1e-5
struct = self.get_refined_structure()
latt = struct.lattice
latt_type = self.get_lattice_type()
sorted_lengths = sorted(latt.abc)
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [0, 1, 2]],
key=lambda k: k['length'])
if latt_type in ("orthorhombic", "cubic"):
# you want to keep the c axis where it is
# to keep the C- settings
transf = np.zeros(shape=(3, 3))
if self.get_space_group_symbol().startswith("C"):
transf[2] = [0, 0, 1]
a, b = sorted(latt.abc[:2])
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [0, 1]],
key=lambda k: k['length'])
for i in range(2):
transf[i][sorted_dic[i]['orig_index']] = 1
c = latt.abc[2]
elif self.get_space_group_symbol().startswith(
"A"): # change to C-centering to match Setyawan/Curtarolo convention
transf[2] = [1, 0, 0]
a, b = sorted(latt.abc[1:])
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [1, 2]],
key=lambda k: k['length'])
for i in range(2):
transf[i][sorted_dic[i]['orig_index']] = 1
c = latt.abc[0]
else:
for i in range(len(sorted_dic)):
transf[i][sorted_dic[i]['orig_index']] = 1
a, b, c = sorted_lengths
latt = Lattice.orthorhombic(a, b, c)
elif latt_type == "tetragonal":
# find the "a" vectors
# it is basically the vector repeated two times
transf = np.zeros(shape=(3, 3))
a, b, c = sorted_lengths
for d in range(len(sorted_dic)):
transf[d][sorted_dic[d]['orig_index']] = 1
if abs(b - c) < tol and abs(a - c) > tol:
a, c = c, a
transf = np.dot([[0, 0, 1], [0, 1, 0], [1, 0, 0]], transf)
latt = Lattice.tetragonal(a, c)
elif latt_type in ("hexagonal", "rhombohedral"):
# for the conventional cell representation,
            # we always show the rhombohedral lattices as hexagonal.
            # check first whether the refined structure shows a rhombohedral
            # cell; if so, make a supercell
a, b, c = latt.abc
if np.all(np.abs([a - b, c - b, a - c]) < 0.001):
struct.make_supercell(((1, -1, 0), (0, 1, -1), (1, 1, 1)))
a, b, c = sorted(struct.lattice.abc)
if abs(b - c) < 0.001:
a, c = c, a
new_matrix = [[a / 2, -a * math.sqrt(3) / 2, 0],
[a / 2, a * math.sqrt(3) / 2, 0],
[0, 0, c]]
latt = Lattice(new_matrix)
transf = np.eye(3, 3)
elif latt_type == "monoclinic":
# You want to keep the c axis where it is to keep the C- settings
if self.get_space_group_operations().int_symbol.startswith("C"):
transf = np.zeros(shape=(3, 3))
transf[2] = [0, 0, 1]
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [0, 1]],
key=lambda k: k['length'])
a = sorted_dic[0]['length']
b = sorted_dic[1]['length']
c = latt.abc[2]
new_matrix = None
for t in itertools.permutations(list(range(2)), 2):
m = latt.matrix
latt2 = Lattice([m[t[0]], m[t[1]], m[2]])
lengths = latt2.lengths
angles = latt2.angles
if angles[0] > 90:
# if the angle is > 90 we invert a and b to get
# an angle < 90
a, b, c, alpha, beta, gamma = Lattice(
[-m[t[0]], -m[t[1]], m[2]]).parameters
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = -1
transf[1][t[1]] = -1
transf[2][2] = 1
alpha = math.pi * alpha / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
continue
elif angles[0] < 90:
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = 1
transf[1][t[1]] = 1
transf[2][2] = 1
a, b, c = lengths
alpha = math.pi * angles[0] / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
if new_matrix is None:
                    # this "if" treats the case where alpha == 90
                    # (but we still have a monoclinic sg)
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, 0, c]]
transf = np.zeros(shape=(3, 3))
for c in range(len(sorted_dic)):
transf[c][sorted_dic[c]['orig_index']] = 1
# if not C-setting
else:
# try all permutations of the axis
# keep the ones with the non-90 angle=alpha
# and b<c
new_matrix = None
for t in itertools.permutations(list(range(3)), 3):
m = latt.matrix
a, b, c, alpha, beta, gamma = Lattice(
[m[t[0]], m[t[1]], m[t[2]]]).parameters
if alpha > 90 and b < c:
a, b, c, alpha, beta, gamma = Lattice(
[-m[t[0]], -m[t[1]], m[t[2]]]).parameters
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = -1
transf[1][t[1]] = -1
transf[2][t[2]] = 1
alpha = math.pi * alpha / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
continue
elif alpha < 90 and b < c:
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = 1
transf[1][t[1]] = 1
transf[2][t[2]] = 1
alpha = math.pi * alpha / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
if new_matrix is None:
                    # this "if" treats the case where alpha == 90
                    # (but we still have a monoclinic sg)
new_matrix = [[sorted_lengths[0], 0, 0],
[0, sorted_lengths[1], 0],
[0, 0, sorted_lengths[2]]]
transf = np.zeros(shape=(3, 3))
for c in range(len(sorted_dic)):
transf[c][sorted_dic[c]['orig_index']] = 1
if international_monoclinic:
# The above code makes alpha the non-right angle.
# The following will convert to proper international convention
# that beta is the non-right angle.
op = [[0, 1, 0], [1, 0, 0], [0, 0, -1]]
transf = np.dot(op, transf)
new_matrix = np.dot(op, new_matrix)
beta = Lattice(new_matrix).beta
if beta < 90:
op = [[-1, 0, 0], [0, -1, 0], [0, 0, 1]]
transf = np.dot(op, transf)
new_matrix = np.dot(op, new_matrix)
latt = Lattice(new_matrix)
elif latt_type == "triclinic":
            # we use an LLL Minkowski-like reduction for the triclinic cells
struct = struct.get_reduced_structure("LLL")
a, b, c = latt.lengths
alpha, beta, gamma = [math.pi * i / 180 for i in latt.angles]
new_matrix = None
test_matrix = [[a, 0, 0],
[b * cos(gamma), b * sin(gamma), 0.0],
[c * cos(beta),
c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
def is_all_acute_or_obtuse(m):
recp_angles = np.array(Lattice(m).reciprocal_lattice.angles)
return np.all(recp_angles <= 90) or np.all(recp_angles > 90)
if is_all_acute_or_obtuse(test_matrix):
transf = np.eye(3)
new_matrix = test_matrix
test_matrix = [[-a, 0, 0],
[b * cos(gamma), b * sin(gamma), 0.0],
[-c * cos(beta),
-c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
-c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
if is_all_acute_or_obtuse(test_matrix):
transf = [[-1, 0, 0],
[0, 1, 0],
[0, 0, -1]]
new_matrix = test_matrix
test_matrix = [[-a, 0, 0],
[-b * cos(gamma), -b * sin(gamma), 0.0],
[c * cos(beta),
c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
if is_all_acute_or_obtuse(test_matrix):
transf = [[-1, 0, 0],
[0, -1, 0],
[0, 0, 1]]
new_matrix = test_matrix
test_matrix = [[a, 0, 0],
[-b * cos(gamma), -b * sin(gamma), 0.0],
[-c * cos(beta),
-c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
-c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
if is_all_acute_or_obtuse(test_matrix):
transf = [[1, 0, 0],
[0, -1, 0],
[0, 0, -1]]
new_matrix = test_matrix
latt = Lattice(new_matrix)
new_coords = np.dot(transf, np.transpose(struct.frac_coords)).T
new_struct = Structure(latt, struct.species_and_occu, new_coords,
site_properties=struct.site_properties,
to_unit_cell=True)
return new_struct.get_sorted_structure()
def get_kpoint_weights(self, kpoints, atol=1e-5):
"""
Calculate the weights for a list of kpoints.
Args:
            kpoints (Sequence): Sequence of kpoints. np.arrays are fine. Note
that the code does not check that the list of kpoints
provided does not contain duplicates.
atol (float): Tolerance for fractional coordinates comparisons.
Returns:
List of weights, in the SAME order as kpoints.
"""
kpts = np.array(kpoints)
shift = []
mesh = []
for i in range(3):
nonzero = [i for i in kpts[:, i] if abs(i) > 1e-5]
if len(nonzero) != len(kpts):
# gamma centered
if not nonzero:
mesh.append(1)
else:
m = np.abs(np.round(1 / np.array(nonzero)))
mesh.append(int(max(m)))
shift.append(0)
else:
                # Monkhorst-Pack style shifted grid
m = np.abs(np.round(0.5 / np.array(nonzero)))
mesh.append(int(max(m)))
shift.append(1)
mapping, grid = spglib.get_ir_reciprocal_mesh(
np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)
mapping = list(mapping)
grid = (np.array(grid) + np.array(shift) * (0.5, 0.5, 0.5)) / mesh
weights = []
mapped = defaultdict(int)
for k in kpoints:
for i, g in enumerate(grid):
if np.allclose(pbc_diff(k, g), (0, 0, 0), atol=atol):
mapped[tuple(g)] += 1
weights.append(mapping.count(mapping[i]))
break
if (len(mapped) != len(set(mapping))) or (
not all([v == 1 for v in mapped.values()])):
            raise ValueError("Unable to find a 1:1 correspondence between "
                             "input kpoints and the irreducible grid!")
return [w / sum(weights) for w in weights]
def is_laue(self):
"""
Check if the point group of the structure
has Laue symmetry (centrosymmetry)
"""
laue = ["-1", "2/m", "mmm", "4/m", "4/mmm",
"-3", "-3m", "6/m", "6/mmm", "m-3", "m-3m"]
return str(self.get_point_group_symbol()) in laue
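# Illustrative sketch (editor's addition, not part of upstream pymatgen):
# typical SpacegroupAnalyzer usage. The bcc-Fe cell below is only an example
# input; symprec=0.1 is the looser tolerance the class docstring suggests for
# relaxed structures.
def _example_spacegroup_summary():
    structure = Structure(Lattice.cubic(2.87), ["Fe", "Fe"],
                          [[0, 0, 0], [0.5, 0.5, 0.5]])
    sga = SpacegroupAnalyzer(structure, symprec=0.1)
    return (sga.get_space_group_symbol(), sga.get_space_group_number(),
            sga.get_crystal_system())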
class PointGroupAnalyzer:
"""
A class to analyze the point group of a molecule. The general outline of
the algorithm is as follows:
1. Center the molecule around its center of mass.
2. Compute the inertia tensor and the eigenvalues and eigenvectors.
3. Handle the symmetry detection based on eigenvalues.
a. Linear molecules have one zero eigenvalue. Possible symmetry
operations are C*v or D*v
        b. Asymmetric top molecules have all different eigenvalues. The
maximum rotational symmetry in such molecules is 2
c. Symmetric top molecules have 1 unique eigenvalue, which gives a
unique rotation axis. All axial point groups are possible
except the cubic groups (T & O) and I.
d. Spherical top molecules have all three eigenvalues equal. They
have the rare T, O or I point groups.
.. attribute:: sch_symbol
Schoenflies symbol of the detected point group.
"""
inversion_op = SymmOp.inversion()
def __init__(self, mol, tolerance=0.3, eigen_tolerance=0.01,
matrix_tol=0.1):
"""
The default settings are usually sufficient.
Args:
mol (Molecule): Molecule to determine point group for.
tolerance (float): Distance tolerance to consider sites as
symmetrically equivalent. Defaults to 0.3 Angstrom.
eigen_tolerance (float): Tolerance to compare eigen values of
the inertia tensor. Defaults to 0.01.
matrix_tol (float): Tolerance used to generate the full set of
symmetry operations of the point group.
"""
self.mol = mol
self.centered_mol = mol.get_centered_molecule()
self.tol = tolerance
self.eig_tol = eigen_tolerance
self.mat_tol = matrix_tol
self._analyze()
if self.sch_symbol in ["C1v", "C1h"]:
self.sch_symbol = "Cs"
def _analyze(self):
if len(self.centered_mol) == 1:
self.sch_symbol = "Kh"
else:
inertia_tensor = np.zeros((3, 3))
total_inertia = 0
for site in self.centered_mol:
c = site.coords
wt = site.species.weight
for i in range(3):
inertia_tensor[i, i] += wt * (c[(i + 1) % 3] ** 2
+ c[(i + 2) % 3] ** 2)
for i, j in [(0, 1), (1, 2), (0, 2)]:
inertia_tensor[i, j] += -wt * c[i] * c[j]
inertia_tensor[j, i] += -wt * c[j] * c[i]
total_inertia += wt * np.dot(c, c)
# Normalize the inertia tensor so that it does not scale with size
# of the system. This mitigates the problem of choosing a proper
# comparison tolerance for the eigenvalues.
inertia_tensor /= total_inertia
eigvals, eigvecs = np.linalg.eig(inertia_tensor)
self.principal_axes = eigvecs.T
self.eigvals = eigvals
v1, v2, v3 = eigvals
eig_zero = abs(v1 * v2 * v3) < self.eig_tol
eig_all_same = abs(v1 - v2) < self.eig_tol and abs(
v1 - v3) < self.eig_tol
eig_all_diff = abs(v1 - v2) > self.eig_tol and abs(
v1 - v3) > self.eig_tol and abs(v2 - v3) > self.eig_tol
self.rot_sym = []
self.symmops = [SymmOp(np.eye(4))]
if eig_zero:
logger.debug("Linear molecule detected")
self._proc_linear()
elif eig_all_same:
logger.debug("Spherical top molecule detected")
self._proc_sph_top()
elif eig_all_diff:
logger.debug("Asymmetric top molecule detected")
self._proc_asym_top()
else:
logger.debug("Symmetric top molecule detected")
self._proc_sym_top()
def _proc_linear(self):
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.sch_symbol = "D*h"
self.symmops.append(PointGroupAnalyzer.inversion_op)
else:
self.sch_symbol = "C*v"
def _proc_asym_top(self):
"""
        Handles asymmetric top molecules, which cannot contain rotational
symmetry larger than 2.
"""
self._check_R2_axes_asym()
if len(self.rot_sym) == 0:
logger.debug("No rotation symmetries detected.")
self._proc_no_rot_sym()
elif len(self.rot_sym) == 3:
logger.debug("Dihedral group detected.")
self._proc_dihedral()
else:
logger.debug("Cyclic group detected.")
self._proc_cyclic()
def _proc_sym_top(self):
"""
        Handles symmetric top molecules, which have one unique eigenvalue whose
        corresponding principal axis is a unique rotational axis. More complex
handling required to look for R2 axes perpendicular to this unique
axis.
"""
if abs(self.eigvals[0] - self.eigvals[1]) < self.eig_tol:
ind = 2
elif abs(self.eigvals[1] - self.eigvals[2]) < self.eig_tol:
ind = 0
else:
ind = 1
logger.debug("Eigenvalues = %s." % self.eigvals)
unique_axis = self.principal_axes[ind]
self._check_rot_sym(unique_axis)
logger.debug("Rotation symmetries = %s" % self.rot_sym)
if len(self.rot_sym) > 0:
self._check_perpendicular_r2_axis(unique_axis)
if len(self.rot_sym) >= 2:
self._proc_dihedral()
elif len(self.rot_sym) == 1:
self._proc_cyclic()
else:
self._proc_no_rot_sym()
def _proc_no_rot_sym(self):
"""
Handles molecules with no rotational symmetry. Only possible point
groups are C1, Cs and Ci.
"""
self.sch_symbol = "C1"
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.sch_symbol = "Ci"
self.symmops.append(PointGroupAnalyzer.inversion_op)
else:
for v in self.principal_axes:
mirror_type = self._find_mirror(v)
if not mirror_type == "":
self.sch_symbol = "Cs"
break
def _proc_cyclic(self):
"""
Handles cyclic group molecules.
"""
main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
self.sch_symbol = "C{}".format(rot)
mirror_type = self._find_mirror(main_axis)
if mirror_type == "h":
self.sch_symbol += "h"
elif mirror_type == "v":
self.sch_symbol += "v"
elif mirror_type == "":
if self.is_valid_op(SymmOp.rotoreflection(main_axis,
angle=180 / rot)):
self.sch_symbol = "S{}".format(2 * rot)
def _proc_dihedral(self):
"""
Handles dihedral group molecules, i.e those with intersecting R2 axes
and a main axis.
"""
main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
self.sch_symbol = "D{}".format(rot)
mirror_type = self._find_mirror(main_axis)
if mirror_type == "h":
self.sch_symbol += "h"
elif not mirror_type == "":
self.sch_symbol += "d"
def _check_R2_axes_asym(self):
"""
Test for 2-fold rotation along the principal axes. Used to handle
        asymmetric top molecules.
"""
for v in self.principal_axes:
op = SymmOp.from_axis_angle_and_translation(v, 180)
if self.is_valid_op(op):
self.symmops.append(op)
self.rot_sym.append((v, 2))
def _find_mirror(self, axis):
"""
Looks for mirror symmetry of specified type about axis. Possible
types are "h" or "vd". Horizontal (h) mirrors are perpendicular to
the axis while vertical (v) or diagonal (d) mirrors are parallel. v
        mirrors have atoms lying on the mirror plane while d mirrors do
not.
"""
mirror_type = ""
# First test whether the axis itself is the normal to a mirror plane.
if self.is_valid_op(SymmOp.reflection(axis)):
self.symmops.append(SymmOp.reflection(axis))
mirror_type = "h"
else:
# Iterate through all pairs of atoms to find mirror
for s1, s2 in itertools.combinations(self.centered_mol, 2):
if s1.species == s2.species:
normal = s1.coords - s2.coords
if np.dot(normal, axis) < self.tol:
op = SymmOp.reflection(normal)
if self.is_valid_op(op):
self.symmops.append(op)
if len(self.rot_sym) > 1:
mirror_type = "d"
for v, r in self.rot_sym:
if not np.linalg.norm(v - axis) < self.tol:
if np.dot(v, normal) < self.tol:
mirror_type = "v"
break
else:
mirror_type = "v"
break
return mirror_type
def _get_smallest_set_not_on_axis(self, axis):
"""
Returns the smallest list of atoms with the same species and
        distance from origin that do not lie on the specified axis. The size
        of this set limits the possible rotational symmetry operations,
        since atoms lying on a test axis are irrelevant in testing rotational
        symmetry operations.
"""
def not_on_axis(site):
v = np.cross(site.coords, axis)
return np.linalg.norm(v) > self.tol
valid_sets = []
origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol)
for test_set in dist_el_sites.values():
valid_set = list(filter(not_on_axis, test_set))
if len(valid_set) > 0:
valid_sets.append(valid_set)
return min(valid_sets, key=lambda s: len(s))
def _check_rot_sym(self, axis):
"""
Determines the rotational symmetry about supplied axis. Used only for
        symmetric top molecules which can have rotational symmetry
operations > 2.
"""
min_set = self._get_smallest_set_not_on_axis(axis)
max_sym = len(min_set)
for i in range(max_sym, 0, -1):
if max_sym % i != 0:
continue
op = SymmOp.from_axis_angle_and_translation(axis, 360 / i)
rotvalid = self.is_valid_op(op)
if rotvalid:
self.symmops.append(op)
self.rot_sym.append((axis, i))
return i
return 1
def _check_perpendicular_r2_axis(self, axis):
"""
Checks for R2 axes perpendicular to unique axis. For handling
symmetric top molecules.
"""
min_set = self._get_smallest_set_not_on_axis(axis)
for s1, s2 in itertools.combinations(min_set, 2):
test_axis = np.cross(s1.coords - s2.coords, axis)
if np.linalg.norm(test_axis) > self.tol:
op = SymmOp.from_axis_angle_and_translation(test_axis, 180)
r2present = self.is_valid_op(op)
if r2present:
self.symmops.append(op)
self.rot_sym.append((test_axis, 2))
return True
def _proc_sph_top(self):
"""
        Handles spherical top molecules, which belong to the T, O or I point
groups.
"""
self._find_spherical_axes()
if len(self.rot_sym) == 0:
logger.debug("Accidental speherical top!")
self._proc_sym_top()
main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
if rot < 3:
logger.debug("Accidental speherical top!")
self._proc_sym_top()
elif rot == 3:
mirror_type = self._find_mirror(main_axis)
if mirror_type != "":
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.symmops.append(PointGroupAnalyzer.inversion_op)
self.sch_symbol = "Th"
else:
self.sch_symbol = "Td"
else:
self.sch_symbol = "T"
elif rot == 4:
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.symmops.append(PointGroupAnalyzer.inversion_op)
self.sch_symbol = "Oh"
else:
self.sch_symbol = "O"
elif rot == 5:
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.symmops.append(PointGroupAnalyzer.inversion_op)
self.sch_symbol = "Ih"
else:
self.sch_symbol = "I"
def _find_spherical_axes(self):
"""
Looks for R5, R4, R3 and R2 axes in spherical top molecules. Point
group T molecules have only one unique 3-fold and one unique 2-fold
        axis. O molecules have unique 4-, 3- and 2-fold axes. I molecules
have a unique 5-fold axis.
"""
rot_present = defaultdict(bool)
origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol)
test_set = min(dist_el_sites.values(), key=lambda s: len(s))
coords = [s.coords for s in test_set]
for c1, c2, c3 in itertools.combinations(coords, 3):
for cc1, cc2 in itertools.combinations([c1, c2, c3], 2):
if not rot_present[2]:
test_axis = cc1 + cc2
if np.linalg.norm(test_axis) > self.tol:
op = SymmOp.from_axis_angle_and_translation(test_axis,
180)
rot_present[2] = self.is_valid_op(op)
if rot_present[2]:
self.symmops.append(op)
self.rot_sym.append((test_axis, 2))
test_axis = np.cross(c2 - c1, c3 - c1)
if np.linalg.norm(test_axis) > self.tol:
for r in (3, 4, 5):
if not rot_present[r]:
op = SymmOp.from_axis_angle_and_translation(
test_axis, 360 / r)
rot_present[r] = self.is_valid_op(op)
if rot_present[r]:
self.symmops.append(op)
self.rot_sym.append((test_axis, r))
break
if rot_present[2] and rot_present[3] and (
rot_present[4] or rot_present[5]):
break
def get_pointgroup(self):
"""
Returns a PointGroup object for the molecule.
"""
return PointGroupOperations(self.sch_symbol, self.symmops,
self.mat_tol)
def get_symmetry_operations(self):
"""
Return symmetry operations as a list of SymmOp objects.
Returns Cartesian coord symmops.
Returns:
([SymmOp]): List of symmetry operations.
"""
return generate_full_symmops(self.symmops, self.tol)
def is_valid_op(self, symmop):
"""
Check if a particular symmetry operation is a valid symmetry operation
for a molecule, i.e., the operation maps all atoms to another
equivalent atom.
Args:
symmop (SymmOp): Symmetry operation to test.
Returns:
(bool): Whether SymmOp is valid for Molecule.
"""
coords = self.centered_mol.cart_coords
for site in self.centered_mol:
coord = symmop.operate(site.coords)
ind = find_in_coord_list(coords, coord, self.tol)
if not (len(ind) == 1
and self.centered_mol[ind[0]].species
== site.species):
return False
return True
def _get_eq_sets(self):
"""
Calculates the dictionary for mapping equivalent atoms onto each other.
Args:
None
Returns:
dict: The returned dictionary has two possible keys:
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` onto ``j``.
"""
UNIT = np.eye(3)
eq_sets, operations = defaultdict(set), defaultdict(dict)
symm_ops = [op.rotation_matrix
for op in generate_full_symmops(self.symmops, self.tol)]
def get_clustered_indices():
indices = cluster_sites(self.centered_mol, self.tol,
give_only_index=True)
out = list(indices[1].values())
if indices[0] is not None:
out.append([indices[0]])
return out
for index in get_clustered_indices():
sites = self.centered_mol.cart_coords[index]
for i, reference in zip(index, sites):
for op in symm_ops:
rotated = np.dot(op, sites.T).T
matched_indices = find_in_coord_list(rotated, reference,
self.tol)
matched_indices = {
dict(enumerate(index))[i] for i in matched_indices}
eq_sets[i] |= matched_indices
if i not in operations:
operations[i] = {j: op.T if j != i else UNIT
for j in matched_indices}
else:
for j in matched_indices:
if j not in operations[i]:
operations[i][j] = op.T if j != i else UNIT
for j in matched_indices:
if j not in operations:
operations[j] = {i: op if j != i else UNIT}
elif i not in operations[j]:
operations[j][i] = op if j != i else UNIT
return {'eq_sets': eq_sets,
'sym_ops': operations}
@staticmethod
def _combine_eq_sets(eq_sets, operations):
"""Combines the dicts of _get_equivalent_atom_dicts into one
Args:
eq_sets (dict)
operations (dict)
Returns:
dict: The returned dictionary has two possible keys:
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` onto ``j``.
"""
UNIT = np.eye(3)
def all_equivalent_atoms_of_i(i, eq_sets, ops):
"""WORKS INPLACE on operations
"""
visited = set([i])
tmp_eq_sets = {j: (eq_sets[j] - visited) for j in eq_sets[i]}
while tmp_eq_sets:
new_tmp_eq_sets = {}
for j in tmp_eq_sets:
if j in visited:
continue
visited.add(j)
for k in tmp_eq_sets[j]:
new_tmp_eq_sets[k] = eq_sets[k] - visited
if i not in ops[k]:
ops[k][i] = (np.dot(ops[j][i], ops[k][j])
if k != i else UNIT)
ops[i][k] = ops[k][i].T
tmp_eq_sets = new_tmp_eq_sets
return visited, ops
eq_sets = copy.deepcopy(eq_sets)
ops = copy.deepcopy(operations)
to_be_deleted = set()
for i in eq_sets:
if i in to_be_deleted:
continue
visited, ops = all_equivalent_atoms_of_i(i, eq_sets, ops)
to_be_deleted |= visited - {i}
for k in to_be_deleted:
eq_sets.pop(k, None)
return {'eq_sets': eq_sets,
'sym_ops': ops}
def get_equivalent_atoms(self):
"""Returns sets of equivalent atoms with symmetry operations
Args:
None
Returns:
dict: The returned dictionary has two possible keys:
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` onto ``j``.
"""
eq = self._get_eq_sets()
return self._combine_eq_sets(eq['eq_sets'],
eq['sym_ops'])
def symmetrize_molecule(self):
"""Returns a symmetrized molecule
The equivalent atoms obtained via
:meth:`~pymatgen.symmetry.analyzer.PointGroupAnalyzer.get_equivalent_atoms`
        are rotated, mirrored... onto one position.
Then the average position is calculated.
The average position is rotated, mirrored... back with the inverse
of the previous symmetry operations, which gives the
        symmetrized molecule.
Args:
None
Returns:
dict: The returned dictionary has three possible keys:
``sym_mol``:
A symmetrized molecule instance.
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` onto ``j``.
"""
eq = self.get_equivalent_atoms()
eq_sets, ops = eq['eq_sets'], eq['sym_ops']
coords = self.centered_mol.cart_coords.copy()
for i, eq_indices in eq_sets.items():
for j in eq_indices:
coords[j] = np.dot(ops[j][i], coords[j])
coords[i] = np.mean(coords[list(eq_indices)], axis=0)
for j in eq_indices:
if j == i:
continue
                coords[j] = np.dot(ops[i][j], coords[i])
molecule = Molecule(species=self.centered_mol.species_and_occu,
coords=coords)
return {'sym_mol': molecule,
'eq_sets': eq_sets,
'sym_ops': ops}
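# Illustrative sketch (editor's addition, not part of upstream pymatgen):
# determining the Schoenflies symbol of a small molecule. The geometry is an
# idealized water molecule and is only an example input.
def _example_point_group():
    mol = Molecule(["O", "H", "H"],
                   [[0.000, 0.000, 0.000],
                    [0.757, 0.586, 0.000],
                    [-0.757, 0.586, 0.000]])
    return PointGroupAnalyzer(mol).sch_symbol  # expected to be "C2v"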
def iterative_symmetrize(mol, max_n=10, tolerance=0.3, epsilon=1e-2):
"""Returns a symmetrized molecule
The equivalent atoms obtained via
:meth:`~pymatgen.symmetry.analyzer.PointGroupAnalyzer.get_equivalent_atoms`
    are rotated, mirrored... onto one position.
Then the average position is calculated.
The average position is rotated, mirrored... back with the inverse
of the previous symmetry operations, which gives the
    symmetrized molecule.
Args:
mol (Molecule): A pymatgen Molecule instance.
max_n (int): Maximum number of iterations.
tolerance (float): Tolerance for detecting symmetry.
Gets passed as Argument into
:class:`~pymatgen.analyzer.symmetry.PointGroupAnalyzer`.
epsilon (float): If the elementwise absolute difference of two
            subsequently symmetrized structures is smaller than epsilon,
the iteration stops before ``max_n`` is reached.
Returns:
dict: The returned dictionary has three possible keys:
``sym_mol``:
A symmetrized molecule instance.
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
        that maps atom ``i`` onto ``j``.
"""
new = mol
n = 0
finished = False
while not finished and n <= max_n:
previous = new
PA = PointGroupAnalyzer(previous, tolerance=tolerance)
eq = PA.symmetrize_molecule()
new = eq['sym_mol']
finished = np.allclose(new.cart_coords, previous.cart_coords,
atol=epsilon)
n += 1
return eq
def cluster_sites(mol, tol, give_only_index=False):
"""
Cluster sites based on distance and species type.
Args:
mol (Molecule): Molecule **with origin at center of mass**.
tol (float): Tolerance to use.
Returns:
(origin_site, clustered_sites): origin_site is a site at the center
of mass (None if there are no origin atoms). clustered_sites is a
dict of {(avg_dist, species_and_occu): [list of sites]}
"""
# Cluster works for dim > 2 data. We just add a dummy 0 for second
# coordinate.
dists = [[np.linalg.norm(site.coords), 0] for site in mol]
import scipy.cluster as spcluster
f = spcluster.hierarchy.fclusterdata(dists, tol, criterion='distance')
clustered_dists = defaultdict(list)
for i, site in enumerate(mol):
clustered_dists[f[i]].append(dists[i])
avg_dist = {label: np.mean(val) for label, val in clustered_dists.items()}
clustered_sites = defaultdict(list)
origin_site = None
for i, site in enumerate(mol):
if avg_dist[f[i]] < tol:
if give_only_index:
origin_site = i
else:
origin_site = site
else:
if give_only_index:
clustered_sites[
(avg_dist[f[i]], site.species)].append(i)
else:
clustered_sites[
(avg_dist[f[i]], site.species)].append(site)
return origin_site, clustered_sites
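# Illustrative usage sketch (not part of the original module); assumes scipy is
# installed and the molecule has been translated to its center of mass first:
#
#   centered = mol.get_centered_molecule()
#   origin_site, clusters = cluster_sites(centered, tol=0.1)
#   # origin_site: the site sitting at the center of mass, or None
#   # clusters: {(average distance, species): [sites in that coordination shell]}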
def generate_full_symmops(symmops, tol):
"""
Recursive algorithm to permute through all possible combinations of the
initially supplied symmetry operations to arrive at a complete set of
operations mapping a single atom to all other equivalent atoms in the
point group. This assumes that the initial number already uniquely
identifies all operations.
Args:
symmops ([SymmOp]): Initial set of symmetry operations.
Returns:
Full set of symmetry operations.
"""
# Uses an algorithm described in:
# Gregory Butler. Fundamental Algorithms for Permutation Groups.
# Lecture Notes in Computer Science (Book 559). Springer, 1991. page 15
UNIT = np.eye(4)
generators = [op.affine_matrix for op in symmops
if not np.allclose(op.affine_matrix, UNIT)]
if not generators:
# C1 symmetry breaks assumptions in the algorithm afterwards
return symmops
else:
full = list(generators)
for g in full:
for s in generators:
op = np.dot(g, s)
d = np.abs(full - op) < tol
if not np.any(np.all(np.all(d, axis=2), axis=1)):
full.append(op)
d = np.abs(full - UNIT) < tol
if not np.any(np.all(np.all(d, axis=2), axis=1)):
full.append(UNIT)
return [SymmOp(op) for op in full]
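# Illustrative sketch (not part of the original module): closing a group from a
# single generator. A lone four-fold rotation about z should expand to the full
# cyclic group of order 4 (C4, C2, C4^3 and the identity):
#
#   c4 = SymmOp.from_axis_angle_and_translation([0, 0, 1], 90)
#   full = generate_full_symmops([c4], tol=0.1)
#   len(full)   # -> 4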
class SpacegroupOperations(list):
"""
Represents a space group, which is a collection of symmetry operations.
"""
def __init__(self, int_symbol, int_number, symmops):
"""
Args:
int_symbol (str): International symbol of the spacegroup.
int_number (int): International number of the spacegroup.
symmops ([SymmOp]): Symmetry operations associated with the
spacegroup.
"""
self.int_symbol = int_symbol
self.int_number = int_number
super().__init__(symmops)
def are_symmetrically_equivalent(self, sites1, sites2, symm_prec=1e-3):
"""
Given two sets of PeriodicSites, test if they are actually
symmetrically equivalent under this space group. Useful, for example,
if you want to test if selecting atoms 1 and 2 out of a set of 4 atoms
are symmetrically the same as selecting atoms 3 and 4, etc.
One use is in PartialRemoveSpecie transformation to return only
symmetrically distinct arrangements of atoms.
Args:
sites1 ([Site]): 1st set of sites
sites2 ([Site]): 2nd set of sites
symm_prec (float): Tolerance in atomic distance to test if atoms
are symmetrically similar.
Returns:
(bool): Whether the two sets of sites are symmetrically
equivalent.
"""
def in_sites(site):
for test_site in sites1:
if test_site.is_periodic_image(site, symm_prec, False):
return True
return False
for op in self:
newsites2 = [PeriodicSite(site.species,
op.operate(site.frac_coords),
site.lattice) for site in sites2]
for site in newsites2:
if not in_sites(site):
break
else:
return True
return False
def __str__(self):
return "{} ({}) spacegroup".format(self.int_symbol, self.int_number)
class PointGroupOperations(list):
"""
Defines a point group, which is essentially a sequence of symmetry
operations.
.. attribute:: sch_symbol
Schoenflies symbol of the point group.
"""
def __init__(self, sch_symbol, operations, tol=0.1):
"""
Args:
sch_symbol (str): Schoenflies symbol of the point group.
operations ([SymmOp]): Initial set of symmetry operations. It is
sufficient to provide only just enough operations to generate
the full set of symmetries.
tol (float): Tolerance to generate the full set of symmetry
operations.
"""
self.sch_symbol = sch_symbol
super().__init__(
generate_full_symmops(operations, tol))
def __str__(self):
return self.sch_symbol
def __repr__(self):
return self.__str__()
|
gVallverdu/pymatgen
|
pymatgen/symmetry/analyzer.py
|
Python
|
mit
| 64,034
|
[
"CRYSTAL",
"pymatgen"
] |
958ca83eef6f1b29e5adfcee4a11ef991cbcf8377a7d493966c1004feaf012fe
|
import os
import sys
import collections
def sam_linkage_claster (linkage_info, output):
"""
sam_linkage_claster takes 2 arguments:
1) linkage_info file, which was generated from self BLAST alignments.
it has 2 tab separated columns: seq_id (link1), seq_id (link2)
link1 and link2 are related in that they have mapped 1st and 2nd pair ends, respectively
2) the 2nd argument is the output file
"""
################################
#declare a dictionary that stores, for each cluster ID, the list of linked sequence IDs
linkage_store = collections.defaultdict(list)
cluster_id = 0
################################
with open(linkage_info,"r") as self_linkage_info, open(output,"w") as self_outp_result:
for lines in self_linkage_info:
list_same_clusters = []
link1 = None
link2 = None
append_flag = False #before the dictionary loop, initialize flag to check if link1 or link2 was appended
link1, link2 = lines.strip().split()
link1 = link1.strip()
link2 = link2.strip()
#here the first cluster ID has to be defined
#cluster ID is defined by looping through the keys of the dictionary
for cluster_id in linkage_store:
#if either link1 or link2 is already in a defined cluster, then append the other one
if link1 in linkage_store[cluster_id]:
if link2 not in linkage_store[cluster_id]:
linkage_store[cluster_id].append(link2)
list_same_clusters.append(cluster_id)
#flag that it was appended
append_flag = True
elif link2 in linkage_store[cluster_id]:
#In case link2 is already present in the cluster, flag append_flag as True anyway
append_flag = True
if link2 in linkage_store[cluster_id]:
if link1 not in linkage_store[cluster_id]:
linkage_store[cluster_id].append(link1)
list_same_clusters.append(cluster_id)
#flag that it was appended
append_flag = True
elif link1 in linkage_store[cluster_id]:
#In case link1 is already present in the cluster, flag append_flag as True anyway
append_flag = True
if link1 in linkage_store[cluster_id] and link2 in linkage_store[cluster_id]:
append_flag = True
if len(list_same_clusters) > 1:
same_cluster_id = None
top_cluster = None
same_links = None
list_same_clusters = sorted(list_same_clusters)
top_cluster = list_same_clusters[0]
for same_cluster_id in list_same_clusters:
if same_cluster_id != top_cluster:
for same_links in linkage_store[same_cluster_id]:
if same_links not in linkage_store[top_cluster]:
linkage_store[top_cluster].append(same_links)
del linkage_store[same_cluster_id]
if append_flag == False:
#if nothing was appended, create a new cluster
cluster_id +=1
#and store both
linkage_store[cluster_id].append(link1)
linkage_store[cluster_id].append(link2)
#Now loop through the dictionary and output a
#2 column text file (1st column: cluster_id, 2nd column: seq_id)
for cluster_id in linkage_store:
for seq_id in linkage_store[cluster_id]:
self_outp_result.write ('%i\t%s\n' % (cluster_id,seq_id))
#####################################################################
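# Illustrative sketch (not part of the original script): given a tab separated
# linkage_info file such as
#   seqA    seqB
#   seqB    seqC
#   seqD    seqE
# the call below is expected to write two clusters to the output file, e.g.
#   1    seqA
#   1    seqB
#   1    seqC
#   2    seqD
#   2    seqE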
sam_linkage_claster(sys.argv[1], sys.argv[2])
#####################################################################
|
NIASC/VirusMeta
|
blast_module/blast_linkage_claster.py
|
Python
|
gpl-3.0
| 3,813
|
[
"BLAST"
] |
e81f82fe8453c68f27fbf31de5c688332a660e88348d94852a8e5d90b9be20f2
|
"""Helpful utilities for building analysis pipelines.
"""
import gzip
import os
import tempfile
import time
import shutil
import contextlib
import itertools
import functools
import random
from six.moves import configparser
import fnmatch
import subprocess
import sys
import types
import toolz as tz
import yaml
try:
from concurrent import futures
except ImportError:
try:
import futures
except ImportError:
futures = None
@contextlib.contextmanager
def cpmap(cores=1):
"""Configurable parallel map context manager.
Returns appropriate map compatible function based on configuration:
- Local single core (the default)
- Multiple local cores
"""
if int(cores) == 1:
yield itertools.imap
else:
if futures is None:
raise ImportError("concurrent.futures not available")
pool = futures.ProcessPoolExecutor(cores)
yield pool.map
pool.shutdown()
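# Illustrative usage sketch (not part of the original module); `align_sample`
# and `samples` are hypothetical placeholders:
#
#   with cpmap(cores=4) as pmap:
#       results = list(pmap(align_sample, samples))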
def map_wrap(f):
"""Wrap standard function to easily pass into 'map' processing.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return apply(f, *args, **kwargs)
return wrapper
def transform_to(ext):
"""
Decorator to create an output filename from an input filename with
the specified extension. Changes the extension; in_file is transformed
to a new type.
Takes functions like this to decorate:
f(in_file, out_dir=None, out_file=None) or,
f(in_file=in_file, out_dir=None, out_file=None)
examples:
@transform(".bam")
f("the/input/path/file.sam") ->
f("the/input/path/file.sam", out_file="the/input/path/file.bam")
@transform(".bam")
f("the/input/path/file.sam", out_dir="results") ->
f("the/input/path/file.sam", out_file="results/file.bam")
"""
def decor(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
out_file = kwargs.get("out_file", None)
if not out_file:
in_path = kwargs.get("in_file", args[0])
out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
safe_makedir(out_dir)
out_name = replace_suffix(os.path.basename(in_path), ext)
out_file = os.path.join(out_dir, out_name)
kwargs["out_file"] = out_file
if not file_exists(out_file):
out_file = f(*args, **kwargs)
return out_file
return wrapper
return decor
def filter_to(word):
"""
Decorator to create an output filename from an input filename by
adding a word onto the stem. in_file is filtered by the function
and the results are written to out_file. You would want to use
this over transform_to if you don't know the extension of the file
going in. This also memoizes the output file.
Takes functions like this to decorate:
f(in_file, out_dir=None, out_file=None) or,
f(in_file=in_file, out_dir=None, out_file=None)
examples:
@filter_to(".foo")
f("the/input/path/file.sam") ->
f("the/input/path/file.sam", out_file="the/input/path/file.foo.bam")
@filter_to(".foo")
f("the/input/path/file.sam", out_dir="results") ->
f("the/input/path/file.sam", out_file="results/file.foo.bam")
"""
def decor(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
out_file = kwargs.get("out_file", None)
if not out_file:
in_path = kwargs.get("in_file", args[0])
out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
safe_makedir(out_dir)
out_name = append_stem(os.path.basename(in_path), word)
out_file = os.path.join(out_dir, out_name)
kwargs["out_file"] = out_file
if not file_exists(out_file):
out_file = f(*args, **kwargs)
return out_file
return wrapper
return decor
def memoize_outfile(ext=None, stem=None):
"""
Memoization decorator.
See docstring for transform_to and filter_to for details.
"""
if ext:
return transform_to(ext)
if stem:
return filter_to(stem)
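# Illustrative sketch (not part of the original module): a hypothetical SAM to
# BAM converter decorated with memoize_outfile. The decorator derives out_file
# from the input name and skips the call when the output already exists:
#
#   @memoize_outfile(ext=".bam")
#   def sam_to_bam(in_file, out_dir=None, out_file=None):
#       subprocess.check_call(["samtools", "view", "-b", in_file, "-o", out_file])
#       return out_file
#
#   sam_to_bam("data/sample.sam")   # -> "data/sample.bam"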
def to_single_data(input):
"""Convert an input to a single bcbio data/world object.
Handles both single sample cases (CWL) and all sample cases (standard bcbio).
"""
if (isinstance(input, (list, tuple)) and len(input) == 1):
return input[0]
else:
assert isinstance(input, dict), input
return input
def unpack_worlds(items):
"""Handle all the ways we can pass multiple samples for back-compatibility.
"""
# Unpack nested lists of samples grouped together (old IPython style)
if isinstance(items[0], (list, tuple)) and len(items[0]) == 1:
out = []
for d in items:
assert len(d) == 1 and isinstance(d[0], dict), len(d)
out.append(d[0])
# Unpack a single argument with multiple samples (CWL style)
elif isinstance(items, (list, tuple)) and len(items) == 1 and isinstance(items[0], (list, tuple)):
out = items[0]
else:
out = items
return out
def safe_makedir(dname):
"""Make a directory if it doesn't exist, handling concurrent race conditions.
"""
if not dname:
return dname
num_tries = 0
max_tries = 5
while not os.path.exists(dname):
# we could get an error here if multiple processes are creating
# the directory at the same time. Grr, concurrency.
try:
os.makedirs(dname)
except OSError:
if num_tries > max_tries:
raise
num_tries += 1
time.sleep(2)
return dname
@contextlib.contextmanager
def chdir(new_dir):
"""Context manager to temporarily change to a new directory.
http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
"""
cur_dir = os.getcwd()
safe_makedir(new_dir)
os.chdir(new_dir)
try:
yield
finally:
os.chdir(cur_dir)
@contextlib.contextmanager
def tmpfile(*args, **kwargs):
"""Make a tempfile, safely cleaning up file descriptors on completion.
"""
(fd, fname) = tempfile.mkstemp(*args, **kwargs)
try:
yield fname
finally:
os.close(fd)
if os.path.exists(fname):
os.remove(fname)
def file_exists(fname):
"""Check if a file exists and is non-empty.
"""
try:
return fname and os.path.exists(fname) and os.path.getsize(fname) > 0
except OSError:
return False
def get_size(path):
""" Returns the size in bytes if `path` is a file,
or the size of all files in `path` if it's a directory.
Analogous to `du -s`.
"""
if os.path.isfile(path):
return os.path.getsize(path)
return sum(get_size(os.path.join(path, f)) for f in os.listdir(path))
def file_uptodate(fname, cmp_fname):
"""Check if a file exists, is non-empty and is more recent than cmp_fname.
"""
try:
return (file_exists(fname) and file_exists(cmp_fname) and
os.path.getmtime(fname) >= os.path.getmtime(cmp_fname))
except OSError:
return False
def create_dirs(config, names=None):
if names is None:
names = config["dir"].keys()
for dname in names:
d = config["dir"][dname]
safe_makedir(d)
def save_diskspace(fname, reason, config):
"""Overwrite a file in place with a short message to save disk.
This keeps files as a sanity check on processes working, but saves
disk by replacing them with a short message.
"""
if config["algorithm"].get("save_diskspace", False):
for ext in ["", ".bai"]:
if os.path.exists(fname + ext):
with open(fname + ext, "w") as out_handle:
out_handle.write("File removed to save disk space: %s" % reason)
def read_galaxy_amqp_config(galaxy_config, base_dir):
"""Read connection information on the RabbitMQ server from Galaxy config.
"""
galaxy_config = add_full_path(galaxy_config, base_dir)
config = configparser.ConfigParser()
config.read(galaxy_config)
amqp_config = {}
for option in config.options("galaxy_amqp"):
amqp_config[option] = config.get("galaxy_amqp", option)
return amqp_config
def add_full_path(dirname, basedir=None):
if basedir is None:
basedir = os.getcwd()
if not dirname.startswith("/"):
dirname = os.path.join(basedir, dirname)
return dirname
def splitext_plus(f):
"""Split on file extensions, allowing for zipped extensions.
"""
base, ext = os.path.splitext(f)
if ext in [".gz", ".bz2", ".zip"]:
base, ext2 = os.path.splitext(base)
ext = ext2 + ext
return base, ext
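# For example (sketch, not part of the original module):
#   splitext_plus("sample.vcf.gz")  # -> ("sample", ".vcf.gz")
#   splitext_plus("sample.bam")     # -> ("sample", ".bam")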
def remove_safe(f):
try:
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
except OSError:
pass
def move_safe(origin, target):
"""
Move file, skip if exists
"""
if origin == target:
return origin
if file_exists(target):
return target
shutil.move(origin, target)
return target
def file_plus_index(fname):
"""Convert a file name into the file plus required indexes.
"""
exts = {".vcf": ".idx", ".bam": ".bai", ".vcf.gz": ".tbi", ".bed.gz": ".tbi",
".fq.gz": ".gbi"}
ext = splitext_plus(fname)[-1]
if ext in exts:
return [fname, fname + exts[ext]]
else:
return [fname]
def remove_plus(orig):
"""Remove a fils, including biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext):
remove_safe(orig + ext)
def copy_plus(orig, new):
"""Copy a fils, including biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)):
shutil.copyfile(orig + ext, new + ext)
def symlink_plus(orig, new):
"""Create relative symlinks and handle associated biological index files.
"""
orig = os.path.abspath(orig)
if not os.path.exists(orig):
raise RuntimeError("File not found: %s" % orig)
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)):
with chdir(os.path.dirname(new)):
remove_safe(new + ext)
# Work around symlink issues on some filesystems, which randomly
# fail to symlink.
try:
os.symlink(os.path.relpath(orig + ext), os.path.basename(new + ext))
except OSError:
if not os.path.exists(new + ext) or not os.path.lexists(new + ext):
remove_safe(new + ext)
shutil.copyfile(orig + ext, new + ext)
orig_noext = splitext_plus(orig)[0]
new_noext = splitext_plus(new)[0]
for sub_ext in [".bai"]:
if os.path.exists(orig_noext + sub_ext) and not os.path.lexists(new_noext + sub_ext):
with chdir(os.path.dirname(new_noext)):
os.symlink(os.path.relpath(orig_noext + sub_ext), os.path.basename(new_noext + sub_ext))
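# Illustrative usage sketch (not part of the original module); paths are
# hypothetical:
#
#   symlink_plus("/data/input/sample.bam", "work/sample.bam")
#   # creates a relative symlink for the BAM and, when present, for its
#   # .bai index as well; falls back to copying if symlinking fails.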
def open_gzipsafe(f):
return gzip.open(f) if f.endswith(".gz") else open(f)
def append_stem(to_transform, word):
"""
renames a filename or list of filenames with 'word' appended to the stem
of each one:
example: append_stem("/path/to/test.sam", "_filtered") ->
"/path/to/test_filtered.sam"
"""
if is_sequence(to_transform):
return [append_stem(f, word) for f in to_transform]
elif is_string(to_transform):
(base, ext) = splitext_plus(to_transform)
return "".join([base, word, ext])
else:
raise ValueError("append_stem takes a single filename as a string or "
"a list of filenames to transform.")
def replace_suffix(to_transform, suffix):
"""
replaces the suffix on a filename or list of filenames
example: replace_suffix("/path/to/test.sam", ".bam") ->
"/path/to/test.bam"
"""
if is_sequence(to_transform):
transformed = []
for f in to_transform:
(base, _) = os.path.splitext(f)
transformed.append(base + suffix)
return transformed
elif is_string(to_transform):
(base, _) = os.path.splitext(to_transform)
return base + suffix
else:
raise ValueError("replace_suffix takes a single filename as a string or "
"a list of filenames to transform.")
# ## Functional programming
def partition_all(n, iterable):
"""Partition a list into equally sized pieces, including last smaller parts
http://stackoverflow.com/questions/5129102/python-equivalent-to-clojures-partition-all
"""
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, n))
if not chunk:
break
yield chunk
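# For example (sketch, not part of the original module):
#   list(partition_all(3, range(8)))  # -> [[0, 1, 2], [3, 4, 5], [6, 7]]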
def robust_partition_all(n, iterable):
"""
replaces partition_all with a more robust version.
Workaround for a segfault in pybedtools when using a BedTool as an iterator:
https://github.com/daler/pybedtools/issues/88 for the discussion
"""
it = iter(iterable)
while True:
x = []
for _ in range(n):
try:
x.append(it.next())
except StopIteration:
yield x
# Omitting this StopIteration results in a segfault!
raise StopIteration
yield x
def partition(pred, iterable, tolist=False):
'Use a predicate to partition entries into false entries and true entries'
# partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
t1, t2 = itertools.tee(iterable)
ifalse = itertools.ifilterfalse(pred, t1)
itrue = itertools.ifilter(pred, t2)
if tolist:
return list(ifalse), list(itrue)
else:
return ifalse, itrue
# ## Dealing with configuration files
def merge_config_files(fnames):
"""Merge configuration files, preferring definitions in latter files.
"""
def _load_yaml(fname):
with open(fname) as in_handle:
config = yaml.load(in_handle)
return config
out = _load_yaml(fnames[0])
for fname in fnames[1:]:
cur = _load_yaml(fname)
for k, v in cur.items():
if k in out and isinstance(out[k], dict):
out[k].update(v)
else:
out[k] = v
return out
def deepish_copy(org):
"""Improved speed deep copy for dictionaries of simple python types.
Thanks to Gregg Lind:
http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
"""
out = dict().fromkeys(org)
for k, v in org.items():
if isinstance(v, dict):
out[k] = deepish_copy(v)
else:
try:
out[k] = v.copy() # dicts, sets
except AttributeError:
try:
out[k] = v[:] # lists, tuples, strings, unicode
except TypeError:
out[k] = v # ints
return out
def safe_to_float(x):
"""Convert to float, handling None and non-float inputs.
Useful for cleaning complicated output from variant callers.
"""
if x is None:
return None
else:
try:
return float(x)
except ValueError:
return None
def get_in(d, t, default=None):
"""
look up if you can get a tuple of values from a nested dictionary,
each item in the tuple a deeper layer
example: get_in({1: {2: 3}}, (1, 2)) -> 3
example: get_in({1: {2: 3}}, (2, 3)) -> {}
"""
return tz.get_in(t, d, default)
def flatten(l):
"""
flatten an irregular list of lists
example: flatten([[[1, 2, 3], [4, 5]], 6]) -> [1, 2, 3, 4, 5, 6]
lifted from: http://stackoverflow.com/questions/2158395/
"""
for el in l:
if isinstance(el, (list, tuple)):
for sub in flatten(el):
yield sub
else:
yield el
def is_sequence(arg):
"""
check if 'arg' is a sequence
example: is_sequence([]) -> True
example: is_sequence("lol") -> False
"""
return (not hasattr(arg, "strip") and
hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__"))
def is_pair(arg):
"""
check if 'arg' is a two-item sequence
"""
return is_sequence(arg) and len(arg) == 2
def is_string(arg):
return isinstance(arg, basestring)
def locate(pattern, root=os.curdir):
'''Locate all files matching supplied filename pattern in and below
supplied root directory.'''
for path, dirs, files in os.walk(os.path.abspath(root)):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(path, filename)
def itersubclasses(cls, _seen=None):
"""
snagged from: http://code.activestate.com/recipes/576949/
itersubclasses(cls)
Generator over all subclasses of a given class, in depth first order.
>>> list(itersubclasses(int)) == [bool]
True
>>> class A(object): pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL (new-style) classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)] #doctest: +ELLIPSIS
['type', ...'tuple', ...]
"""
if not isinstance(cls, type):
raise TypeError('itersubclasses must be called with '
'new-style classes, not %.100r' % cls)
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in subs:
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def replace_directory(out_files, dest_dir):
"""
change the output directory to dest_dir
can take a string (single file) or a list of files
"""
if is_sequence(out_files):
filenames = map(os.path.basename, out_files)
return [os.path.join(dest_dir, x) for x in filenames]
elif is_string(out_files):
return os.path.join(dest_dir, os.path.basename(out_files))
else:
raise ValueError("in_files must either be a sequence of filenames "
"or a string")
def which(program, env=None):
""" returns the path to an executable or None if it can't be found"""
if env is None:
env = os.environ.copy()
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in env["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def reservoir_sample(stream, num_items, item_parser=lambda x: x):
"""
samples num_items from the stream keeping each with equal probability
"""
kept = []
for index, item in enumerate(stream):
if index < num_items:
kept.append(item_parser(item))
else:
r = random.randint(0, index)
if r < num_items:
kept[r] = item_parser(item)
return kept
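# For example (sketch, not part of the original module): keep 5 items from a
# large stream, each with equal probability, without loading it into memory:
#   sampled = reservoir_sample(range(10**6), 5)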
def compose(f, g):
return lambda x: f(g(x))
def dictapply(d, fn):
"""
apply a function to all non-dict values in a dictionary
"""
for k, v in d.items():
if isinstance(v, dict):
v = dictapply(v, fn)
else:
d[k] = fn(v)
return d
def Rscript_cmd():
"""Retrieve path to locally installed Rscript or first in PATH.
Prefers Rscript version installed via conda to a system version.
"""
rscript = which(os.path.join(get_bcbio_bin(), "Rscript"))
if rscript:
return rscript
else:
return which("Rscript")
def R_sitelib():
"""Retrieve the R site-library installed with the bcbio installer.
"""
return os.path.join(os.path.dirname(get_bcbio_bin()), "lib", "R", "library")
def R_package_path(package):
"""
return the path to an installed R package
"""
local_sitelib = R_sitelib()
rscript = Rscript_cmd()
cmd = """{rscript} -e '.libPaths(c("{local_sitelib}")); find.package("{package}")'"""
try:
output = subprocess.check_output(cmd.format(**locals()), shell=True)
except subprocess.CalledProcessError as e:
return None
for line in output.split("\n"):
if "[1]" not in line:
continue
dirname = line.split("[1]")[1].replace("\"", "").strip()
if os.path.exists(dirname):
return dirname
return None
def get_java_binpath(cmd=None):
"""Retrieve path for java to use, handling custom BCBIO_JAVA_HOME
Defaults to the dirname of cmd, or local anaconda directory
"""
if os.environ.get("BCBIO_JAVA_HOME"):
test_cmd = os.path.join(os.environ["BCBIO_JAVA_HOME"], "bin", "java")
if os.path.exists(test_cmd):
cmd = test_cmd
if not cmd:
cmd = Rscript_cmd()
return os.path.dirname(cmd)
def get_R_exports():
return "unset R_HOME && unset R_LIBS && export PATH=%s:$PATH" % (os.path.dirname(Rscript_cmd()))
def perl_cmd():
"""Retrieve path to locally installed conda Perl or first in PATH.
"""
perl = which(os.path.join(get_bcbio_bin(), "perl"))
if perl:
return perl
else:
return which("perl")
def get_perl_exports(tmpdir=None):
"""Environmental exports to use conda installed perl.
"""
perl_path = os.path.dirname(perl_cmd())
out = "unset PERL5LIB && export PATH=%s:$PATH" % (perl_path)
if tmpdir:
out += " && export TMPDIR=%s" % (tmpdir)
return out
def get_bcbio_env():
env = os.environ.copy()
env["PATH"] = append_path(get_bcbio_bin(), env['PATH'])
return env
def append_path(bin, path, at_start=True):
if at_start:
tmpl = "{bin}:{path}"
else:
tmpl = "{path}:{bin}"
return tmpl.format(bin=bin, path=path)
def get_bcbio_bin():
return os.path.dirname(os.path.realpath(sys.executable))
def local_path_export(at_start=True):
path = get_bcbio_bin()
if at_start:
return "export PATH=%s:$PATH && " % (path)
else:
return "export PATH=$PATH:%s && " % (path)
def is_gzipped(fname):
_, ext = os.path.splitext(fname)
return ext in [".gz", "gzip"]
def is_bzipped(fname):
_, ext = os.path.splitext(fname)
return ext in [".bz2", "bzip2"]
def open_possible_gzip(fname, flag="r"):
if is_gzipped(fname):
if "b" not in flag:
flag += "b"
return gzip.open(fname, flag)
else:
return open(fname, flag)
def filter_missing(xs):
"""
remove items from a list if they evaluate to False
"""
return filter(lambda x: x, xs)
def rbind(dfs):
"""
acts like rbind for pandas dataframes
"""
if len(dfs) == 1:
return dfs[0]
df = dfs[0]
for d in dfs[1:]:
df = df.append(d)
return df
def max_command_length():
"""
get the maximum length of the command line, in bytes, defaulting
to a conservative number if not set
http://www.in-ulm.de/~mascheck/various/argmax/
"""
DEFAULT_MAX_LENGTH = 150000 # lowest seen so far is 200k
try:
arg_max = os.sysconf('SC_ARG_MAX')
env_lines = len(os.environ) * 4
env_chars = sum([len(x) + len(y) for x, y in os.environ.items()])
arg_length = arg_max - env_lines - 2048
except ValueError:
arg_length = DEFAULT_MAX_LENGTH
return arg_length if arg_length > 0 else DEFAULT_MAX_LENGTH
def get_abspath(path, pardir=None):
if pardir is None:
pardir = os.getcwd()
path = os.path.expandvars(path)
return os.path.normpath(os.path.join(pardir, path))
def sort_filenames(filenames):
"""
sort a list of files by filename only, ignoring the directory names
"""
basenames = [os.path.basename(x) for x in filenames]
indexes = [i[0] for i in sorted(enumerate(basenames), key=lambda x:x[1])]
return [filenames[x] for x in indexes]
# LazyImport from NIPY
# https://github.com/nipy/nitime/blob/master/nitime/lazyimports.py
class LazyImport(types.ModuleType):
"""
This class takes the module name as a parameter, and acts as a proxy for
that module, importing it only when the module is used, but effectively
acting as the module in every other way (including inside IPython with
respect to introspection and tab completion) with the *exception* of
reload()- reloading a :class:`LazyImport` raises an :class:`ImportError`.
>>> mlab = LazyImport('matplotlib.mlab')
No import happens on the above line, until we do something like call an
``mlab`` method or try to do tab completion or introspection on ``mlab``
in IPython.
>>> mlab
<module 'matplotlib.mlab' will be lazily loaded>
Now the :class:`LazyImport` will do an actual import, and call the dist
function of the imported module.
>>> mlab.dist(1969,2011)
42.0
"""
def __getattribute__(self, x):
# This method will be called only once, since we'll change
# self.__class__ to LoadedLazyImport, and __getattribute__ will point
# to module.__getattribute__
name = object.__getattribute__(self, '__name__')
__import__(name)
# if name above is 'package.foo.bar', package is returned, the docs
# recommend that in order to get back the full thing, that we import
# and then lookup the full name is sys.modules, see:
# http://docs.python.org/library/functions.html#__import__
module = sys.modules[name]
# Now that we've done the import, cutout the middleman and make self
# act as the imported module
class LoadedLazyImport(types.ModuleType):
__getattribute__ = module.__getattribute__
__repr__ = module.__repr__
object.__setattr__(self, '__class__', LoadedLazyImport)
# The next line will make "reload(l)" a silent no-op
# sys.modules[name] = self
return module.__getattribute__(x)
def __repr__(self):
return "<module '%s' will be lazily loaded>" %\
object.__getattribute__(self,'__name__')
|
biocyberman/bcbio-nextgen
|
bcbio/utils.py
|
Python
|
mit
| 26,720
|
[
"Galaxy"
] |
d5d3a68a78db9c17e16bc3f152981551f84bd5be46f975ef9040567ea69b0757
|
#
# Copyright (C) 2017 Peter Gedeck
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
'''
Collection of utilities to be used with descriptors
'''
import math
def setDescriptorVersion(version='1.0.0'):
""" Set the version on the descriptor function.
Use as a decorator """
def wrapper(func):
func.version = version
return func
return wrapper
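# Illustrative usage sketch (not part of the original module); the descriptor
# below is hypothetical:
#
#   @setDescriptorVersion('1.1.0')
#   def HeavyAtomCount(mol):
#       return mol.GetNumHeavyAtoms()
#
#   HeavyAtomCount.version   # -> '1.1.0'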
class VectorDescriptorNamespace(dict):
def __init__(self, **kwargs):
self.update(kwargs)
class VectorDescriptorWrapper:
"""Wrap a function that returns a vector and make it seem like there
is one function for each entry. These functions are added to the global
namespace with the names provided"""
def __init__(self, func, names, version, namespace):
self.func = func
self.names = names
self.func_key = "__%s"%(func.__name__)
function_namespace = {}
for i,n in enumerate(names):
def f(mol, index=i):
return self.call_desc(mol, index=index)
f.__name__ = n
f.__qualname__ = n
f.version = version
function_namespace[n] = f
self.namespace = VectorDescriptorNamespace(**function_namespace)
self.namespace.update(namespace)
namespace.update(function_namespace)
def _get_key(self, index):
return "%s%s"%(self.func_key, index)
def call_desc(self, mol, index):
if hasattr(mol, self.func_key):
results = getattr(mol, self.func_key, None)
if results is not None:
return results[index]
try:
results = self.func(mol)
except:
return math.nan
setattr(mol, self.func_key, results)
return results[index]
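# Illustrative usage sketch (not part of the original module); _counts is a
# hypothetical vector descriptor returning two values per molecule:
#
#   def _counts(mol):
#       return [mol.GetNumAtoms(), mol.GetNumBonds()]
#
#   ns = {}
#   VectorDescriptorWrapper(_counts, names=['NumAtoms_v', 'NumBonds_v'],
#                           version='1.0.0', namespace=ns)
#   # ns['NumAtoms_v'](mol) returns the first entry, ns['NumBonds_v'](mol) the
#   # second; the underlying vector is computed once and cached on the molecule.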
|
greglandrum/rdkit
|
rdkit/Chem/ChemUtils/DescriptorUtilities.py
|
Python
|
bsd-3-clause
| 1,903
|
[
"RDKit"
] |
cb801d1bb6fa88907ab09a71cd01a174a87b120d073312ec393da55997aeed75
|
########################################################################
# File : Watchdog.py
# Author: Stuart Paterson
########################################################################
""" The Watchdog class is used by the Job Wrapper to resolve and monitor
the system resource consumption. The Watchdog can determine if
a running job is stalled and indicate this to the Job Wrapper.
Furthermore, the Watchdog will identify when the Job CPU limit has been
exceeded and fail jobs meaningfully.
Information is returned to the WMS via the heart-beat mechanism. This
also interprets control signals from the WMS e.g. to kill a running
job.
- Still to implement:
- CPU normalization for correct comparison with job limit
"""
__RCSID__ = "$Id$"
import os
import re
import time
import resource
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities import MJF
from DIRAC.Core.Utilities.Profiler import Profiler
from DIRAC.Core.Utilities.TimeLeft.TimeLeft import TimeLeft
from DIRAC.Core.Utilities.Subprocess import getChildrenPIDs
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.PathFinder import getSystemInstance
from DIRAC.WorkloadManagementSystem.Client.JobStateUpdateClient import JobStateUpdateClient
class Watchdog(object):
#############################################################################
def __init__(self, pid, exeThread, spObject, jobCPUTime, memoryLimit=0, processors=1, systemFlag='linux', jobArgs={}):
""" Constructor, takes system flag as argument.
"""
self.stopSigStartSeconds = int(jobArgs.get('StopSigStartSeconds', 1800)) # 30 minutes
self.stopSigFinishSeconds = int(jobArgs.get('StopSigFinishSeconds', 1800)) # 30 minutes
self.stopSigNumber = int(jobArgs.get('StopSigNumber', 2)) # SIGINT
self.stopSigRegex = jobArgs.get('StopSigRegex', None)
self.stopSigSent = False
self.log = gLogger.getSubLogger("Watchdog")
self.systemFlag = systemFlag
self.exeThread = exeThread
self.wrapperPID = pid
self.appPID = self.exeThread.getCurrentPID()
self.spObject = spObject
self.jobCPUTime = jobCPUTime
self.memoryLimit = memoryLimit
self.calibration = 0
self.initialValues = {}
self.parameters = {}
self.peekFailCount = 0
self.peekRetry = 5
self.profiler = Profiler(pid)
self.checkError = ''
self.currentStats = {}
self.initialized = False
self.count = 0
# defaults
self.testWallClock = 1
self.testDiskSpace = 1
self.testLoadAvg = 1
self.maxWallClockTime = 3 * 24 * 60 * 60
self.testCPUConsumed = 1
self.testCPULimit = 0
self.testMemoryLimit = 0
self.testTimeLeft = 1
self.pollingTime = 10 # 10 seconds
self.checkingTime = 30 * 60 # 30 minute period
self.minCheckingTime = 20 * 60 # 20 mins
self.wallClockCheckSeconds = 5 * 60 # 5 minutes
self.maxWallClockTime = 3 * 24 * 60 * 60 # i.e. 3 days
self.jobPeekFlag = 1 # on / off
self.minDiskSpace = 10 # MB
self.loadAvgLimit = 1000 # > 1000 and jobs killed
self.sampleCPUTime = 30 * 60 # i.e. up to a 30 min sample
self.jobCPUMargin = 20 # %age buffer before killing job
self.minCPUWallClockRatio = 5 # ratio %age
self.nullCPULimit = 5 # After 5 sample times return null CPU consumption kill job
self.checkCount = 0
self.wallClockCheckCount = 0
self.nullCPUCount = 0
self.grossTimeLeftLimit = 10 * self.checkingTime
self.timeLeftUtil = TimeLeft()
self.timeLeft = 0
self.littleTimeLeft = False
self.scaleFactor = 1.0
self.processors = processors
#############################################################################
def initialize(self, loops=0):
""" Watchdog initialization.
"""
if self.initialized:
self.log.info('Watchdog already initialized')
return S_OK()
else:
self.initialized = True
setup = gConfig.getValue('/DIRAC/Setup', '')
if not setup:
return S_ERROR('Can not get the DIRAC Setup value')
wms_instance = getSystemInstance("WorkloadManagement")
if not wms_instance:
return S_ERROR('Can not get the WorkloadManagement system instance')
self.section = '/Systems/WorkloadManagement/%s/JobWrapper' % wms_instance
self.maxcount = loops
self.log.verbose('Watchdog initialization')
self.log.info('Attempting to Initialize Watchdog for: %s' % (self.systemFlag))
# Test control flags
self.testWallClock = gConfig.getValue(self.section + '/CheckWallClockFlag', 1)
self.testDiskSpace = gConfig.getValue(self.section + '/CheckDiskSpaceFlag', 1)
self.testLoadAvg = gConfig.getValue(self.section + '/CheckLoadAvgFlag', 1)
self.testCPUConsumed = gConfig.getValue(self.section + '/CheckCPUConsumedFlag', 1)
self.testCPULimit = gConfig.getValue(self.section + '/CheckCPULimitFlag', 0)
self.testMemoryLimit = gConfig.getValue(self.section + '/CheckMemoryLimitFlag', 0)
self.testTimeLeft = gConfig.getValue(self.section + '/CheckTimeLeftFlag', 1)
# Other parameters
self.pollingTime = gConfig.getValue(self.section + '/PollingTime', 10) # 10 seconds
self.checkingTime = gConfig.getValue(self.section + '/CheckingTime', 30 * 60) # 30 minute period
self.minCheckingTime = gConfig.getValue(self.section + '/MinCheckingTime', 20 * 60) # 20 mins
self.maxWallClockTime = gConfig.getValue(self.section + '/MaxWallClockTime', 3 * 24 * 60 * 60) # i.e. 3 days
self.jobPeekFlag = gConfig.getValue(self.section + '/JobPeekFlag', 1) # on / off
self.minDiskSpace = gConfig.getValue(self.section + '/MinDiskSpace', 10) # MB
self.loadAvgLimit = gConfig.getValue(self.section + '/LoadAverageLimit', 1000) # > 1000 and jobs killed
self.sampleCPUTime = gConfig.getValue(self.section + '/CPUSampleTime', 30 * 60) # i.e. up to a 30 min sample
self.jobCPUMargin = gConfig.getValue(self.section + '/JobCPULimitMargin', 20) # %age buffer before killing job
self.minCPUWallClockRatio = gConfig.getValue(self.section + '/MinCPUWallClockRatio', 5) # ratio %age
# After 5 sample times return null CPU consumption kill job
self.nullCPULimit = gConfig.getValue(self.section + '/NullCPUCountLimit', 5)
if self.checkingTime < self.minCheckingTime:
self.log.info(
'Requested CheckingTime of %s setting to %s seconds (minimum)' %
(self.checkingTime, self.minCheckingTime))
self.checkingTime = self.minCheckingTime
# The time left is returned in seconds @ 250 SI00 = 1 HS06,
# the self.checkingTime and self.pollingTime are in seconds,
# thus they need to be multiplied by a large enough factor
self.fineTimeLeftLimit = gConfig.getValue(self.section + '/TimeLeftLimit', 150 * self.pollingTime)
self.scaleFactor = gConfig.getValue('/LocalSite/CPUScalingFactor', 1.0)
return S_OK()
def run(self):
""" The main watchdog execution method
"""
result = self.initialize()
if not result['OK']:
self.log.always('Can not start watchdog for the following reason')
self.log.always(result['Message'])
return result
try:
while True:
self.log.debug('Starting watchdog loop # %d' % self.count)
start_cycle_time = time.time()
result = self.execute()
exec_cycle_time = time.time() - start_cycle_time
if not result['OK']:
self.log.error("Watchdog error during execution", result['Message'])
break
elif result['Value'] == "Ended":
break
self.count += 1
if exec_cycle_time < self.pollingTime:
time.sleep(self.pollingTime - exec_cycle_time)
return S_OK()
except Exception:
self.log.exception()
return S_ERROR('Exception')
#############################################################################
def execute(self):
""" The main agent execution method of the Watchdog.
"""
if not self.exeThread.isAlive():
self.__getUsageSummary()
self.log.info('Process to monitor has completed, Watchdog will exit.')
return S_OK("Ended")
# WallClock checks every self.wallClockCheckSeconds, but only if StopSigRegex is defined in JDL
if not self.stopSigSent and self.stopSigRegex is not None and (
time.time() - self.initialValues['StartTime']) > self.wallClockCheckSeconds * self.wallClockCheckCount:
self.wallClockCheckCount += 1
self._performWallClockChecks()
if self.littleTimeLeft:
# if we have gone over enough iterations query again
if self.littleTimeLeftCount == 0 and self.__timeLeft() == -1:
self.checkError = 'Job has reached the CPU limit of the queue'
self.log.error(self.checkError, self.timeLeft)
self.__killRunningThread()
return S_OK()
else:
self.littleTimeLeftCount -= 1
# Note: need to poll regularly to see if the thread is alive
# but only perform checks with a certain frequency
if (time.time() - self.initialValues['StartTime']) > self.checkingTime * self.checkCount:
self.checkCount += 1
result = self._performChecks()
if not result['OK']:
self.log.warn('Problem during recent checks')
self.log.warn(result['Message'])
return S_OK()
else:
# self.log.debug('Application thread is alive: checking count is %s' %(self.checkCount))
return S_OK()
#############################################################################
def _performWallClockChecks(self):
"""Watchdog performs the wall clock checks based on MJF. Signals are sent
to processes if we need to stop, but function always returns S_OK()
"""
mjf = MJF.MJF()
try:
wallClockSecondsLeft = mjf.getWallClockSecondsLeft()
except Exception as e:
# Just stop if we can't get the wall clock seconds left
return S_OK()
jobstartSeconds = mjf.getIntJobFeature('jobstart_secs')
if jobstartSeconds is None:
# Just stop if we don't know when the job started
return S_OK()
if (int(time.time()) > jobstartSeconds + self.stopSigStartSeconds) and \
(wallClockSecondsLeft < self.stopSigFinishSeconds + self.wallClockCheckSeconds):
# Need to send the signal! Assume it works to avoid sending the signal more than once
self.log.info('Sending signal %d to JobWrapper children' % self.stopSigNumber)
self.stopSigSent = True
try:
for childPid in getChildrenPIDs(self.wrapperPID):
try:
cmdline = open('/proc/%d/cmdline' % childPid, 'r').read().replace('\0', ' ').strip()
except IOError:
# Process gone away? Not running on Linux? Skip anyway
continue
if re.search(self.stopSigRegex, cmdline) is not None:
self.log.info(
'Sending signal %d to process ID %d, cmdline = "%s"' %
(self.stopSigNumber, childPid, cmdline))
os.kill(childPid, self.stopSigNumber)
except Exception as e:
self.log.error('Failed to send signals to JobWrapper children! (%s)' % str(e))
return S_OK()
#############################################################################
def _performChecks(self):
"""The Watchdog checks are performed at a different period to the checking of the
application thread and correspond to the checkingTime.
"""
self.log.verbose('------------------------------------')
self.log.verbose('Checking loop starts for Watchdog')
heartBeatDict = {}
msg = ''
loadAvg = float(os.getloadavg()[0])
msg += 'LoadAvg: %d ' % loadAvg
heartBeatDict['LoadAverage'] = loadAvg
if 'LoadAverage' not in self.parameters:
self.parameters['LoadAverage'] = []
self.parameters['LoadAverage'].append(loadAvg)
memoryUsed = self.getMemoryUsed()
msg += 'MemUsed: %.1f kb ' % (memoryUsed)
heartBeatDict['MemoryUsed'] = memoryUsed
if 'MemoryUsed' not in self.parameters:
self.parameters['MemoryUsed'] = []
self.parameters['MemoryUsed'].append(memoryUsed)
result = self.profiler.getAllProcessData(withChildren=True)
if result['OK']:
vsize = result['Value']['stats']['vSizeUsage'] * 1024.
rss = result['Value']['stats']['memoryUsage'] * 1024.
heartBeatDict['Vsize'] = vsize
heartBeatDict['RSS'] = rss
self.parameters.setdefault('Vsize', [])
self.parameters['Vsize'].append(vsize)
self.parameters.setdefault('RSS', [])
self.parameters['RSS'].append(rss)
msg += "Job Vsize: %.1f kb " % vsize
msg += "Job RSS: %.1f kb " % rss
result = self.getDiskSpace()
if not result['OK']:
self.log.warn("Could not establish DiskSpace", result['Message'])
else:
msg += 'DiskSpace: %.1f MB ' % (result['Value'])
if 'DiskSpace' not in self.parameters:
self.parameters['DiskSpace'] = []
if result['OK']:
self.parameters['DiskSpace'].append(result['Value'])
heartBeatDict['AvailableDiskSpace'] = result['Value']
cpu = self.__getCPU()
if not cpu['OK']:
msg += 'CPU: ERROR '
hmsCPU = 0
else:
cpu = cpu['Value']
msg += 'CPU: %s (h:m:s) ' % (cpu)
if 'CPUConsumed' not in self.parameters:
self.parameters['CPUConsumed'] = []
self.parameters['CPUConsumed'].append(cpu)
hmsCPU = cpu
rawCPU = self.__convertCPUTime(hmsCPU)
if rawCPU['OK']:
heartBeatDict['CPUConsumed'] = rawCPU['Value']
result = self.__getWallClockTime()
if not result['OK']:
self.log.warn("Failed determining wall clock time", result['Message'])
else:
msg += 'WallClock: %.2f s ' % (result['Value'])
self.parameters.setdefault('WallClockTime', list()).append(result['Value'])
heartBeatDict['WallClockTime'] = result['Value']
self.log.info(msg)
result = self._checkProgress()
if not result['OK']:
self.checkError = result['Message']
self.log.warn(self.checkError)
if self.jobPeekFlag:
result = self.__peek()
if result['OK']:
outputList = result['Value']
size = len(outputList)
self.log.info('Last %s lines of available application output:' % (size))
self.log.info('================START================')
for line in outputList:
self.log.info(line)
self.log.info('=================END=================')
self.__killRunningThread()
return S_OK()
recentStdOut = 'None'
if self.jobPeekFlag:
result = self.__peek()
if result['OK']:
outputList = result['Value']
size = len(outputList)
recentStdOut = 'Last %s lines of application output from Watchdog on %s [UTC]:' % (size, Time.dateTime())
border = '=' * len(recentStdOut)
cpuTotal = 'Last reported CPU consumed for job is %s (h:m:s)' % (hmsCPU)
if self.timeLeft:
cpuTotal += ', Batch Queue Time Left %s (s @ HS06)' % self.timeLeft
recentStdOut = '\n%s\n%s\n%s\n%s\n' % (border, recentStdOut, cpuTotal, border)
self.log.info(recentStdOut)
for line in outputList:
self.log.info(line)
recentStdOut += line + '\n'
else:
recentStdOut = 'Watchdog is initializing and will attempt to obtain standard output from application thread'
self.log.info(recentStdOut)
self.peekFailCount += 1
if self.peekFailCount > self.peekRetry:
self.jobPeekFlag = 0
self.log.warn('Turning off job peeking for remainder of execution')
if 'JOBID' not in os.environ:
self.log.info('Running without JOBID so parameters will not be reported')
return S_OK()
jobID = os.environ['JOBID']
staticParamDict = {'StandardOutput': recentStdOut}
self.__sendSignOfLife(int(jobID), heartBeatDict, staticParamDict)
return S_OK('Watchdog checking cycle complete')
#############################################################################
def __getCPU(self):
"""Uses os.times() to get CPU time and returns HH:MM:SS after conversion.
"""
try:
result = self.profiler.getAllProcessData()
if not result['OK']:
self.log.warn('Problem while checking consumed CPU')
return result
cpuTime = result['Value']
if cpuTime:
cpuTimeTotal = cpuTime['stats']['cpuUsageSystem'] + cpuTime['stats']['cpuUsageUser']
self.log.verbose("Raw CPU time consumed (s) = %s" % (cpuTimeTotal))
return self.__getCPUHMS(cpuTimeTotal)
else:
self.log.error("CPU time consumed found to be 0")
return S_ERROR()
except Exception as e:
self.log.warn('Could not determine CPU time consumed with exception')
self.log.exception(e)
return S_ERROR("Could not determine CPU time consumed with exception")
#############################################################################
def __getCPUHMS(self, cpuTime):
mins, secs = divmod(cpuTime, 60)
hours, mins = divmod(mins, 60)
humanTime = '%02d:%02d:%02d' % (hours, mins, secs)
self.log.verbose('Human readable CPU time is: %s' % humanTime)
return S_OK(humanTime)
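# For example (sketch, not part of the original class):
#   self.__getCPUHMS(5415.0)   # -> S_OK('01:30:15')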
#############################################################################
def __interpretControlSignal(self, signalDict):
"""This method is called whenever a signal is sent via the result of
sending a sign of life.
"""
self.log.info('Received control signal')
if isinstance(signalDict, dict):
if 'Kill' in signalDict:
self.log.info('Received Kill signal, stopping job via control signal')
self.checkError = 'Received Kill signal'
self.__killRunningThread()
else:
self.log.info('The following control signal was sent but not understood by the watchdog:')
self.log.info(signalDict)
else:
self.log.info('Expected dictionary for control signal, received:\n%s' % (signalDict))
return S_OK()
#############################################################################
def _checkProgress(self):
"""This method calls specific tests to determine whether the job execution
is proceeding normally. CS flags can easily be added to add or remove
tests via central configuration.
"""
report = ''
if self.testWallClock:
result = self.__checkWallClockTime()
if not result['OK']:
self.log.warn(result['Message'])
return result
report += 'WallClock: OK, '
else:
report += 'WallClock: NA,'
if self.testDiskSpace:
result = self.__checkDiskSpace()
if not result['OK']:
self.log.warn(result['Message'])
return result
report += 'DiskSpace: OK, '
else:
report += 'DiskSpace: NA,'
if self.testLoadAvg:
result = self.__checkLoadAverage()
if not result['OK']:
self.log.warn("Check of load average failed, but won't fail because of that: %s" % result['Message'])
report += 'LoadAverage: ERROR, '
return S_OK()
report += 'LoadAverage: OK, '
else:
report += 'LoadAverage: NA,'
if self.testCPUConsumed:
result = self.__checkCPUConsumed()
if not result['OK']:
return result
report += 'CPUConsumed: OK, '
else:
report += 'CPUConsumed: NA, '
if self.testCPULimit:
result = self.__checkCPULimit()
if not result['OK']:
self.log.warn(result['Message'])
return result
report += 'CPULimit OK, '
else:
report += 'CPULimit: NA, '
if self.testTimeLeft:
self.__timeLeft()
if self.timeLeft:
report += 'TimeLeft: OK'
else:
report += 'TimeLeft: NA'
if self.testMemoryLimit:
result = self.__checkMemoryLimit()
if not result['OK']:
self.log.warn(result['Message'])
return result
report += 'MemoryLimit OK, '
else:
report += 'MemoryLimit: NA, '
self.log.info(report)
return S_OK('All enabled checks passed')
#############################################################################
def __checkCPUConsumed(self):
""" Checks whether the CPU consumed by application process is reasonable. This
method will report stalled jobs to be killed.
"""
self.log.info("Checking CPU Consumed")
if 'WallClockTime' not in self.parameters:
return S_ERROR('Missing WallClockTime info')
if 'CPUConsumed' not in self.parameters:
return S_ERROR('Missing CPUConsumed info')
wallClockTime = self.parameters['WallClockTime'][-1]
if wallClockTime < self.sampleCPUTime:
self.log.info("Stopping check, wallclock time (%s) is still smaller than sample time (%s)" % (wallClockTime,
self.sampleCPUTime))
return S_OK()
intervals = max(1, int(self.sampleCPUTime / self.checkingTime))
if len(self.parameters['CPUConsumed']) < intervals + 1:
self.log.info("Not enough snapshots to calculate, there are %s and we need %s" %
(len(self.parameters['CPUConsumed']), intervals + 1))
return S_OK()
wallClockTime = self.parameters['WallClockTime'][-1] - self.parameters['WallClockTime'][-1 - intervals]
try:
cpuTime = self.__convertCPUTime(self.parameters['CPUConsumed'][-1])['Value']
# For some reason, some times the CPU consumed estimation returns 0
# if cpuTime == 0:
# return S_OK()
cpuTime -= self.__convertCPUTime(self.parameters['CPUConsumed'][-1 - intervals])['Value']
if cpuTime < 0:
self.log.warn('Consumed CPU time negative, something wrong may have happened, ignore')
return S_OK()
if wallClockTime <= 0:
self.log.warn('Wallclock time should not be negative or zero, Ignore')
return S_OK()
ratio = (cpuTime / wallClockTime) * 100.
self.log.info("CPU/Wallclock ratio is %.2f%%" % ratio)
# in case of error cpuTime might be 0, exclude this
if ratio < self.minCPUWallClockRatio:
if os.path.exists('DISABLE_WATCHDOG_CPU_WALLCLOCK_CHECK'):
self.log.info('N.B. job would be declared as stalled but CPU / WallClock check is disabled by payload')
return S_OK()
self.log.info("Job is stalled!")
return S_ERROR('Watchdog identified this job as stalled')
except Exception as e:
self.log.error("Cannot convert CPU consumed from string to int", str(e))
return S_OK()
#############################################################################
def __convertCPUTime(self, cputime):
""" Method to convert the CPU time as returned from the Watchdog
instances to the equivalent DIRAC normalized CPU time to be compared
to the Job CPU requirement.
"""
cpuValue = 0
cpuHMS = cputime.split(':')
# for i in xrange( len( cpuHMS ) ):
# cpuHMS[i] = cpuHMS[i].replace( '00', '0' )
try:
hours = float(cpuHMS[0]) * 60 * 60
mins = float(cpuHMS[1]) * 60
secs = float(cpuHMS[2])
cpuValue = float(hours + mins + secs)
except Exception as x:
self.log.warn(str(x))
return S_ERROR('Could not calculate CPU time')
# Normalization to be implemented
normalizedCPUValue = cpuValue
result = S_OK()
result['Value'] = normalizedCPUValue
self.log.debug('CPU value %s converted to %s' % (cputime, normalizedCPUValue))
return result
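# For example (sketch, not part of the original class):
#   self.__convertCPUTime('01:30:15')['Value']   # -> 5415.0 seconds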
#############################################################################
def __checkCPULimit(self):
""" Checks that the job has consumed more than the job CPU requirement
(plus a configurable margin) and kills them as necessary.
"""
consumedCPU = 0
if 'CPUConsumed' in self.parameters:
consumedCPU = self.parameters['CPUConsumed'][-1]
consumedCPUDict = self.__convertCPUTime(consumedCPU)
if consumedCPUDict['OK']:
currentCPU = consumedCPUDict['Value']
else:
return S_OK('Not possible to determine current CPU consumed')
if consumedCPU:
limit = self.jobCPUTime + self.jobCPUTime * (self.jobCPUMargin / 100)
cpuConsumed = float(currentCPU)
if cpuConsumed > limit:
self.log.info(
'Job has consumed more than the specified CPU limit with an additional %s%% margin' %
(self.jobCPUMargin))
return S_ERROR('Job has exceeded maximum CPU time limit')
else:
return S_OK('Job within CPU limit')
elif not currentCPU:
self.log.verbose('Both initial and current CPU consumed are null')
return S_OK('CPU consumed is not measurable yet')
else:
return S_OK('Not possible to determine CPU consumed')
def __checkMemoryLimit(self):
""" Checks that the job memory consumption is within a limit
"""
if 'Vsize' in self.parameters:
vsize = self.parameters['Vsize'][-1]
if vsize and self.memoryLimit:
if vsize > self.memoryLimit:
# Just a warning for the moment
self.log.warn("Job has consumed %.2f KB of memory with the limit of %.2f KB" % (vsize, self.memoryLimit))
return S_OK()
#############################################################################
def __checkDiskSpace(self):
"""Checks whether the CS defined minimum disk space is available.
"""
if 'DiskSpace' in self.parameters:
availSpace = self.parameters['DiskSpace'][-1]
if availSpace >= 0 and availSpace < self.minDiskSpace:
self.log.info('Not enough local disk space for job to continue, defined in CS as %s MB' % (self.minDiskSpace))
return S_ERROR('Job has insufficient disk space to continue')
else:
return S_OK('Job has enough disk space available')
else:
return S_ERROR('Available disk space could not be established')
#############################################################################
def __checkWallClockTime(self):
"""Checks whether the job has been running for the CS defined maximum
wall clock time.
"""
if 'StartTime' in self.initialValues:
startTime = self.initialValues['StartTime']
if time.time() - startTime > self.maxWallClockTime:
self.log.info('Job has exceeded maximum wall clock time of %s seconds' % (self.maxWallClockTime))
return S_ERROR('Job has exceeded maximum wall clock time')
else:
return S_OK('Job within maximum wall clock time')
else:
return S_ERROR('Job start time could not be established')
#############################################################################
def __checkLoadAverage(self):
"""Checks whether the CS defined maximum load average is exceeded.
"""
if 'LoadAverage' in self.parameters:
loadAvg = self.parameters['LoadAverage'][-1]
if loadAvg > float(self.loadAvgLimit):
self.log.info('Maximum load average exceeded, defined in CS as %s ' % (self.loadAvgLimit))
return S_ERROR('Job exceeded maximum load average')
else:
return S_OK('Job running with normal load average')
else:
return S_ERROR('Job load average not established')
#############################################################################
def __peek(self):
""" Uses ExecutionThread.getOutput() method to obtain standard output
from running thread via subprocess callback function.
"""
result = self.exeThread.getOutput()
if not result['OK']:
self.log.warn('Could not obtain output from running application thread')
self.log.warn(result['Message'])
return result
#############################################################################
def calibrate(self):
""" The calibrate method obtains the initial values for system memory and load
and calculates the margin for error for the rest of the Watchdog cycle.
"""
self.__getWallClockTime()
self.parameters['WallClockTime'] = []
cpuConsumed = self.__getCPU()
if not cpuConsumed['OK']:
self.log.warn("Could not establish CPU consumed, setting to 0.0")
cpuConsumed = 0.0
else:
cpuConsumed = cpuConsumed['Value']
self.initialValues['CPUConsumed'] = cpuConsumed
self.parameters['CPUConsumed'] = []
self.initialValues['LoadAverage'] = float(os.getloadavg()[0])
self.parameters['LoadAverage'] = []
memUsed = self.getMemoryUsed()
self.initialValues['MemoryUsed'] = memUsed
self.parameters['MemoryUsed'] = []
result = self.profiler.getAllProcessData()
self.log.verbose('Job Memory: %s' % (result['Value']))
if not result['OK']:
self.log.warn('Could not get job memory usage')
self.initialValues['Vsize'] = result['Value']['stats']['vSizeUsage'] * 1024.
self.initialValues['RSS'] = result['Value']['stats']['memoryUsage'] * 1024.
self.parameters['Vsize'] = []
self.parameters['RSS'] = []
result = self.getDiskSpace()
self.log.verbose('DiskSpace: %s' % (result))
if not result['OK']:
self.log.warn("Could not establish DiskSpace")
self.initialValues['DiskSpace'] = result['Value']
self.parameters['DiskSpace'] = []
result = self.getNodeInformation()
self.log.verbose('NodeInfo: %s' % (result))
if not result['OK']:
self.log.warn("Could not establish static system information")
if 'LSB_JOBID' in os.environ:
result['LocalJobID'] = os.environ['LSB_JOBID']
if 'PBS_JOBID' in os.environ:
result['LocalJobID'] = os.environ['PBS_JOBID']
if 'QSUB_REQNAME' in os.environ:
result['LocalJobID'] = os.environ['QSUB_REQNAME']
if 'JOB_ID' in os.environ:
result['LocalJobID'] = os.environ['JOB_ID']
self.__reportParameters(result, 'NodeInformation', True)
self.__reportParameters(self.initialValues, 'InitialValues')
return S_OK()
def __timeLeft(self):
"""
return Normalized CPU time left in the batch system
0 if not available
update self.timeLeft and self.littleTimeLeft
"""
# Get CPU time left in the batch system
result = self.timeLeftUtil.getTimeLeft(0.0)
if not result['OK']:
# Could not get CPU time left, we might need to wait for the first loop
# or the Utility is not working properly for this batch system
# or we are not running in a batch system at all
timeLeft = 0
else:
timeLeft = result['Value']
self.timeLeft = timeLeft
if not self.littleTimeLeft:
if timeLeft and timeLeft < self.grossTimeLeftLimit:
self.log.info('TimeLeft below %s, now checking with higher frequency' % timeLeft)
self.littleTimeLeft = True
# TODO: better configurable way of doing this to be coded
self.littleTimeLeftCount = 15
else:
if self.timeLeft and self.timeLeft < self.fineTimeLeftLimit:
timeLeft = -1
return timeLeft
#############################################################################
def __getUsageSummary(self):
""" Returns average load, memory etc. over execution of job thread
"""
summary = {}
# CPUConsumed
if 'CPUConsumed' in self.parameters:
cpuList = self.parameters['CPUConsumed']
if cpuList:
hmsCPU = cpuList[-1]
rawCPU = self.__convertCPUTime(hmsCPU)
if rawCPU['OK']:
summary['LastUpdateCPU(s)'] = rawCPU['Value']
else:
summary['LastUpdateCPU(s)'] = 'Could not be estimated'
# DiskSpace
if 'DiskSpace' in self.parameters:
space = self.parameters['DiskSpace']
if space:
value = abs(float(space[-1]) - float(self.initialValues['DiskSpace']))
if value < 0:
value = 0
summary['DiskSpace(MB)'] = value
else:
summary['DiskSpace(MB)'] = 'Could not be estimated'
# MemoryUsed
if 'MemoryUsed' in self.parameters:
memory = self.parameters['MemoryUsed']
if memory:
summary['MemoryUsed(kb)'] = abs(float(memory[-1]) - float(self.initialValues['MemoryUsed']))
else:
summary['MemoryUsed(kb)'] = 'Could not be estimated'
# LoadAverage
if 'LoadAverage' in self.parameters:
laList = self.parameters['LoadAverage']
if laList:
summary['LoadAverage'] = float(sum(laList)) / float(len(laList))
else:
summary['LoadAverage'] = 'Could not be estimated'
result = self.__getWallClockTime()
if not result['OK']:
self.log.warn("Failed determining wall clock time", result['Message'])
summary['WallClockTime(s)'] = 0
summary['ScaledCPUTime(s)'] = 0
else:
wallClock = result['Value']
summary['WallClockTime(s)'] = wallClock
summary['ScaledCPUTime(s)'] = wallClock * self.scaleFactor * self.processors
self.__reportParameters(summary, 'UsageSummary', True)
self.currentStats = summary
#############################################################################
def __reportParameters(self, params, title=None, report=False):
"""Will report parameters for job.
"""
try:
parameters = []
self.log.info('==========================================================')
if title:
self.log.info('Watchdog will report %s' % (title))
else:
self.log.info('Watchdog will report parameters')
self.log.info('==========================================================')
vals = params
if 'Value' in params:
if vals['Value']:
vals = params['Value']
for k, v in vals.items():
if v:
self.log.info(str(k) + ' = ' + str(v))
parameters.append((k, v))
if report:
self.__setJobParamList(parameters)
self.log.info('==========================================================')
except Exception as x:
self.log.warn('Problem while reporting parameters')
self.log.warn(str(x))
#############################################################################
def __getWallClockTime(self):
""" Establishes the Wall Clock time spent since the Watchdog initialization"""
result = S_OK()
if 'StartTime' in self.initialValues:
currentTime = time.time()
wallClock = currentTime - self.initialValues['StartTime']
result['Value'] = wallClock
else:
self.initialValues['StartTime'] = time.time()
result['Value'] = 0.0
return result
#############################################################################
def __killRunningThread(self):
""" Will kill the running thread process and any child processes."""
self.log.info('Sending kill signal to application PID %s' % (self.spObject.getChildPID()))
result = self.spObject.killChild()
self.applicationKilled = True
self.log.info('Subprocess.killChild() returned:%s ' % (result))
return S_OK('Thread killed')
#############################################################################
def __sendSignOfLife(self, jobID, heartBeatDict, staticParamDict):
""" Sends sign of life 'heartbeat' signal and triggers control signal
interpretation.
"""
result = JobStateUpdateClient().sendHeartBeat(jobID, heartBeatDict, staticParamDict)
if not result['OK']:
self.log.warn('Problem sending sign of life')
self.log.warn(result)
if result['OK'] and result['Value']:
self.__interpretControlSignal(result['Value'])
return result
#############################################################################
def __setJobParamList(self, value):
"""Wraps around setJobParameters of state update client
"""
# job wrapper template sets the jobID variable
if 'JOBID' not in os.environ:
self.log.info('Running without JOBID so parameters will not be reported')
return S_OK()
jobID = os.environ['JOBID']
jobParam = JobStateUpdateClient().setJobParameters(int(jobID), value)
self.log.verbose('setJobParameters(%s,%s)' % (jobID, value))
if not jobParam['OK']:
self.log.warn(jobParam['Message'])
return jobParam
#############################################################################
def getNodeInformation(self):
""" Attempts to retrieve all static system information, should be overridden in a subclass"""
methodName = 'getNodeInformation'
self.log.warn('Watchdog: ' + methodName + ' method should be implemented in a subclass')
return S_ERROR('Watchdog: ' + methodName + ' method should be implemented in a subclass')
#############################################################################
def getMemoryUsed(self):
"""Obtains the memory used.
"""
mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + \
resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss
return float(mem)
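# Hedged note (illustrative, not part of the original class): ru_maxrss is the
# peak resident set size of the process; resource reports it in kilobytes on
# Linux and in bytes on macOS, so the value is mainly useful relative to the
# initial figure recorded in calibrate() above. For example:
#
#   >>> import resource
#   >>> resource.getrusage(resource.RUSAGE_SELF).ru_maxrss      # e.g. 24512
#   >>> resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss  # e.g. 0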
#############################################################################
def getDiskSpace(self):
""" Attempts to get the available disk space, should be overridden in a subclass"""
methodName = 'getDiskSpace'
self.log.warn('Watchdog: ' + methodName + ' method should be implemented in a subclass')
return S_ERROR('Watchdog: ' + methodName + ' method should be implemented in a subclass')
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
fstagni/DIRAC
|
WorkloadManagementSystem/JobWrapper/Watchdog.py
|
Python
|
gpl-3.0
| 37,268
|
[
"DIRAC"
] |
7da5f8bca4e39bc1d48637f9876923de989d20169f9b5a4539ca719e164a1099
|
"""
@name: Modules/Housing/Pool/pool_data.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2018-2020 by D. Brian Kimmel
@note: Created on Feb 13, 2018
@license: MIT License
@summary:
"""
__updated__ = '2019-12-30'
# Import system type stuff
# Import PyMh files
from Modules.Core.data_objects import BaseUUIDObject
class PoolData(BaseUUIDObject):
""" Holds information about the pool(s)
==> PyHouse.House.Pools.{}.
"""
def __init__(self):
super(PoolData, self).__init__()
self.PoolType = None # 'Pool', 'Pond', 'HotTub'
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/House/Pool/pool_data.py
|
Python
|
mit
| 620
|
[
"Brian"
] |
9318218a5af966558df9a56b7aa0eaa021c2ec3953f478771961284c70e0ed90
|
import os
from lan_ast import *
debug = False
class CGenerator(object):
""" Uses the same visitor pattern as the NodeVisitor, but modified to
return a value from each visit method, using string accumulation in
generic_visit.
"""
def __init__(self):
self.output = ''
self.quotes = '\"'
self.newline = '\n'
self.semi = ';'
self.start = ''
# Statements start with indentation of self.indent_level spaces, using
# the _make_indent method
#
self.indent_level = 0
self.inside_ArgList = False
self.inside_Assignment = False
def createTemp(self, ast, filename = 'temp.cpp'):
code = self.visit(ast)
currentdir = os.getcwd()
fullFilename = currentdir + '/' + filename
try:
os.remove(fullFilename)
except OSError:
pass
try:
fileobj = open(fullFilename,'w')
fileobj.write(code)
fileobj.close()
except IOError:
print "Unable to write file"
def simple_node(self, n):
""" Returns True for nodes that are "simple"
"""
return not isinstance(n, (Constant, Id, ArrayRef))
def parenthesize_if(self, n, condition):
""" Visits 'n' and returns its string representation, parenthesized
if the condition function applied to the node returns True.
"""
s = self.visit(n)
if condition(n):
return '(' + s + ')'
else:
return s
def _make_indent(self):
return ' ' * self.indent_level
def visit(self, node):
method = 'visit_' + node.__class__.__name__
return getattr(self, method, self.generic_visit)(node)
def generic_visit(self, node):
if node is None:
return ''
else:
return ''.join(self.visit(c[1]) if len(c) == 2 \
else self.visit(c) for c in node.children())
def visit_FileAST(self, n):
newline = self.newline
start = self.start
if debug:
newline = n.__class__.__name__ + newline
start = n.__class__.__name__ + start
s = ''
for ext in n.ext:
if isinstance(ext, Compound):
s += self.visit_GlobalCompound(ext)
else:
s += start + self.visit(ext) + newline
return s
def visit_GlobalCompound(self, n):
s = ''
for stat in n.statements:
s += self.visit(stat)
s += n.__class__.__name__ + self.newline
return s
def visit_GroupCompound(self, n):
newline = self.newline
start = self.start
if debug:
newline = n.__class__.__name__ + newline
start = n.__class__.__name__ + start
s = ''
for i,stat in enumerate(n.statements):
start1 = ''
if i != 0:
start1 = start
s += start1 + self.visit(stat) + newline + self._make_indent()
s += start
return s
def visit_Comment(self, n):
return n.value
def visit_Increment(self, n):
s = self.visit(n.name)
return s + n.op
def visit_UnaryBefore(self, n):
s = self.visit(n.expr)
return n.op + s
def visit_TypeId(self, n):
s = self.visit(n.name)
if n.type:
s1 = ' '.join(n.type)
s1 += ' ' + s
else:
s1 = s
if not self.inside_ArgList:
s1 += self.semi
return s1
def visit_ArrayTypeId(self, n):
s = self.visit(n.name)
s1 = ' '.join(n.type)
s1 += ' ' + s
for arg in n.subscript:
s1 += '[' + self.visit(arg) + ']'
if not self.inside_ArgList:
s1 += self.semi
return s1
def visit_Assignment(self, n):
self.inside_ArgList = True
lval = self.visit(n.lval)
self.inside_ArgList = False
self.inside_Assignment = True
rval = self.visit(n.rval)
self.inside_Assignment = False
return lval + ' ' + n.op + ' ' + rval + self.semi
def visit_ArrayInit(self, n):
s = '{'
for stat in n.values:
s += self.visit(stat) + ', '
s = s[:-2]
s += '}'
return s
def visit_Compound(self, n):
start = self.start
newline = self.newline
if debug:
newline = n.__class__.__name__ + newline
start = n.__class__.__name__ + start
s = start + self._make_indent() + '{' + newline
self.indent_level += 2
for stat in n.statements:
s += start + self._make_indent() + self.visit(stat) + newline
self.indent_level -= 2
s += start + self._make_indent() + '}'
return s
def visit_ArgList(self, n):
newline = self.newline
start = self.start
if debug:
newline = n.__class__.__name__ + newline
start = n.__class__.__name__ + start
s = '('
count = 1
if len(n.arglist) == 1:
return '(' + self.visit(n.arglist[0]) + ')'
for arg in n.arglist:
if count == 1:
s += newline + '\t' + start
s += self.visit(arg)
if count != (len(n.arglist)):
s += ', '
if count % 3 == 0:
s += newline + '\t' + start
count += 1
## if n.arglist:
## s = s[:-2]
## if (count-1) % 3 == 0 and count != 1:
## s = s[:-2]
return s + ')'
def visit_ArrayRef(self, n):
s = self.visit(n.name)
for arg in n.subscript:
s += '[' + self.visit(arg) + ']'
return s
def visit_BinOp(self, n):
lval = self.parenthesize_if(n.lval,self.simple_node)
rval = self.parenthesize_if(n.rval,self.simple_node)
return lval + ' ' + n.op + ' ' + rval
def visit_FuncDecl(self, n):
newline = self.newline
if debug:
newline = n.__class__.__name__ + newline
self.inside_ArgList = True
typeid = self.visit(n.typeid)
arglist = self.visit(n.arglist)
self.inside_ArgList = False
if self.inside_Assignment:
compound = ''
end = ''
elif n.compound.statements:
typeid = self.start + typeid
arglist += newline
compound = self.visit(n.compound) + newline
else:
compound = self.semi
return typeid + arglist + compound
def visit_ForLoop(self, n):
newline = self.newline
if debug:
newline = n.__class__.__name__ + newline
init = self.visit(n.init) # already has a semi at the end
cond = self.visit(n.cond)
inc = self.visit(n.inc)
self.indent_level += 2
compound = self.visit(n.compound)
self.indent_level -= 2
return 'for (' + init + ' ' + cond + self.semi + ' ' + inc + ')'\
+ newline + compound
def visit_IfThen(self, n):
newline = self.newline
start = self.start
if debug:
newline = n.__class__.__name__ + newline
start = n.__class__.__name__ + start
cond = self.visit(n.cond)
self.indent_level += 2
compound = self.visit(n.compound)
self.indent_level -= 2
return 'if (' + cond + ')' + newline + compound
def visit_IfThenElse(self, n):
newline = self.newline
start = self.start
if debug:
newline = n.__class__.__name__ + newline
start = n.__class__.__name__ + start
cond = self.visit(n.cond)
self.indent_level += 2
compound1 = self.visit(n.compound1)
compound2 = self.visit(n.compound2)
self.indent_level -= 2
return 'if (' + cond + ')' + newline + compound1 \
+ newline + self._make_indent() + 'else' + newline + compound2
def visit_Id(self, n):
return n.name
def visit_Include(self, n):
return "#include " + n.name
def visit_Constant(self, n):
try:
s = float(n.value)
except ValueError:
if len(n.value) == 0:
return '\"\"'
if n.value[0] == '"':
## if self.extraquotes and False:
## return self.quotes + n.value[1:-1] + self.quotes
return n.value
else:
return self.quotes + n.value + self.quotes
else:
return str(n.value)
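# Hedged illustration (standalone sketch, not part of the original generator):
# the dispatch used by CGenerator.visit() above looks up a method named
# 'visit_<ClassName>' and falls back to generic_visit. The DemoNum node class
# is made up purely for this demo.
class DemoNum(object):
    def __init__(self, value):
        self.value = value
class DemoVisitor(object):
    def visit(self, node):
        method = 'visit_' + node.__class__.__name__
        return getattr(self, method, self.generic_visit)(node)
    def generic_visit(self, node):
        return ''
    def visit_DemoNum(self, node):
        return str(node.value)
# DemoVisitor().visit(DemoNum(42)) returns '42'; any node type without a
# matching visit_* method falls back to generic_visit and yields ''.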
|
dikujepsen/OpenTran
|
v2.0/framework/cgen.py
|
Python
|
mit
| 8,784
|
[
"VisIt"
] |
83a7525e97e1d75c51b42d1efd07d77ea27fa3ad7537313ad83bfd9f333a68e7
|
#!/usr/bin/env python2
#
# Copyright (C) 2013-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
#
#unittests for python tools in $ESPRESSOPPHOME/tools
#this file uses a system containing several particles, for testing more complex tools
#input/output tools should be tested using test_python_tools_io.py
import sys
import time
import espressopp
import mpi4py.MPI as MPI
import unittest
class TestPythonTools(unittest.TestCase):
def setUp(self):
system = espressopp.System()
box = (10, 10, 10)
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
system.skin = 0.3
system.comm = MPI.COMM_WORLD
nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size,box,rc=1.5,skin=system.skin)
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc=1.5, skin=system.skin)
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
self.system = system
#add particles
particle_list = [
(1, 1, 0.1, espressopp.Real3D(3.0, 1.0, 4.0), 2.0),
(2, 1, -0.5, espressopp.Real3D(2.0, 2.0, 4.0), 2.0),
(3, 1, -0.5, espressopp.Real3D(1.0, 1.0, 4.5), 2.0),
(4, 1, 0.5, espressopp.Real3D(4.0, 1.0, 4.0), 2.0),
(5, 1, -0.5, espressopp.Real3D(5.0, 2.0, 4.0), 2.0),
(6, 1, 0.2, espressopp.Real3D(6.0, 1.0, 4.5), 2.0),
]
self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass')
self.system.storage.decompose()
def test_boresch_restraints(self):
restraintAtoms = {'A':1,'B':2,'C':3,'a':4,'b':5,'c':6} #A,B,C in ligand, a,b,c in protein, original atomistic indices
restraintK = {'aA':4184,'baA':41.84,'aAB':41.84,'aABC':41.84,'cbaA':41.84,'baAB':41.84} #kJ mol-1 nm-2,kJ mol-1 rad-2, all 10 kcal as in JPCB 2003
restraintR0 = {'aA':1.01,'baA':120.0,'aAB':120.0,'aABC':100.0,'cbaA':-170.0,'baAB':-105.0} #nm, degrees
restraint_interactions = espressopp.tools.applyBoreschRestraints(self.system,restraintAtoms,restraintK,restraintR0)
dhdlRstr = 0.0
for rt in restraint_interactions.values(): dhdlRstr+=rt.computeEnergy() #energyDeriv = energy for restraints
self.assertAlmostEqual(dhdlRstr,141.92348724,places=5)
def test_self_excl_energy(self):
exclusions = [(1,2),(1,3),(4,5),(4,6)]
energy = espressopp.tools.energy.getSelfExclEnergyReactionField(self.system,exclusions,prefactor=1.0,epsilon1=1,epsilon2=80,rc=3.0,pidlist=[3,4,5,6])
self.assertAlmostEqual(energy,-0.1231021394065,places=5)
energy = espressopp.tools.energy.getSelfExclEnergyReactionField(self.system,exclusions,prefactor=1.0,epsilon1=1,epsilon2=80,rc=3.0,nParticles=6)
self.assertAlmostEqual(energy,-0.1436881757534,places=5)
if __name__ == '__main__':
unittest.main()
|
govarguz/espressopp
|
testsuite/python_tools/complex/test_python_tools_complex.py
|
Python
|
gpl-3.0
| 3,613
|
[
"ESPResSo"
] |
3ccdad8151b42620bc6661d93c913582083b1d35b45a52863fef4cd59680fd41
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
remove_end,
)
class MailRuIE(InfoExtractor):
IE_NAME = 'mailru'
IE_DESC = 'Видео@Mail.Ru'
_VALID_URL = r'https?://(?:(?:www|m)\.)?my\.mail\.ru/(?:video/.*#video=/?(?P<idv1>(?:[^/]+/){3}\d+)|(?:(?P<idv2prefix>(?:[^/]+/){2})video/(?P<idv2suffix>[^/]+/\d+))\.html)'
_TESTS = [
{
'url': 'http://my.mail.ru/video/top#video=/mail/sonypicturesrus/75/76',
'md5': 'dea205f03120046894db4ebb6159879a',
'info_dict': {
'id': '46301138_76',
'ext': 'mp4',
'title': 'Новый Человек-Паук. Высокое напряжение. Восстание Электро',
'timestamp': 1393232740,
'upload_date': '20140224',
'uploader': 'sonypicturesrus',
'uploader_id': 'sonypicturesrus@mail.ru',
'duration': 184,
},
'skip': 'Not accessible from Travis CI server',
},
{
'url': 'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html',
'md5': '00a91a58c3402204dcced523777b475f',
'info_dict': {
'id': '46843144_1263',
'ext': 'mp4',
'title': 'Samsung Galaxy S5 Hammer Smash Fail Battery Explosion',
'timestamp': 1397039888,
'upload_date': '20140409',
'uploader': 'hitech@corp.mail.ru',
'uploader_id': 'hitech@corp.mail.ru',
'duration': 245,
},
'skip': 'Not accessible from Travis CI server',
},
{
# only available via metaUrl API
'url': 'http://my.mail.ru/mail/720pizle/video/_myvideo/502.html',
'md5': '3b26d2491c6949d031a32b96bd97c096',
'info_dict': {
'id': '56664382_502',
'ext': 'mp4',
'title': ':8336',
'timestamp': 1449094163,
'upload_date': '20151202',
'uploader': '720pizle@mail.ru',
'uploader_id': '720pizle@mail.ru',
'duration': 6001,
},
'skip': 'Not accessible from Travis CI server',
},
{
'url': 'http://m.my.mail.ru/mail/3sktvtr/video/_myvideo/138.html',
'only_matching': True,
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('idv1')
if not video_id:
video_id = mobj.group('idv2prefix') + mobj.group('idv2suffix')
webpage = self._download_webpage(url, video_id)
video_data = None
page_config = self._parse_json(self._search_regex(
r'(?s)<script[^>]+class="sp-video__page-config"[^>]*>(.+?)</script>',
webpage, 'page config', default='{}'), video_id, fatal=False)
if page_config:
meta_url = page_config.get('metaUrl') or page_config.get('video', {}).get('metaUrl')
if meta_url:
video_data = self._download_json(
meta_url, video_id, 'Downloading video meta JSON', fatal=False)
# Fallback old approach
if not video_data:
video_data = self._download_json(
'http://api.video.mail.ru/videos/%s.json?new=1' % video_id,
video_id, 'Downloading video JSON')
formats = []
for f in video_data['videos']:
video_url = f.get('url')
if not video_url:
continue
format_id = f.get('key')
height = int_or_none(self._search_regex(
r'^(\d+)[pP]$', format_id, 'height', default=None)) if format_id else None
formats.append({
'url': video_url,
'format_id': format_id,
'height': height,
})
self._sort_formats(formats)
meta_data = video_data['meta']
title = remove_end(meta_data['title'], '.mp4')
author = video_data.get('author')
uploader = author.get('name')
uploader_id = author.get('id') or author.get('email')
view_count = int_or_none(video_data.get('viewsCount') or video_data.get('views_count'))
acc_id = meta_data.get('accId')
item_id = meta_data.get('itemId')
content_id = '%s_%s' % (acc_id, item_id) if acc_id and item_id else video_id
thumbnail = meta_data.get('poster')
duration = int_or_none(meta_data.get('duration'))
timestamp = int_or_none(meta_data.get('timestamp'))
return {
'id': content_id,
'title': title,
'thumbnail': thumbnail,
'timestamp': timestamp,
'uploader': uploader,
'uploader_id': uploader_id,
'duration': duration,
'view_count': view_count,
'formats': formats,
}
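# Hedged illustration (not part of the original extractor): how the named groups
# of _VALID_URL resolve for the two URL shapes handled in _real_extract() above.
#
#   >>> m = re.match(MailRuIE._VALID_URL,
#   ...              'http://my.mail.ru/video/top#video=/mail/sonypicturesrus/75/76')
#   >>> m.group('idv1')
#   'mail/sonypicturesrus/75/76'
#   >>> m = re.match(MailRuIE._VALID_URL,
#   ...              'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html')
#   >>> m.group('idv2prefix') + m.group('idv2suffix')
#   'corp/hitech/news_hi-tech_mail_ru/1263'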
|
Tithen-Firion/youtube-dl
|
youtube_dl/extractor/mailru.py
|
Python
|
unlicense
| 5,086
|
[
"Galaxy"
] |
ee9a69845b2131dcd8aa81c2411ded78895cd640eb74ed3aa28952769f6d2b68
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAltcdfenvs(RPackage):
"""Convenience data structures and functions to handle cdfenvs."""
homepage = "https://www.bioconductor.org/packages/altcdfenvs/"
url = "https://git.bioconductor.org/packages/altcdfenvs"
version('2.38.0', git='https://git.bioconductor.org/packages/altcdfenvs', commit='2e92b9da76dbe50af4bf33c525134e29e9809291')
depends_on('r@3.4.0:3.4.9', when='@2.38.0')
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-affy', type=('build', 'run'))
depends_on('r-makecdfenv', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('r-hypergraph', type=('build', 'run'))
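# Hedged usage note (not part of the original recipe): with this package file in a
# Spack repository, the pinned Bioconductor version above would typically be
# installed with a spec such as `spack install r-altcdfenvs@2.38.0`; the exact
# invocation is an assumption based on common Spack conventions.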
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-altcdfenvs/package.py
|
Python
|
lgpl-2.1
| 2,040
|
[
"Bioconductor"
] |
309bd4f5977eb4bf0c974a22cf2667077973973138e71d425e10415252a5d708
|
#!/usr/bin/python
# Copyright 2017 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_healthcheck
version_added: "2.4"
short_description: Create, Update or Destroy a Healthcheck.
description:
- Create, Update or Destroy a Healthcheck. Currently only HTTP and
HTTPS Healthchecks are supported. Healthchecks are used to monitor
individual instances, managed instance groups and/or backend
services. Healthchecks are reusable.
- Visit
U(https://cloud.google.com/compute/docs/load-balancing/health-checks)
for an overview of Healthchecks on GCP.
- See
U(https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks) for
API details on HTTP Healthchecks.
- See
U(https://cloud.google.com/compute/docs/reference/latest/httpsHealthChecks)
for more details on the HTTPS Healthcheck API.
requirements:
- "python >= 2.6"
- "google-api-python-client >= 1.6.2"
- "google-auth >= 0.9.0"
- "google-auth-httplib2 >= 0.0.2"
notes:
- Only supports HTTP and HTTPS Healthchecks currently.
author:
- "Tom Melendez (@supertom) <tom@supertom.com>"
options:
check_interval:
description:
- How often (in seconds) to send a health check.
required: false
default: 5
healthcheck_name:
description:
- Name of the Healthcheck.
required: true
healthcheck_type:
description:
- Type of Healthcheck.
required: true
choices: ["HTTP", "HTTPS"]
host_header:
description:
- The value of the host header in the health check request. If left
empty, the public IP on behalf of which this health
check is performed will be used.
required: false
default: ""
port:
description:
- The TCP port number for the health check request. The default value is
443 for HTTPS and 80 for HTTP.
required: false
request_path:
description:
- The request path of the health check request.
required: false
default: "/"
state:
description: State of the Healthcheck.
required: false
default: present
choices: ["present", "absent"]
timeout:
description:
- How long (in seconds) to wait for a response before claiming
failure. It is invalid for timeout
to have a greater value than check_interval.
required: false
default: 5
unhealthy_threshold:
description:
- A so-far healthy instance will be marked unhealthy after this
many consecutive failures.
required: false
default: 2
healthy_threshold:
description:
- A so-far unhealthy instance will be marked healthy after this
many consecutive successes.
required: false
default: 2
service_account_email:
description:
- service account email
required: false
default: null
service_account_permissions:
version_added: "2.0"
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
required: false
default: null
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
credentials_file:
description:
- Path to the JSON file associated with the service account email
default: null
required: false
project_id:
description:
- Your GCP project ID
required: false
default: null
'''
EXAMPLES = '''
- name: Create Minimum HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: my-healthcheck
healthcheck_type: HTTP
state: present
- name: Create HTTP HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: my-healthcheck
healthcheck_type: HTTP
host_header: my-host
request_path: /hc
check_interval: 10
timeout: 10
unhealthy_threshold: 2
healthy_threshold: 1
state: present
- name: Create HTTPS HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: "{{ https_healthcheck }}"
healthcheck_type: HTTPS
host_header: my-host
request_path: /hc
check_interval: 5
timeout: 5
unhealthy_threshold: 2
healthy_threshold: 1
state: present
'''
RETURN = '''
state:
description: state of the Healthcheck
returned: Always.
type: str
sample: present
healthcheck_name:
description: Name of the Healthcheck
returned: Always
type: str
sample: my-url-map
healthcheck_type:
description: Type of the Healthcheck
returned: Always
type: str
sample: HTTP
healthcheck:
description: GCP Healthcheck dictionary
returned: Always. Refer to GCP documentation for detailed field descriptions.
type: dict
sample: { "name": "my-hc", "port": 443, "requestPath": "/foo" }
'''
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import check_params, get_google_api_client, GCPUtils
USER_AGENT_PRODUCT = 'ansible-healthcheck'
USER_AGENT_VERSION = '0.0.1'
def _validate_healthcheck_params(params):
"""
Validate healthcheck params.
Simple validation has already been performed by AnsibleModule.
:param params: Ansible dictionary containing configuration.
:type params: ``dict``
:return: True or raises ValueError
:rtype: ``bool`` or `class:ValueError`
"""
if params['timeout'] > params['check_interval']:
raise ValueError("timeout (%s) is greater than check_interval (%s)" % (
params['timeout'], params['check_interval']))
return (True, '')
def _build_healthcheck_dict(params):
"""
Reformat healthcheck params from Ansible for submission to GCP.
:param params: Params from AnsibleModule object
:type params: ``dict``
:return: dictionary suitable for submission to GCP
HealthCheck (HTTP/HTTPS) API.
:rtype: ``dict``
"""
gcp_dict = GCPUtils.params_to_gcp_dict(params, 'healthcheck_name')
if 'timeout' in gcp_dict:
gcp_dict['timeoutSec'] = gcp_dict['timeout']
del gcp_dict['timeout']
if 'checkInterval' in gcp_dict:
gcp_dict['checkIntervalSec'] = gcp_dict['checkInterval']
del gcp_dict['checkInterval']
if 'hostHeader' in gcp_dict:
gcp_dict['host'] = gcp_dict['hostHeader']
del gcp_dict['hostHeader']
if 'healthcheckType' in gcp_dict:
del gcp_dict['healthcheckType']
return gcp_dict
def _get_req_resource(client, resource_type):
if resource_type == 'HTTPS':
return (client.httpsHealthChecks(), 'httpsHealthCheck')
else:
return (client.httpHealthChecks(), 'httpHealthCheck')
def get_healthcheck(client, name, project_id=None, resource_type='HTTP'):
"""
Get a Healthcheck from GCP.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param name: Name of the Healthcheck.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: A dict resp from the respective GCP 'get' request.
:rtype: ``dict``
"""
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name}
req = resource.get(**args)
return GCPUtils.execute_api_client_req(req, raise_404=False)
except:
raise
def create_healthcheck(client, params, project_id, resource_type='HTTP'):
"""
Create a new Healthcheck.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param params: Dictionary of arguments from AnsibleModule.
:type params: ``dict``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
gcp_dict = _build_healthcheck_dict(params)
try:
resource, _ = _get_req_resource(client, resource_type)
args = {'project': project_id, 'body': gcp_dict}
req = resource.insert(**args)
return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
if not return_data:
return_data = get_healthcheck(client,
name=params['healthcheck_name'],
project_id=project_id)
return (True, return_data)
except:
raise
def delete_healthcheck(client, name, project_id, resource_type='HTTP'):
"""
Delete a Healthcheck.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param name: Name of the Healthcheck.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name}
req = resource.delete(**args)
return_data = GCPUtils.execute_api_client_req(req, client)
return (True, return_data)
except:
raise
def update_healthcheck(client, healthcheck, params, name, project_id,
resource_type='HTTP'):
"""
Update a Healthcheck.
If the healthcheck has not changed, the update will not occur.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param healthcheck: Existing healthcheck dictionary as returned by GCP.
:type healthcheck: ``dict``
:param params: Dictionary of arguments from AnsibleModule.
:type params: ``dict``
:param name: Name of the Healthcheck.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
gcp_dict = _build_healthcheck_dict(params)
ans = GCPUtils.are_params_equal(healthcheck, gcp_dict)
if ans:
return (False, 'no update necessary')
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name, 'body': gcp_dict}
req = resource.update(**args)
return_data = GCPUtils.execute_api_client_req(
req, client=client, raw=False)
return (True, return_data)
except:
raise
def main():
module = AnsibleModule(argument_spec=dict(
healthcheck_name=dict(required=True),
healthcheck_type=dict(required=True,
choices=['HTTP', 'HTTPS']),
request_path=dict(required=False, default='/'),
check_interval=dict(required=False, type='int', default=5),
healthy_threshold=dict(required=False, type='int', default=2),
unhealthy_threshold=dict(required=False, type='int', default=2),
host_header=dict(required=False, type='str', default=''),
timeout=dict(required=False, type='int', default=5),
port=dict(required=False, type='int'),
state=dict(choices=['absent', 'present'], default='present'),
service_account_email=dict(),
service_account_permissions=dict(type='list'),
credentials_file=dict(),
project_id=dict(), ), )
client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
user_agent_version=USER_AGENT_VERSION)
params = {}
params['healthcheck_name'] = module.params.get('healthcheck_name')
params['healthcheck_type'] = module.params.get('healthcheck_type')
params['request_path'] = module.params.get('request_path')
params['check_interval'] = module.params.get('check_interval')
params['healthy_threshold'] = module.params.get('healthy_threshold')
params['unhealthy_threshold'] = module.params.get('unhealthy_threshold')
params['host_header'] = module.params.get('host_header')
params['timeout'] = module.params.get('timeout')
params['port'] = module.params.get('port', None)
params['state'] = module.params.get('state')
if not params['port']:
params['port'] = 80
if params['healthcheck_type'] == 'HTTPS':
params['port'] = 443
try:
_validate_healthcheck_params(params)
except Exception as e:
module.fail_json(msg=e.message, changed=False)
changed = False
json_output = {'state': params['state']}
healthcheck = get_healthcheck(client,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
if not healthcheck:
if params['state'] == 'absent':
# Doesn't exist in GCE, and state==absent.
changed = False
module.fail_json(
msg="Cannot delete unknown healthcheck: %s" %
(params['healthcheck_name']))
else:
# Create
changed, json_output['healthcheck'] = create_healthcheck(client,
params=params,
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
elif params['state'] == 'absent':
# Delete
changed, json_output['healthcheck'] = delete_healthcheck(client,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
else:
changed, json_output['healthcheck'] = update_healthcheck(client,
healthcheck=healthcheck,
params=params,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
json_output['changed'] = changed
json_output.update(params)
module.exit_json(**json_output)
if __name__ == '__main__':
main()
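# Hedged illustration (not part of the original module): a standalone sketch of
# the field renames applied in _build_healthcheck_dict() above, run after
# GCPUtils.params_to_gcp_dict has produced camelCase keys. The sample input
# below is hypothetical; only the renames mirror the code above.
def _demo_gcp_field_renames(gcp_dict):
    out = dict(gcp_dict)
    if 'timeout' in out:
        out['timeoutSec'] = out.pop('timeout')
    if 'checkInterval' in out:
        out['checkIntervalSec'] = out.pop('checkInterval')
    if 'hostHeader' in out:
        out['host'] = out.pop('hostHeader')
    out.pop('healthcheckType', None)
    return out
# _demo_gcp_field_renames({'name': 'my-hc', 'timeout': 5, 'checkInterval': 10,
#                          'hostHeader': 'my-host', 'healthcheckType': 'HTTP'})
# returns {'name': 'my-hc', 'timeoutSec': 5, 'checkIntervalSec': 10, 'host': 'my-host'}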
|
britcey/ansible
|
lib/ansible/modules/cloud/google/gcp_healthcheck.py
|
Python
|
gpl-3.0
| 16,066
|
[
"VisIt"
] |
70c48e0c7b359809f694cfd7ac2ee67c2690f6e183a41e48363f18e02ed7ebc6
|
from flask import Flask, jsonify, request, g, redirect, url_for, \
abort, render_template, flash
import pika
from lxml import etree
from lxml.builder import ElementMaker
import random
import string
DATABASE = '/tmp/flaskr.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
app = Flask(__name__)
app.config.from_object(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/admit', methods=['POST'])
def admit():
try:
fname = request.form['fname']
lname = request.form['lname']
taxonomy = request.form['taxonomy']
parameters = pika.URLParameters('amqp://guest:guest@oneillc-01-d.advisory.com:5672/BBRvhost')
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
E = ElementMaker()
HL7 = E.HL7
MESSAGE = E.Message
MESSAGEHEADER = E.MessageHeader
MESSAGECONTROLID = E.MessageControlId
MESSAGETYPE = E.MessageType
MEMBERORGID = E.MemberOrgId
PATIENTIDENTIFICATION = E.PatientIdentification
IROUNDPATIENTIDENTIFIER = E.iRoundPatientIdentifier
PATIENTNAME = E.PatientName
NAME = E.Name
FAMILYNAME = E.FamilyName
GIVENNAME = E.GivenName
VISIT = E.Visit
IROUNDVISITIDENTIFIER = E.iRoundVisitIdentifier
PATIENTCLASS = E.PatientClass
CURRENTLOCATION = E.CurrentLocation
LOCATION = E.Location
TAXONOMY = E.Taxonomy
hl7 = HL7(
MESSAGE(
MESSAGEHEADER(
MESSAGECONTROLID("".join( [random.choice(string.digits) for i in xrange(8)] ))
),
MESSAGETYPE('ADT-A01'),
MEMBERORGID('00000000-0000-0000-0000-000000000001'),
PATIENTIDENTIFICATION(
IROUNDPATIENTIDENTIFIER("".join( [random.choice(string.digits) for i in xrange(8)] )),
PATIENTNAME(
NAME(
FAMILYNAME(lname),
GIVENNAME(fname)
)
)
),
VISIT(
IROUNDVISITIDENTIFIER("".join( [random.choice(string.digits) for i in xrange(8)] )),
PATIENTCLASS('I'),
CURRENTLOCATION(
LOCATION(
TAXONOMY(taxonomy)
)
)
)
)
)
channel.basic_publish('HL7',
'HL7.StatusMapManager',
etree.tostring(hl7, pretty_print=True),
pika.BasicProperties(content_type='text/plain',
delivery_mode=1))
connection.close()
flash(fname + ' should show up momentarily', 'info')
except KeyError:
app.logger.error("Form did not contain key")
return redirect(url_for('index'))
if __name__ == "__main__":
app.run()
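# Hedged illustration (not part of the original app): a minimal sketch of the
# lxml ElementMaker pattern used in admit() above. Attribute access on the
# maker creates element factories, and nesting the calls builds the XML tree.
def _demo_elementmaker():
    E = ElementMaker()
    doc = E.HL7(E.Message(E.MessageType('ADT-A01')))
    # Serializes to <HL7><Message><MessageType>ADT-A01</MessageType></Message></HL7>
    return etree.tostring(doc, pretty_print=True)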
|
Advisory-iRound/StatusMapManager
|
app.py
|
Python
|
gpl-2.0
| 2,526
|
[
"VisIt"
] |
9eacc8c565f3917db35d5ddd50bbe2278c93c23ce722516441f0974fb9ba99ef
|
#!/usr/bin/env python
import os
import sys
sys.path.insert(1, os.path.abspath('..'))
#---------------------------------------------------------------------------------------------------
# SMT SYNTHESIS OF ARTIFICIAL NEURAL NETWORKS
#---------------------------------------------------------------------------------------------------
# This is an example of using SMT synthesis to automatically generate the weights of an artificial
# neural network (ANN). The task of the network is stated in the form of test cases, and the
# well-known XOR problem was chosen as a benchmark.
#
#---------------------------------------------------------------------------------------------------
from pysv.smt_synthesis import *
from pysv import templates
def get_hole_declarations(program_vars):
"""Helper function for creating hole declaration with a grammar blow."""
grammar_spec = "((Start Int ( (Constant Real) )))"
grammar = templates.load_gramar_from_SYGUS_spec(grammar_spec)
h1 = HoleDecl('HOLE1', grammar, program_vars, True, max_depth=4)
h2 = HoleDecl('HOLE2', grammar, program_vars, True, max_depth=4)
return {h1.id: h1, h2.id: h2}
def get_scenario_desc(code, tests, in_vars, weights_vars, local_vars, out_vars):
prog_vars = ProgramVars()
prog_vars.add_input_variables(in_vars, 'Real')
prog_vars.add_local_variables(weights_vars, 'Real')
prog_vars.add_local_variables(local_vars, 'Real')
test_cases = TestCases(tests, in_vars=in_vars, out_vars=out_vars)
return code, test_cases, prog_vars, weights_vars
def print_result(res):
print(res.decision)
if res.decision == 'sat':
print('\nSynthesized ANN:')
print(res.final_code)
# if res.decision == 'unsat':
# print('unsat core:' + str(res.unsat_core))
#------------------------------------------------------------------------
# SCENARIOS
#------------------------------------------------------------------------
def scenario_linear_1layer():
# Trying to synthesize ANN with only one linear neuron.
# Should not succeed (unsat).
# ----------------------------------------------------------
code = "out_N1 = w0 + w1 * X1 + w2 * X2"
tests_xor = [TestF([0.0, 0.0], ['out_N1 <= 0']),
TestF([0.0, 1.0], ['out_N1 >= 1']),
TestF([1.0, 0.0], ['out_N1 >= 1']),
TestF([1.0, 1.0], ['out_N1 <= 0'])]
in_vars = ['X1', 'X2']
out_vars = ['out_N1']
local_vars = ['out_N1']
weights_vars = ['w0', 'w1', 'w2']
#----------------------------------------------------------
return get_scenario_desc(code, tests_xor, in_vars, weights_vars, local_vars, out_vars)
def scenario_linear_2layers():
# Trying to synthesize ANN with linear neurons and two layers (architecture 2-1).
# Should not succeed (unsat).
# ----------------------------------------------------------
code =\
"""
# Layer 1 neurons
out_N1 = w1_0 + w1_1 * X1 + w1_2 * X2
out_N2 = w2_0 + w2_1 * X1 + w2_2 * X2
# Layer 2 neuron
out_N3 = w3_0 + w3_1 * out_N1 + w3_2 * out_N2
"""
tests_xor = [TestF([0.0, 0.0], ['out_N3 <= 0']),
TestF([0.0, 1.0], ['out_N3 >= 1']),
TestF([1.0, 0.0], ['out_N3 >= 1']),
TestF([1.0, 1.0], ['out_N3 <= 0'])]
in_vars = ['X1', 'X2']
out_vars = ['out_N3']
local_vars = ['out_N1', 'out_N2', 'out_N3']
weights_vars = ['w1_0', 'w1_1', 'w1_2',
'w2_0', 'w2_1', 'w2_2',
'w3_0', 'w3_1', 'w3_2']
#----------------------------------------------------------
return get_scenario_desc(code, tests_xor, in_vars, weights_vars, local_vars, out_vars)
def scenario_binary_1layer():
# Trying to synthesize ANN with only one neuron with binary activation function.
# Should not succeed (unsat).
# ----------------------------------------------------------
code =\
"""
# Layer 1 neurons
sum_N1 = w1_0 + w1_1 * X1 + w1_2 * X2
if sum_N1 >= 0:
out_N1 = 1
else:
out_N1 = 0
"""
tests_xor = [Test([0.0, 0.0], [0.0]),
Test([0.0, 1.0], [1.0]),
Test([1.0, 0.0], [1.0]),
Test([1.0, 1.0], [0.0])]
in_vars = ['X1', 'X2']
out_vars = ['out_N1']
local_vars = ['out_N1', 'sum_N1']
weights_vars = ['w1_0', 'w1_1', 'w1_2',
'w2_0', 'w2_1', 'w2_2',
'w3_0', 'w3_1', 'w3_2']
#----------------------------------------------------------
return get_scenario_desc(code, tests_xor, in_vars, weights_vars, local_vars, out_vars)
def scenario_binary_2layers():
# Trying to synthesize ANN with neurons with binary activation function and two layers (architecture 2-1).
# Should succeed (sat).
# ----------------------------------------------------------
code =\
"""
# Layer 1 neurons
sum_N1 = w1_0 + w1_1 * X1 + w1_2 * X2
if sum_N1 >= 0:
out_N1 = 1
else:
out_N1 = 0
sum_N2 = w2_0 + w2_1 * X1 + w2_2 * X2
if sum_N2 >= 0:
out_N2 = 1
else:
out_N2 = 0
# Layer 2 neuron
sum_N3 = w3_0 + w3_1 * out_N1 + w3_2 * out_N2
if sum_N3 >= 0:
out_N3 = 1
else:
out_N3 = 0
"""
tests_xor = [Test([0.0, 0.0], [0.0]),
Test([0.0, 1.0], [1.0]),
Test([1.0, 0.0], [1.0]),
Test([1.0, 1.0], [0.0])]
in_vars = ['X1', 'X2']
out_vars = ['out_N3']
local_vars = ['out_N1', 'out_N2', 'out_N3', 'sum_N1', 'sum_N2', 'sum_N3']
weights_vars = ['w1_0', 'w1_1', 'w1_2',
'w2_0', 'w2_1', 'w2_2',
'w3_0', 'w3_1', 'w3_2']
#----------------------------------------------------------
return get_scenario_desc(code, tests_xor, in_vars, weights_vars, local_vars, out_vars)
def scenario_sigmoid_1layer():
# Trying to synthesize ANN with only one neuron with binary sigmoid (logistic) activation function.
# This cannot be realized by Z3, because it does not support the exponential function.
# ----------------------------------------------------------
code =\
"""
# Layer 1 neurons
sum_N1 = w1_0 + w1_1 * X1 + w1_2 * X2
out_N1 = 1 / (1 + 2.72 ** (-sum_N1))
"""
tests_xor = [TestF([0.0, 0.0], ['out_N1 <= 0.5']),
TestF([0.0, 1.0], ['out_N1 >= 0.5']),
TestF([1.0, 0.0], ['out_N1 >= 0.5']),
TestF([1.0, 1.0], ['out_N1 <= 0.5'])]
in_vars = ['X1', 'X2']
out_vars = ['out_N1']
local_vars = ['out_N1', 'sum_N1']
weights_vars = ['w1_0', 'w1_1', 'w1_2']
#----------------------------------------------------------
return get_scenario_desc(code, tests_xor, in_vars, weights_vars, local_vars, out_vars)
#------------------------------------------------------------------------
# MAIN
#------------------------------------------------------------------------
env = utils.Options({'--solver':'z3', '--logic':'QF_NRA', '--synth_substitute_free':1,
'--produce_unsat_core':0, '--silent':1})
print('----------------------------------------------')
print("ANN: linear activation function, 1 layer")
print('----------------------------------------------')
code, test_cases, prog_vars, free_vars = scenario_linear_1layer()
res = synthesize_tc(test_cases, code, 'True', 'True', prog_vars, env, free_vars=free_vars)
print_result(res)
print('\n\n\n')
print('----------------------------------------------')
print("ANN: linear activation function, 2 layers")
print('----------------------------------------------')
code, test_cases, prog_vars, free_vars = scenario_linear_2layers()
res = synthesize_tc(test_cases, code, 'True', 'True', prog_vars, env, free_vars=free_vars)
print_result(res)
print('\n\n\n')
print('----------------------------------------------')
print("ANN: binary activation function, 1 layer")
print('----------------------------------------------')
code, test_cases, prog_vars, free_vars = scenario_binary_1layer()
res = synthesize_tc(test_cases, code, 'True', 'True', prog_vars, env, free_vars=free_vars)
print_result(res)
print('\n\n\n')
print('----------------------------------------------')
print("ANN: binary activation function, 2 layers")
print('----------------------------------------------')
code, test_cases, prog_vars, free_vars = scenario_binary_2layers()
res = synthesize_tc(test_cases, code, 'True', 'True', prog_vars, env, free_vars=free_vars)
print_result(res)
# print('\n\n\n')
# print('----------------------------------------------')
# print("ANN: sigmoid (logistic) activation function, 1 layer")
# print('----------------------------------------------')
#
# code, test_cases, prog_vars, free_vars = scenario_sigmoid_1layer()
# res = synthesize_test_cases(test_cases, code, 'True', 'True', prog_vars, env, free_vars=free_vars)
#
# print_result(res)
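#------------------------------------------------------------------------
# Hedged illustration (not produced by the synthesizer above): one concrete
# weight assignment that satisfies scenario_binary_2layers(). N1 acts as OR,
# N2 as NAND and N3 as AND, so the network computes XOR; checking it by hand
# is a useful sanity test of the test cases themselves.
#------------------------------------------------------------------------
def _demo_check_xor_weights():
    weights = {'w1_0': -0.5, 'w1_1': 1.0, 'w1_2': 1.0,   # N1: OR(X1, X2)
               'w2_0': 1.5, 'w2_1': -1.0, 'w2_2': -1.0,  # N2: NAND(X1, X2)
               'w3_0': -1.5, 'w3_1': 1.0, 'w3_2': 1.0}   # N3: AND(out_N1, out_N2)
    step = lambda s: 1.0 if s >= 0 else 0.0
    for (x1, x2), expected in [((0.0, 0.0), 0.0), ((0.0, 1.0), 1.0),
                               ((1.0, 0.0), 1.0), ((1.0, 1.0), 0.0)]:
        out1 = step(weights['w1_0'] + weights['w1_1'] * x1 + weights['w1_2'] * x2)
        out2 = step(weights['w2_0'] + weights['w2_1'] * x1 + weights['w2_2'] * x2)
        out3 = step(weights['w3_0'] + weights['w3_1'] * out1 + weights['w3_2'] * out2)
        assert out3 == expected
_demo_check_xor_weights()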
|
iwob/pysv
|
examples/synth_ann_xor.py
|
Python
|
mit
| 8,840
|
[
"NEURON"
] |
6930f67ff32ff05b0b94c8878cf0eb085d5a3b718aa077e6ec73b39b5bcba919
|
import datetime
import json
from sqlalchemy import orm
from DIRAC.DataManagementSystem.Client.FTS3Job import FTS3Job
from DIRAC.DataManagementSystem.private import FTS3Utilities
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.DataManagementSystem.Client.FTS3File import FTS3File
from DIRAC.DataManagementSystem.private.FTS3Utilities import FTS3Serializable
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.Client.Operation import Operation as rmsOperation
from DIRAC.RequestManagementSystem.Client.File import File as rmsFile
from DIRAC.RequestManagementSystem.Client.Request import Request as rmsRequest
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
class FTS3Operation(FTS3Serializable):
""" Abstract class to represent an operation to be executed by FTS. It is a
container for FTSFiles, as well as for FTSJobs.
There can be a mapping between one FTS3Operation and one RMS Operation.
The FTS3Operation takes care of generating the appropriate FTSJobs,
and to perform a callback when the work with FTS is over. The actual
generation and callback depend on the subclass.
This class should not be instantiated directly, but rather through one of its
subclasses.
"""
ALL_STATES = ['Active', # Default state until FTS has done everything
'Processed', # Interactions with FTS done, but callback not done
'Finished', # Everything was done
'Canceled', # Canceled by the user
'Failed', # I don't know yet
]
FINAL_STATES = ['Finished', 'Canceled', 'Failed']
INIT_STATE = 'Active'
_attrToSerialize = ['operationID', 'username', 'userGroup', 'rmsReqID', 'rmsOpID',
'sourceSEs', 'ftsFiles', 'activity', 'priority',
'ftsJobs', 'creationTime', 'lastUpdate', 'error', 'status']
def __init__(self, ftsFiles=None, username=None, userGroup=None, rmsReqID=-1,
rmsOpID=0, sourceSEs=None, activity=None, priority=None):
"""
:param ftsFiles: list of FTS3Files object that belongs to the operation
:param username: username whose proxy should be used
:param userGroup: group that should be used with username
:param rmsReqID: ID of the Request in the RMS system
:param rmsOpID: ID of the Operation in the RMS system
:param sourceSEs: list of SE to be used as source (if applicable)
:param activity: FTS activity to use
:param priority: FTS priority to use
"""
############################
# persistent attributes
self.username = username
self.userGroup = userGroup
self.rmsReqID = rmsReqID
self.rmsOpID = rmsOpID
if isinstance(sourceSEs, list):
sourceSEs = ','.join(sourceSEs)
self.sourceSEs = sourceSEs
self.ftsFiles = ftsFiles if ftsFiles else []
self.activity = activity
self.priority = priority
self.ftsJobs = []
now = datetime.datetime.utcnow().replace(microsecond=0)
self.creationTime = now
self.lastUpdate = now
self.error = None
self.status = FTS3Operation.INIT_STATE
########################
self.reqClient = None
self.dManager = None
self._log = None
self.init_on_load()
@orm.reconstructor
def init_on_load(self):
""" This method initializes some attributes.
It is called by sqlalchemy (which does not call __init__)
"""
self._vo = None
self.dManager = DataManager()
self.rssClient = ResourceStatus()
opID = getattr(self, 'operationID', None)
loggerName = '%s/' % opID if opID else ''
loggerName += 'req_%s/op_%s' % (self.rmsReqID, self.rmsOpID)
self._log = gLogger.getSubLogger(loggerName, True)
@property
def vo(self):
""":returns: return vo of the usergroup """
if self._vo:
return self._vo
if self.userGroup:
self._vo = getVOForGroup(self.userGroup)
return self._vo
def isTotallyProcessed(self):
""" Returns True if and only if there is nothing
else to be done by FTS for this operation.
All files are successful or definitely failed
"""
if self.status == 'Processed':
return True
fileStatuses = set([f.status for f in self.ftsFiles])
# If all the files are in a final state
if fileStatuses <= set(FTS3File.FINAL_STATES):
self.status = 'Processed'
return True
return False
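# Hedged note (illustrative, not part of the original class): the '<=' above is
# Python's set subset test. Once every FTS3File has reached a final state, e.g.
# fileStatuses == {'Finished'}, fileStatuses <= set(FTS3File.FINAL_STATES) holds
# and the operation is flagged 'Processed'; a single file still in a non-final
# state such as 'New' keeps the test False.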
def _getFilesToSubmit(self, maxAttemptsPerFile=10):
""" Return the list of FTS3files that can be submitted
Either because they never were submitted, or because
we can make more attempts
:param maxAttemptsPerFile: the maximum number of attempts to be tried for a file
:return List of FTS3File to submit
"""
toSubmit = []
for ftsFile in self.ftsFiles:
if ftsFile.attempt >= maxAttemptsPerFile:
ftsFile.status = 'Defunct'
# The file was never submitted or
# The file failed from the point of view of FTS
# but no more than the maxAttemptsPerFile
elif ftsFile.status in ('New', 'Failed'):
toSubmit.append(ftsFile)
return toSubmit
@staticmethod
def _checkSEAccess(seName, accessType, vo=None):
"""Check the Status of a storage element
:param seName: name of the StorageElement
:param accessType: ReadAccess, WriteAccess, CheckAccess or RemoveAccess
:return: S_ERROR if access is not allowed or on error, S_OK() otherwise
"""
# Check that the target is writable
# access = self.rssClient.getStorageElementStatus( seName, accessType )
# if not access["OK"]:
# return access
# if access["Value"][seName][accessType] not in ( "Active", "Degraded" ):
# return S_ERROR( "%s does not have %s in Active or Degraded" % ( seName, accessType ) )
status = StorageElement(seName, vo=vo).getStatus()
if not status['OK']:
return status
status = status['Value']
accessType = accessType.replace('Access', '')
if not status[accessType]:
return S_ERROR("%s does not have %s in Active or Degraded" % (seName, accessType))
return S_OK()
def _createNewJob(self, jobType, ftsFiles, targetSE, sourceSE=None):
""" Create a new FTS3Job object
:param jobType: type of job to create (Transfer, Staging, Removal)
:param ftsFiles: list of FTS3File objects the job has to work on
:param targetSE: SE on which to operate
:param sourceSE: source SE, only useful for Transfer jobs
:return FTS3Job object
"""
newJob = FTS3Job()
newJob.type = jobType
newJob.sourceSE = sourceSE
newJob.targetSE = targetSE
newJob.activity = self.activity
newJob.priority = self.priority
newJob.username = self.username
newJob.userGroup = self.userGroup
newJob.vo = self.vo
newJob.filesToSubmit = ftsFiles
newJob.operationID = getattr(self, 'operationID')
return newJob
def _callback(self):
"""Actually performs the callback
"""
raise NotImplementedError("You should not be using the base class")
def callback(self):
""" Trigger the callback once all the FTS interactions are done
and update the status of the Operation to 'Finished' if successful
"""
self.reqClient = ReqClient()
res = self._callback()
if res['OK']:
self.status = 'Finished'
return res
def prepareNewJobs(self, maxFilesPerJob=100, maxAttemptsPerFile=10):
""" Prepare the new jobs that have to be submitted
:param maxFilesPerJob: maximum number of files assigned to a job
:param maxAttemptsPerFile: maximum number of retry after an fts failure
:return list of jobs
"""
raise NotImplementedError("You should not be using the base class")
def _updateRmsOperationStatus(self):
""" Update the status of the Files in the rms operation
:return: S_OK with a dict:
* request: rms Request object
* operation: rms Operation object
* ftsFilesByTarget: dict {SE: [ftsFiles that were successful]}
"""
log = self._log.getSubLogger("_updateRmsOperationStatus/%s/%s" %
(getattr(self, 'operationID'), self.rmsReqID), child=True)
res = self.reqClient.getRequest(self.rmsReqID)
if not res['OK']:
return res
request = res['Value']
res = request.getWaiting()
if not res["OK"]:
log.error("Unable to find 'Scheduled' operation in request")
res = self.reqClient.putRequest(request, useFailoverProxy=False, retryMainService=3)
if not res['OK']:
log.error("Could not put back the request !", res['Message'])
return S_ERROR("Could not find scheduled operation")
operation = res['Value']
# We index the files of the operation by their IDs
rmsFileIDs = {}
for opFile in operation:
rmsFileIDs[opFile.FileID] = opFile
# Files that failed to transfer
defunctRmsFileIDs = set()
# { SE : [FTS3Files] }
ftsFilesByTarget = {}
for ftsFile in self.ftsFiles:
if ftsFile.status == 'Defunct':
log.info(
"File failed to transfer, setting it to failed in RMS", "%s %s" %
(ftsFile.lfn, ftsFile.targetSE))
defunctRmsFileIDs.add(ftsFile.rmsFileID)
continue
if ftsFile.status == 'Canceled':
log.info(
"File canceled, setting it Failed in RMS", "%s %s" %
(ftsFile.lfn, ftsFile.targetSE))
defunctRmsFileIDs.add(ftsFile.rmsFileID)
continue
# SHOULD NEVER HAPPEN !
if ftsFile.status != 'Finished':
log.error(
"Callback called with file in non terminal state", "%s %s" %
(ftsFile.lfn, ftsFile.targetSE))
res = self.reqClient.putRequest(request, useFailoverProxy=False, retryMainService=3)
if not res['OK']:
log.error("Could not put back the request !", res['Message'])
return S_ERROR("Callback called with file in non terminal state")
ftsFilesByTarget.setdefault(ftsFile.targetSE, []).append(ftsFile)
# Now, we set the rmsFile as done in the operation, providing
# that they are not in the defunctFiles.
# We cannot do this in the previous list because in the FTS system,
# each destination is a separate line in the DB but not in the RMS
for ftsFile in self.ftsFiles:
opFile = rmsFileIDs[ftsFile.rmsFileID]
opFile.Status = 'Failed' if ftsFile.rmsFileID in defunctRmsFileIDs else 'Done'
return S_OK({'request': request, 'operation': operation, 'ftsFilesByTarget': ftsFilesByTarget})
@classmethod
def fromRMSObjects(cls, rmsReq, rmsOp, username):
""" Construct an FTS3Operation object from the RMS Request and Operation corresponding.
The attributes taken are the OwnerGroup, Request and Operation IDS, sourceSE,
and activity and priority if they are defined in the Argument field of the operation
:param rmsReq: RMS Request object
:param rmsOp: RMS Operation object
:param username: username to which associate the FTS3Operation (normally comes from the Req OwnerDN)
:returns: FTS3Operation object
"""
ftsOp = cls()
ftsOp.username = username
ftsOp.userGroup = rmsReq.OwnerGroup
ftsOp.rmsReqID = rmsReq.RequestID
ftsOp.rmsOpID = rmsOp.OperationID
ftsOp.sourceSEs = rmsOp.SourceSE
try:
argumentDic = json.loads(rmsOp.Arguments)
ftsOp.activity = argumentDic['activity']
ftsOp.priority = argumentDic['priority']
except Exception as _e:
pass
return ftsOp
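  # Illustrative note (not part of the original code): fromRMSObjects above
  # expects rmsOp.Arguments to be a JSON object of the form
  #   '{"activity": "Data Consolidation", "priority": 3}'
  # (values here are made up); a missing or malformed Arguments field is
  # silently ignored and the operation keeps its default activity/priority.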
class FTS3TransferOperation(FTS3Operation):
""" Class to be used for a Replication operation
"""
def prepareNewJobs(self, maxFilesPerJob=100, maxAttemptsPerFile=10):
log = self._log.getSubLogger("_prepareNewJobs", child=True)
filesToSubmit = self._getFilesToSubmit(maxAttemptsPerFile=maxAttemptsPerFile)
log.debug("%s ftsFiles to submit" % len(filesToSubmit))
newJobs = []
# {targetSE : [FTS3Files] }
res = FTS3Utilities.groupFilesByTarget(filesToSubmit)
if not res['OK']:
return res
filesGroupedByTarget = res['Value']
for targetSE, ftsFiles in filesGroupedByTarget.iteritems():
res = self._checkSEAccess(targetSE, 'WriteAccess', vo=self.vo)
if not res['OK']:
log.error(res)
for ftsFile in ftsFiles:
ftsFile.attempt += 1
continue
sourceSEs = self.sourceSEs.split(',') if self.sourceSEs is not None else []
# { sourceSE : [FTSFiles] }
res = FTS3Utilities.selectUniqueRandomSource(ftsFiles, allowedSources=sourceSEs)
if not res['OK']:
return res
uniqueTransfersBySource = res['Value']
# We don't need to check the source, since it is already filtered by the DataManager
for sourceSE, ftsFiles in uniqueTransfersBySource.iteritems():
for ftsFilesChunk in breakListIntoChunks(ftsFiles, maxFilesPerJob):
newJob = self._createNewJob('Transfer', ftsFilesChunk, targetSE, sourceSE=sourceSE)
newJobs.append(newJob)
return S_OK(newJobs)
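  # Illustrative data shapes for the grouping above (not from the original
  # comments; the SE names are made up):
  #   filesGroupedByTarget    : { 'CERN-DST'  : [ftsFileA, ftsFileB, ...] }
  #   uniqueTransfersBySource : { 'RAL-DST'   : [ftsFileA, ...],
  #                               'IN2P3-DST' : [ftsFileB, ...] }
  # Each (sourceSE, targetSE) pair is then split into chunks of at most
  # maxFilesPerJob files, and one 'Transfer' FTS3Job is created per chunk.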
def _callback(self):
"""" After a Transfer operation, we have to update the matching Request in the
RMS, and add the registration operation just before the ReplicateAndRegister one
NOTE: we don't use ReqProxy when putting the request back to avoid operational hell
"""
log = self._log.getSubLogger("callback", child=True)
# In case there is no Request associated to the Transfer
# we do not do the callback. Not really advised, but there is a feature
# request to use the FTS3 system without RMS
if self.rmsReqID == -1:
return S_OK()
# Now we check the status of the Request.
# in principle, it should be scheduled
res = self.reqClient.getRequestStatus(self.rmsReqID)
if not res['OK']:
log.error("Could not get request status", res)
return res
status = res['Value']
# If it is not scheduled, something went wrong
# and we will not modify it
if status != 'Scheduled':
# If the Request is in a final state, just leave it,
# and we consider our job done.
# (typically happens when the callback had already been done but not persisted to the FTS3DB)
if status in rmsRequest.FINAL_STATES:
log.warn(
"Request with id %s is not Scheduled (%s), but okay it is in a Final State" %
(self.rmsReqID, status))
return S_OK()
# If the Request is not in a final state, then something really wrong is going on,
# and we do not do anything, keep ourselves pending
else:
return S_ERROR("Request with id %s is not Scheduled:%s" % (self.rmsReqID, status))
res = self._updateRmsOperationStatus()
if not res['OK']:
return res
ftsFilesByTarget = res['Value']['ftsFilesByTarget']
request = res['Value']['request']
operation = res['Value']['operation']
log.info("will create %s 'RegisterReplica' operations" % len(ftsFilesByTarget))
for target, ftsFileList in ftsFilesByTarget.iteritems():
log.info(
"creating 'RegisterReplica' operation for targetSE %s with %s files..." %
(target, len(ftsFileList)))
registerOperation = rmsOperation()
registerOperation.Type = "RegisterReplica"
registerOperation.Status = "Waiting"
registerOperation.TargetSE = target
if operation.Catalog:
registerOperation.Catalog = operation.Catalog
targetSE = StorageElement(target, vo=self.vo)
for ftsFile in ftsFileList:
opFile = rmsFile()
opFile.LFN = ftsFile.lfn
opFile.Checksum = ftsFile.checksum
# TODO: are we really ever going to change type... ?
opFile.ChecksumType = 'ADLER32'
opFile.Size = ftsFile.size
res = returnSingleResult(targetSE.getURL(ftsFile.lfn, protocol='srm'))
# This should never happen !
if not res["OK"]:
log.error("Could not get url", res['Message'])
continue
opFile.PFN = res["Value"]
registerOperation.addFile(opFile)
request.insertBefore(registerOperation, operation)
return self.reqClient.putRequest(request, useFailoverProxy=False, retryMainService=3)
class FTS3StagingOperation(FTS3Operation):
""" Class to be used for a Staging operation
"""
def prepareNewJobs(self, maxFilesPerJob=100, maxAttemptsPerFile=10):
log = gLogger.getSubLogger("_prepareNewJobs", child=True)
filesToSubmit = self._getFilesToSubmit(maxAttemptsPerFile=maxAttemptsPerFile)
log.debug("%s ftsFiles to submit" % len(filesToSubmit))
newJobs = []
# {targetSE : [FTS3Files] }
    res = FTS3Utilities.groupFilesByTarget(filesToSubmit)
    if not res['OK']:
      return res
    filesGroupedByTarget = res['Value']
for targetSE, ftsFiles in filesGroupedByTarget.iteritems():
res = self._checkSEAccess(targetSE, 'ReadAccess', vo=self.vo)
if not res['OK']:
log.error(res)
continue
for ftsFilesChunk in breakListIntoChunks(ftsFiles, maxFilesPerJob):
newJob = self._createNewJob('Staging', ftsFilesChunk, targetSE, sourceSE=targetSE)
newJobs.append(newJob)
return S_OK(newJobs)
def _callback(self):
"""" After a Staging operation, we have to update the matching Request in the
RMS, and nothing more. If a callback is to be performed, it will be the next
operation in the request, and put by the caller
NOTE: we don't use ReqProxy when putting the request back to avoid operational hell
"""
res = self._updateRmsOperationStatus()
if not res['OK']:
return res
request = res['Value']['request']
return self.reqClient.putRequest(request, useFailoverProxy=False, retryMainService=3)
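# Illustrative sketch (not part of the original module): prepareNewJobs() is
# normally driven by the FTS3Agent; for an operation loaded from the FTS3DB it
# boils down to something like the helper below. Parameter values are examples.
def _examplePrepareJobs(operation):
  """ Return the list of new FTS3Job objects prepared for one operation. """
  res = operation.prepareNewJobs(maxFilesPerJob=100, maxAttemptsPerFile=10)
  if not res['OK']:
    return []
  return res['Value']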
|
arrabito/DIRAC
|
DataManagementSystem/Client/FTS3Operation.py
|
Python
|
gpl-3.0
| 18,250
|
[
"DIRAC"
] |
e2e37c0cac556944a6ded729fc38cdeb77eb04c10c90980f914bc85d06bf614f
|
# Copyright (c) 2012 Luke McCarthy <luke@iogopro.co.uk>
#
# This is free software released under the MIT license.
# See COPYING file for details, or visit:
# http://www.opensource.org/licenses/mit-license.php
#
# The file is part of FSMonitor, a file-system monitoring library.
# https://github.com/shaurz/fsmonitor
import sys, os, time, threading, errno
from .common import FSEvent, FSMonitorError
def get_dir_contents(path):
return [(filename, os.stat(os.path.join(path, filename)))
for filename in os.listdir(path)]
class FSMonitorDirWatch(object):
def __init__(self, path, flags, user):
self.path = path
self.flags = flags
self.user = user
self.enabled = True
self._timestamp = time.time()
try:
self._contents = get_dir_contents(path)
self._deleted = False
except OSError as e:
self._contents = []
self._deleted = (e.errno == errno.ENOENT)
def __repr__(self):
return "<FSMonitorDirWatch %r>" % self.path
@classmethod
def new_state(cls, path):
return [(filename, os.stat(os.path.join(path, filename)))
for filename in os.listdir(path)]
def getstate(self):
return self._contents
def delstate(self):
self._contents = []
self._deleted = True
def setstate(self, state):
self._contents = state
self._deleted = False
state = property(getstate, setstate, delstate)
class FSMonitorFileWatch(object):
def __init__(self, path, flags, user):
self.path = path
self.flags = flags
self.user = user
self.enabled = True
self._timestamp = time.time()
try:
self._stat = os.stat(path)
self._deleted = False
except OSError as e:
self._stat = None
self._deleted = (e.errno == errno.ENOENT)
def __repr__(self):
return "<FSMonitorFileWatch %r>" % self.path
@classmethod
def new_state(cls, path):
return os.stat(path)
def getstate(self):
return self._stat
def delstate(self):
self._stat = None
self._deleted = True
def setstate(self, state):
self._stat = state
self._deleted = False
state = property(getstate, setstate, delstate)
class FSMonitorWatch(object):
def __init__(self, path, flags, user):
self.path = path
self.flags = flags
self.user = user
self.enabled = True
self._timestamp = time.time()
try:
self._contents = get_dir_contents(path)
self._deleted = False
except OSError as e:
self._contents = []
self._deleted = (e.errno == errno.ENOENT)
def __repr__(self):
return "<FSMonitorWatch %r>" % self.path
def _compare_contents(watch, new_contents, events_out, before):
name_to_new_stat = dict(new_contents)
for name, old_stat in watch._contents:
new_stat = name_to_new_stat.get(name)
if new_stat:
_compare_stat(watch, new_stat, events_out, before, old_stat, name)
else:
events_out.append(FSEvent(watch, FSEvent.Delete, name))
old_names = frozenset(x[0] for x in watch._contents)
for name, new_stat in new_contents:
if name not in old_names:
events_out.append(FSEvent(watch, FSEvent.Create, name))
def _compare_stat(watch, new_stat, events_out, before, old_stat, filename):
if new_stat.st_atime != old_stat.st_atime and new_stat.st_atime < before:
events_out.append(FSEvent(watch, FSEvent.Access, filename))
if new_stat.st_mtime != old_stat.st_mtime:
events_out.append(FSEvent(watch, FSEvent.Modify, filename))
def round_fs_resolution(t):
if sys.platform == "win32":
return t // 2 * 2
else:
return t // 1
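# Illustrative note (not in the original source): on win32 timestamps are
# rounded down to an even number of seconds, e.g. round_fs_resolution(1303.9)
# == 1302.0, to match the coarse (2-second) mtime resolution of FAT-style
# filesystems; on other platforms they are rounded down to whole seconds.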
class FSMonitor(object):
def __init__(self):
self.__lock = threading.Lock()
self.__dir_watches = set()
self.__file_watches = set()
self.polling_interval = 0.5
@property
def watches(self):
with self.__lock:
return list(self.__dir_watches) + list(self.__file_watches)
def add_dir_watch(self, path, flags=FSEvent.All, user=None):
watch = FSMonitorDirWatch(path, flags, user)
with self.__lock:
self.__dir_watches.add(watch)
return watch
def add_file_watch(self, path, flags=FSEvent.All, user=None):
watch = FSMonitorFileWatch(path, flags, user)
with self.__lock:
self.__file_watches.add(watch)
return watch
def remove_watch(self, watch):
with self.__lock:
if watch in self.__dir_watches:
self.__dir_watches.discard(watch)
elif watch in self.__file_watches:
self.__file_watches.discard(watch)
def remove_all_watches(self):
with self.__lock:
self.__dir_watches.clear()
self.__file_watches.clear()
def enable_watch(self, watch, enable=True):
watch.enabled = enable
def disable_watch(self, watch):
watch.enabled = False
def read_events(self, timeout=None):
now = start_time = time.time()
watches = self.watches
watches.sort(key=lambda watch: abs(now - watch._timestamp), reverse=True)
events = []
for watch in watches:
now = time.time()
if watch._timestamp < now:
tdiff = now - watch._timestamp
if tdiff < self.polling_interval:
time.sleep(self.polling_interval - tdiff)
watch._timestamp = now
if not watch.enabled:
continue
before = round_fs_resolution(time.time())
try:
new_state = watch.new_state(watch.path)
except OSError as e:
if e.errno == errno.ENOENT:
if not watch._deleted:
del watch.state
events.append(FSEvent(watch, FSEvent.DeleteSelf))
else:
if isinstance(watch, FSMonitorDirWatch):
_compare_contents(watch, new_state, events, before)
elif isinstance(watch, FSMonitorFileWatch):
_compare_stat(watch, new_state, events, before,
watch.state, watch.path)
watch.state = new_state
return events
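# Illustrative helper (not part of the original library): a minimal example of
# the add_dir_watch()/read_events() polling flow for a single pass over one
# directory; the caller supplies whatever path it wants to watch.
def example_poll_once(path):
    monitor = FSMonitor()
    monitor.add_dir_watch(path)
    return monitor.read_events()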
|
shaurz/fsmonitor
|
fsmonitor/polling.py
|
Python
|
mit
| 6,513
|
[
"VisIt"
] |
a4d4c414c8c7d9333cc16686a6f3cb6b5d12d1383a5c959c3ead86498dc1a4ae
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2007-2009 Brian G. Matherly
# Copyright (C) 2009-2010 Benny Malengier <benny.malengier@gramps-project.org>
# Copyright (C) 2010 Peter Landgren
# Copyright (C) 2011 Adam Stein <adam@csh.rit.edu>
# Copyright (C) 2012,2017 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
ASCII document generator.
"""
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import DOCGEN_OPTIONS
from gramps.gen.errors import ReportError
from gramps.gen.plug.docgen import (BaseDoc, TextDoc,
PARA_ALIGN_RIGHT, PARA_ALIGN_CENTER)
from gramps.gen.plug.menu import NumberOption
from gramps.gen.plug.report import DocOptions
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
LEFT, RIGHT, CENTER = 'LEFT', 'RIGHT', 'CENTER'
#------------------------------------------------------------------------
#
# This routine was written by David Mertz and placed into the public
# domain. It is sample code from his book, "Text Processing in Python"
#
# Modified by Alex Roitman: right-pad with spaces, if right_pad==1;
# return empty string if no text was given
# Another argument: "first" is the first line indent in characters
# _relative_ to the "left" margin. It can be negative!
#
#------------------------------------------------------------------------
def reformat_para(para='', left=0, right=72, just=LEFT, right_pad=0, first=0):
if not para.strip():
return "\n"
lines = []
real_left = left+first
alllines = para.split('\n')
for realline in alllines:
words = realline.split()
line = ''
word = 0
end_words = 0
while not end_words:
if not words:
lines.append("\n")
break
if len(words[word]) > right-real_left: # Handle very long words
line = words[word]
word += 1
if word >= len(words):
end_words = 1
else: # Compose line of words
while len(line)+len(words[word]) <= right-real_left:
line += words[word]
word += 1
if word >= len(words):
end_words = 1
break
elif len(line) < right-real_left:
line += ' ' # add a space since there is still room
lines.append(line)
#first line finished, discard first
real_left = left
line = ''
if just == CENTER:
if right_pad:
return '\n'.join(
[' '*(left+first) + ln.center(right-left-first)
for ln in lines[0:1]] +
[' '*left + ln.center(right-left) for ln in lines[1:]]
)
else:
return '\n'.join(
[' '*(left+first) + ln.center(right-left-first).rstrip()
for ln in lines[0:1]] +
[' '*left + ln.center(right-left).rstrip()
for ln in lines[1:]]
)
elif just == RIGHT:
if right_pad:
return '\n'.join([line.rjust(right) for line in lines])
else:
return '\n'.join([line.rjust(right).rstrip() for line in lines])
else: # left justify
if right_pad:
return '\n'.join(
[' '*(left+first) + line.ljust(right-left-first)
for line in lines[0:1]] +
[' '*left + line.ljust(right-left) for line in lines[1:]]
)
else:
return '\n'.join(
[' '*(left+first) + line for line in lines[0:1]] +
[' '*left + line for line in lines[1:]]
)
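# Illustrative example (not part of the original comments): with a 7-character
# wide, left-justified layout the function wraps on word boundaries, e.g.
#   reformat_para('aaa bbb ccc', left=0, right=7, just=LEFT)
# returns 'aaa bbb\nccc'.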
#------------------------------------------------------------------------
#
# Ascii
#
#------------------------------------------------------------------------
class AsciiDoc(BaseDoc, TextDoc):
"""
ASCII document generator.
"""
def __init__(self, styles, paper_style, options=None, uistate=None):
BaseDoc.__init__(self, styles, paper_style, uistate=uistate)
self.__note_format = False
self._cpl = 72 # characters per line, in case the options are ignored
if options:
menu = options.menu
self._cpl = menu.get_option_by_name('linechars').get_value()
self.file = None
self.filename = ''
self.text = ''
self.para = None
self.leader = None
self.tbl_style = None
self.in_cell = None
self.ncols = 0
self.column_order = []
self.cellpars = []
self.cell_lines = []
self.cell_widths = []
self.cellnum = -1
self.maxlines = 0
#--------------------------------------------------------------------
#
# Opens the file, resets the text buffer.
#
#--------------------------------------------------------------------
def open(self, filename):
if filename[-4:] != ".txt":
self.filename = filename + ".txt"
else:
self.filename = filename
try:
self.file = open(self.filename, "w", errors='backslashreplace')
except Exception as msg:
raise ReportError(_("Could not create %s") % self.filename, msg)
self.in_cell = 0
self.text = ""
#--------------------------------------------------------------------
#
# Close the file. Call the app if required.
#
#--------------------------------------------------------------------
def close(self):
self.file.close()
def get_usable_width(self):
"""
Return the usable width of the document in characters.
"""
return self._cpl
#--------------------------------------------------------------------
#
# Force a section page break
#
#--------------------------------------------------------------------
def page_break(self):
self.file.write('\012')
def start_bold(self):
pass
def end_bold(self):
pass
def start_superscript(self):
self.text = self.text + '['
def end_superscript(self):
self.text = self.text + ']'
#--------------------------------------------------------------------
#
# Starts a paragraph.
#
#--------------------------------------------------------------------
def start_paragraph(self, style_name, leader=None):
styles = self.get_style_sheet()
self.para = styles.get_paragraph_style(style_name)
self.leader = leader
#--------------------------------------------------------------------
#
# End a paragraph. First format it to the desired widths.
# If not in table cell, write it immediately. If in the cell,
# add it to the list for this cell after formatting.
#
#--------------------------------------------------------------------
def end_paragraph(self):
if self.para.get_alignment() == PARA_ALIGN_RIGHT:
fmt = RIGHT
elif self.para.get_alignment() == PARA_ALIGN_CENTER:
fmt = CENTER
else:
fmt = LEFT
if self.in_cell:
right = self.cell_widths[self.cellnum]
else:
right = self.get_usable_width()
# Compute indents in characters. Keep first_indent relative!
regular_indent = 0
first_indent = 0
if self.para.get_left_margin():
regular_indent = int(4*self.para.get_left_margin())
if self.para.get_first_indent():
first_indent = int(4*self.para.get_first_indent())
if self.in_cell and self.cellnum < self.ncols - 1:
right_pad = 1
the_pad = ' ' * right
else:
right_pad = 0
the_pad = ''
# Depending on the leader's presence, treat the first line differently
if self.leader:
# If we have a leader then we need to reformat the text
# as if there's no special treatment for the first line.
# Then add leader and eat up the beginning of the first line pad.
# Do not reformat if preformatted notes
if not self.__note_format:
self.leader += ' '
start_at = regular_indent + min(len(self.leader)+first_indent,
0)
this_text = reformat_para(self.text, regular_indent, right, fmt,
right_pad)
this_text = (' ' * (regular_indent+first_indent) +
self.leader +
this_text[start_at:]
)
else:
this_text = self.text
else:
# If no leader then reformat the text according to the first
# line indent, as specified by style.
# Do not reformat if preformatted notes
if not self.__note_format:
this_text = reformat_para(self.text, regular_indent, right, fmt,
right_pad, first_indent)
else:
this_text = ' ' * (regular_indent + first_indent) + self.text
if self.__note_format:
# don't add an extra LF before the_pad if preformatted notes.
if this_text != '\n':
                # don't add an extra LF if this_text is just a LF
this_text += the_pad + '\n'
else:
this_text += '\n' + the_pad + '\n'
if self.in_cell:
self.cellpars[self.cellnum] += this_text
else:
self.file.write(this_text)
self.text = ""
#--------------------------------------------------------------------
#
# Start a table. Grab the table style, and store it.
#
#--------------------------------------------------------------------
def start_table(self, name, style_name):
styles = self.get_style_sheet()
self.tbl_style = styles.get_table_style(style_name)
self.ncols = self.tbl_style.get_columns()
self.column_order = []
for cell in range(self.ncols):
self.column_order.append(cell)
if self.get_rtl_doc():
self.column_order.reverse()
#--------------------------------------------------------------------
#
# End a table. Turn off the self.in_cell flag
#
#--------------------------------------------------------------------
def end_table(self):
self.in_cell = 0
#--------------------------------------------------------------------
#
# Start a row. Initialize lists for cell contents, number of lines,
# and the widths. It is necessary to keep a list of cell contents
# that is to be written after all the cells are defined.
#
#--------------------------------------------------------------------
def start_row(self):
self.cellpars = [''] * self.ncols
self.cell_lines = [0] * self.ncols
self.cell_widths = [0] * self.ncols
self.cellnum = -1
self.maxlines = 0
table_width = (self.get_usable_width() *
self.tbl_style.get_width() / 100.0)
for cell in self.column_order:
self.cell_widths[cell] = int(
table_width * self.tbl_style.get_column_width(cell) / 100.0)
#--------------------------------------------------------------------
#
# End a row. Write the cell contents. Write the line of spaces
# if the cell has fewer lines than the maximum number.
#
#--------------------------------------------------------------------
def end_row(self):
self.in_cell = 0
cell_text = [None]*self.ncols
for cell in self.column_order:
if self.cell_widths[cell]:
blanks = ' '*self.cell_widths[cell] + '\n'
if self.cell_lines[cell] < self.maxlines:
self.cellpars[cell] += blanks * (
self.maxlines - self.cell_lines[cell]
)
cell_text[cell] = self.cellpars[cell].split('\n')
for line in range(self.maxlines):
for cell in self.column_order:
if self.cell_widths[cell]:
self.file.write(cell_text[cell][line])
self.file.write('\n')
#--------------------------------------------------------------------
#
# Start a cell. Set the self.in_cell flag,
# increment the current cell number.
#
#--------------------------------------------------------------------
def start_cell(self, style_name, span=1):
self.in_cell = 1
self.cellnum = self.cellnum + span
span -= 1
while span:
self.cell_widths[self.cellnum] += (
self.cell_widths[self.cellnum-span]
)
self.cell_widths[self.cellnum-span] = 0
span -= 1
#--------------------------------------------------------------------
#
# End a cell. Find out the number of lines in this cell, correct
# the maximum number of lines if necessary.
#
#--------------------------------------------------------------------
def end_cell(self):
self.in_cell = 0
self.cell_lines[self.cellnum] = self.cellpars[self.cellnum].count('\n')
if self.cell_lines[self.cellnum] > self.maxlines:
self.maxlines = self.cell_lines[self.cellnum]
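    # Illustrative call sequence for tables (not part of the original
    # comments): a report typically drives this class with
    #   start_table() -> start_row() -> start_cell() -> start_paragraph()/
    #   write_text()/end_paragraph() -> end_cell() ... -> end_row() -> end_table()
    # and end_row() is where the buffered cell paragraphs are finally written
    # out line by line, padded to self.maxlines.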
def add_media(self, name, align, w_cm, h_cm, alt='', style_name=None,
crop=None):
this_text = '(photo)'
if self.in_cell:
self.cellpars[self.cellnum] += this_text
else:
self.file.write(this_text)
def write_styled_note(self, styledtext, format, style_name,
contains_html=False, links=False):
"""
Convenience function to write a styledtext to the ASCII doc.
styledtext : assumed a StyledText object to write
format : = 0 : Flowed, = 1 : Preformatted
style_name : name of the style to use for default presentation
contains_html: bool, the backend should not check if html is present.
If contains_html=True, then the textdoc is free to handle that in
some way. Eg, a textdoc could remove all tags, or could make sure
a link is clickable. AsciiDoc prints the html without handling it
links: bool, make the URL in the text clickable (if supported)
"""
if contains_html:
return
text = str(styledtext)
if format:
#Preformatted note, keep all white spaces, tabs, LF's
self.__note_format = True
for line in text.split('\n'):
self.start_paragraph(style_name)
self.write_text(line)
self.end_paragraph()
            # Add an extra empty paragraph after all the lines of each preformatted note
self.start_paragraph(style_name)
self.end_paragraph()
self.__note_format = False
else:
for line in text.split('\n\n'):
self.start_paragraph(style_name)
#line = line.replace('\n',' ')
#line = ' '.join(line.split())
self.write_text(line)
self.end_paragraph()
#--------------------------------------------------------------------
#
# Writes text.
#--------------------------------------------------------------------
def write_text(self, text, mark=None, links=False):
self.text = self.text + text
#------------------------------------------------------------------------
#
# AsciiDocOptions class
#
#------------------------------------------------------------------------
class AsciiDocOptions(DocOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
DocOptions.__init__(self, name)
def add_menu_options(self, menu):
"""
Add options to the document menu for the AsciiDoc docgen.
"""
category_name = DOCGEN_OPTIONS
linechars = NumberOption(_('Characters per line'), 72, 20, 9999)
linechars.set_help(_("The number of characters per line"))
menu.add_option(category_name, 'linechars', linechars)
|
Nick-Hall/gramps
|
gramps/plugins/docgen/asciidoc.py
|
Python
|
gpl-2.0
| 17,549
|
[
"Brian"
] |
aa8bcf353520a18fbb6683a65acdc0fa5a32d75440e64425a6cb10de6644deff
|
from netCDF4 import Dataset
import numpy as np
import os
#initializes a netCDF file to hold a single variable with dimensions x, y, z, time
#dimension lengths are specified by user
def initialize_ncFile(filename, varname, xlen, ylen, zlen, tlen):
#an error is thrown if the file already exists, delete it first if it exists
try:
filepath = os.path.realpath(filename)
os.remove(filepath)
nc_out = Dataset(filename, mode='w', format='NETCDF4')
except OSError:
nc_out = Dataset(filename, mode='w', format='NETCDF4')
nc_out.createDimension('x', xlen)
nc_out.createDimension('y', ylen)
nc_out.createDimension('z', zlen)
nc_out.createDimension('time', tlen)
x = nc_out.createVariable('x', 'f8', ('x',))
y = nc_out.createVariable('y', 'f8', ('y',))
z = nc_out.createVariable('z', 'f8', ('z',))
time = nc_out.createVariable('time', 'f8', ('time',))
val = nc_out.createVariable(varname, 'f8', ('time', 'z', 'y', 'x'))
x.units = 'meters'
y.units = 'meters'
z.units = 'meters'
time.units = 'seconds since 2013-05-08 00:00:00 +0:00'
return nc_out
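# Illustrative usage sketch (not part of the original script); the file name,
# variable name and dimension lengths below are made up.
if __name__ == "__main__":
    nc_out = initialize_ncFile('example.nc', 'theta', 10, 10, 5, 3)
    print(nc_out.variables.keys())
    nc_out.close()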
|
nchaparr/Sam_Output_Anls
|
initialize_ncFile.py
|
Python
|
mit
| 1,159
|
[
"NetCDF"
] |
437b3a95af9b74961b21a45d190a9a734ceedc21f7966857d5271b8f01ee3305
|
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Utilities for obtaining speech utterances for objects."""
__id__ = "$Id:$"
__version__ = "$Revision:$"
__date__ = "$Date:$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import urlparse, urllib2
import generator
import pyatspi
import orca
import rolenames
import settings
import sound
import text_attribute_names
from orca_i18n import _ # for gettext support
from orca_i18n import ngettext # for ngettext support
from orca_i18n import C_ # to provide qualified translatable strings
class Pause:
"""A dummy class to indicate we want to insert a pause into an
utterance."""
def __init__(self):
pass
PAUSE = [Pause()]
class LineBreak:
"""A dummy class to indicate we want to break an utterance into
separate calls to speak."""
def __init__(self):
pass
LINE_BREAK = [LineBreak()]
# [[[WDW - general note -- for all the _generate* methods, it would be great if
# we could return an empty array if we can determine the method does not
# apply to the object. This would allow us to reduce the number of strings
# needed in formatting.py.]]]
# The prefix to use for the individual generator methods
#
METHOD_PREFIX = "_generate"
DEFAULT = "default"
UPPERCASE = "uppercase"
HYPERLINK = "hyperlink"
SYSTEM = "system"
STATE = "state" # Candidate for sound
VALUE = "value" # Candidate for sound
voiceType = {
DEFAULT : settings.DEFAULT_VOICE,
UPPERCASE : settings.UPPERCASE_VOICE,
HYPERLINK : settings.HYPERLINK_VOICE,
SYSTEM : settings.SYSTEM_VOICE,
STATE : settings.SYSTEM_VOICE, # Users may prefer DEFAULT_VOICE here
VALUE : settings.SYSTEM_VOICE, # Users may prefer DEFAULT_VOICE here
}
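# Illustrative note (not part of the original comments): the _generate* methods
# below first build a list of strings and then extend it with the voice
# specification returned by self.voice(...), e.g.
#   result = [_("row %d") % 3]
#   result.extend(self.voice(SYSTEM))
# so that callers can speak the strings with the intended voice.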
_settingsManager = getattr(orca, '_settingsManager')
class SpeechGenerator(generator.Generator):
"""Takes accessible objects and produces a string to speak for
those objects. See the generateSpeech method, which is the primary
entry point. Subclasses can feel free to override/extend the
speechGenerators instance field as they see fit."""
# pylint: disable-msg=W0142
def __init__(self, script):
generator.Generator.__init__(self, script, "speech")
def _addGlobals(self, globalsDict):
"""Other things to make available from the formatting string.
"""
generator.Generator._addGlobals(self, globalsDict)
globalsDict['voice'] = self.voice
globalsDict['play'] = self.play
def play(self, key):
"""Returns an array containing a sound.Sound instance.
The key can a value to be used to look up a filename in the
settings.py:sounds dictionary (e.g., a pyatspi.ROLE_* value)
or just the name of an audio file to use.
"""
sounds = _settingsManager.getSetting('sounds')
try:
soundBite = sound.Sound(sounds[key])
except:
if isinstance(key, basestring):
soundBite = sound.Sound(key)
else:
soundBite = None
return [soundBite]
def generateSpeech(self, obj, **args):
return self.generate(obj, **args)
#####################################################################
# #
# Name, role, and label information #
# #
#####################################################################
def _generateName(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the name of the object. If the object is directly
displaying any text, that text will be treated as the name.
Otherwise, the accessible name of the object will be used. If
there is no accessible name, then the description of the
object will be used. This method will return an empty array
if nothing can be found. [[[WDW - I wonder if we should just
have _generateName, _generateDescription,
_generateDisplayedText, etc., that don't do any fallback.
Then, we can allow the formatting to do the fallback (e.g.,
'displayedText or name or description'). [[[JD to WDW - I
needed a _generateDescription for whereAmI. :-) See below.
"""
role = args.get('role', obj.getRole())
if role == pyatspi.ROLE_LAYERED_PANE:
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
else:
acss = self.voice(SYSTEM)
else:
acss = self.voice(DEFAULT)
result = generator.Generator._generateName(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateLabel(self, obj, **args):
"""Returns the label for an object as an array of strings for use by
speech and braille. The label is determined by the displayedLabel
method of the script utility, and an empty array will be returned if
no label can be found.
"""
acss = self.voice(DEFAULT)
result = generator.Generator._generateLabel(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateLabelOrName(self, obj, **args):
"""Returns the label as an array of strings for speech and braille.
If the label cannot be found, the name will be used instead.
If the name cannot be found, an empty array will be returned.
"""
result = []
acss = self.voice(DEFAULT)
result.extend(self._generateLabel(obj, **args))
if not result:
if obj.name and (len(obj.name)):
result.append(obj.name)
result.extend(acss)
return result
def _generatePlaceholderText(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the 'placeholder' text. This is typically text that
serves as a functional label and is found in a text widget until
that widget is given focus at which point the text is removed,
the assumption being that the user was able to see the text prior
to giving the widget focus.
"""
acss = self.voice(DEFAULT)
result = generator.Generator._generatePlaceholderText(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateDescription(self, obj, **args):
"""Returns an array of strings fo use by speech and braille that
represent the description of the object, if that description
is different from that of the name and label.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
acss = self.voice(SYSTEM)
result = generator.Generator._generateDescription(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateReadOnly(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the read only state of this object, but only if it
is read only (i.e., it is a text area that cannot be edited).
"""
acss = self.voice(SYSTEM)
result = generator.Generator._generateReadOnly(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateTextRole(self, obj, **args):
"""A convenience method to prevent the pyatspi.ROLE_PARAGRAPH role
from being spoken. In the case of a pyatspi.ROLE_PARAGRAPH
role, an empty array will be returned. In all other cases, the
role name will be returned as an array of strings (and
possibly voice and audio specifications). Note that a 'role'
attribute in args will override the accessible role of the
obj. [[[WDW - I wonder if this should be moved to
_generateRoleName. Or, maybe make a 'do not speak roles' attribute
of a speech generator that we can update and the user can
override.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
role = args.get('role', obj.getRole())
if role != pyatspi.ROLE_PARAGRAPH:
result.extend(self._generateRoleName(obj, **args))
return result
def _generateRoleName(self, obj, **args):
"""Returns the role name for the object in an array of strings (and
possibly voice and audio specifications), with the exception
that the pyatspi.ROLE_UNKNOWN role will yield an empty array.
Note that a 'role' attribute in args will override the
accessible role of the obj.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
role = args.get('role', obj.getRole())
if (role != pyatspi.ROLE_UNKNOWN):
result.append(rolenames.getSpeechForRoleName(obj, role))
result.extend(acss)
return result
def getRoleName(self, obj, **args):
"""Returns the role name for the object in an array of strings (and
possibly voice and audio specifications), with the exception
that the pyatspi.ROLE_UNKNOWN role will yield an empty array.
Note that a 'role' attribute in args will override the
accessible role of the obj. This is provided mostly as a
method for scripts to call.
"""
return self._generateRoleName(obj, **args)
def _generateUnrelatedLabels(self, obj, **args):
"""Returns, as an array of strings (and possibly voice
specifications), all the labels which are underneath the obj's
hierarchy and which are not in a label for or labelled by
relation.
"""
result = []
acss = self.voice(DEFAULT)
labels = self._script.utilities.unrelatedLabels(obj)
for label in labels:
name = self._generateName(label, **args)
result.extend(name)
if result:
result.extend(acss)
return result
def _generateEmbedded(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) used especially for handling embedded objects.
This either is the label or name of the object or the name of
the application for the object.
"""
acss = self.voice(DEFAULT)
result = self._generateLabelOrName(obj, **args)
if not result:
try:
result.append(obj.getApplication().name)
except:
pass
if result:
result.extend(acss)
return result
#####################################################################
# #
# State information #
# #
#####################################################################
def _generateCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
acss = self.voice(STATE)
result = generator.Generator._generateCheckedState(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateExpandableState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the expanded/collapsed state of an object, such as a
tree node. If the object is not expandable, an empty array
will be returned.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
acss = self.voice(STATE)
result = generator.Generator._generateExpandableState(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateMenuItemCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the menu item, only if it is
        checked. Otherwise, an empty array will be returned.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
acss = self.voice(STATE)
result = generator.Generator.\
_generateMenuItemCheckedState(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateMultiselectableState(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the multiselectable state of
the object. This is typically for check boxes. If the object
is not multiselectable, an empty array will be returned.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(STATE)
if obj.getState().contains(pyatspi.STATE_MULTISELECTABLE):
# Translators: "multi-select" refers to a web form list
# in which more than one item can be selected at a time.
#
result.append(self._script.formatting.getString(
mode='speech',
stringType='multiselect'))
result.extend(acss)
return result
def _generateRadioState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
acss = self.voice(STATE)
result = generator.Generator._generateRadioState(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateToggleState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
acss = self.voice(STATE)
result = generator.Generator._generateToggleState(self, obj, **args)
if result:
result.extend(acss)
return result
#####################################################################
# #
# Link information #
# #
#####################################################################
def _generateLinkInfo(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the protocol of the URI of
the link associated with obj.
"""
result = []
acss = self.voice(HYPERLINK)
# Get the URI for the link of interest and parse it. The parsed
# URI is returned as a tuple containing six components:
# scheme://netloc/path;parameters?query#fragment.
#
link_uri = self._script.utilities.uri(obj)
if not link_uri:
# [[[TODO - JD: For some reason, this is failing for certain
# links. The current whereAmI code says, "It might be an anchor.
# Try to speak the text." and passes things off to whereAmI's
# _speakText method. That won't work in the new world order.
# Therefore, for now, I will hack in some code to do that
# work here so that the before and after end results match.]]]
#
result.extend(self._generateLabel(obj))
result.extend(self._generateRoleName(obj))
result.append(self._script.utilities.displayedText(obj))
else:
link_uri_info = urlparse.urlparse(link_uri)
if link_uri_info[0] in ["ftp", "ftps", "file"]:
fileName = link_uri_info[2].split('/')
# Translators: this refers to a link to a file, where
# the first item is the protocol (ftp, ftps, or file)
# and the second item the name of the file being linked
# to.
#
result.append(_("%(uri)s link to %(file)s") \
% {"uri" : link_uri_info[0],
"file" : fileName[-1]})
else:
# Translators: this is the protocol of a link eg. http, mailto.
#
linkOutput = _("%s link") % link_uri_info[0]
text = self._script.utilities.displayedText(obj)
if not text:
# If there's no text for the link, expose part of the
# URI to the user.
#
text = self._script.getLinkBasename(obj)
if text:
linkOutput += " " + text
result.append(linkOutput)
if obj.childCount and obj[0].getRole() == pyatspi.ROLE_IMAGE:
result.extend(self._generateRoleName(obj[0]))
if result:
result.extend(acss)
return result
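    # Illustrative example (not from the original comments): for a link whose
    # URI is 'ftp://host/pub/data.tar.gz' the method above yields
    # 'ftp link to data.tar.gz', while an 'http' link with displayed text
    # 'Downloads' yields 'http link Downloads'.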
def _generateSiteDescription(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that describe the site (same or different)
pointed to by the URI of the link associated with obj.
"""
result = []
acss = self.voice(HYPERLINK)
link_uri = self._script.utilities.uri(obj)
if link_uri:
link_uri_info = urlparse.urlparse(link_uri)
else:
return result
doc_uri = self._script.utilities.documentFrameURI()
if doc_uri:
doc_uri_info = urlparse.urlparse(doc_uri)
if link_uri_info[1] == doc_uri_info[1]:
if link_uri_info[2] == doc_uri_info[2]:
# Translators: this is an indication that a given
# link points to an object that is on the same page.
#
result.append(_("same page"))
else:
# Translators: this is an indication that a given
# link points to an object that is at the same site
# (but not on the same page as the link).
#
result.append(_("same site"))
else:
# check for different machine name on same site
#
linkdomain = link_uri_info[1].split('.')
docdomain = doc_uri_info[1].split('.')
                if len(linkdomain) > 1 and len(docdomain) > 1 \
and linkdomain[-1] == docdomain[-1] \
and linkdomain[-2] == docdomain[-2]:
# Translators: this is an indication that a given
# link points to an object that is at the same site
# (but not on the same page) as the link.
#
result.append(_("same site"))
else:
# Translators: this is an indication that a given
# link points to an object that is at a different
# site than that of the link.
#
result.append(_("different site"))
if result:
result.extend(acss)
return result
def _generateFileSize(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the size (Content-length) of
the file pointed to by the URI of the link associated with
obj.
"""
result = []
acss = self.voice(HYPERLINK)
sizeString = ""
uri = self._script.utilities.uri(obj)
if not uri:
return result
try:
x = urllib2.urlopen(uri)
try:
sizeString = x.info()['Content-length']
except KeyError:
pass
except (ValueError, urllib2.URLError, OSError):
pass
if sizeString:
size = int(sizeString)
if size < 10000:
# Translators: This is the size of a file in bytes
#
result.append(ngettext("%d byte", "%d bytes", size) % size)
elif size < 1000000:
# Translators: This is the size of a file in kilobytes
#
result.append(_("%.2f kilobytes") % (float(size) * .001))
elif size >= 1000000:
# Translators: This is the size of a file in megabytes
#
result.append(_("%.2f megabytes") % (float(size) * .000001))
if result:
result.extend(acss)
return result
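    # Illustrative example (not from the original comments): a Content-length
    # of 2500000 is spoken as '2.50 megabytes', 25000 as '25.00 kilobytes',
    # and 512 as '512 bytes'.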
#####################################################################
# #
# Image information #
# #
#####################################################################
def _generateImage(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
        specifications) that represent the image on the object, if
it exists. Otherwise, an empty array is returned.
"""
result = []
acss = self.voice(DEFAULT)
try:
image = obj.queryImage()
except:
pass
else:
args['role'] = pyatspi.ROLE_IMAGE
result.extend(self.generate(obj, **args))
result.extend(acss)
return result
#####################################################################
# #
# Table interface information #
# #
#####################################################################
def _generateNewRowHeader(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the row header for an object
that is in a table, if it exists and if it is different from
the previous row header. Otherwise, an empty array is
returned. The previous row header is determined by looking at
the row header for the 'priorObj' attribute of the args
dictionary. The 'priorObj' is typically set by Orca to be the
previous object with focus.
"""
result = []
acss = self.voice(DEFAULT)
if obj:
priorObj = args.get('priorObj', None)
try:
priorParent = priorObj.parent
except:
priorParent = None
if (obj.getRole() == pyatspi.ROLE_TABLE_CELL) \
or (obj.parent and obj.parent.getRole() == pyatspi.ROLE_TABLE):
try:
table = priorParent.queryTable()
except:
table = None
if table \
and ((priorObj.getRole() == pyatspi.ROLE_TABLE_CELL) \
or (priorObj.getRole() == pyatspi.ROLE_TABLE)):
index = self._script.utilities.cellIndex(priorObj)
oldRow = table.getRowAtIndex(index)
else:
oldRow = -1
try:
table = obj.parent.queryTable()
except:
pass
else:
index = self._script.utilities.cellIndex(obj)
newRow = table.getRowAtIndex(index)
if (newRow >= 0) \
and (index != newRow) \
and ((newRow != oldRow) \
or (obj.parent != priorParent)):
result = self._generateRowHeader(obj, **args)
if result:
result.extend(acss)
return result
def _generateNewColumnHeader(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the column header for an object
that is in a table, if it exists and if it is different from
the previous column header. Otherwise, an empty array is
returned. The previous column header is determined by looking
at the column header for the 'priorObj' attribute of the args
dictionary. The 'priorObj' is typically set by Orca to be the
previous object with focus.
"""
result = []
acss = self.voice(DEFAULT)
if obj and not args.get('readingRow', False):
priorObj = args.get('priorObj', None)
try:
priorParent = priorObj.parent
except:
priorParent = None
if (obj.getRole() == pyatspi.ROLE_TABLE_CELL) \
or (obj.parent and obj.parent.getRole() == pyatspi.ROLE_TABLE):
try:
table = priorParent.queryTable()
except:
table = None
if table \
and ((priorObj.getRole() == pyatspi.ROLE_TABLE_CELL) \
or (priorObj.getRole() == pyatspi.ROLE_TABLE)):
index = self._script.utilities.cellIndex(priorObj)
oldCol = table.getColumnAtIndex(index)
else:
oldCol = -1
try:
table = obj.parent.queryTable()
except:
pass
else:
index = self._script.utilities.cellIndex(obj)
newCol = table.getColumnAtIndex(index)
if (newCol >= 0) \
and (index != newCol) \
and ((newCol != oldCol) \
or (obj.parent != priorParent)):
result = self._generateColumnHeader(obj, **args)
if result:
result.extend(acss)
return result
def _generateRealTableCell(self, obj, **args):
"""Orca has a feature to automatically read an entire row of a table
        as the user arrows up/down the rows. This leads to complexity in
the code. This method is used to return an array of strings
(and possibly voice and audio specifications) for a single table
cell itself. The string, 'blank', is added for empty cells.
"""
result = []
acss = self.voice(DEFAULT)
oldRole = self._overrideRole('REAL_ROLE_TABLE_CELL', args)
result.extend(self.generate(obj, **args))
self._restoreRole(oldRole, args)
if not result and _settingsManager.getSetting('speakBlankLines') \
and not args.get('readingRow', False):
# Translators: "blank" is a short word to mean the
# user has navigated to an empty line.
#
result.append(_("blank"))
if result:
result.extend(acss)
return result
def _generateUnselectedCell(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
        specifications) if this is an icon within a layered pane or a
table cell within a table or a tree table and the item is
focused but not selected. Otherwise, an empty array is
returned. [[[WDW - I wonder if this string should be moved to
settings.py.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(STATE)
        # If this is an icon within a layered pane or a table cell
# within a table or a tree table and the item is focused but not
# selected, let the user know. See bug #486908 for more details.
#
checkIfSelected = False
objRole, parentRole, state = None, None, None
if obj:
objRole = obj.getRole()
state = obj.getState()
if obj.parent:
parentRole = obj.parent.getRole()
if objRole == pyatspi.ROLE_TABLE_CELL \
and (parentRole == pyatspi.ROLE_TREE_TABLE \
or parentRole == pyatspi.ROLE_TABLE):
checkIfSelected = True
# If we met the last set of conditions, but we got here by
# moving left or right on the same row, then don't announce the
# selection state to the user. See bug #523235 for more details.
#
lastKey, mods = self._script.utilities.lastKeyAndModifiers()
if checkIfSelected and lastKey in ["Left", "Right"]:
checkIfSelected = False
if objRole == pyatspi.ROLE_ICON \
and parentRole == pyatspi.ROLE_LAYERED_PANE:
checkIfSelected = True
if checkIfSelected \
and state and not state.contains(pyatspi.STATE_SELECTED):
# Translators: this is in reference to a table cell being
# selected or not.
#
result.append(C_("tablecell", "not selected"))
result.extend(acss)
return result
def _generateColumn(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) reflecting the column number of a cell.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
col = -1
if obj.parent.getRole() == pyatspi.ROLE_TABLE_CELL:
obj = obj.parent
parent = obj.parent
try:
table = parent.queryTable()
except:
if args.get('guessCoordinates', False):
col = self._script.pointOfReference.get('lastColumn', -1)
else:
index = self._script.utilities.cellIndex(obj)
col = table.getColumnAtIndex(index)
if col >= 0:
# Translators: this is in references to a column in a
# table.
result.append(_("column %d") % (col + 1))
if result:
result.extend(acss)
return result
def _generateRow(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) reflecting the row number of a cell.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
row = -1
if obj.parent.getRole() == pyatspi.ROLE_TABLE_CELL:
obj = obj.parent
parent = obj.parent
try:
table = parent.queryTable()
except:
if args.get('guessCoordinates', False):
row = self._script.pointOfReference.get('lastRow', -1)
else:
index = self._script.utilities.cellIndex(obj)
row = table.getRowAtIndex(index)
if row >= 0:
# Translators: this is in references to a row in a table.
#
result.append(_("row %d") % (row + 1))
if result:
result.extend(acss)
return result
def _generateColumnAndRow(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) reflecting the position of the cell in terms
of its column number, the total number of columns, its row,
and the total number of rows.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
if obj.parent.getRole() == pyatspi.ROLE_TABLE_CELL:
obj = obj.parent
parent = obj.parent
try:
table = parent.queryTable()
except:
table = None
else:
index = self._script.utilities.cellIndex(obj)
col = table.getColumnAtIndex(index)
row = table.getRowAtIndex(index)
# Translators: this is in references to a column in a
# table.
result.append(_("column %(index)d of %(total)d") \
% {"index" : (col + 1),
"total" : table.nColumns})
# Translators: this is in reference to a row in a table.
#
result.append(_("row %(index)d of %(total)d") \
% {"index" : (row + 1),
"total" : table.nRows})
if result:
result.extend(acss)
return result
def _generateEndOfTableIndicator(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) indicating that this cell is the last cell
in the table.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
if _settingsManager.getSetting('speechVerbosityLevel') \
== settings.VERBOSITY_LEVEL_VERBOSE:
if obj.getRole() == pyatspi.ROLE_TABLE_CELL:
cell = obj
else:
cell = self._script.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_TABLE_CELL], [pyatspi.ROLE_FRAME])
try:
table = cell.parent.queryTable()
except:
pass
else:
index = self._script.utilities.cellIndex(cell)
row = table.getRowAtIndex(index)
col = table.getColumnAtIndex(index)
if row + 1 == table.nRows and col + 1 == table.nColumns:
# Translators: This is to indicate to the user that
# he/she is in the last cell of a table in a document.
#
result.append(_("End of table"))
if result:
result.extend(acss)
return result
#####################################################################
# #
# Terminal information #
# #
#####################################################################
def _generateTerminal(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) used especially for handling terminal objects.
This either is the name of the frame the terminal is in or the
displayed label of the terminal. [[[WDW - it might be nice
to return an empty array if this is not a terminal.]]]
"""
result = []
acss = self.voice(DEFAULT)
title = None
frame = self._script.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_FRAME], [])
if frame:
title = frame.name
if not title:
title = self._script.utilities.displayedLabel(obj)
result.append(title)
if result:
result.extend(acss)
return result
#####################################################################
# #
# Text interface information #
# #
#####################################################################
def _generateCurrentLineText(self, obj, **args):
"""Returns an array of strings for use by speech and braille
that represents the current line of text, if
this is a text object. [[[WDW - consider returning an empty
array if this is not a text object.]]]
"""
acss = self.voice(DEFAULT)
result = generator.Generator._generateCurrentLineText(self, obj, **args)
if result:
result.extend(acss)
return result
def _getCharacterAttributes(self,
obj,
text,
textOffset,
lineIndex,
keys=["style", "weight", "underline"]):
"""Helper function that returns a string containing the
given attributes from keys for the given character.
"""
attribStr = ""
defaultAttributes = text.getDefaultAttributes()
keyList, attributesDictionary = \
self._script.utilities.stringToKeysAndDict(defaultAttributes)
charAttributes = text.getAttributes(textOffset)
if charAttributes[0]:
keyList, charDict = \
self._script.utilities.stringToKeysAndDict(charAttributes[0])
for key in keyList:
attributesDictionary[key] = charDict[key]
if attributesDictionary:
for key in keys:
localizedKey = text_attribute_names.getTextAttributeName(key)
if key in attributesDictionary:
attribute = attributesDictionary[key]
localizedValue = \
text_attribute_names.getTextAttributeName(attribute)
if attribute:
# If it's the 'weight' attribute and greater than 400,
# just speak it as bold, otherwise speak the weight.
#
if key == "weight":
if int(attribute) > 400:
attribStr += " "
# Translators: bold as in the font sense.
#
attribStr += _("bold")
elif key == "underline":
if attribute != "none":
attribStr += " "
attribStr += localizedKey
elif key == "style":
if attribute != "normal":
attribStr += " "
attribStr += localizedValue
else:
attribStr += " "
attribStr += (localizedKey + " " + localizedValue)
# Also check to see if this is a hypertext link.
#
if self._script.utilities.linkIndex(obj, textOffset) >= 0:
attribStr += " "
# Translators: this indicates that this piece of
# text is a hypertext link.
#
attribStr += _("link")
return attribStr
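    # Illustration of the helper above, using hypothetical AT-SPI attribute
    # values for a single character and the default keys
    # ["style", "weight", "underline"]:
    #
    #   {"style": "italic", "weight": "700", "underline": "single"}
    #       -> " italic bold underline"   (weight > 400 is spoken as "bold")
    #   {"style": "normal", "weight": "400", "underline": "none"}
    #       -> ""                         (default values add nothing)
    #
    # If the character is also part of a hyperlink, " link" is appended.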
def _getTextInformation(self, obj):
"""Returns [textContents, startOffset, endOffset, selected] as
follows:
A. if no text on the current line is selected, the current line
B. if text is selected, the selected text
C. if the current line is blank/empty, 'blank'
Also sets up a 'textInformation' attribute in
self._script.generatorCache to prevent computing this
information repeatedly while processing a single event.
"""
try:
return self._script.generatorCache['textInformation']
except:
pass
textObj = obj.queryText()
caretOffset = textObj.caretOffset
textContents = ""
selected = False
nSelections = textObj.getNSelections()
[current, other] = self._script.utilities.hasTextSelections(obj)
if current or other:
selected = True
[textContents, startOffset, endOffset] = \
self._script.utilities.allSelectedText(obj)
else:
# Get the line containing the caret
#
[line, startOffset, endOffset] = textObj.getTextAtOffset(
textObj.caretOffset,
pyatspi.TEXT_BOUNDARY_LINE_START)
if len(line):
# Check for embedded object characters. If we find any,
# expand the text. TODO - JD: This expansion doesn't
# include the role information; just the text. However,
# the handling of roles should probably be dealt with as
# a formatting string. We have not yet worked out how to
# do this with Gecko (primary user of embedded object
# characters). Until we do, this expansion is better than
# presenting the actual embedded object character.
#
unicodeText = line.decode("UTF-8")
if self._script.EMBEDDED_OBJECT_CHARACTER in unicodeText:
line = self._script.utilities.expandEOCs(
obj, startOffset, endOffset)
line = self._script.utilities.adjustForRepeats(line)
textContents = line
else:
char = textObj.getTextAtOffset(caretOffset,
pyatspi.TEXT_BOUNDARY_CHAR)
if char[0] == "\n" and startOffset == caretOffset \
and _settingsManager.getSetting('speakBlankLines'):
# Translators: "blank" is a short word to mean the
# user has navigated to an empty line.
#
textContents = (_("blank"))
self._script.generatorCache['textInformation'] = \
[textContents, startOffset, endOffset, selected]
return self._script.generatorCache['textInformation']
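    # Illustration of the [textContents, startOffset, endOffset, selected]
    # contract above, using hypothetical text objects and offsets:
    #
    #   caret on "hello world", nothing selected -> ["hello world", 0, 11, False]
    #   "world" selected                          -> ["world", 6, 11, True]
    #   caret on an empty line, speakBlankLines   -> ["blank", offset, offset, False]
    #
    # The computed value is cached in generatorCache['textInformation'] so the
    # other text generators handling this event reuse it instead of re-querying
    # AT-SPI.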
def _generateTextContent(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
        specifications) containing the text content, as obtained from
        _getTextInformation (which caches its result in the generator cache).
"""
try:
text = obj.queryText()
except NotImplementedError:
return []
result = []
acss = self.voice(DEFAULT)
[line, startOffset, endOffset, selected] = \
self._getTextInformation(obj)
# The empty string seems to be messing with using 'or' in
# formatting strings.
#
if line:
result.append(line)
result.extend(acss)
return result
def _generateTextContentWithAttributes(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) containing the text content, obtained from the
'textInformation' value, with character attribute information
        mixed in.  The text is obtained via _getTextInformation, which
        caches its result in the generator cache.
"""
try:
text = obj.queryText()
except NotImplementedError:
return []
acss = self.voice(DEFAULT)
[line, startOffset, endOffset, selected] = \
self._getTextInformation(obj)
newLine = ""
lastAttribs = None
textOffset = startOffset
for i in range(0, len(line)):
attribs = self._getCharacterAttributes(obj, text, textOffset, i)
if attribs and attribs != lastAttribs:
if newLine:
newLine += " ; "
newLine += attribs
newLine += " "
lastAttribs = attribs
newLine += line[i]
textOffset += 1
attribs = self._getCharacterAttributes(obj,
text,
startOffset,
0,
["paragraph-style"])
if attribs:
if newLine:
newLine += " ; "
newLine += attribs
result = [newLine]
result.extend(acss)
return result
def _generateAnyTextSelection(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that says if any of the text for the entire
object is selected. [[[WDW - I wonder if this string should be
moved to settings.py.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
[line, startOffset, endOffset, selected] = \
self._getTextInformation(obj)
if selected:
# Translators: when the user selects (highlights) text in
# a document, Orca lets them know this.
#
text = C_("text", "selected")
result.append(text)
result.extend(acss)
return result
def _generateAllTextSelection(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that says if all the text for the entire
object is selected. [[[WDW - I wonder if this string should be
moved to settings.py.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
try:
textObj = obj.queryText()
except:
pass
else:
noOfSelections = textObj.getNSelections()
if noOfSelections == 1:
[string, startOffset, endOffset] = \
textObj.getTextAtOffset(0, pyatspi.TEXT_BOUNDARY_LINE_START)
if startOffset == 0 and endOffset == len(string):
# Translators: when the user selects (highlights) text in
# a document, Orca lets them know this.
#
result = [C_("text", "selected")]
result.extend(acss)
return result
def generateTextIndentation(self, obj, **args):
return self._generateTextIndentation(obj, **args)
def _generateTextIndentation(self, obj, **args):
"""Speaks a summary of the number of spaces and/or tabs at the
beginning of the given line.
Arguments:
- obj: the text object.
- line: the string to check for spaces and tabs.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
acss = self.voice(SYSTEM)
if not _settingsManager.getSetting('enableSpeechIndentation'):
return []
line = args.get('alreadyFocused', "")
if not line:
[line, caretOffset, startOffset] = \
self._script.getTextLineAtCaret(obj)
# For the purpose of speaking the text indentation, replace
        # occurrences of UTF-8 '\302\240' (non-breaking space) with
# spaces.
#
line = line.replace("\302\240", " ")
line = line.decode("UTF-8")
spaceCount = 0
tabCount = 0
utterance = ""
offset = 0
while True:
while (offset < len(line)) and line[offset] == ' ':
spaceCount += 1
offset += 1
if spaceCount:
# Translators: this is the number of space characters on a line
# of text.
#
utterance += ngettext("%d space",
"%d spaces",
spaceCount) % spaceCount + " "
while (offset < len(line)) and line[offset] == '\t':
tabCount += 1
offset += 1
if tabCount:
# Translators: this is the number of tab characters on a line
# of text.
#
utterance += ngettext("%d tab",
"%d tabs",
tabCount) % tabCount + " "
if not (spaceCount or tabCount):
break
spaceCount = tabCount = 0
result = [utterance]
if result and result[0]:
result.extend(acss)
return result
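    # Worked example of the indentation summary above for a hypothetical line
    # "    \t\t  code": the loop alternates between runs of spaces and tabs
    # and produces the utterance "4 spaces 2 tabs 2 spaces ", with any
    # non-breaking spaces first normalized to ordinary spaces.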
#####################################################################
# #
# Tree interface information #
# #
#####################################################################
def _generateNewNodeLevel(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represents the tree node level of the
object, or an empty array if the object is not a tree node or
        if the node level is not different from that of the
        'priorObj' attribute of the args dictionary. The 'priorObj'
is typically set by Orca to be the previous object with
focus.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
oldLevel = self._script.utilities.nodeLevel(args.get('priorObj', None))
newLevel = self._script.utilities.nodeLevel(obj)
if (oldLevel != newLevel) and (newLevel >= 0):
result.extend(self._generateNodeLevel(obj, **args))
result.extend(acss)
return result
#####################################################################
# #
# Value interface information #
# #
#####################################################################
def _generatePercentage(self, obj, **args ):
"""Returns an array of strings (and possibly voice and audio
specifications) that represents the percentage value of the
object. This is typically for progress bars. [[[WDW - we
        should consider returning an empty array if there is no value.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
try:
value = obj.queryValue()
except NotImplementedError:
pass
else:
percentValue = \
(value.currentValue
/ (value.maximumValue - value.minimumValue)) \
* 100.0
# Translators: this is the percentage value of a progress bar.
#
percentage = ngettext("%d percent",
"%d percent",
percentValue) % percentValue
result.append(percentage)
if result:
result.extend(acss)
return result
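    # Worked example for the percentage calculation above: a hypothetical
    # value object with currentValue=30, minimumValue=0 and maximumValue=200
    # yields (30 / 200) * 100 = 15.0, spoken as "15 percent".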
#####################################################################
# #
# Hierarchy and related dialog information #
# #
#####################################################################
def _generateNewRadioButtonGroup(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represents the radio button group label
of the object, or an empty array if the object has no such
        label or if the radio button group is not different from that
        of the 'priorObj' attribute of the args dictionary. The
'priorObj' is typically set by Orca to be the previous object
with focus.
"""
# [[[TODO: WDW - hate duplicating code from _generateRadioButtonGroup
# but don't want to call it because it will make the same
# AT-SPI method calls.]]]
#
result = []
acss = self.voice(DEFAULT)
priorObj = args.get('priorObj', None)
if obj and obj.getRole() == pyatspi.ROLE_RADIO_BUTTON:
radioGroupLabel = None
inSameGroup = False
relations = obj.getRelationSet()
for relation in relations:
if (not radioGroupLabel) \
and (relation.getRelationType() \
== pyatspi.RELATION_LABELLED_BY):
radioGroupLabel = relation.getTarget(0)
if (not inSameGroup) \
and (relation.getRelationType() \
== pyatspi.RELATION_MEMBER_OF):
for i in range(0, relation.getNTargets()):
target = relation.getTarget(i)
if target == priorObj:
inSameGroup = True
break
if (not inSameGroup) and radioGroupLabel:
result.append(self._script.utilities.\
displayedText(radioGroupLabel))
result.extend(acss)
return result
def _generateNumberOfChildren(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represents the number of children the
object has. [[[WDW - can we always return an empty array if
this doesn't apply?]]] [[[WDW - I wonder if this string should
be moved to settings.py.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
childNodes = self._script.utilities.childNodes(obj)
children = len(childNodes)
if children:
# Translators: this is the number of items in a layered
# pane or table.
#
itemString = ngettext("%d item", "%d items", children) % children
result.append(itemString)
result.extend(acss)
return result
def _generateNoShowingChildren(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that says if this object has no showing
        children (e.g., it's an empty table or list).
[[[WDW - can we always return an empty array if this doesn't
apply?]]] [[[WDW - I wonder if this string should be moved to
settings.py.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
hasItems = False
for child in obj:
state = child.getState()
if state.contains(pyatspi.STATE_SHOWING):
hasItems = True
break
if not hasItems:
# Translators: this is the number of items in a layered pane
# or table.
#
result.append(_("0 items"))
result.extend(acss)
return result
def _generateNoChildren(self, obj, **args ):
"""Returns an array of strings (and possibly voice and audio
specifications) that says if this object has no children at
        all (e.g., it's an empty table or list).  [[[WDW
- can we always return an empty array if this doesn't
apply?]]] [[[WDW - I wonder if this string should be moved to
settings.py.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
if not obj.childCount:
# Translators: this is the number of items in a layered pane
# or table.
#
result.append(_("0 items"))
result.extend(acss)
return result
def _generateSelectedItemCount(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) indicating how many items are selected in this
        object, and the position of the current item. This object will be an icon
panel or a layered pane.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
# TODO - JD: Is there a better way to do this other than
# hard-coding it in?
#
if args.get('role', obj.getRole()) == pyatspi.ROLE_ICON:
obj = obj.parent
childCount = obj.childCount
selectedItems = []
totalSelectedItems = 0
currentItem = 0
for child in obj:
state = child.getState()
if state.contains(pyatspi.STATE_SELECTED):
totalSelectedItems += 1
selectedItems.append(child)
if state.contains(pyatspi.STATE_FOCUSED):
currentItem = child.getIndexInParent() + 1
# Translators: this is a count of the number of selected icons
# and the count of the total number of icons within an icon panel.
# An example of an icon panel is the Nautilus folder view.
#
countString = ngettext("%(index)d of %(total)d item selected",
"%(index)d of %(total)d items selected",
childCount) \
% {"index" : totalSelectedItems,
"total" : childCount}
result.append(countString)
result.extend(acss)
result.append(self._script.formatting.getString(
mode='speech',
stringType='iconindex') \
% {"index" : currentItem,
"total" : childCount})
result.extend(acss)
return result
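    # Illustration with hypothetical numbers: an icon view holding 10 icons,
    # 3 of them selected and focus on the 7th, produces
    # "3 of 10 items selected" followed by the 'iconindex' formatting string
    # filled in with index 7 and total 10.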
def _generateSelectedItems(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) containing the names of all the selected items.
This object will be an icon panel or a layered pane.
"""
result = []
# TODO - JD: Is there a better way to do this other than
# hard-coding it in?
#
if args.get('role', obj.getRole()) == pyatspi.ROLE_ICON:
obj = obj.parent
for child in obj:
if child.getState().contains(pyatspi.STATE_SELECTED):
result.extend(self._generateLabelAndName(child))
return result
def _generateUnfocusedDialogCount(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that says how many unfocused alerts and
dialogs are associated with the application for this object.
[[[WDW - I wonder if this string should be moved to
settings.py.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
# If this application has more than one unfocused alert or
# dialog window, then speak '<m> unfocused dialogs'
# to let the user know.
#
alertAndDialogCount = \
self._script.utilities.unfocusedAlertAndDialogCount(obj)
if alertAndDialogCount > 0:
# Translators: this tells the user how many unfocused
# alert and dialog windows that this application has.
#
result.append(ngettext("%d unfocused dialog",
"%d unfocused dialogs",
alertAndDialogCount) % alertAndDialogCount)
result.extend(acss)
return result
def _generateAncestors(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the text of the ancestors for
the object. This is typically used to present the context for
an object (e.g., the names of the window, the panels, etc.,
that the object is contained in). If the 'priorObj' attribute
of the args dictionary is set, only the differences in
ancestry between the 'priorObj' and the current obj will be
computed. The 'priorObj' is typically set by Orca to be the
previous object with focus.
"""
result = []
acss = self.voice(DEFAULT)
priorObj = args.get('priorObj', None)
requireText = args.get('requireText', True)
commonAncestor = self._script.utilities.commonAncestor(priorObj, obj)
if obj != commonAncestor:
parent = obj.parent
if parent \
and (obj.getRole() == pyatspi.ROLE_TABLE_CELL) \
and (parent.getRole() == pyatspi.ROLE_TABLE_CELL):
parent = parent.parent
while parent and (parent.parent != parent):
if parent == commonAncestor:
break
if not self._script.utilities.isLayoutOnly(parent):
text = self._script.utilities.displayedLabel(parent)
if not text \
and (not requireText \
or (requireText \
and 'Text' in pyatspi.listInterfaces(parent))):
text = self._script.utilities.displayedText(parent)
if not text and parent.getRole() \
in [pyatspi.ROLE_MENU, pyatspi.ROLE_PAGE_TAB]:
text = parent.name
if text and len(text.strip()):
roleInfo = self._generateRoleName(parent)
if roleInfo:
roleInfo.reverse()
# Push announcement of cell to the end
#
if parent.getRole() not in [pyatspi.ROLE_TABLE_CELL,
pyatspi.ROLE_FILLER]:
result.extend(roleInfo)
result.extend(acss)
result.append(text)
if parent.getRole() == pyatspi.ROLE_TABLE_CELL:
result.extend(roleInfo)
parent = parent.parent
result.reverse()
return result
def _generateNewAncestors(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the text of the ancestors for
the object. This is typically used to present the context for
an object (e.g., the names of the window, the panels, etc.,
that the object is contained in). If the 'priorObj' attribute
of the args dictionary is set, only the differences in
ancestry between the 'priorObj' and the current obj will be
computed. Otherwise, no ancestry will be computed. The
'priorObj' is typically set by Orca to be the previous object
with focus.
"""
result = []
if args.get('priorObj', None):
result = self._generateAncestors(obj, **args)
return result
def _generateParentRoleName(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) containing the role name of the parent of obj.
"""
if args.get('role', obj.getRole()) == pyatspi.ROLE_ICON \
and args.get('formatType', None) \
in ['basicWhereAmI', 'detailedWhereAmI']:
# Translators: this is an alternative name for the
# parent object of a series of icons.
#
return [_("Icon panel")]
elif obj.parent.getRole() == pyatspi.ROLE_TABLE_CELL:
obj = obj.parent
return self._generateRoleName(obj.parent)
def _generateToolbar(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) containing the name and role of the toolbar
which contains obj.
"""
result = []
ancestor = self._script.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_TOOL_BAR], [pyatspi.ROLE_FRAME])
if ancestor:
result.extend(self._generateLabelAndName(ancestor))
result.extend(self._generateRoleName(ancestor))
return result
def _generatePositionInGroup(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the relative position of an
object in a group.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
position = -1
total = -1
relations = obj.getRelationSet()
for relation in relations:
if relation.getRelationType() == pyatspi.RELATION_MEMBER_OF:
total = 0
for i in range(0, relation.getNTargets()):
target = relation.getTarget(i)
if target.getState().contains(pyatspi.STATE_SHOWING):
total += 1
if target == obj:
position = total
if position >= 0:
# Adjust the position because the relations tend to be given
# in the reverse order.
position = total - position + 1
result.append(self._script.formatting.getString(
mode='speech',
stringType='groupindex') \
% {"index" : position,
"total" : total})
result.extend(acss)
return result
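    # Note on the position adjustment above, with a hypothetical group: if the
    # object is the first of three showing MEMBER_OF targets, position ends up
    # as 1 and total as 3, and the reversal (total - position + 1) reports it
    # as item 3 of 3, because the relation targets tend to be listed in
    # reverse order.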
def _generatePositionInList(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the relative position of an
object in a list.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText') \
or not (_settingsManager.getSetting('enablePositionSpeaking') \
or args.get('forceList', False)):
return []
result = []
acss = self.voice(SYSTEM)
position = -1
index = 0
total = 0
name = self._generateName(obj)
# TODO - JD: There might be a better way to do this (e.g. pass
# roles in maybe?).
#
role = args.get('role', obj.getRole())
if role == pyatspi.ROLE_COMBO_BOX:
obj = obj[0]
elif role in [pyatspi.ROLE_PAGE_TAB,
pyatspi.ROLE_MENU,
pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_CHECK_MENU_ITEM,
pyatspi.ROLE_RADIO_MENU_ITEM]:
obj = obj.parent
elif role == pyatspi.ROLE_LIST_ITEM:
parent = obj.parent
for relation in obj.getRelationSet():
if relation.getRelationType() == \
pyatspi.RELATION_NODE_CHILD_OF:
# childNodes assumes that we have an accessible table
# interface to work with. If we don't, it will fail. So
# don't set the parent until verifying the interface we
# expect actually exists.
#
target = relation.getTarget(0)
try:
target.parent.queryTable()
except:
pass
else:
parent = target
break
obj = parent
else:
obj = obj.parent
# We want to return the position relative to this hierarchical
# level and not the entire list. If the object in question
# uses the NODE_CHILD_OF relationship, we need to use it instead
# of the childCount.
#
childNodes = self._script.utilities.childNodes(obj)
total = len(childNodes)
for i in range(0, total):
childName = self._generateName(childNodes[i])
if childName == name:
position = i+1
break
if not total:
for child in obj:
nextName = self._generateName(child)
state = child.getState()
if not nextName or nextName[0] in ["", "Empty", "separator"] \
or not state.contains(pyatspi.STATE_VISIBLE):
continue
index += 1
total += 1
if nextName == name:
position = index
if (_settingsManager.getSetting('enablePositionSpeaking') \
or args.get('forceList', False)) \
and position >= 0:
result.append(self._script.formatting.getString(
mode='speech',
stringType='groupindex') \
% {"index" : position,
"total" : total})
result.extend(acss)
return result
def _generateDefaultButton(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the default button in a dialog.
This method should initially be called with a top-level window.
"""
result = []
button = self._script.utilities.defaultButton(obj)
if button and button.getState().contains(pyatspi.STATE_SENSITIVE):
name = self._generateName(button)
if name:
# Translators: The "default" button in a dialog box is the
# button that gets activated when Enter is pressed anywhere
# within that dialog box.
#
result.append(_("Default button is %s") % name[0])
return result
def generateDefaultButton(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the default button of the window
containing the object.
"""
return self._generateDefaultButton(obj, **args)
def _generateStatusBar(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the status bar of a window.
This method should initially be called with a top-level window.
"""
result = []
statusBar = self._script.utilities.statusBar(obj)
if statusBar:
name = self._generateName(statusBar)
if name:
result.extend(name)
else:
for child in statusBar:
result.extend(self._generateName(child))
return result
def generateStatusBar(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the status bar of the window
containing the object.
"""
return self._generateStatusBar(obj, **args)
def generateTitle(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
        specifications) that represent the title of the window
        containing obj, along with information associated with
any unfocused dialog boxes.
"""
result = []
acss = self.voice(DEFAULT)
frame, dialog = self._script.utilities.frameAndDialog(obj)
if frame:
result.append(self._generateLabelAndName(frame))
if dialog:
result.append(self._generateLabelAndName(dialog))
alertAndDialogCount = \
self._script.utilities.unfocusedAlertAndDialogCount(obj)
if alertAndDialogCount > 0:
# Translators: this tells the user how many unfocused
# alert and dialog windows that this application has.
#
dialogs = []
dialogs.append(ngettext("%d unfocused dialog",
"%d unfocused dialogs",
alertAndDialogCount) % alertAndDialogCount)
dialogs.extend(acss)
result.append(dialogs)
return result
#####################################################################
# #
# Keyboard shortcut information #
# #
#####################################################################
def _generateAccelerator(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the accelerator for the object,
or an empty array if no accelerator can be found.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
[mnemonic, shortcut, accelerator] = \
self._script.utilities.mnemonicShortcutAccelerator(obj)
if accelerator:
result.append(accelerator)
result.extend(acss)
return result
def _generateMnemonic(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the mnemonic for the object, or
an empty array if no mnemonic can be found.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
if _settingsManager.getSetting('enableMnemonicSpeaking') \
or args.get('forceMnemonic', False):
[mnemonic, shortcut, accelerator] = \
self._script.utilities.mnemonicShortcutAccelerator(obj)
if mnemonic:
mnemonic = mnemonic[-1] # we just want a single character
if not mnemonic and shortcut:
mnemonic = shortcut
if mnemonic:
result = [mnemonic]
result.extend(acss)
return result
#####################################################################
# #
# Tutorial information #
# #
#####################################################################
def _generateTutorial(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the tutorial for the object.
The tutorial will only be generated if the user has requested
tutorials, and will then be generated according to the
tutorial generator. A tutorial can be forced by setting the
'forceTutorial' attribute of the args dictionary to True.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
alreadyFocused = args.get('alreadyFocused', False)
forceTutorial = args.get('forceTutorial', False)
result.extend(self._script.tutorialGenerator.getTutorial(
obj,
alreadyFocused,
forceTutorial))
if args.get('role', obj.getRole()) == pyatspi.ROLE_ICON \
and args.get('formatType', 'unfocused') == 'basicWhereAmI':
frame, dialog = self._script.utilities.frameAndDialog(obj)
if frame:
result.extend(self._script.tutorialGenerator.getTutorial(
frame,
alreadyFocused,
forceTutorial))
if result and result[0]:
result.extend(acss)
return result
#####################################################################
# #
# Other things for prosody and voice selection #
# #
#####################################################################
def _generatePause(self, obj, **args):
return PAUSE
def _generateLineBreak(self, obj, **args):
return LINE_BREAK
def voice(self, key=None, **args):
"""Returns an array containing a voice. The key is a value
to be used to look up the voice in the settings.py:voices
dictionary. Other arguments can be passed in for future
decision making.
"""
voicename = voiceType.get(key) or voiceType.get(DEFAULT)
voices = _settingsManager.getSetting('voices')
rv = voices.get(voicename)
if rv and rv.get('established') == False:
rv.pop('established')
return [rv]
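    # Usage sketch (hypothetical keys shown): voice(SYSTEM) looks up
    # voiceType[SYSTEM] in the 'voices' setting and returns a one-element list
    # such as [<ACSS dict for the system voice>]; an unknown key falls back to
    # voiceType[DEFAULT].  The transient 'established' flag is stripped before
    # the dictionary is handed to the speech layer.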
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/orca/speech_generator.py
|
Python
|
gpl-3.0
| 79,095
|
[
"ORCA"
] |
e818c178cf63bc192924b4a262393b891ba7bec65b88ee100104a0c07210ab7e
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001-2007 Donald N. Allingham
# Copyright (C) 2008 Lukasz Rymarczyk
# Copyright (C) 2008 Raphael Ackermann
# Copyright (C) 2008-2011 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011-2013 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
# cli.plug.__init__
#
""" Enable report generation from the command line interface (CLI) """
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import traceback
import os
import sys
import logging
LOG = logging.getLogger(".")
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.plug import BasePluginManager
from gramps.gen.plug.docgen import (StyleSheet, StyleSheetList, PaperStyle,
PAPER_PORTRAIT, PAPER_LANDSCAPE, graphdoc,
treedoc)
from gramps.gen.plug.menu import (FamilyOption, PersonOption, NoteOption,
MediaOption, PersonListOption, NumberOption,
BooleanOption, DestinationOption, Option,
TextOption, EnumeratedListOption,
StringOption)
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.errors import ReportError, FilterError
from gramps.gen.plug.report import (CATEGORY_TEXT, CATEGORY_DRAW, CATEGORY_BOOK,
CATEGORY_GRAPHVIZ, CATEGORY_TREE,
CATEGORY_CODE, ReportOptions, append_styles)
from gramps.gen.plug.report._paper import paper_sizes
from gramps.gen.const import USER_HOME, DOCGEN_OPTIONS
from gramps.gen.dbstate import DbState
from ..grampscli import CLIManager
from ..user import User
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Private Functions
#
#------------------------------------------------------------------------
def _convert_str_to_match_type(str_val, type_val):
"""
Returns a value representing str_val that is the same type as type_val.
"""
str_val = str_val.strip()
ret_type = type(type_val)
if isinstance(type_val, str):
if ((str_val.startswith("'") and str_val.endswith("'"))
or (str_val.startswith('"') and str_val.endswith('"'))):
# Remove enclosing quotes
return str(str_val[1:-1])
else:
return str(str_val)
elif ret_type == int:
if str_val.isdigit():
return int(str_val)
else:
print("'%s' is not an integer number" % str_val)
return 0
elif ret_type == float:
try:
return float(str_val)
except ValueError:
print("'%s' is not a decimal number" % str_val)
return 0.0
elif ret_type == bool:
if str_val == str(True):
return True
elif str_val == str(False):
return False
else:
print("'%s' is not a boolean-- try 'True' or 'False'" % str_val)
return False
elif ret_type == list:
ret_val = []
if not (str_val.startswith("[") and str_val.endswith("]")):
print("'%s' is not a list-- try: [%s]" % (str_val, str_val))
return ret_val
entry = ""
quote_type = None
# Search through characters between the brackets
for char in str_val[1:-1]:
if (char == "'" or char == '"') and quote_type is None:
# This character starts a string
quote_type = char
elif char == quote_type:
# This character ends a string
quote_type = None
elif quote_type is None and char == ",":
# This character ends an entry
ret_val.append(entry.strip())
entry = ""
quote_type = None
else:
entry += char
if entry != "":
# Add the last entry
ret_val.append(entry.strip())
return ret_val
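# Worked examples for the conversion helper above (hypothetical inputs):
#
#   _convert_str_to_match_type("'John'", "")      -> "John"
#   _convert_str_to_match_type("3", 0)            -> 3
#   _convert_str_to_match_type("x", 0)            -> 0      (prints a warning)
#   _convert_str_to_match_type("True", False)     -> True
#   _convert_str_to_match_type("[a, 'b,c']", [])  -> ["a", "b,c"]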
def _validate_options(options, dbase):
"""
Validate all options by making sure that their values are consistent with
the database.
menu: The Menu class
dbase: the database the options will be applied to
"""
if not hasattr(options, "menu"):
return
menu = options.menu
for name in menu.get_all_option_names():
option = menu.get_option_by_name(name)
if isinstance(option, PersonOption):
pid = option.get_value()
person = dbase.get_person_from_gramps_id(pid)
if not person:
person = dbase.get_default_person()
if not person:
try:
phandle = next(dbase.iter_person_handles())
except StopIteration:
phandle = None
person = dbase.get_person_from_handle(phandle)
if not person:
print(_("ERROR: Please specify a person"),
file=sys.stderr)
if person:
option.set_value(person.get_gramps_id())
elif isinstance(option, FamilyOption):
fid = option.get_value()
family = dbase.get_family_from_gramps_id(fid)
if not family:
person = dbase.get_default_person()
family_list = []
family_handle = None
if person:
family_list = person.get_family_handle_list()
if family_list:
family_handle = family_list[0]
else:
try:
family_handle = next(dbase.iter_family_handles())
except StopIteration:
family_handle = None
if family_handle:
family = dbase.get_family_from_handle(family_handle)
option.set_value(family.get_gramps_id())
else:
print(_("ERROR: Please specify a family"), file=sys.stderr)
#------------------------------------------------------------------------
#
# Command-line report
#
#------------------------------------------------------------------------
class CommandLineReport:
"""
Provide a way to generate report from the command line.
"""
def __init__(self, database, name, category, option_class, options_str_dict,
noopt=False):
pmgr = BasePluginManager.get_instance()
self.__textdoc_plugins = []
self.__drawdoc_plugins = []
self.__bookdoc_plugins = []
for plugin in pmgr.get_docgen_plugins():
if plugin.get_text_support() and plugin.get_extension():
self.__textdoc_plugins.append(plugin)
if plugin.get_draw_support() and plugin.get_extension():
self.__drawdoc_plugins.append(plugin)
if (plugin.get_extension()
and plugin.get_text_support()
and plugin.get_draw_support()):
self.__bookdoc_plugins.append(plugin)
self.database = database
self.category = category
self.options_dict = None # keep pylint happy
self.options_help = None
self.paper = None
self.orien = None
self.marginl = None
self.marginr = None
self.margint = None
self.marginb = None
self.doc_options = None
self.doc_option_class = None
self.selected_style = None
self.style_list = None
self.css_filename = None
self.format = None
self.raw_name = name
self.option_class = option_class(name, database)
if category == CATEGORY_GRAPHVIZ:
# Need to include Graphviz options
self.__gvoptions = graphdoc.GVOptions()
menu = self.option_class.menu
self.__gvoptions.add_menu_options(menu)
for name in menu.get_all_option_names():
if name not in self.option_class.options_dict:
self.option_class.options_dict[
name] = menu.get_option_by_name(name).get_value()
if category == CATEGORY_TREE:
# Need to include Genealogy Tree options
self.__toptions = treedoc.TreeOptions()
menu = self.option_class.menu
self.__toptions.add_menu_options(menu)
for name in menu.get_all_option_names():
if name not in self.option_class.options_dict:
self.option_class.options_dict[
name] = menu.get_option_by_name(name).get_value()
self.option_class.load_previous_values()
_validate_options(self.option_class, database)
self.show = options_str_dict.pop('show', None)
self.options_str_dict = options_str_dict
self.init_standard_options(noopt)
self.init_report_options()
self.parse_options()
self.init_report_options_help()
self.show_options()
def init_standard_options(self, noopt):
"""
Initialize the options that are hard-coded into the report system.
"""
self.options_dict = {
'of' : self.option_class.handler.module_name,
'off' : self.option_class.handler.get_format_name(),
'style' :
self.option_class.handler.get_default_stylesheet_name(),
'papers' : self.option_class.handler.get_paper_name(),
'papero' : self.option_class.handler.get_orientation(),
'paperml' : self.option_class.handler.get_margins()[0],
'papermr' : self.option_class.handler.get_margins()[1],
'papermt' : self.option_class.handler.get_margins()[2],
'papermb' : self.option_class.handler.get_margins()[3],
'css' : self.option_class.handler.get_css_filename(),
}
self.options_help = {
'of' : [_("=filename"),
_("Output file name. MANDATORY"), ""],
'off' : [_("=format"), _("Output file format."), []],
'style' : [_("=name"), _("Style name."), ""],
'papers' : [_("=name"), _("Paper size name."), ""],
'papero' : [_("=number"), _("Paper orientation number."), ""],
'paperml' : [_("=number"),
_("Left paper margin"), _("Size in cm")],
'papermr' : [_("=number"),
_("Right paper margin"), _("Size in cm")],
'papermt' : [_("=number"),
_("Top paper margin"), _("Size in cm")],
'papermb' : [_("=number"),
_("Bottom paper margin"), _("Size in cm")],
'css' : [_("=css filename"),
_("CSS filename to use, html format only"), ""],
}
if noopt:
return
self.options_help['of'][2] = os.path.join(USER_HOME,
"whatever_name")
if self.category == CATEGORY_TEXT:
for plugin in self.__textdoc_plugins:
self.options_help['off'][2].append(
plugin.get_extension() + "\t" + plugin.get_description())
elif self.category == CATEGORY_DRAW:
for plugin in self.__drawdoc_plugins:
self.options_help['off'][2].append(
plugin.get_extension() + "\t" + plugin.get_description())
elif self.category == CATEGORY_BOOK:
for plugin in self.__bookdoc_plugins:
self.options_help['off'][2].append(
plugin.get_extension() + "\t" + plugin.get_description())
elif self.category == CATEGORY_GRAPHVIZ:
for graph_format in graphdoc.FORMATS:
self.options_help['off'][2].append(
graph_format["type"] + "\t" + graph_format["descr"])
elif self.category == CATEGORY_TREE:
for tree_format in treedoc.FORMATS:
self.options_help['off'][2].append(
tree_format["type"] + "\t" + tree_format["descr"])
else:
self.options_help['off'][2] = "NA"
self.options_help['papers'][2] = [
paper.get_name() for paper in paper_sizes
if paper.get_name() != 'Custom Size']
self.options_help['papero'][2] = ["%d\tPortrait" % PAPER_PORTRAIT,
"%d\tLandscape" % PAPER_LANDSCAPE]
self.options_help['css'][2] = os.path.join(USER_HOME,
"whatever_name.css")
if self.category in (CATEGORY_TEXT, CATEGORY_DRAW):
default_style = StyleSheet()
self.option_class.make_default_style(default_style)
# Read all style sheets available for this item
style_file = self.option_class.handler.get_stylesheet_savefile()
self.style_list = StyleSheetList(style_file, default_style)
self.options_help['style'][2] = self.style_list.get_style_names()
def init_report_options(self):
"""
Initialize the options that are defined by each report.
"""
if self.category == CATEGORY_BOOK: # a Book Report has no "menu"
for key in self.option_class.options_dict:
self.options_dict[key] = self.option_class.options_dict[key]
self.options_help[
key] = self.option_class.options_help[key][:3]
# a Book Report can't have HTML output so "css" is meaningless
self.options_dict.pop('css')
if not hasattr(self.option_class, "menu"):
return
menu = self.option_class.menu
for name in menu.get_all_option_names():
option = menu.get_option_by_name(name)
self.options_dict[name] = option.get_value()
def init_report_options_help(self):
"""
Initialize help for the options that are defined by each report.
(And also any docgen options, if defined by the docgen.)
"""
if not hasattr(self.option_class, "menu"):
return
menu = self.option_class.menu
for name in menu.get_all_option_names():
option = menu.get_option_by_name(name)
self.options_help[name] = ["", option.get_help()]
if isinstance(option, PersonOption):
id_list = []
for person_handle in self.database.get_person_handles(True):
person = self.database.get_person_from_handle(person_handle)
id_list.append("%s\t%s"
% (person.get_gramps_id(),
name_displayer.display(person)))
self.options_help[name].append(id_list)
elif isinstance(option, FamilyOption):
id_list = []
for family in self.database.iter_families():
mname = ""
fname = ""
mhandle = family.get_mother_handle()
if mhandle:
mother = self.database.get_person_from_handle(mhandle)
if mother:
mname = name_displayer.display(mother)
fhandle = family.get_father_handle()
if fhandle:
father = self.database.get_person_from_handle(fhandle)
if father:
fname = name_displayer.display(father)
# translators: needed for French, Hebrew and Arabic
text = _("%(id)s:\t%(father)s, %(mother)s"
) % {'id': family.get_gramps_id(),
'father': fname, 'mother': mname}
id_list.append(text)
self.options_help[name].append(id_list)
elif isinstance(option, NoteOption):
id_list = []
for nhandle in self.database.get_note_handles():
note = self.database.get_note_from_handle(nhandle)
id_list.append(note.get_gramps_id())
self.options_help[name].append(id_list)
elif isinstance(option, MediaOption):
id_list = []
for mhandle in self.database.get_media_handles():
mobject = self.database.get_media_from_handle(mhandle)
id_list.append(mobject.get_gramps_id())
self.options_help[name].append(id_list)
elif isinstance(option, PersonListOption):
self.options_help[name].append("")
elif isinstance(option, NumberOption):
self.options_help[name].append("A number")
elif isinstance(option, BooleanOption):
self.options_help[name].append(["False", "True"])
elif isinstance(option, DestinationOption):
self.options_help[name].append("A file system path")
elif isinstance(option, StringOption):
self.options_help[name].append("Any text")
elif isinstance(option, TextOption):
self.options_help[name].append(
"A list of text values. Each entry in the list "
"represents one line of text.")
elif isinstance(option, EnumeratedListOption):
ilist = []
for (value, description) in option.get_items():
tabs = '\t'
try:
tabs = '\t\t' if len(value) < 10 else '\t'
except TypeError: #Value is a number, use just one tab.
pass
val = "%s%s%s" % (value, tabs, description)
ilist.append(val)
self.options_help[name].append(ilist)
elif isinstance(option, Option):
self.options_help[name].append(option.get_help())
else:
print(_("Unknown option: %s") % option, file=sys.stderr)
print(_(" Valid options are:") +
_(", ").join(list(self.options_dict.keys())), # Arabic OK
file=sys.stderr)
print(_(" Use '%(donottranslate)s' to see description "
"and acceptable values"
) % {'donottranslate' : "show=option"},
file=sys.stderr)
def parse_options(self):
"""
Load the options that the user has entered.
"""
if not hasattr(self.option_class, "menu"):
menu = None
else:
menu = self.option_class.menu
menu_opt_names = menu.get_all_option_names()
_format_str = self.options_str_dict.pop('off', None)
if _format_str:
self.options_dict['off'] = _format_str
self.css_filename = None
_chosen_format = None
self.doc_option_class = None
if self.category in [CATEGORY_TEXT, CATEGORY_DRAW, CATEGORY_BOOK]:
if self.category == CATEGORY_TEXT:
plugins = self.__textdoc_plugins
self.css_filename = self.options_dict['css']
elif self.category == CATEGORY_DRAW:
plugins = self.__drawdoc_plugins
elif self.category == CATEGORY_BOOK:
plugins = self.__bookdoc_plugins
for plugin in plugins:
if plugin.get_extension() == self.options_dict['off']:
self.format = plugin.get_basedoc()
self.doc_option_class = plugin.get_doc_option_class()
if self.format is None:
# Pick the first one as the default.
plugin = plugins[0]
self.format = plugin.get_basedoc()
self.doc_option_class = plugin.get_doc_option_class()
_chosen_format = plugin.get_extension()
elif self.category == CATEGORY_GRAPHVIZ:
for graph_format in graphdoc.FORMATS:
if graph_format['type'] == self.options_dict['off']:
if not self.format: # choose the first one, not the last
self.format = graph_format["class"]
if self.format is None:
# Pick the first one as the default.
self.format = graphdoc.FORMATS[0]["class"]
_chosen_format = graphdoc.FORMATS[0]["type"]
elif self.category == CATEGORY_TREE:
for tree_format in treedoc.FORMATS:
if tree_format['type'] == self.options_dict['off']:
if not self.format: # choose the first one, not the last
self.format = tree_format["class"]
if self.format is None:
# Pick the first one as the default.
                self.format = treedoc.FORMATS[0]["class"]
                _chosen_format = treedoc.FORMATS[0]["type"]
else:
self.format = None
if _chosen_format and _format_str:
print(_("Ignoring '%(notranslate1)s=%(notranslate2)s' "
"and using '%(notranslate1)s=%(notranslate3)s'."
) % {'notranslate1' : "off",
'notranslate2' : self.options_dict['off'],
'notranslate3' : _chosen_format},
file=sys.stderr)
print(_("Use '%(notranslate)s' to see valid values."
) % {'notranslate' : "show=off"}, file=sys.stderr)
self.do_doc_options()
for opt in self.options_str_dict:
if opt in self.options_dict:
self.options_dict[opt] = _convert_str_to_match_type(
self.options_str_dict[opt], self.options_dict[opt])
self.option_class.handler.options_dict[
opt] = self.options_dict[opt]
if menu and opt in menu_opt_names:
option = menu.get_option_by_name(opt)
option.set_value(self.options_dict[opt])
else:
print(_("Ignoring unknown option: %s") % opt, file=sys.stderr)
print(_(" Valid options are:"),
_(", ").join(list(self.options_dict.keys())), # Arabic OK
file=sys.stderr)
print(_(" Use '%(donottranslate)s' to see description "
"and acceptable values"
) % {'donottranslate' : "show=option"},
file=sys.stderr)
self.option_class.handler.output = self.options_dict['of']
self.paper = paper_sizes[0] # make sure one exists
for paper in paper_sizes:
if paper.get_name() == self.options_dict['papers']:
self.paper = paper
self.option_class.handler.set_paper(self.paper)
self.orien = self.options_dict['papero']
self.marginl = self.options_dict['paperml']
self.marginr = self.options_dict['papermr']
self.margint = self.options_dict['papermt']
self.marginb = self.options_dict['papermb']
if self.category in (CATEGORY_TEXT, CATEGORY_DRAW):
default_style = StyleSheet()
self.option_class.make_default_style(default_style)
# Read all style sheets available for this item
style_file = self.option_class.handler.get_stylesheet_savefile()
self.style_list = StyleSheetList(style_file, default_style)
# Get the selected stylesheet
style_name = self.option_class.handler.get_default_stylesheet_name()
self.selected_style = self.style_list.get_style_sheet(style_name)
def do_doc_options(self):
"""
Process docgen options, if any (options for the backend, e.g. AsciiDoc)
"""
self.doc_options = None
if not self.doc_option_class:
return # this docgen type has no options
try:
if issubclass(self.doc_option_class, object):
self.doc_options = self.doc_option_class(self.raw_name,
self.database)
doc_options_dict = self.doc_options.options_dict
except TypeError:
self.doc_options = self.doc_option_class
self.doc_options.load_previous_values()
docgen_menu = self.doc_options.menu
report_menu = self.option_class.menu # "help" checks the option type
for oname in docgen_menu.get_option_names(DOCGEN_OPTIONS):
docgen_opt = docgen_menu.get_option(DOCGEN_OPTIONS, oname)
if oname in self.options_str_dict and oname in doc_options_dict:
doc_options_dict[oname] = _convert_str_to_match_type(
self.options_str_dict[oname], doc_options_dict[oname])
self.options_str_dict.pop(oname)
if oname in doc_options_dict:
docgen_opt.set_value(doc_options_dict[oname])
report_menu.add_option(DOCGEN_OPTIONS, oname, docgen_opt)
for oname in doc_options_dict: # enable "help"
self.options_dict[oname] = doc_options_dict[oname]
self.options_help[oname] = self.doc_options.options_help[oname][:3]
def show_options(self):
"""
Print available options on the CLI.
"""
if not self.show:
return
elif self.show == 'all':
print(_(" Available options:"))
for key in sorted(self.options_dict.keys()):
                # Make the output nicer to read, assume a tab has 8 spaces
                tabs = '\t\t' if len(key) < 10 else '\t'
                if key in self.options_help:
                    opt = self.options_help[key]
                    optmsg = "  %s%s%s (%s)" % (key, tabs, opt[1], opt[0])
                else:
                    optmsg = "  %s%s%s" % (key, tabs, _('(no help available)'))
print(optmsg)
print(_(" Use '%(donottranslate)s' to see description "
"and acceptable values"
) % {'donottranslate' : "show=option"})
elif self.show in self.options_help:
opt = self.options_help[self.show]
tabs = '\t\t' if len(self.show) < 10 else '\t'
print(_(" Available values are:"))
print(' %s%s%s (%s)' % (self.show, tabs, opt[1], opt[0]))
vals = opt[2]
if isinstance(vals, (list, tuple)):
for val in vals:
print(" %s" % val)
else:
print(" %s" % opt[2])
else:
#there was a show option given, but the option is invalid
print(_("option '%(optionname)s' not valid. "
"Use '%(donottranslate)s' to see all valid options."
) % {'optionname' : self.show,
'donottranslate' : "show=all"},
file=sys.stderr)
#------------------------------------------------------------------------
#
# Command-line report generic task
#
#------------------------------------------------------------------------
def cl_report(database, name, category, report_class, options_class,
options_str_dict):
"""
function to actually run the selected report
"""
err_msg = _("Failed to write report. ")
clr = CommandLineReport(database, name, category, options_class,
options_str_dict)
# Exit here if show option was given
if clr.show:
return
# write report
try:
if category in [CATEGORY_TEXT, CATEGORY_DRAW, CATEGORY_BOOK]:
if clr.doc_options:
clr.option_class.handler.doc = clr.format(
clr.selected_style,
PaperStyle(clr.paper, clr.orien, clr.marginl,
clr.marginr, clr.margint, clr.marginb),
clr.doc_options)
else:
clr.option_class.handler.doc = clr.format(
clr.selected_style,
PaperStyle(clr.paper, clr.orien, clr.marginl,
clr.marginr, clr.margint, clr.marginb))
elif category in [CATEGORY_GRAPHVIZ, CATEGORY_TREE]:
clr.option_class.handler.doc = clr.format(
clr.option_class,
PaperStyle(clr.paper, clr.orien, clr.marginl,
clr.marginr, clr.margint, clr.marginb))
if (clr.css_filename is not None
and hasattr(clr.option_class.handler.doc, 'set_css_filename')):
clr.option_class.handler.doc.set_css_filename(clr.css_filename)
my_report = report_class(database, clr.option_class, User())
my_report.doc.init()
my_report.begin_report()
my_report.write_report()
my_report.end_report()
return clr
except ReportError as msg:
(msg1, msg2) = msg.messages()
print(err_msg, file=sys.stderr)
print(msg1, file=sys.stderr)
if msg2:
print(msg2, file=sys.stderr)
except:
if len(LOG.handlers) > 0:
LOG.error(err_msg, exc_info=True)
else:
print(err_msg, file=sys.stderr)
## Something seems to eat the exception above.
## Hack to re-get the exception:
try:
raise
except:
traceback.print_exc()
def run_report(db, name, **options_str_dict):
"""
Given a database, run a given report.
db is a Db database
name is the name of a report
options_str_dict is the same kind of options
given at the command line. For example:
>>> run_report(db, "ancestor_report", off="txt",
of="ancestor-007.txt", pid="I37")
returns CommandLineReport (clr) if successfully runs the report,
None otherwise.
You can see:
options and values used in clr.option_class.options_dict
filename in clr.option_class.get_output()
"""
dbstate = DbState()
climanager = CLIManager(dbstate, False, User()) # don't load db
climanager.do_reg_plugins(dbstate, None)
pmgr = BasePluginManager.get_instance()
cl_list = pmgr.get_reg_reports()
clr = None
for pdata in cl_list:
if name == pdata.id:
mod = pmgr.load_plugin(pdata)
if not mod:
#import of plugin failed
return clr
category = pdata.category
report_class = getattr(mod, pdata.reportclass)
options_class = getattr(mod, pdata.optionclass)
if category in (CATEGORY_BOOK, CATEGORY_CODE):
options_class(db, name, category,
options_str_dict)
else:
clr = cl_report(db, name, category,
report_class, options_class,
options_str_dict)
return clr
return clr
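# Usage sketch, assuming an already-open database handle `db` and the stock
# "ancestor_report" plugin id (the same example given in the docstring above):
#
#   clr = run_report(db, "ancestor_report",
#                    off="txt", of="ancestor-007.txt", pid="I37")
#
# registers the CLI plugins, matches `name` against the registered report ids,
# runs the report through cl_report(), and returns the CommandLineReport, or
# None if the plugin cannot be loaded.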
#------------------------------------------------------------------------
#
# Function to write books from command line
#
#------------------------------------------------------------------------
def cl_book(database, name, book, options_str_dict):
"""
function to actually run the selected book,
which in turn runs whatever reports the book has in it
"""
clr = CommandLineReport(database, name, CATEGORY_BOOK,
ReportOptions, options_str_dict)
# Exit here if show option was given
if clr.show:
return
# write report
doc = clr.format(None,
PaperStyle(clr.paper, clr.orien, clr.marginl,
clr.marginr, clr.margint, clr.marginb))
user = User()
rptlist = []
selected_style = StyleSheet()
for item in book.get_item_list():
# The option values were loaded magically by the book parser.
# But they still need to be applied to the menu options.
opt_dict = item.option_class.options_dict
menu = item.option_class.menu
for optname in opt_dict:
menu_option = menu.get_option_by_name(optname)
if menu_option:
menu_option.set_value(opt_dict[optname])
item.option_class.set_document(doc)
report_class = item.get_write_item()
obj = (write_book_item(database,
report_class, item.option_class, user),
item.get_translated_name())
if obj:
append_styles(selected_style, item)
rptlist.append(obj)
doc.set_style_sheet(selected_style)
doc.open(clr.option_class.get_output())
doc.init()
newpage = 0
err_msg = _("Failed to make '%s' report.")
try:
for (rpt, name) in rptlist:
if newpage:
doc.page_break()
newpage = 1
rpt.begin_report()
rpt.write_report()
doc.close()
except ReportError as msg:
(msg1, msg2) = msg.messages()
print(err_msg % name, file=sys.stderr) # which report has the error?
print(msg1, file=sys.stderr)
if msg2:
print(msg2, file=sys.stderr)
#------------------------------------------------------------------------
#
# Generic task function for book
#
#------------------------------------------------------------------------
def write_book_item(database, report_class, options, user):
"""Write the report using options set.
All user dialog has already been handled and the output file opened."""
try:
return report_class(database, options, user)
except ReportError as msg:
(msg1, msg2) = msg.messages()
print("ReportError", msg1, msg2, file=sys.stderr)
except FilterError as msg:
(msg1, msg2) = msg.messages()
print("FilterError", msg1, msg2, file=sys.stderr)
except:
LOG.error("Failed to write book item.", exc_info=True)
return None
|
prculley/gramps
|
gramps/cli/plug/__init__.py
|
Python
|
gpl-2.0
| 35,372
|
[
"Brian"
] |
fced3ef4ba7adadc6a2ffbc7884b486d69951e194ea0b2295b007630d679ecf6
|
#!/usr/bin/env python
import json
import logging
import os
import string
import subprocess
import sys
import time
import uuid
from random import choice
from twisted.internet import reactor, defer
from twisted.internet.task import deferLater
from twisted.internet.defer import CancelledError
from twisted.python import log
from twisted.web import server, resource, http
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from twisted.web.static import File
from vtk.web import upload
try:
import argparse
except ImportError:
from vtk.util import _argparse as argparse
sample_config_file = """
Here is a sample of what a configuration file could look like:
{
## ===============================
## General launcher configuration
## ===============================
"configuration": {
"host" : "localhost",
"port" : 8080,
"endpoint": "paraview", # SessionManager Endpoint
"content": "/.../www", # Optional: Directory shared over HTTP
"proxy_file" : "/.../proxy-mapping.txt", # Proxy-Mapping file for Apache
"sessionURL" : "ws://${host}:${port}/ws", # ws url used by the client to connect to the started process
"timeout" : 25, # Wait time in second after process start
"log_dir" : "/.../viz-logs", # Directory for log files
"upload_dir" : "/.../data", # If launcher should act as upload server, where to put files
"fields" : ["file", "host", "port", "updir"] # List of fields that should be send back to client
},
## ===============================
## Useful session vars for client
## ===============================
"sessionData" : { "updir": "/Home" }, # Tells client which path to updateFileBrowser after uploads
## ===============================
## Resources list for applications
## ===============================
"resources" : [ { "host" : "localhost", "port_range" : [9001, 9003] } ],
## ===============================
## Set of properties for cmd line
## ===============================
"properties" : {
"vtkpython" : "/.../VTK/build/bin/vtkpython",
"pvpython" : "/.../ParaView/build/bin/pvpython",
"vtk_python_path": "/.../VTK/build/Wrapping/Python/vtk/web",
"pv_python_path": "/.../ParaView/build/lib/site-packages/paraview/web",
"plugins_path": "/.../ParaView/build/lib",
"dataDir": "/.../path/to/data/directory"
},
## ===============================
## Application list with cmd lines
## ===============================
"apps" : {
"cone" : {
"cmd" : [
"${vtkpython}", "${vtk_python_path}/vtk_web_cone.py", "--port", "$port" ],
"ready_line" : "Starting factory"
},
"graph" : {
"cmd" : [
"${vtkpython}", "${vtk_python_path}/vtk_web_graph.py", "--port", "$port",
"--vertices", "${numberOfVertices}", "--edges", "${numberOfEdges}" ],
"ready_line" : "Starting factory"
},
"phylotree" : {
"cmd" : [
"${vtkpython}", "${vtk_python_path}/vtk_web_phylogenetic_tree.py", "--port", "$port",
"--tree", "${dataDir}/visomics/${treeFile}", "--table", "${dataDir}/visomics/${tableFile}" ],
"ready_line" : "Starting factory"
},
"filebrowser" : {
"cmd" : [
"${vtkpython}", "${vtk_python_path}/vtk_web_filebrowser.py",
"--port", "${port}", "--data-dir", "${dataDir}" ],
"ready_line" : "Starting factory"
},
"data_prober": {
"cmd": [
"${pvpython}", "-dr", "${pv_python_path}/pv_web_data_prober.py",
"--port", "${port}", "--data-dir", "${dataDir}", "-f" ],
"ready_line" : "Starting factory"
},
"visualizer": {
"cmd": [
"${pvpython}", "-dr", "${pv_python_path}/pv_web_visualizer.py",
"--plugins", "${plugins_path}/libPointSprite_Plugin.so", "--port", "${port}",
"--data-dir", "${dataDir}", "--load-file", "${dataDir}/${fileToLoad}",
"--authKey", "${secret}", "-f" ],
"ready_line" : "Starting factory"
},
"loader": {
"cmd": [
"${pvpython}", "-dr", "${pv_python_path}/pv_web_file_loader.py",
"--port", "${port}", "--data-dir", "${dataDir}",
"--load-file", "${dataDir}/${fileToLoad}", "-f" ],
"ready_line" : "Starting factory"
},
"launcher" : {
"cmd": [
"/.../ParaView/Web/Applications/Parallel/server/launcher.sh",
"${port}", "${client}", "${resources}", "${file}" ],
"ready_line" : "Starting factory"
},
"your_app": {
"cmd": [
"your_shell_script.sh", "--resource-host", "${host}", "--resource-port", "${port}",
"--session-id", "${id}", "--generated-password", "${secret}",
"--application-key", "${application}" ],
"ready_line": "Output line from your shell script indicating process is ready"
}
}
"""
# =============================================================================
# Helper module methods
# =============================================================================
def generatePassword():
return ''.join(choice(string.letters + string.digits) for _ in xrange(16))
# -----------------------------------------------------------------------------
def validateKeySet(obj, expected_keys, object_name):
all_key_found = True
for key in expected_keys:
if not obj.has_key(key):
print "ERROR: %s is missing %s key." % (object_name, key)
all_key_found = False
return all_key_found
# -----------------------------------------------------------------------------
def replaceVariables(template_str, variable_list):
for key_pair in variable_list:
item_template = string.Template(template_str)
template_str = item_template.safe_substitute(key_pair)
if "$" in template_str:
logging.error("Some properties could not be resolved: " + template_str)
return template_str
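# Illustrative sketch (added comment, not part of the original module): the
# launcher resolves "${...}" placeholders by chaining
# string.Template.safe_substitute over a list of dictionaries, e.g.
#
#     replaceVariables("ws://${host}:${port}/ws",
#                      [{"host": "localhost"}, {"port": 9001}])
#     # -> "ws://localhost:9001/ws"
#
# Any placeholder that stays unresolved keeps its "$" and triggers the error
# log above.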
# -----------------------------------------------------------------------------
def replaceList(template_list, variable_list):
result_list = []
for str in template_list:
result_list.append(replaceVariables(str, variable_list))
return result_list
# -----------------------------------------------------------------------------
def filterResponse(obj, public_keys):
public_keys.extend(['id', 'sessionURL', 'sessionManagerURL'])
filtered_output = {}
for field in obj:
if field in public_keys:
filtered_output[field] = obj[field]
return filtered_output
# -----------------------------------------------------------------------------
def extractSessionId(request):
path = request.path.split('/')
if len(path) < 3:
return None
return str(path[2])
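# Illustrative sketch (added comment; the id below is hypothetical): for a
# request to "/paraview/5e3f7bda", request.path.split('/') yields
# ['', 'paraview', '5e3f7bda'], so the session id is the third element;
# paths with fewer than three components return None.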
# =============================================================================
# Session manager
# =============================================================================
class SessionManager(object):
def __init__(self, config, mapping):
self.sessions = {}
self.config = config
self.resources = ResourceManager(config["resources"])
self.mapping = mapping
def createSession(self, options):
# Assign id and store options
id = str(uuid.uuid1())
# Assign resource to session
host, port = self.resources.getNextResource()
# Do we have resources
if host:
options['id'] = id
options['host'] = host
options['port'] = port
if not options.has_key('secret'):
options['secret'] = generatePassword()
options['sessionURL'] = replaceVariables(self.config['configuration']['sessionURL'], [options, self.config['properties']])
options['cmd'] = replaceList(self.config['apps'][options['application']]['cmd'], [options, self.config['properties']])
if self.config.has_key('sessionData') :
for key in self.config['sessionData'] :
options[key] = replaceVariables(self.config['sessionData'][key], [options, self.config['properties']])
self.sessions[id] = options
self.mapping.update(self.sessions)
return options
return None
def deleteSession(self, id):
host = self.sessions[id]['host']
port = self.sessions[id]['port']
self.resources.freeResource(host, port)
del self.sessions[id]
self.mapping.update(self.sessions)
def getSession(self, id):
if self.sessions.has_key(id):
return self.sessions[id]
return None
# =============================================================================
# Proxy manager
# =============================================================================
class ProxyMappingManager(object):
    def update(self, sessions):
pass
class ProxyMappingManagerTXT(ProxyMappingManager):
def __init__(self, file_path, pattern="%s %s:%d\n"):
self.file_path = file_path
self.pattern = pattern
def update(self, sessions):
with open(self.file_path, "w") as map_file:
for id in sessions:
map_file.write(self.pattern % (id, sessions[id]['host'], sessions[id]['port']))
# =============================================================================
# Resource manager
# =============================================================================
class ResourceManager(object):
"""
    Class that provides methods to keep track of available resources (host/port)
"""
def __init__(self, resourceList):
self.resources = {}
for resource in resourceList:
host = resource['host']
portList = range(resource['port_range'][0],resource['port_range'][1]+1)
if self.resources.has_key(host):
self.resources[host]['available'].extend(portList)
else:
self.resources[host] = { 'available': portList, 'used': []}
def getNextResource(self):
"""
        Return a (host, port) pair if one is available, otherwise return (None, None)
"""
        # find host with max availability
        winner = None
        availabilityCount = 0
        for host in self.resources:
            if availabilityCount < len(self.resources[host]['available']):
                availabilityCount = len(self.resources[host]['available'])
                winner = host
if winner:
port = self.resources[winner]['available'].pop()
self.resources[winner]['used'].append(port)
return (winner, port)
return (None, None)
def freeResource(self, host, port):
"""
Free a previously reserved resource
"""
if self.resources.has_key(host) and port in self.resources[host]['used']:
self.resources[host]['used'].remove(port)
self.resources[host]['available'].append(port)
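    # Illustrative sketch (added comment; hosts and ports are hypothetical):
    #
    #     rm = ResourceManager([{"host": "localhost", "port_range": [9001, 9003]}])
    #     host, port = rm.getNextResource()   # e.g. ("localhost", 9003)
    #     rm.freeResource(host, port)         # port returns to 'available'
    #
    # getNextResource() always pops from the host with the most free ports, so
    # multiple hosts fill up evenly; (None, None) means everything is in use.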
# =============================================================================
# Process manager
# =============================================================================
class ProcessManager(object):
def __init__(self, configuration):
self.config = configuration
self.log_dir = configuration['configuration']['log_dir']
self.processes = {}
def __del__(self):
for id in self.processes:
self.processes[id].terminate()
def _getLogFilePath(self, id):
return "%s%s%s.txt" % (self.log_dir, os.sep, id)
def startProcess(self, session):
proc = None
# Create output log file
logFilePath = self._getLogFilePath(session['id'])
with open(logFilePath, "a+", 0) as log_file:
try:
proc = subprocess.Popen(session['cmd'], stdout=log_file, stderr=log_file)
self.processes[session['id']] = proc
except:
logging.error("The command line failed")
logging.error(' '.join(map(str, session['cmd'])))
return None
return proc
def stopProcess(self, id):
proc = self.processes[id]
del self.processes[id]
try:
proc.terminate()
except:
pass # we tried
def listEndedProcess(self):
session_to_release = []
for id in self.processes:
if self.processes[id].poll() is not None:
session_to_release.append(id)
return session_to_release
def isRunning(self, id):
return self.processes[id].poll() is None
# ========================================================================
# Look for ready line in process output. Return True if found, False
# otherwise. If no ready_line is configured and process is running return
    # False. This will then rely on the timeout.
# ========================================================================
def isReady(self, session, count = 0):
id = session['id']
# The process has to be running to be ready!
if not self.isRunning(id) and count < 60:
return False
# Give up after 60 seconds if still not running
if not self.isRunning(id):
return True
application = self.config['apps'][session['application']]
ready_line = application.get('ready_line', None)
        # If no ready_line is configured and the process is running then that's
# enough.
if not ready_line:
return False
ready = False
# Check the output for ready_line
logFilePath = self._getLogFilePath(session['id'])
with open(logFilePath, "r", 0) as log_file:
for line in log_file.readlines():
if ready_line in line:
ready = True
break
return ready
# ===========================================================================
# Class to implement requests to POST, GET and DELETE methods
# ===========================================================================
class LauncherResource(resource.Resource, object):
def __init__(self, options, config):
super(LauncherResource, self).__init__()
self._options = options
self._config = config
self.time_to_wait = int(config['configuration']['timeout'])
self.field_filter = config['configuration']['fields']
self.session_manager = SessionManager(config,ProxyMappingManagerTXT(config['configuration']['proxy_file']))
self.process_manager = ProcessManager(config)
def getChild(self, path, request):
return self
def __del__(self):
logging.warning("Server factory shutting down. Stopping all processes")
# ========================================================================
# Handle POST request
# ========================================================================
def render_POST(self, request):
payload = json.loads(request.content.getvalue())
# Make sure the request has all the expected keys
if not validateKeySet(payload, ["application"], "Launch request"):
request.setResponseCode(http.BAD_REQUEST)
return json.dumps({"error": "The request is not complete"})
# Try to free any available resource
id_to_free = self.process_manager.listEndedProcess()
for id in id_to_free:
self.session_manager.deleteSession(id)
self.process_manager.stopProcess(id)
# Create new session
session = self.session_manager.createSession(payload)
# No resource available
if not session:
request.setResponseCode(http.SERVICE_UNAVAILABLE)
return json.dumps({"error": "All the resources are currently taken"})
# Start process
proc = self.process_manager.startProcess(session)
if not proc:
request.setResponseCode(http.SERVICE_UNAVAILABLE)
return json.dumps({"error": "The process did not properly start. %s" % str(session['cmd'])})
# local function to act as errback for Deferred objects.
def errback(error):
# Filter out CancelledError and propagate rest
if error.type != CancelledError:
return error
# Deferred object set to timeout request if process doesn't start in time
timeout_deferred = deferLater(reactor, self.time_to_wait, lambda: request)
timeout_deferred.addCallback(self._delayedRenderTimeout, session)
timeout_deferred.addErrback(errback)
# Make sure other deferred is canceled once one has been fired
request.notifyFinish().addCallback(lambda x: timeout_deferred.cancel())
# If a ready_line is configured create a Deferred object to wait for
# ready line to be produced
if 'ready_line' in self._config['apps'][session['application']]:
ready_deferred = self._waitForReady(session, request)
ready_deferred.addCallback(self._delayedRenderReady, session)
ready_deferred.addErrback(errback)
# Make sure other deferred is canceled once one has been fired
request.notifyFinish().addCallback(lambda x: ready_deferred.cancel())
return NOT_DONE_YET
# ========================================================================
# Wait for session to be ready. Rather than blocking keep using callLater(...)
# to schedule self in reactor. Return a Deferred object whose callback will
# be triggered when the session is ready
# ========================================================================
def _waitForReady(self, session, request, count=0, d=None):
if not d:
d = defer.Deferred()
if not 'startTimedOut' in session and \
not self.process_manager.isReady(session, count + 1):
reactor.callLater(1, self._waitForReady, session, request, count + 1, d)
else:
d.callback(request)
return d
# ========================================================================
    # Called when the timeout expires. Check if process is now ready
# and send response to client.
# ========================================================================
def _delayedRenderTimeout(self, request, session):
ready = self.process_manager.isReady(session, 0)
if ready:
request.write(json.dumps(filterResponse(session, self.field_filter)))
request.setResponseCode(http.OK)
else:
request.write(json.dumps({"error": "Session did not start before timeout expired. Check session logs."}))
# Mark the session as timed out and clean up the process
session['startTimedOut'] = True
self.session_manager.deleteSession(session['id'])
self.process_manager.stopProcess(session['id'])
request.setResponseCode(http.SERVICE_UNAVAILABLE)
request.finish()
# ========================================================================
# Called when the process is ready ( the ready line has been read from the
# process output).
# ========================================================================
def _delayedRenderReady(self, request, session):
filterkeys = self.field_filter
if session['secret'] in session['cmd']:
filterkeys = self.field_filter + [ 'secret' ]
request.write(json.dumps(filterResponse(session, filterkeys)))
request.setResponseCode(http.OK)
request.finish()
# =========================================================================
# Handle GET request
# =========================================================================
def render_GET(self, request):
id = extractSessionId(request)
if not id:
message = "id not provided in GET request"
logging.error(message)
request.setResponseCode(http.BAD_REQUEST)
return json.dumps({"error":message})
logging.info("GET request received for id: %s" % id)
session = self.session_manager.getSession(id)
if not session:
message = "No session with id: %s" % id
logging.error(message)
request.setResponseCode(http.NOT_FOUND)
return json.dumps({"error":message})
# Return session meta-data
request.setResponseCode(http.OK)
return json.dumps(filterResponse(session, self.field_filter))
# =========================================================================
# Handle DELETE request
# =========================================================================
def render_DELETE(self, request):
id = extractSessionId(request)
if not id:
message = "id not provided in DELETE request"
logging.error(message)
request.setResponseCode(http.BAD_REQUEST)
return json.dumps({"error":message})
logging.info("DELETE request received for id: %s" % id)
session = self.session_manager.getSession(id)
if not session:
message = "No session with id: %s" % id
logging.error(message)
request.setResponseCode(http.NOT_FOUND)
return json.dumps({"error":message})
# Remove session
self.session_manager.deleteSession(id)
self.process_manager.stopProcess(id)
message = "Deleted session with id: %s" % id
logging.info(message)
request.setResponseCode(http.OK)
        return json.dumps(session)
# =============================================================================
# Start the web server
# =============================================================================
def startWebServer(options, config):
# Extract properties from config
log_dir = str(config["configuration"]["log_dir"])
content = str(config["configuration"]["content"])
endpoint = str(config["configuration"]["endpoint"])
host = str(config["configuration"]["host"])
port = int(config["configuration"]["port"])
# Setup logging
logFileName = log_dir + os.sep + "launcherLog.log"
formatting = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
logging.basicConfig(level=logging.DEBUG, filename=logFileName, filemode='w', format=formatting)
observer = log.PythonLoggingObserver()
observer.start()
if options.debug:
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
formatter = logging.Formatter(formatting)
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
# Initialize web resource
web_resource = File(content) if (len(content) > 0) else resource.Resource()
# Attach launcher
web_resource.putChild(endpoint, LauncherResource(options, config))
# Check if launcher should act as a file upload server as well
if config["configuration"].has_key("upload_dir"):
from upload import UploadPage
updir = replaceVariables(config['configuration']['upload_dir'], [config['properties']])
uploadResource = UploadPage(updir)
web_resource.putChild("upload", uploadResource)
site = server.Site(web_resource)
reactor.listenTCP(port, site, interface=host)
reactor.run()
# =============================================================================
# Parse config file
# =============================================================================
def parseConfig(options):
# Read values from the configuration file
try:
config = json.loads(open(options.config[0]).read())
except:
message = "ERROR: Unable to read config file.\n"
message += str(sys.exc_info()[1]) + "\n" + str(sys.exc_info()[2])
print message
print sample_config_file
sys.exit(2)
expected_keys = ["configuration", "apps", "properties", "resources"]
if not validateKeySet(config, expected_keys, "Config file"):
print sample_config_file
sys.exit(2)
expected_keys = ["endpoint", "host", "port", "proxy_file", "sessionURL", "timeout", "log_dir", "fields"]
if not validateKeySet(config["configuration"], expected_keys, "file.configuration"):
print sample_config_file
sys.exit(2)
if not config["configuration"].has_key("content"):
config["configuration"]["content"] = ""
return config
# =============================================================================
# Setup default arguments to be parsed
# -d, --debug
# -t, --proxyFileType Type of proxy file (txt, dbm)
# =============================================================================
def add_arguments(parser):
parser.add_argument("config", type=str, nargs=1,
help="configuration file for the launcher")
parser.add_argument("-d", "--debug",
help="log debugging messages to stdout",
action="store_true")
return parser
# =============================================================================
# Parse arguments
# =============================================================================
def start(argv=None,
description="VTKWeb Launcher"):
parser = argparse.ArgumentParser(description=description)
add_arguments(parser)
args = parser.parse_args(argv)
config = parseConfig(args)
startWebServer(args, config)
# =============================================================================
# Main
# =============================================================================
if __name__ == "__main__":
start()
|
msmolens/VTK
|
Web/Python/vtk/web/launcher.py
|
Python
|
bsd-3-clause
| 26,514
|
[
"ParaView",
"VTK"
] |
7238102393d60b7e170282939206e77b25f07abe2241250491140154ffec7ac6
|
#!/usr/bin/env python
'''
The expressions module contains classes to represent an expression. The main
class is ExpressionNode. ExpressionNode's most useful method is py_string(),
which returns a Python string representing that expression.
'''
import sys
from ctypedescs import *
import keyword
import warnings
# Right now, the objects in this module are all oriented toward evaluation.
# However, they don't have to be, since ctypes objects are mutable. For example,
# shouldn't it be possible to translate the macro:
#
# #define INCREMENT(x) ++x
#
# into Python? The resulting code should be:
#
# def INCREMENT(x):
# x.value+=1
# return x.value
#
# On the other hand, this would be a challenge to write.
class EvaluationContext(object):
'''Interface for evaluating expression nodes.
'''
def evaluate_identifier(self, name):
warnings.warn('Attempt to evaluate identifier "%s" failed' % name)
return 0
def evaluate_sizeof(self, type):
warnings.warn('Attempt to evaluate sizeof "%s" failed' % str(type))
return 0
    def evaluate_sizeof_object(self, object):
warnings.warn('Attempt to evaluate sizeof object "%s" failed' % str(object))
return 0
def evaluate_parameter(self, name):
warnings.warn('Attempt to evaluate parameter "%s" failed' % name)
return 0
class ExpressionNode(object):
def __init__(self):
self.errors = []
def error(self,message,cls = None):
self.errors.append((message,cls))
def __repr__(self):
try:
string = repr(self.py_string(True))
except ValueError:
string = "<error in expression node>"
return "<ExpressionNode: %s>" % string
def visit(self,visitor):
for error,cls in self.errors:
visitor.visit_error(error,cls)
class ConstantExpressionNode(ExpressionNode):
def __init__(self, value):
ExpressionNode.__init__(self)
self.value = value
def evaluate(self, context):
return self.value
def py_string(self, can_be_ctype):
if sys.platform != 'win32' or (sys.platform == 'win32' and sys.version_info >= (2, 6)):
# Windows python did not get infinity support until 2.6
if self.value == float('inf'):
return "float('inf')"
elif self.value == float('-inf'):
return "float('-inf')"
return repr(self.value)
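    # Added note (not in the original source): repr(float('inf')) is just
    # 'inf', which is not a valid Python literal, hence the explicit
    # "float('inf')" spelling above; finite numbers round-trip safely
    # through repr().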
class IdentifierExpressionNode(ExpressionNode):
def __init__(self, name):
ExpressionNode.__init__(self)
self.name = name
def evaluate(self, context):
return context.evaluate_identifier(self.name)
def visit(self, visitor):
visitor.visit_identifier(self.name)
ExpressionNode.visit(self,visitor)
def py_string(self, can_be_ctype):
# Errors will be thrown in generated code if identifier evaluates
# to a ctypes object, and can_be_ctype is False.
return self.name
class ParameterExpressionNode(ExpressionNode):
def __init__(self, name):
ExpressionNode.__init__(self)
self.name = name
def evaluate(self, context):
return context.evaluate_parameter(self.name)
def visit(self, visitor):
ExpressionNode.visit(self,visitor)
def py_string(self, can_be_ctype):
# Errors will be thrown in generated code if parameter is
# a ctypes object, and can_be_ctype is False.
return self.name
class UnaryExpressionNode(ExpressionNode):
def __init__(self, name, op, format, child_can_be_ctype, child):
ExpressionNode.__init__(self)
self.name = name
self.op = op
self.format = format
self.child_can_be_ctype = child_can_be_ctype
self.child = child
def visit(self, visitor):
self.child.visit(visitor)
ExpressionNode.visit(self,visitor)
def evaluate(self, context):
if self.op:
return self.op(self.child.evaluate(context))
else:
raise ValueError("The C operator \"%s\" can't be evaluated right " \
"now" % self.name)
def py_string(self, can_be_ctype):
return self.format % \
self.child.py_string(self.child_can_be_ctype and can_be_ctype)
class SizeOfExpressionNode(ExpressionNode):
def __init__(self, child):
ExpressionNode.__init__(self)
self.child = child
def visit(self, visitor):
self.child.visit(visitor)
ExpressionNode.visit(self,visitor)
def evaluate(self, context):
if isinstance(self.child, CtypesType):
return context.evaluate_sizeof(self.child)
else:
return context.evaluate_sizeof_object(self.child)
def py_string(self, can_be_ctype):
if isinstance(self.child, CtypesType):
return 'sizeof(%s)' % self.child.py_string()
else:
return 'sizeof(%s)' % self.child.py_string(True)
class BinaryExpressionNode(ExpressionNode):
def __init__(self, name, op, format, can_be_ctype, left, right):
ExpressionNode.__init__(self)
self.name = name
self.op = op
self.format = format
self.can_be_ctype = can_be_ctype
self.left = left
self.right = right
def visit(self, visitor):
self.left.visit(visitor)
self.right.visit(visitor)
ExpressionNode.visit(self,visitor)
def evaluate(self, context):
if self.op:
return self.op(self.left.evaluate(context),
self.right.evaluate(context))
else:
raise ValueError("The C operator \"%s\" can't be evaluated right " \
"now" % self.name)
def py_string(self, can_be_ctype):
return self.format % \
(self.left.py_string(self.can_be_ctype[0] and can_be_ctype),
             self.right.py_string(self.can_be_ctype[1] and can_be_ctype))
class ConditionalExpressionNode(ExpressionNode):
def __init__(self, cond, yes, no):
ExpressionNode.__init__(self)
self.cond = cond
self.yes = yes
self.no = no
def visit(self, visitor):
self.cond.visit(visitor)
self.yes.visit(visitor)
self.no.visit(visitor)
ExpressionNode.visit(self,visitor)
def evaluate(self, context):
if self.cond.evaluate(context):
return self.yes.evaluate(context)
else:
return self.no.evaluate(context)
def py_string(self, can_be_ctype):
return "%s and %s or %s" % \
(self.cond.py_string(True),
self.yes.py_string(can_be_ctype),
self.no.py_string(can_be_ctype))
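    # Added note (not in the original source): the "cond and yes or no" idiom
    # above predates Python's conditional expression and picks "no" whenever
    # "yes" is falsy, e.g. (1 and 0 or 2) == 2 although the C expression
    # 1 ? 0 : 2 evaluates to 0.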
class AttributeExpressionNode(ExpressionNode):
def __init__(self, op, format, base, attribute):
ExpressionNode.__init__(self)
self.op = op
self.format = format
self.base = base
self.attribute = attribute
# Attribute access will raise parse errors if you don't do this.
# Fortunately, the processor module does the same thing to
# the struct member name.
if self.attribute in keyword.kwlist:
self.attribute = "_"+self.attribute
def visit(self,visitor):
self.base.visit(visitor)
ExpressionNode.visit(self,visitor)
def evaluate(self, context):
        return self.op(self.base.evaluate(context), self.attribute)
def py_string(self, can_be_ctype):
if can_be_ctype:
return self.format % (self.base.py_string(can_be_ctype),
self.attribute)
else:
return "(%s.value)" % (self.format % \
(self.base.py_string(can_be_ctype), self.attribute))
class CallExpressionNode(ExpressionNode):
def __init__(self,function,arguments):
ExpressionNode.__init__(self)
self.function = function
self.arguments = arguments
def visit(self,visitor):
self.function.visit(visitor)
for arg in self.arguments:
arg.visit(visitor)
ExpressionNode.visit(self,visitor)
def evaluate(self,context):
arguments = [arg.evaluate(context) for arg in self.arguments]
return self.function.evaluate(context)(*arguments)
def py_string(self, can_be_ctype):
function = self.function.py_string(can_be_ctype)
arguments = [x.py_string(can_be_ctype) for x in self.arguments]
if can_be_ctype:
return '(%s (%s))' % (function,", ".join(arguments))
else:
return '((%s (%s)).value)' % (function,", ".join(arguments))
# There seems not to be any reasonable way to translate C typecasts
# into Python. Ctypesgen doesn't try, except for the special case of NULL.
class TypeCastExpressionNode(ExpressionNode):
def __init__(self, base, ctype):
ExpressionNode.__init__(self)
self.base = base
self.ctype = ctype
self.isnull = isinstance(ctype, CtypesPointer) and \
isinstance(base, ConstantExpressionNode) and \
base.value == 0
def visit(self,visitor):
# No need to visit ctype because it isn't actually used
self.base.visit(visitor)
ExpressionNode.visit(self,visitor)
def evaluate(self,context):
if self.isnull:
return None
else:
return self.base.evaluate(context)
def py_string(self, can_be_ctype):
if self.isnull:
return "None"
else:
return self.base.py_string(can_be_ctype)
class UnsupportedExpressionNode(ExpressionNode):
def __init__(self,message):
ExpressionNode.__init__(self)
self.message = message
self.error(message,'unsupported-type')
def evaluate(self,context):
raise ValueError("Tried to evaluate an unsupported expression " \
"node: %s" % self.message)
def __repr__(self):
return "<UnsupportedExpressionNode>"
def py_string(self, can_be_ctype):
raise ValueError("Called py_string() an unsupported expression " \
"node: %s" % self.message)
|
pombredanne/ctypesgen
|
ctypesgencore/expressions.py
|
Python
|
bsd-3-clause
| 10,115
|
[
"VisIt"
] |
e44c638f02b2ce6cf26e020b60fc509a38694643364028156d2b730e1ac78df3
|
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Index.py
This module provides a way to create indexes to text files.
Classes:
Index Dictionary-like class used to store index information.
_ShelveIndex An Index class based on the shelve module.
_InMemoryIndex An in-memory Index class.
"""
import os
import array
import cPickle
import shelve
class _ShelveIndex(dict):
"""An index file wrapped around shelve.
"""
# Without a good dbm module installed, this is pretty slow and
# generates large files. When generating an index on a FASTA-
# formatted file with 82000 sequences (37Mb), the
# index 'dat' file is 42Mb and 'dir' file is 8Mb.
__version = 2
__version_key = '__version'
def __init__(self, indexname, truncate=None):
dict.__init__(self)
try:
if truncate:
# In python 1.52 and before, dumbdbm (under shelve)
# doesn't clear the old database.
files = [indexname + '.dir',
indexname + '.dat',
indexname + '.bak'
]
for file in files:
if os.path.exists(file):
os.unlink(file)
raise Exception("open a new shelf")
self.data = shelve.open(indexname, flag='r')
except:
# No database exists.
self.data = shelve.open(indexname, flag='n')
self.data[self.__version_key] = self.__version
else:
# Check to make sure the database is the correct version.
version = self.data.get(self.__version_key, None)
if version is None:
raise IOError("Unrecognized index format")
elif version != self.__version:
raise IOError("Version %s doesn't match my version %s" \
% (version, self.__version))
def __del__(self):
if self.__dict__.has_key('data'):
self.data.close()
class _InMemoryIndex(dict):
"""This creates an in-memory index file.
"""
# File Format:
# version
# key value
# [...]
__version = 3
__version_key = '__version'
def __init__(self, indexname, truncate=None):
self._indexname = indexname
dict.__init__(self)
self.__changed = 0 # the index hasn't changed
# Remove the database if truncate is true.
if truncate and os.path.exists(indexname):
os.unlink(indexname)
self.__changed = 1
# Load the database if it exists
if os.path.exists(indexname):
handle = open(indexname)
version = self._toobj(handle.readline().rstrip())
if version != self.__version:
raise IOError("Version %s doesn't match my version %s" \
% (version, self.__version))
for line in handle:
key, value = line.split()
key, value = self._toobj(key), self._toobj(value)
self[key] = value
self.__changed = 0
    def update(self, other):
        self.__changed = 1
        dict.update(self, other)
def __setitem__(self, key, value):
self.__changed = 1
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self.__changed = 1
dict.__delitem__(self, key)
def clear(self):
self.__changed = 1
dict.clear(self)
def __del__(self):
if self.__changed:
handle = open(self._indexname, 'w')
handle.write("%s\n" % self._tostr(self.__version))
for key, value in self.items():
handle.write("%s %s\n" %
(self._tostr(key), self._tostr(value)))
handle.close()
def _tostr(self, obj):
# I need a representation of the object that's saveable to
# a file that uses whitespace as delimiters. Thus, I'm
# going to pickle the object, and then convert each character of
# the string to its ASCII integer value. Then, I'm going to convert
# the integers into strings and join them together with commas.
# It's not the most efficient way of storing things, but it's
# relatively fast.
s = cPickle.dumps(obj)
intlist = array.array('b', s)
strlist = map(str, intlist)
return ','.join(strlist)
def _toobj(self, str):
intlist = map(int, str.split(','))
intlist = array.array('b', intlist)
strlist = map(chr, intlist)
return cPickle.loads(''.join(strlist))
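    # Illustrative round trip (added comment; the key/value pair is made up):
    #
    #     s = self._tostr(('gi|123', 42))   # comma-joined byte values, e.g. "40,83,..."
    #     self._toobj(s)                    # -> ('gi|123', 42)
    #
    # Because the encoded form contains no whitespace, __init__ can recover
    # key and value with a plain line.split().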
Index = _InMemoryIndex
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Index.py
|
Python
|
gpl-2.0
| 4,883
|
[
"Biopython"
] |
17edaab3112425b80c75cb18918b0a5b44796115083644872bb5ca42ce395b06
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Generalized Hartree-Fock for periodic systems at a single k-point
'''
import numpy as np
import scipy.linalg
import pyscf.scf.ghf as mol_ghf
from pyscf.pbc.scf import hf as pbchf
from pyscf.pbc.scf import addons
from pyscf.pbc.scf import chkfile # noqa
def get_jk(mf, cell=None, dm=None, hermi=0, kpt=None, kpts_band=None,
with_j=True, with_k=True, **kwargs):
if cell is None: cell = mf.cell
if dm is None: dm = mf.make_rdm1()
if kpt is None: kpt = mf.kpt
dm = np.asarray(dm)
nso = dm.shape[-1]
nao = nso // 2
dms = dm.reshape(-1,nso,nso)
n_dm = dms.shape[0]
dmaa = dms[:,:nao,:nao]
dmab = dms[:,nao:,:nao]
dmbb = dms[:,nao:,nao:]
dms = np.vstack((dmaa, dmbb, dmab))
j1, k1 = mf.with_df.get_jk(dms, hermi, kpt, kpts_band, with_j, with_k,
exxdiv=mf.exxdiv)
j1 = j1.reshape(3,n_dm,nao,nao)
k1 = k1.reshape(3,n_dm,nao,nao)
vj = vk = None
if with_j:
vj = np.zeros((n_dm,nso,nso), j1.dtype)
vj[:,:nao,:nao] = vj[:,nao:,nao:] = j1[0] + j1[1]
vj = vj.reshape(dm.shape)
if with_k:
vk = np.zeros((n_dm,nso,nso), k1.dtype)
vk[:,:nao,:nao] = k1[0]
vk[:,nao:,nao:] = k1[1]
vk[:,:nao,nao:] = k1[2]
vk[:,nao:,:nao] = k1[2].transpose(0,2,1).conj()
vk = vk.reshape(dm.shape)
return vj, vk
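# Illustrative sketch (added comment, not part of the original module; the cell
# and k-point setup mirror the __main__ block below):
#
#     mf = GHF(cell, kpt=kpts[7])
#     dm = mf.get_init_guess(key='minao')   # (2*nao, 2*nao) spin-blocked matrix
#     vj, vk = mf.get_jk(dm=dm)             # same spin-blocked shape
#
# get_jk slices dm into alpha-alpha, beta-beta and mixed-spin blocks so the
# density-fitting code only sees nao x nao matrices; vj is built from the spin
# trace j_aa + j_bb and the remaining off-diagonal block of vk is recovered by
# conjugate transposition.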
class GHF(pbchf.SCF, mol_ghf.GHF):
'''GHF class for PBCs.
'''
def get_hcore(self, cell=None, kpt=None):
hcore = pbchf.SCF.get_hcore(self, cell, kpt)
return scipy.linalg.block_diag(hcore, hcore)
def get_ovlp(self, cell=None, kpt=None):
s = pbchf.SCF.get_ovlp(self, cell, kpt)
return scipy.linalg.block_diag(s, s)
get_jk = get_jk
get_occ = mol_ghf.get_occ
get_grad = mol_ghf.GHF.get_grad
def get_j(self, cell=None, dm=None, hermi=0, kpt=None, kpts_band=None,
**kwargs):
return self.get_jk(cell, dm, hermi, kpt, kpts_band, True, False)[0]
def get_k(self, cell=None, dm=None, hermi=0, kpt=None, kpts_band=None,
**kwargs):
return self.get_jk(cell, dm, hermi, kpt, kpts_band, False, True)[1]
def get_veff(self, cell=None, dm=None, dm_last=0, vhf_last=0, hermi=1,
kpt=None, kpts_band=None):
vj, vk = self.get_jk(cell, dm, hermi, kpt, kpts_band, True, True)
vhf = vj - vk
return vhf
def get_bands(self, kpts_band, cell=None, dm=None, kpt=None):
'''Get energy bands at the given (arbitrary) 'band' k-points.
Returns:
mo_energy : (nmo,) ndarray or a list of (nmo,) ndarray
Bands energies E_n(k)
mo_coeff : (nao, nmo) ndarray or a list of (nao,nmo) ndarray
Band orbitals psi_n(k)
'''
raise NotImplementedError
def get_init_guess(self, cell=None, key='minao'):
if cell is None: cell = self.cell
dm = mol_ghf.GHF.get_init_guess(self, cell, key)
dm = pbchf.normalize_dm_(self, dm)
return dm
def convert_from_(self, mf):
        '''Convert given mean-field object to GHF'''
addons.convert_to_ghf(mf, self)
return self
stability = None
nuc_grad_method = None
if __name__ == '__main__':
from pyscf.pbc import gto
from pyscf.pbc import scf
cell = gto.Cell()
cell.atom = '''
H 0 0 0
H 1 0 0
H 0 1 0
H 0 1 1
'''
cell.a = np.eye(3)*2
cell.basis = [[0, [1.2, 1]]]
cell.verbose = 4
cell.build()
kpts = cell.make_kpts([2,2,2])
mf = scf.RHF(cell, kpt=kpts[7]).run()
mf = GHF(cell, kpt=kpts[7])
mf.kernel()
|
sunqm/pyscf
|
pyscf/pbc/scf/ghf.py
|
Python
|
apache-2.0
| 4,343
|
[
"PySCF"
] |
9a38def7f301a18d1b3fd11a0fad78a2145a40eed3e6cd1f16e31e67ed787c55
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
from itertools import product
from fractions import Fraction
from abc import ABCMeta, abstractmethod
from collections.abc import Sequence
import numpy as np
import warnings
import re
from monty.serialization import loadfn
from pymatgen.core.operations import SymmOp
from monty.design_patterns import cached_class
"""
Defines SymmetryGroup parent class and PointGroup and SpaceGroup classes.
Shyue Ping Ong thanks Marc De Graef for his generous sharing of his
SpaceGroup data as published in his textbook "Structure of Materials".
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2013, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "4/4/14"
SYMM_DATA = None
def get_symm_data(name):
global SYMM_DATA
if SYMM_DATA is None:
SYMM_DATA = loadfn(os.path.join(os.path.dirname(__file__),
"symm_data.json"))
return SYMM_DATA[name]
class SymmetryGroup(Sequence):
__metaclass__ = ABCMeta
@property
@abstractmethod
def symmetry_ops(self):
pass
def __contains__(self, item):
for i in self.symmetry_ops:
if np.allclose(i.affine_matrix, item.affine_matrix):
return True
return False
def __hash__(self):
return self.__len__()
def __getitem__(self, item):
return self.symmetry_ops[item]
def __len__(self):
return len(self.symmetry_ops)
def is_subgroup(self, supergroup):
"""
True if this group is a subgroup of the supplied group.
Args:
supergroup (SymmetryGroup): Supergroup to test.
Returns:
True if this group is a subgroup of the supplied group.
"""
warnings.warn("This is not fully functional. Only trivial subsets are tested right now. ")
return set(self.symmetry_ops).issubset(supergroup.symmetry_ops)
def is_supergroup(self, subgroup):
"""
True if this group is a supergroup of the supplied group.
Args:
subgroup (SymmetryGroup): Subgroup to test.
Returns:
True if this group is a supergroup of the supplied group.
"""
warnings.warn("This is not fully functional. Only trivial subsets are "
"tested right now. ")
return set(subgroup.symmetry_ops).issubset(self.symmetry_ops)
@cached_class
class PointGroup(SymmetryGroup):
"""
Class representing a Point Group, with generators and symmetry operations.
.. attribute:: symbol
Full International or Hermann-Mauguin Symbol.
.. attribute:: generators
List of generator matrices. Note that 3x3 matrices are used for Point
Groups.
.. attribute:: symmetry_ops
Full set of symmetry operations as matrices.
"""
def __init__(self, int_symbol):
"""
Initializes a Point Group from its international symbol.
Args:
int_symbol (str): International or Hermann-Mauguin Symbol.
"""
self.symbol = int_symbol
self.generators = [get_symm_data("generator_matrices")[c]
for c in get_symm_data("point_group_encoding")[int_symbol]]
self._symmetry_ops = set([SymmOp.from_rotation_and_translation(m)
for m in self._generate_full_symmetry_ops()])
self.order = len(self._symmetry_ops)
@property
def symmetry_ops(self):
return self._symmetry_ops
def _generate_full_symmetry_ops(self):
symm_ops = list(self.generators)
new_ops = self.generators
while len(new_ops) > 0:
gen_ops = []
for g1, g2 in product(new_ops, symm_ops):
op = np.dot(g1, g2)
if not in_array_list(symm_ops, op):
gen_ops.append(op)
symm_ops.append(op)
new_ops = gen_ops
return symm_ops
def get_orbit(self, p, tol=1e-5):
"""
Returns the orbit for a point.
Args:
p: Point as a 3x1 array.
tol: Tolerance for determining if sites are the same. 1e-5 should
be sufficient for most purposes. Set to 0 for exact matching
(and also needed for symbolic orbits).
Returns:
([array]) Orbit for point.
"""
orbit = []
for o in self.symmetry_ops:
pp = o.operate(p)
if not in_array_list(orbit, pp, tol=tol):
orbit.append(pp)
return orbit
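    # Illustrative sketch (added comment; the counts follow from the group order):
    #
    #     pg = PointGroup("m-3m")               # order 48
    #     len(pg.get_orbit([0.1, 0.0, 0.0]))    # -> 6  (+/-x, +/-y, +/-z)
    #     len(pg.get_orbit([0.1, 0.2, 0.3]))    # -> 48 (general position)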
@cached_class
class SpaceGroup(SymmetryGroup):
"""
Class representing a SpaceGroup.
.. attribute:: symbol
Full International or Hermann-Mauguin Symbol.
.. attribute:: int_number
International number
.. attribute:: generators
List of generator matrices. Note that 4x4 matrices are used for Space
Groups.
.. attribute:: order
Order of Space Group
"""
SYMM_OPS = loadfn(os.path.join(os.path.dirname(__file__),
"symm_ops.json"))
SG_SYMBOLS = set(get_symm_data("space_group_encoding").keys())
for op in SYMM_OPS:
op["hermann_mauguin"] = re.sub(r" ", "", op["hermann_mauguin"])
op["universal_h_m"] = re.sub(r" ", "", op["universal_h_m"])
SG_SYMBOLS.add(op["hermann_mauguin"])
SG_SYMBOLS.add(op["universal_h_m"])
gen_matrices = get_symm_data("generator_matrices")
# POINT_GROUP_ENC = SYMM_DATA["point_group_encoding"]
sgencoding = get_symm_data("space_group_encoding")
abbrev_sg_mapping = get_symm_data("abbreviated_spacegroup_symbols")
translations = {k: Fraction(v) for k, v in get_symm_data(
"translations").items()}
full_sg_mapping = {
v["full_symbol"]: k
for k, v in get_symm_data("space_group_encoding").items()}
def __init__(self, int_symbol):
"""
Initializes a Space Group from its full or abbreviated international
symbol. Only standard settings are supported.
Args:
int_symbol (str): Full International (e.g., "P2/m2/m2/m") or
Hermann-Mauguin Symbol ("Pmmm") or abbreviated symbol. The
notation is a LaTeX-like string, with screw axes being
represented by an underscore. For example, "P6_3/mmc".
                Alternative settings can be accessed by adding a ":identifier".
For example, the hexagonal setting for rhombohedral cells can be
accessed by adding a ":H", e.g., "R-3m:H". To find out all
possible settings for a spacegroup, use the get_settings
classmethod. Alternative origin choices can be indicated by a
translation vector, e.g., 'Fm-3m(a-1/4,b-1/4,c-1/4)'.
"""
int_symbol = re.sub(r" ", "", int_symbol)
if int_symbol in SpaceGroup.abbrev_sg_mapping:
int_symbol = SpaceGroup.abbrev_sg_mapping[int_symbol]
elif int_symbol in SpaceGroup.full_sg_mapping:
int_symbol = SpaceGroup.full_sg_mapping[int_symbol]
for spg in SpaceGroup.SYMM_OPS:
if int_symbol in [spg["hermann_mauguin"], spg["universal_h_m"]]:
ops = [SymmOp.from_xyz_string(s) for s in spg["symops"]]
self.symbol = re.sub(r":", "",
re.sub(r" ", "", spg["universal_h_m"]))
if int_symbol in SpaceGroup.sgencoding:
self.full_symbol = SpaceGroup.sgencoding[int_symbol]["full_symbol"]
self.point_group = SpaceGroup.sgencoding[int_symbol]["point_group"]
else:
self.full_symbol = re.sub(r" ", "",
spg["universal_h_m"])
self.point_group = spg["schoenflies"]
self.int_number = spg["number"]
self.order = len(ops)
self._symmetry_ops = ops
break
else:
if int_symbol not in SpaceGroup.sgencoding:
raise ValueError("Bad international symbol %s" % int_symbol)
data = SpaceGroup.sgencoding[int_symbol]
self.symbol = int_symbol
# TODO: Support different origin choices.
enc = list(data["enc"])
inversion = int(enc.pop(0))
ngen = int(enc.pop(0))
symm_ops = [np.eye(4)]
if inversion:
symm_ops.append(np.array(
[[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0],
[0, 0, 0, 1]]))
for i in range(ngen):
m = np.eye(4)
m[:3, :3] = SpaceGroup.gen_matrices[enc.pop(0)]
m[0, 3] = SpaceGroup.translations[enc.pop(0)]
m[1, 3] = SpaceGroup.translations[enc.pop(0)]
m[2, 3] = SpaceGroup.translations[enc.pop(0)]
symm_ops.append(m)
self.generators = symm_ops
self.full_symbol = data["full_symbol"]
self.point_group = data["point_group"]
self.int_number = data["int_number"]
self.order = data["order"]
self._symmetry_ops = None
def _generate_full_symmetry_ops(self):
symm_ops = np.array(self.generators)
for op in symm_ops:
op[0:3, 3] = np.mod(op[0:3, 3], 1)
new_ops = symm_ops
while len(new_ops) > 0 and len(symm_ops) < self.order:
gen_ops = []
for g in new_ops:
temp_ops = np.einsum('ijk,kl', symm_ops, g)
for op in temp_ops:
op[0:3, 3] = np.mod(op[0:3, 3], 1)
ind = np.where(np.abs(1 - op[0:3, 3]) < 1e-5)
op[ind, 3] = 0
if not in_array_list(symm_ops, op):
gen_ops.append(op)
symm_ops = np.append(symm_ops, [op], axis=0)
new_ops = gen_ops
assert len(symm_ops) == self.order
return symm_ops
@classmethod
def get_settings(cls, int_symbol):
"""
Returns all the settings for a particular international symbol.
Args:
int_symbol (str): Full International (e.g., "P2/m2/m2/m") or
Hermann-Mauguin Symbol ("Pmmm") or abbreviated symbol. The
notation is a LaTeX-like string, with screw axes being
represented by an underscore. For example, "P6_3/mmc".
"""
symbols = []
if int_symbol in SpaceGroup.abbrev_sg_mapping:
symbols.append(SpaceGroup.abbrev_sg_mapping[int_symbol])
int_number = SpaceGroup.sgencoding[int_symbol]["int_number"]
elif int_symbol in SpaceGroup.full_sg_mapping:
symbols.append(SpaceGroup.full_sg_mapping[int_symbol])
int_number = SpaceGroup.sgencoding[int_symbol]["int_number"]
else:
for spg in SpaceGroup.SYMM_OPS:
if int_symbol in [re.split(r"\(|:", spg["hermann_mauguin"])[0],
re.split(r"\(|:", spg["universal_h_m"])[0]]:
int_number = spg["number"]
break
for spg in SpaceGroup.SYMM_OPS:
if int_number == spg["number"]:
symbols.append(spg["hermann_mauguin"])
symbols.append(spg["universal_h_m"])
return set(symbols)
@property
def symmetry_ops(self):
"""
Full set of symmetry operations as matrices. Lazily initialized as
generation sometimes takes a bit of time.
"""
if self._symmetry_ops is None:
self._symmetry_ops = [
SymmOp(m) for m in self._generate_full_symmetry_ops()]
return self._symmetry_ops
def get_orbit(self, p, tol=1e-5):
"""
Returns the orbit for a point.
Args:
p: Point as a 3x1 array.
tol: Tolerance for determining if sites are the same. 1e-5 should
be sufficient for most purposes. Set to 0 for exact matching
(and also needed for symbolic orbits).
Returns:
([array]) Orbit for point.
"""
orbit = []
for o in self.symmetry_ops:
pp = o.operate(p)
pp = np.mod(np.round(pp, decimals=10), 1)
if not in_array_list(orbit, pp, tol=tol):
orbit.append(pp)
return orbit
def is_compatible(self, lattice, tol=1e-5, angle_tol=5):
"""
Checks whether a particular lattice is compatible with the
*conventional* unit cell.
Args:
lattice (Lattice): A Lattice.
tol (float): The tolerance to check for equality of lengths.
angle_tol (float): The tolerance to check for equality of angles
in degrees.
"""
abc, angles = lattice.lengths_and_angles
crys_system = self.crystal_system
def check(param, ref, tolerance):
return all([abs(i - j) < tolerance for i, j in zip(param, ref)
if j is not None])
if crys_system == "cubic":
a = abc[0]
return check(abc, [a, a, a], tol) and\
check(angles, [90, 90, 90], angle_tol)
elif crys_system == "hexagonal" or (
crys_system == "trigonal" and (
self.symbol.endswith("H") or
self.int_number in [143, 144, 145, 147, 149, 150, 151, 152,
153, 154, 156, 157, 158, 159, 162, 163,
164, 165])):
a = abc[0]
return check(abc, [a, a, None], tol)\
and check(angles, [90, 90, 120], angle_tol)
elif crys_system == "trigonal":
a = abc[0]
alpha = angles[0]
return check(abc, [a, a, a], tol) \
and check(angles, [alpha, alpha, alpha], angle_tol)
elif crys_system == "tetragonal":
a = abc[0]
return check(abc, [a, a, None], tol) and\
check(angles, [90, 90, 90], angle_tol)
elif crys_system == "orthorhombic":
return check(angles, [90, 90, 90], angle_tol)
elif crys_system == "monoclinic":
return check(angles, [90, None, 90], angle_tol)
return True
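    # Illustrative sketch (added comment; the Lattice import is shown for clarity):
    #
    #     from pymatgen.core.lattice import Lattice
    #     sg = SpaceGroup("Fm-3m")
    #     sg.is_compatible(Lattice.cubic(4.0))        # -> True
    #     sg.is_compatible(Lattice.hexagonal(3, 5))   # -> False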
@property
def crystal_system(self):
i = self.int_number
if i <= 2:
return "triclinic"
elif i <= 15:
return "monoclinic"
elif i <= 74:
return "orthorhombic"
elif i <= 142:
return "tetragonal"
elif i <= 167:
return "trigonal"
elif i <= 194:
return "hexagonal"
else:
return "cubic"
def is_subgroup(self, supergroup):
"""
True if this space group is a subgroup of the supplied group.
Args:
            supergroup (SpaceGroup): Supergroup to test.
Returns:
True if this space group is a subgroup of the supplied group.
"""
if len(supergroup.symmetry_ops) < len(self.symmetry_ops):
return False
groups = [[supergroup.int_number]]
all_groups = [supergroup.int_number]
max_subgroups = {int(k): v
for k, v in get_symm_data("maximal_subgroups").items()}
while True:
new_sub_groups = set()
for i in groups[-1]:
new_sub_groups.update([j for j in max_subgroups[i] if j
not in all_groups])
if self.int_number in new_sub_groups:
return True
elif len(new_sub_groups) == 0:
break
else:
groups.append(new_sub_groups)
all_groups.extend(new_sub_groups)
return False
def is_supergroup(self, subgroup):
"""
True if this space group is a supergroup of the supplied group.
Args:
subgroup (Spacegroup): Subgroup to test.
Returns:
True if this space group is a supergroup of the supplied group.
"""
return subgroup.is_subgroup(self)
@classmethod
def from_int_number(cls, int_number, hexagonal=True):
"""
Obtains a SpaceGroup from its international number.
Args:
int_number (int): International number.
hexagonal (bool): For rhombohedral groups, whether to return the
hexagonal setting (default) or rhombohedral setting.
Returns:
(SpaceGroup)
"""
return SpaceGroup(sg_symbol_from_int_number(int_number,
hexagonal=hexagonal))
def __str__(self):
return "Spacegroup %s with international number %d and order %d" % (
self.symbol, self.int_number, len(self.symmetry_ops))
def sg_symbol_from_int_number(int_number, hexagonal=True):
"""
Obtains a SpaceGroup name from its international number.
Args:
int_number (int): International number.
hexagonal (bool): For rhombohedral groups, whether to return the
hexagonal setting (default) or rhombohedral setting.
Returns:
(str) Spacegroup symbol
"""
syms = []
for n, v in get_symm_data("space_group_encoding").items():
if v["int_number"] == int_number:
syms.append(n)
if len(syms) == 0:
raise ValueError("Invalid international number!")
if len(syms) == 2:
if hexagonal:
syms = list(filter(lambda s: s.endswith("H"), syms))
else:
syms = list(filter(lambda s: not s.endswith("H"), syms))
return syms.pop()
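# Illustrative sketch (added comment):
#
#     sg_symbol_from_int_number(225)   # -> "Fm-3m"
#
# For rhombohedral groups (e.g. 166) two settings exist; hexagonal=True picks
# the symbol for the hexagonal setting (the one ending in "H"),
# hexagonal=False the rhombohedral one.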
def in_array_list(array_list, a, tol=1e-5):
"""
Extremely efficient nd-array comparison using numpy's broadcasting. This
function checks if a particular array a, is present in a list of arrays.
It works for arrays of any size, e.g., even matrix searches.
Args:
array_list ([array]): A list of arrays to compare to.
a (array): The test array for comparison.
tol (float): The tolerance. Defaults to 1e-5. If 0, an exact match is
done.
Returns:
(bool)
"""
if len(array_list) == 0:
return False
axes = tuple(range(1, a.ndim + 1))
if not tol:
return np.any(np.all(np.equal(array_list, a[None, :]), axes))
else:
return np.any(np.sum(np.abs(array_list - a[None, :]), axes) < tol)
|
dongsenfo/pymatgen
|
pymatgen/symmetry/groups.py
|
Python
|
mit
| 18,798
|
[
"pymatgen"
] |
02043a1e6317ebf284c4dc7e79e1ca1741a679068cb216e7b1a11b80734ebe78
|
#!/usr/bin/env python
# This example demonstrates the use of vtk3DSImporter.
# vtk3DSImporter is used to load 3D Studio files. Unlike writers,
# importers can load scenes (data as well as lights, cameras, actors
# etc.). Importers will either generate an instance of vtkRenderWindow
# and/or vtkRenderer or will use the ones you specify.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the importer and read a file
importer = vtk.vtk3DSImporter()
importer.ComputeNormalsOn()
importer.SetFileName(VTK_DATA_ROOT + "/Data/iflamigm.3ds")
importer.Read()
# Here we let the importer create a renderer and a render window for
# us. We could also have created and assigned those ourselves like so:
# renWin = vtk.vtkRenderWindow()
# importer.SetRenderWindow(renWin)
# Assign an interactor.
# We have to ask the importer for its render window.
renWin = importer.GetRenderWindow()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Set the render window's size
renWin.SetSize(300, 300)
# Set some properties on the renderer.
# We have to ask the importer for its renderer.
ren = importer.GetRenderer()
ren.SetBackground(0.1, 0.2, 0.4)
# Position the camera:
# change view up to +z
camera = ren.GetActiveCamera()
camera.SetPosition(0, 1, 0)
camera.SetFocalPoint(0, 0, 0)
camera.SetViewUp(0, 0, 1)
# let the renderer compute good position and focal point
ren.ResetCamera()
camera.Dolly(1.4)
ren.ResetCameraClippingRange()
iren.Initialize()
renWin.Render()
iren.Start()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Examples/IO/Python/flamingo.py
|
Python
|
gpl-3.0
| 1,534
|
[
"VTK"
] |
5830c391f7b71ebbd4b56b45fbbf4eef68b7cc54b2faba243c2e35085d0d9d5e
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 20:20:03 2018
@author: BallBlueMeercat
"""
import time
import dnest4
import numpy as np
import numpy.random as rng
import matplotlib.pyplot as plt
import pickle
import datasim
import results
import tools
import matplotlib as mpl
mpl.style.use('default') # has to be switched on to set figure size
mpl.style.use('fivethirtyeight')
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['figure.facecolor'] = 'white'
plt.rcParams['grid.color'] = 'white'
timed = False
plot = False
print('Bfactor')
# Loading pantheon SN Ia data:
dataname = 'pantheon'
#dataname = 'synth'
if dataname == 'pantheon':
import pandas as pd
print('-----Using pantheon')
# Pantheon data:
pantheon = pd.read_csv('./data/lcparam_full_long.txt', sep=" ")
pantheon.set_index('name', inplace=True)
pantheon.sort_values('zhel', inplace=True)
mag = pantheon.mb.values
sigma = pantheon.dmb.values
zpicks = pantheon.zhel.values
data_dic = {'mag':mag, 'zpicks':zpicks}
# plt.figure()
# plt.title('Pantheon')
# plt.scatter(zpicks, mag)
# plt.show(block=False)
elif dataname == 'synth':
mu, sigma = 0.0, 0.07 # Mean and standard deviation of the noise on the data.
npoints = 1048000
min_z = 0.01012
max_z = 2.26
# Loading artificial LCDM SN Ia data:
from pathlib import Path
dataname = f'data/{npoints}_{max_z}_sigma_{sigma}.p'
my_file = Path(dataname)
if my_file.is_file():
with open(dataname,'rb') as rfp: zpicks, mag = pickle.load(rfp)
plt.figure()
plt.title(f'Artificial data being used N={len(zpicks)}, $\sigma$={sigma}')
plt.scatter(zpicks, mag)
plt.show(block=False)
data_dic = {'mag':mag, 'zpicks':zpicks}
else:
print(f'failed to get zpicks, mag from {dataname}')
print('generating zpicks and mag')
# Generating redshifts.
zpicks = np.random.uniform(low=min_z, high=max_z, size=(npoints,))
zpicks = np.sort(zpicks, axis=None)
if zpicks[-1] != max_z:
zpicks[-1] = max_z
data_dic = {'zpicks':zpicks}
# Generating LCDM mag and da.
mag, da = datasim.magn(['Mcorr', 'matter'], np.array([-19.3, 0.3]), data_dic, 'LCDM')
# Adding noise to LCDM mag.
mag = datasim.gnoise(mag, mu, sigma)
plt.figure()
plt.title(f'Artificial data N={len(zpicks)}, $\sigma$={sigma}')
plt.scatter(zpicks, mag)
plt.show(block=False)
data = zpicks, mag
pickle.dump(data, open(dataname, 'wb'))
data_dic = {'mag':mag, 'zpicks':zpicks}
class Model(object):
"""
Specify the model in Python.
"""
def __init__(self, names, int_lim, fluid_number):
"""
Parameter values *are not* stored inside the class
"""
# Prior on corrected magnitude.
self.M_min = -20
self.M_max = -18
self.fluid_number = fluid_number
self.names = names
self.int_lim = int_lim
def from_prior(self):
"""
Unlike in C++, this must *return* a numpy array of parameters.
"""
M = 1E3*rng.rand()
M = dnest4.wrap(M, self.M_min, self.M_max)
M = np.array([M])
# Sampling fluids from prior (uniform distribution between 0 and 1).
        fluids = [rng.rand() for i in range(self.fluid_number)]
if self.int_lim:
int_terms = np.zeros(len(self.int_lim))
for i in range(len(self.int_lim)):
term = 1E3*rng.rand()
term = dnest4.wrap(term, self.int_lim[i][0], self.int_lim[i][1])
int_terms[i] = term
return np.concatenate((M, fluids, int_terms))
return np.concatenate((M, fluids))
def perturb(self, theta):
"""
Unlike in C++, this takes a numpy array of parameters as input,
and modifies it in-place. The return value is still logH.
"""
logH = 0.0
pic = rng.randint(len(theta))
# Note the difference between dnest4.wrap in Python and
# DNest4::wrap in C++. The former *returns* the wrapped value.
if pic == 0:
theta[pic] += dnest4.randh()
theta[pic] = dnest4.wrap(theta[pic], self.M_min, self.M_max)
        elif 0 < pic < (self.fluid_number + 1):
            theta[pic] += dnest4.randh()
            theta[pic] = dnest4.wrap(theta[pic], 0.0, 1.0)
        elif self.fluid_number < pic:
            i = pic - self.fluid_number - 1  # index of interaction term
theta[pic] += dnest4.randh()
theta[pic] = dnest4.wrap(theta[pic],
self.int_lim[i][0], self.int_lim[i][1])
return logH
def log_likelihood(self, theta):
"""
Gaussian sampling distribution.
"""
model_mag, model_da = datasim.magn(self.names, theta, data_dic, key)
var = sigma**2.0
like = -0.5*np.sum((mag-model_mag)**2.0 /var +np.log(2.0*np.pi*var))
return like
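    # Added note (not in the original script): the expression above is the
    # standard Gaussian log-likelihood
    #     ln L = -1/2 * sum_i [ (mag_i - model_i)**2 / sigma_i**2 + ln(2*pi*sigma_i**2) ],
    # evaluated with the observed magnitudes and their quoted errors.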
firstderivs_functions = [None
,'rainbow' # speed 6
,'niagara' # speed 4
,'kanangra' # speed 4
,'waterfall' # speed 3
,'stepfall' # speed 3
,'exotic'
,'late_intxde'
,'heaviside_late_int'
,'heaviside_sudden' # didn't converge
,'late_int'
,'expgamma'
,'txgamma'
,'zxgamma'
,'gamma_over_z'
,'zxxgamma' # gamma forced positive in firstderivs
,'gammaxxz' # gamma forced positive in firstderivs
,'rdecay_m'
,'rdecay_de'
,'rdecay_mxde'
,'rdecay' # didn't converge
,'interacting'
,'LCDM'
,'rLCDM'
]
for key in firstderivs_functions:
if key:
print(key)
names, int_lim, speed = tools.names_intlim_speed(key)
if int_lim:
fluid_number = len(names) - 1 - len(int_lim)
else:
fluid_number = len(names) - 1
# Create a model object and a sampler.
model = Model(names, int_lim, fluid_number)
sampler = dnest4.DNest4Sampler(model,
backend=dnest4.backends.CSVBackend(".",sep=" "))
if speed == 6: # extra long
max_lvl,nstep,new_lvl,n_per_step,th_step = 60,1000,10000,10000,100
elif speed == 5: # extra long
max_lvl,nstep,new_lvl,n_per_step,th_step = 50,1000,10000,10000,100
elif speed == 4: # extra long
max_lvl,nstep,new_lvl,n_per_step,th_step = 40,1000,10000,10000,100
elif speed == 3: # LONG
max_lvl,nstep,new_lvl,n_per_step,th_step = 30,1000,10000,10000,100
elif speed == 2: # MEDIUM
max_lvl,nstep,new_lvl,n_per_step,th_step = 30,1000,1000,1000,100
elif speed == 1: # SHORT
max_lvl,nstep,new_lvl,n_per_step,th_step = 30,100,100,100,10
elif speed == 0: # sampling from prior
max_lvl,nstep,new_lvl,n_per_step,th_step = 1,1000,100,100,10
# Set up the sampler. num_per_step can be down to a few thousand.
gen = sampler.sample(max_num_levels=max_lvl, num_steps=nstep,
new_level_interval=new_lvl, num_per_step=n_per_step,
thread_steps=th_step, num_particles=5,
lam=10, beta=100, seed=1234)
if timed:
import cProfile, pstats, io
pr = cProfile.Profile()
pr.enable()
ti = time.time()
# Do the sampling (one iteration here = one particle save).
for i, sample in enumerate(gen):
# print("# Saved {k} particles.".format(k=(i+1)))
pass
tf = time.time()
if timed:
pr.disable()
s = io.StringIO()
sortby = 'tottime'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print (s.getvalue())
dnest_time = tools.timer('Bfactor', ti, tf)
print('testing =',key)
print('data =', dataname)
print('sigma =', sigma)
# Histogram of parameters found by DNest4.
array = np.loadtxt('sample.txt')
DNest_distr = {}
if plot: # ['light red', 'berry', 'purple', 'black']
hue = ['coral', 'amber', 'apple', 'aquamarine',
'raspberry', 'green blue', 'deep blue', 'emerald',
'blue violet', 'dark violet', 'yellow orange',
'light red', 'berry', 'coral', 'amber', 'apple',
'aquamarine', 'raspberry', 'green blue','deep blue',
'emerald', 'blue violet', 'dark violet', 'black']
# ndim = len(array[0,:])
ndim = len(names)
for i in range(ndim):
name = names[i]
plt.figure()
# plt.title(name)
plt.hist(array[:,i], color='xkcd:'+hue[i])
plt.locator_params(axis='x', nbins=5)
distribution = array[:,i]
# Standard deviation and mean of the DNest distribution.
DNest_distr[name+'_sd'] = np.std(distribution)
DNest_distr[name+'_mean'] = np.mean(distribution)
DNest_distr[name] = array[:,i]
plt.show(block=False)
# Run the postprocessing
info = dnest4.postprocess()
if speed > 1:
f = open('brief.txt','w')
f.write(dnest_time +'\n'
+'model = '+key +'\n'
+'data = '+ dataname +'\n'
+'sigma = '+str(sigma) +'\n'
+'log(Z) = '+str(info[0]) +'\n'
+'Information = '+str(info[1]) +'\n'
+'speed = '+str(speed))
f.close()
pickle.dump(info[0], open('evidence.p', 'wb'))
# Moving output .txt files into a run specific folder.
results.relocate('evidence.p', speed, key)
results.relocate('levels.txt', speed, key)
results.relocate('posterior_sample.txt', speed, key)
results.relocate('sample_info.txt', speed, key)
results.relocate('sample.txt', speed, key)
results.relocate('sampler_state.txt', speed, key)
results.relocate('weights.txt', speed, key)
results.relocate('brief.txt', speed, key)
results.relocate('plot_1.pdf', speed, key)
results.relocate('plot_2.pdf', speed, key)
results.relocate('plot_3.pdf', speed, key)
if speed > 1:
import os
os.system("say 'the Befactor script has finished my love' &")
|
lefthandedroo/Cosmo-models
|
Models/Bfactor.py
|
Python
|
mit
| 10,727
|
[
"Amber",
"Gaussian"
] |
d64f2bdc192d69163c418696db530b2e1fefa44a61d8e0980f66c7a14874276d
|
# ***************************************************************************
# *
# * Copyright (C) 2013-2016 University of Dundee
# * All rights reserved.
# *
# * This file is part of SAMoS (Soft Active Matter on Surfaces) program.
# *
# * SAMoS is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * SAMoS is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program. If not, see <http://www.gnu.org/licenses/>.
# *
# *****************************************************************************
import os
from read_conf import *
# Reads in parameter files, and places them in an intuitively readable class; using read_conf.py
# Reading in the different parameter values
# Careful, the code parser cares about CaPItalIzAtIoN
# See read_conf for an explanation
# Yes, this is hamfisted ...
# May 2015: Added infrastructure for the additional things which have been added to configuration files since the last edit
# (and stuff which should have been in there in the first place)
# Configuration file key line examples (M for mandatory, O for optional):
# M messages
# messages /where/the/message/file/is.msg
# M boxes
# box fixed { lx = 100; ly = 100; lz = 100 }
# box periodic { lx = 30.0; ly = 30.0; lz = 30.0 }
# M input
# input /where/the/input/file/is.txt
# M neighbour list
# nlist { rcut = 2.4; pad = 0.5 }
# O groups
# group g1 { type = 1 }
# group g2 { type = 2 }
# group g3 { type = 3 }
# O outputs
# Obscure formats
# dump rod_test { type=velocity; start=0; freq=10000; multi; header }
# dump rod_test { type=xyz; start=0; freq=10000; multi; header }
# dump rod_test { type=dcd; start=0; freq=1000 }
# dump rod_test_dir { type=xyzv; start=0; freq=1000; scale = 0.25; header }
# dump rod_test_vel { type=xyzv; start=0; freq=1000; scale = 0.1; velocity; header }
# dump rod_test { type=full; start=0; freq=1000; multi; coordinate; velocity; director; header }
# Common text format
# dump sphere_v0_0.02_j_0.1 { type=full; start=0; freq=10000; multi; coordinate; velocity; director; header }
# dump plane_circle_test { type=full; start=0; freq=2000; multi; id; flag; tp; radius; coordinate; velocity; director; header }
# dump plane_test { type=full; start=0; freq=5000; multi; id; flag; tp; radius; coordinate; velocity; director; header }
# Contacts (underused)
# dump contacts_v0_0.2_j_0.01 { type=contact; start=0; freq=10000; rcut = 2.4; multi }
# M (technically O) constraints
# sphere
# constraint sphere { r = 28.209479177387816 } (for a sphere)
# plane
# constraint plane { lx = 100; ly = 100 } (for a plane)
# constraint plane { }
# hourglass
# We constrain all particles to move only on the surface of a hourglass if radius R = 10 with amplitude A = 2.0
# It is assumed that we have only one waist (node)
# constraint hourglass { R = 10.0; A = 2.0 }
# cylinder
# We constrain all particles to move only on the xy cylinder with lx=100, ly = 100 (actually those are read from the box)
# constraint cylinder { r = 5 }
# MISSING: catenoid, wavy cylinder
# M pair potentials (used for group=all by default)
# soft, with and without radii
# pair_potential soft { k = 1 }
# pair_potential soft { k = 1.0; use_particle_radii}
# gaussian
# pair_potential gaussian { A = 2.0; B = -1.0 }
# pair_potential gaussian { A = 3.0; B = -1.0; alpha = 3.0; beta = 3.0; rA = 0.0; rB = 1.0 }
# morse
# pair_potential morse { D = 1.5; a = 1.0; re = 1 }
# pair_potential morse { D = 1.0; a = 3.0; re = 2.0; use_particle_radii }
# rods
# pair_potential rod { k = 1.0 }
# MISSING: hertzian rods, hertzian?
# O pair alignment (used for group=all by default. Careful! J has to be capitalized ...)
# pair_align polar { j = 0.1 }
# pair_align vicsek { rcut = 1.0 }
# pair_align nematic { J = 0.1 }
# O external alignment (active jamming, technically. Any external field fits as well)
# external_align aj { tau = 0.01 }
# O logging (note that a log file with time step is generated no matter what)
# log sphere_J_0.1_v0_0.1.log { freq = 1000; velocity; soft_energy; nematic_align; vec_velocity }
# log sphere_test.log { freq = 1000; velocity; soft_energy; polar_align; vec_velocity }
# log plane_test.log { freq = 1000; velocity; soft_energy; polar_align }
# log plane_test.log { freq = 1000; velocity; rod_energy }
# log hourglass_test.log { freq = 1000; velocity; soft_energy; polar_align; vec_velocity }
# log plane_test.log { freq = 1000; velocity; soft_energy; polar_align }
# log plane_test.log { freq = 1000; velocity; morse_energy }
# O NVE integrator
# integrator nve { dt=0.001; limit=0.0001 }, together with
# disable nve { group=all } (or even possibly disable nve)
# integrator nve { dt=0.001; limit=0.0001; group = g1 }, together with
# disable nve { group=g1 }
# O NVE integrator run time
# run 10000 (for the relaxation step)
# O group-wise pair interaction parameters. Seems to have been used consistently after the NVE stage
# pair_param morse { type_1 = 1; type_2 = 1; D = 1.5; a = 1.0; re = 1 }
# pair_param morse { type_1 = 1; type_2 = 2; D = 3.0; a = 1.0; re = 1 }
# pair_param morse { type_1 = 2; type_2 = 2; D = 1.5; a = 1.0; re = 1 }
# pair_param soft { type_1 = 1; type_2 = 1; k=1.0 }
# pair_param soft { type_1 = 1; type_2 = 2; k=10.0 }
# pair_param soft { type_1 = 1; type_2 = 3; k=10.0 }
# pair_param soft { type_1 = 2; type_2 = 2; k=1.0 }
# pair_param soft { type_1 = 2; type_2 = 3; k=1.0 }
# pair_param soft { type_1 = 3; type_2 = 3; k=1.0 }
# pair_param gaussian { type_1 = 1; type_2 = 1; A = 3.0; B = 0.0; alpha = 3.0; beta = 3.0; rA = 0.0; rB = 1.0 }
# pair_param gaussian { type_1 = 1; type_2 = 2; A = 3.0; B = -1.0; alpha = 3.0; beta = 3.0; rA = 0.0; rB = 1.0 }
# pair_param gaussian { type_1 = 2; type_2 = 2; A = 3.0; B = 0.0; alpha = 3.0; beta = 3.0; rA = 0.0; rB = 1.0 }
# O group-wise pair alignment parameters. Seems to have been used consistently after the NVE stage
# align_param polar { type_1 = 1; type_2 = 1; J = 0.25 }
# align_param polar { type_1 = 1; type_2 = 2; J = 0.25 }
# align_param polar { type_1 = 2; type_2 = 2; J = 0.0 }
# align_param polar { type_1 = 1; type_2 = 3; J = 0.0 }
# align_param polar { type_1 = 2; type_2 = 3; J = 0.0 }
# align_param polar { type_1 = 3; type_2 = 3; J = 0.0 }
# M main integrator
# Polar sphere brownian integrators
# integrator brownian { dt= 0.001; seed = 7; nu = 0.002; mu = 1.0; v0 = 0.02 }
# integrator brownian { dt=0.001; seed = 1; nu = 0.002; mu = 1.0; v0 = 0.05; group = g1 }
# integrator brownian { dt=0.001; seed = 1; nu = 0.00; mu = 1.0; v0 = 1.0; group = all }
# Nematic sphere brownian integrators, tau is flip time
# integrator brownian { dt=0.001; seed = 22960; nu = 0.0; mu = 1.0; v0 = 0.1; nematic; tau = 1.0 }
# Nematic rod brownian integrator
# nu sets the width of the distribution for random changes of velocity
# mu is particle mobility
# mur is rotational rod mobility
# v0 is the intensity of the self-propelling velocity
# integrator brownian { dt=0.001; seed = 1; nu = 0.00; mu = 1.0; mur = 1.0; v0 = 1.0; group = all; tau = 1.0; nematic }
# Thermal brownian integrator (need more info!)
# and another one for passive particles, but this time at temperature T = 0.3
# temperature_control tells integrator to use constant temperature set by parameter
# min_val. In this case max_val and steps are ignored. If we choose temperature_control=linear
# then temperature is linearly interpolated between min_val and max_val
# integrator brownian { group = passive; dt=0.001; seed = 4; nu = 0.00; mu = 1.0; v0 = 0.0; temperature_control=constant; min_val=0.3; max_val=0.3; steps = 1000 }
# Vicsek integrator
# integrator vicsek { dt=0.01; seed = 37; eta = 1.0; mu = 1.0; v0 = 0.5 }
# O Particle dynamics (division and death)
# Stochastic divide and death
# population random { group = g1; division_rate = 1000.0; death_rate = 1000.0; freq = 1000 }
# Density-controlled divide and death; rho_max is in contact number units
# Simple everyone
# population density { group = all; division_rate = 0.0003; split_distance=0.1,rho_max = 6.0,death_rate = 0.00006; freq = 1000 }
# While switching groups
# population density { group = g1; division_rate = 0.0; death_rate = 0.00025; freq = 1000; change_prob_1 = 0.0; change_prob_2 = 0.0 , old_group = g1; new_group = g1; old_type = 1; new_type = 1 }
# population density { group = g2; division_rate = 0.1; death_rate = 0.0; freq = 1000; change_prob_1 = 0.0; split_distance = 0.05; change_prob_2 = 1.0; old_group = g2; new_group = g1; old_type = 2; new_type = 1 }
# population density { group = g2; division_rate = 0.025; death_rate = 0.0; freq = 1000; split_distance = 0.0;rho_max = 1000.0, change_prob_2 = 1.0; old_group = g2; new_group = g1; old_type = 2; new_type = 1 }
# M main running time
# run 10000000 (note that this is the second occurrence; the one *NOT* referring to the relaxation step)
class Param:
def __init__(self,filename):
#for file in os.listdir(folder):
#if file.endswith(".conf"):
#self.filename=folder + file
#print self.filename
self.filename=filename
print filename
conf = ReadConf(self.filename)
# Message file
self.messagefile = conf.key_words['messages'][0].name
print "Message file: " + self.messagefile
# Boxes
self.boxtype = conf.key_words['box'][0].name
print "Box type: " + self.boxtype
self.box=[]
self.box.append(float(conf.key_words['box'][0].attributes[0].val))
self.box.append(float(conf.key_words['box'][0].attributes[1].val))
self.box.append(float(conf.key_words['box'][0].attributes[2].val))
print "Box dimensions: "
print self.box
# Neighbour list
self.nlist_rcut = float(conf.key_words['nlist'][0].attributes[0].val)
self.nlist_pad = float(conf.key_words['nlist'][0].attributes[1].val)
print "Neighbour list rcut " + str(self.nlist_rcut) + " and padding " + str(self.nlist_pad)
# Input file
self.inputfile = conf.key_words['input'][0].name
print "Input file: " + self.inputfile
# Dump parameters
self.dumpname=conf.key_words['dump'][0].name
self.dump={}
for l in range(len(conf.key_words['dump'][0].attributes)):
try:
self.dump[str.strip(conf.key_words['dump'][0].attributes[l].name)]=float(conf.key_words['dump'][0].attributes[l].val)
except:
try:
self.dump[str.strip(conf.key_words['dump'][0].attributes[l].name)]=str.strip(conf.key_words['dump'][0].attributes[l].val)
except: # no constraints
pass
# Groups
try:
self.ngroups=len(conf.key_words['group'])
self.groupnames=[]
self.grouptypes=[]
for l in range(self.ngroups):
self.groupnames.append(conf.key_words['group'][l].name)
self.grouptypes.append(int(conf.key_words['group'][l].attributes[0].val))
print "Group names: "
print self.groupnames
print "Group types: "
print self.grouptypes
except KeyError:
self.ngroups=1
print "Number of groups: " + str(self.ngroups)
# Constraints
try:
self.constraint = conf.key_words['constraint'][0].name
print "Constraint: " + self.constraint
self.const_params={}
try:
for l in range(len(conf.key_words['constraint'][0].attributes)):
try:
self.const_params[str.strip(conf.key_words['constraint'][0].attributes[l].name)]=float(conf.key_words['constraint'][0].attributes[l].val)
except:
try:
self.const_params[str.strip(conf.key_words['constraint'][0].attributes[l].name)]=str.strip(conf.key_words['constraint'][0].attributes[l].val)
except: # no constraints
pass
print "Constraint parameters "
print self.const_params
except KeyError:
pass
# Legacy for the two most common constraints
if self.constraint == 'sphere':
self.r=float(conf.key_words['constraint'][0].attributes[0].val)
print 'Radius'
print self.r
elif self.constraint == 'plane':
try: # try from the constraint
self.lx=float(conf.key_words['constraint'][0].attributes[0].val)
self.ly=float(conf.key_words['constraint'][0].attributes[1].val)
except:# else use the box
self.lx=self.box[0]
self.ly=self.box[1]
print 'Lx and Ly'
print self.lx
print self.ly
if self.boxtype=='periodic':
self.constraint='plane_periodic'
except KeyError:
self.constraint='none'
self.const_params={}
# Default pair potentials and aligners
try:
self.potential=conf.key_words['pair_potential'][0].name
self.pot_params={}
for l in range(len(conf.key_words['pair_potential'][0].attributes)):
try:
self.pot_params[str.strip(conf.key_words['pair_potential'][0].attributes[l].name)]=float(conf.key_words['pair_potential'][0].attributes[l].val)
except:
try:
self.pot_params[str.strip(conf.key_words['pair_potential'][0].attributes[l].name)]=str.strip(conf.key_words['pair_potential'][0].attributes[l].val)
except: # use_particle_radii: different form ...
self.pot_params[str.strip(conf.key_words['pair_potential'][0].attributes[l].name)]=True
except KeyError:
self.potential = 'none'
self.pot_params={}
try:
self.aligner=conf.key_words['pair_align'][0].name
self.J=float(conf.key_words['pair_align'][0].attributes[0].val)
except KeyError:
try:
self.aligner=conf.key_words['external_align'][0].name
self.J=float(conf.key_words['external_align'][0].attributes[0].val)
except KeyError:
self.aligner='none'
self.J=0.0
print "Potential: " + self.potential
print "Parameters: "
print self.pot_params
print "Aligner: " +self.aligner
print "J: " + str(self.J)
# Something for our friends the cells
# As written, we only ever read the first potential ... generalise
#pair_potential vp { K = 1.0; gamma = 1.0; lambda = -6.283184000}
#pair_potential line_tension { lambda = 0.0 }
#pair_potential soft {k = 10.0; a=0.5}
if self.potential=='vp':
self.kappa=float(conf.key_words['pair_potential'][0].attributes[0].val)
self.gamma=float(conf.key_words['pair_potential'][0].attributes[1].val)
self.lambdaval=float(conf.key_words['pair_potential'][0].attributes[2].val)
# NVE integrator
# Everything is based on the assumption that there is only one of these, currently ...
nNVE=0
if conf.key_words['integrator'][0].name=='nve':
print "NVE integrator "
self.nstepsNVE= int(conf.key_words['run'][0].name)
nNVE+=1
else:
self.nstepsNVE=0
print "NVE steps: " + str(self.nstepsNVE)
print "NVE integrators: " + str(nNVE)
# Type-wise pair potentials and aligners (careful: types and groups don't have to match!)
# square lists of lists of dictionaries or names
# Default: initialize with the defaults (including the 'none' if applicable)
# Minor leap of faith: types are numbered 0 1 2 etc., and all of them are part of *some* group
if self.ngroups>1:
self.ntypes=max(self.grouptypes)
print "Number of types: " + str(self.ntypes)
if self.ntypes>1:
# Potentials
self.type_potential=[[self.potential for u in range(self.ntypes)] for u in range(self.ntypes)]
self.type_pot_params=[[self.pot_params for u in range(self.ntypes)] for u in range(self.ntypes)]
try:
for l in range(len(conf.key_words['pair_param'])):
type1=int(conf.key_words['pair_param'][l].attributes[0].val)-1
type2=int(conf.key_words['pair_param'][l].attributes[1].val)-1
potential=conf.key_words['pair_param'][l].name
self.type_potential[type1][type2]=potential
self.type_pot_params[type1][type2]={}
for m in range(2,len(conf.key_words['pair_param'][l].attributes)):
try:
self.type_pot_params[type1][type2][str.strip(conf.key_words['pair_param'][l].attributes[m].name)]=float(conf.key_words['pair_param'][l].attributes[m].val)
except:
try:
                                    self.type_pot_params[type1][type2][str.strip(conf.key_words['pair_param'][l].attributes[m].name)]=str.strip(conf.key_words['pair_param'][l].attributes[m].val)
except: # use_particle_radii
self.type_pot_params[type1][type2][str.strip(conf.key_words['pair_param'][l].attributes[m].name)]=True
except KeyError:
pass
# Aligners
self.type_aligner=[[self.aligner for u in range(self.ntypes)] for u in range(self.ntypes)]
self.type_J=[[self.J for u in range(self.ntypes)] for u in range(self.ntypes)]
try:
for l in range(len(conf.key_words['align_param'])):
type1=int(conf.key_words['align_param'][l].attributes[0].val)-1
type2=int(conf.key_words['align_param'][l].attributes[1].val)-1
aligner=conf.key_words['align_param'][l].name
self.type_aligner[type1][type2]=aligner
self.type_J[type1][type2]=float(conf.key_words['align_param'][l].attributes[2].val)
except:
pass
print "Type potentials: "
print self.type_potential
print "Type potential parameters"
print self.type_pot_params
print "Type aligners: "
print self.type_aligner
print "Type J"
print self.type_J
else:
self.ntypes=1
# Main integrator(s)
        # Define straightforward parameters for the most common Brownian one
# First: distinguish between groups and no groups
self.one_integrator=False
if self.ngroups==1:
self.integrator=conf.key_words['integrator'][nNVE].name
print "Main integrator: " + self.integrator
self.int_params={}
for l in range(len(conf.key_words['integrator'][nNVE].attributes)):
try:
self.int_params[str.strip(conf.key_words['integrator'][nNVE].attributes[l].name)]=str.strip(conf.key_words['integrator'][nNVE].attributes[l].val)
except: # some odd thermal ones are effectively boolean
self.int_params[str.strip(conf.key_words['integrator'][nNVE].attributes[l].name)]=True
done = self.oneInt(conf)
else:
self.group_integrator=['none' for u in range(self.ngroups)]
self.group_int_params=[{} for u in range(self.ngroups)]
nintegrator=len(conf.key_words['integrator'])
print "Found " + str(nintegrator) + " intergrators!"
for k in range(nNVE,nintegrator): # Excluding the NVE here
int_params={}
for l in range(len(conf.key_words['integrator'][k].attributes)):
try:
int_params[str.strip(conf.key_words['integrator'][k].attributes[l].name)]=str.strip(conf.key_words['integrator'][k].attributes[l].val)
except:
int_params[str.strip(conf.key_words['integrator'][k].attributes[l].name)]=True
# now sort them into groups
# first in case it's all of them
# I don't care if some idiot has added more integrators on top of it. That's their problem.
try:
mygroup = int_params['group']
except KeyError:
mygroup='all'
if mygroup =='all':
self.integrator=conf.key_words['integrator'][k].name
print "Main integrator: " + self.integrator
self.int_params=int_params
done = self.oneInt(conf)
else:
groupidx=self.groupnames.index(mygroup)
self.group_integrator[groupidx]=conf.key_words['integrator'][k].name
self.group_int_params[groupidx]=int_params
if (nintegrator-nNVE)==1: #only one moving group, for example
self.integrator=conf.key_words['integrator'][k].name
print "Main integrator: " + self.integrator
self.int_params=int_params
done = self.oneInt(conf)
else:
if k==(nintegrator-1):
print "Warning: multiple complex integrators "
print self.group_integrator
print " for groups "
print self.groupnames
print "Parameters are stored in the dictionary self.group_int_params:"
print self.group_int_params
# Population control
# MISSING: The fade-in options
# Since this is very underdeveloped, just store the name(s), and the various options in a dictionary
# Warning: the values of these parameters are strings, even the ones that should be int or double
try:
self.npopulation = len(conf.key_words['population'])
if self.npopulation>0:
print "Number of populations: " + str(self.npopulation)
self.population=[]
self.pop_params=[{} for k in range(self.npopulation)]
for k in range(self.npopulation):
self.population.append(conf.key_words['population'][k].name)
for l in range(len(conf.key_words['population'][k].attributes)):
try:
self.pop_params[k][str.strip(conf.key_words['population'][k].attributes[l].name)]=float(conf.key_words['population'][k].attributes[l].val)
except:
self.pop_params[k][str.strip(conf.key_words['population'][k].attributes[l].name)]=str.strip(conf.key_words['population'][k].attributes[l].val)
print "Populations: "
print self.population
print "Population parameters: "
print self.pop_params
except KeyError:
self.npopulation=0
pass
self.nsteps = int(conf.key_words['run'][nNVE].name)
print 'Simulation time steps'
print self.nsteps
def oneInt(self,conf):
self.one_integrator=True
if self.integrator=='brownian':
# In case it's one of the newer ones where the dt is on its own
# If the except doesn't work either, we are fucked in any case
try:
self.dt =float(self.int_params['dt'])
except:
#print conf.key_words['timestep']
#self.dt = float(conf.key_words['timestep'])
# Oh yes, that syntax is incompatible with the parser
# And leads to all kind of BS if I don't stay on top of it
self.dt=0.01
print "Time step: " + str(self.dt)
self.seed = self.int_params['seed']
print "Dynamics seed: " + self.seed
self.mu = self.int_params['mu']
print "Mobility: " + str(self.mu)
# Again, the stupid v0 as external aligner type
try:
self.v0 = float(conf.key_words['external'][0].attributes[0].val)
except:
self.v0 = self.int_params['v0']
print "v0: " + str(self.v0)
self.nu = self.int_params['nu']
print "Noise strength: " + str(self.nu)
self.nematic=False
try:
dmp=self.int_params['nematic']
self.nematic=True
self.tau_flip=self.int_params['tau']
print "Nematic system with flip time " + str(self.tau_flip)
except:
pass
if self.potential=='rod':
try:
self.mur = self.int_params['mur']
print "Rod rotational mobility " + str(self.mur)
except:
self.mu = self.int_params['mu']
print "Rod rotational mobility " + str(self.mu)
self.thermal=False
try:
self.thermal_type=self.int_params['temperature_control']
self.thermal=True
self.kT=self.int_params['min_val'] # screw this, put in ramps only once we need them
self.kT_steps=self.int_params['steps']
print "Thermal brownian with " + self.thermal + " temperature " + self(kT) + " and steps " + str(self.kT_steps)
except:
pass
self.movegroup='all'
try:
self.movegroup = self.int_params['group']
except:
pass
print "Moving group: " + self.movegroup
elif self.integrator=='vicsek':
self.dt =float(self.int_params['dt'])
self.seed = self.int_params['seed']
self.mu = self.int_params['mu']
self.v0 = self.int_params['v0']
self.nu = self.int_params['eta']
else:
self.dt =float(self.int_params['dt'])
print "Warning: unknown integrator type " + self.integrator + ". Parameters are stored in the dictionary self.int_params."
return 1
return 0
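# Added usage sketch (not part of the original module): Param is constructed with the
# path to a SAMoS .conf file and exposes the parsed settings as attributes. The file
# name below is hypothetical.
#
#     from read_param import Param
#     p = Param('plane_test.conf')
#     print p.constraint, p.box, p.nlist_rcut, p.nsteps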
|
sknepneklab/SAMoS
|
utils/read_param.py
|
Python
|
gpl-3.0
| 24,144
|
[
"Gaussian"
] |
bc6badbaecce45c6469eb669cedb7548b31550fa59449e00423c2f0c1f0e0b0b
|
# $Id$
#
# Copyright (C) 2011 greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" unit testing code for molecule drawing
"""
from rdkit import RDConfig
import unittest,os,tempfile
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.RDLogger import logger
logger = logger()
class TestCase(unittest.TestCase):
def setUp(self):
self.mol = Chem.MolFromSmiles('c1c(C[15NH3+])ccnc1[C@](Cl)(Br)[C@](Cl)(Br)F')
def testCairoFile(self):
try:
from rdkit.Chem.Draw.cairoCanvas import Canvas
except ImportError:
logger.info("Skipping cairo test")
return
os.environ['RDKIT_CANVAS']='cairo'
foo,fn=tempfile.mkstemp(suffix='.png')
foo=None
self.assertEqual(os.path.getsize(fn),0)
Draw.MolToFile(self.mol,fn)
self.assertNotEqual(os.path.getsize(fn),0)
try:
os.unlink(fn)
except:
pass
def testAggFile(self):
try:
from rdkit.Chem.Draw.aggCanvas import Canvas
except ImportError:
logger.info("Skipping agg test")
return
os.environ['RDKIT_CANVAS']='agg'
foo,fn=tempfile.mkstemp(suffix='.png')
foo=None
self.assertEqual(os.path.getsize(fn),0)
Draw.MolToFile(self.mol,fn)
self.assertNotEqual(os.path.getsize(fn),0)
try:
os.unlink(fn)
except:
pass
def testSpingFile(self):
try:
from rdkit.Chem.Draw.spingCanvas import Canvas
except ImportError:
logger.info("Skipping sping test")
return
os.environ['RDKIT_CANVAS']='sping'
foo,fn=tempfile.mkstemp(suffix='.png')
foo=None
self.assertEqual(os.path.getsize(fn),0)
Draw.MolToFile(self.mol,fn)
self.assertNotEqual(os.path.getsize(fn),0)
try:
os.unlink(fn)
except:
pass
def testCairoImage(self):
try:
from rdkit.Chem.Draw.cairoCanvas import Canvas
except ImportError:
return
os.environ['RDKIT_CANVAS']='cairo'
img=Draw.MolToImage(self.mol,size=(300,300))
self.assertTrue(img)
self.assertEqual(img.size[0],300)
self.assertEqual(img.size[1],300)
def testAggImage(self):
try:
from rdkit.Chem.Draw.aggCanvas import Canvas
except ImportError:
return
os.environ['RDKIT_CANVAS']='agg'
img=Draw.MolToImage(self.mol,size=(300,300))
self.assertTrue(img)
self.assertEqual(img.size[0],300)
self.assertEqual(img.size[1],300)
def testSpingImage(self):
try:
from rdkit.Chem.Draw.spingCanvas import Canvas
except ImportError:
return
os.environ['RDKIT_CANVAS']='sping'
img=Draw.MolToImage(self.mol,size=(300,300))
self.assertTrue(img)
self.assertEqual(img.size[0],300)
self.assertEqual(img.size[1],300)
def testQtImage(self):
import sys
try:
from PySide import QtGui
from rdkit.Chem.Draw.qtCanvas import Canvas
except ImportError:
return
app = QtGui.QApplication(sys.argv)
img = Draw.MolToQPixmap(self.mol, size=(300, 300))
self.assertTrue(img)
self.assertEqual(img.size().height(), 300)
self.assertEqual(img.size().width(), 300)
def testCairoImageDash(self):
try:
from rdkit.Chem.Draw.cairoCanvas import Canvas
except ImportError:
return
os.environ['RDKIT_CANVAS']='cairo'
img=Draw.MolToImage(self.mol,size=(300,300),kekulize=False)
self.assertTrue(img)
self.assertEqual(img.size[0],300)
self.assertEqual(img.size[1],300)
def testAggImageDash(self):
try:
from rdkit.Chem.Draw.aggCanvas import Canvas
except ImportError:
return
os.environ['RDKIT_CANVAS']='agg'
img=Draw.MolToImage(self.mol,size=(300,300),kekulize=False)
self.assertTrue(img)
self.assertEqual(img.size[0],300)
self.assertEqual(img.size[1],300)
def testSpingImageDash(self):
try:
from rdkit.Chem.Draw.spingCanvas import Canvas
except ImportError:
return
os.environ['RDKIT_CANVAS']='sping'
img=Draw.MolToImage(self.mol,size=(300,300),kekulize=False)
self.assertTrue(img)
self.assertEqual(img.size[0],300)
self.assertEqual(img.size[1],300)
def testGithubIssue54(self):
try:
from rdkit.Chem.Draw.spingCanvas import Canvas
except ImportError:
return
os.environ['RDKIT_CANVAS']='sping'
mol = Chem.MolFromSmiles('c1([O])ccc(O)cc1')
img = Draw.MolToImage(mol)
self.assertTrue(img)
def testGithubIssue86(self):
mol = Chem.MolFromSmiles('F[C@H](Cl)Br')
for b in mol.GetBonds():
self.assertEqual(b.GetBondDir(),Chem.BondDir.NONE)
img = Draw.MolToImage(mol,kekulize=False)
self.assertTrue(img)
for b in mol.GetBonds():
self.assertEqual(b.GetBondDir(),Chem.BondDir.NONE)
Chem.WedgeMolBonds(mol,mol.GetConformer())
obds = [x.GetBondDir() for x in mol.GetBonds()]
self.assertEqual(obds.count(Chem.BondDir.NONE),2)
img = Draw.MolToImage(mol,kekulize=False)
self.assertTrue(img)
nbds = [x.GetBondDir() for x in mol.GetBonds()]
self.assertEqual(obds,nbds)
if __name__ == '__main__':
unittest.main()
|
soerendip42/rdkit
|
rdkit/Chem/Draw/UnitTestDraw.py
|
Python
|
bsd-3-clause
| 5,233
|
[
"RDKit"
] |
7e78da78564f056ffcde3fd0c52414986b1d17e8c065c9d392ed1747e63c4cdc
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for feature_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import feature_utils
import mass_spec_constants as ms_constants
import numpy as np
from rdkit import Chem
import tensorflow as tf
class FeatureUtilsTest(tf.test.TestCase):
def setUp(self):
test_mol_smis = [
'CCCC', 'CC(=CC(=O)C)O.CC(=CC(=O)C)O.[Cu]', 'CCCCl',
('C[C@H](CCCC(C)C)[C@H]1CC[C@@H]2[C@@]1'
'(CC[C@H]3[C@H]2CC=C4[C@@]3(CC[C@@H](C4)O)C)C')
]
self.test_mols = [
Chem.MolFromSmiles(mol_str) for mol_str in test_mol_smis
]
def _validate_smiles_string_tokenization(self, smiles_string,
expected_token_list):
token_list = feature_utils.tokenize_smiles(np.array([smiles_string]))
self.assertAllEqual(token_list, expected_token_list)
def test_tokenize_smiles_string(self):
self._validate_smiles_string_tokenization('CCCC', [28, 28, 28, 28])
self._validate_smiles_string_tokenization('ClCCCC', [31, 28, 28, 28, 28])
self._validate_smiles_string_tokenization('CCClCC', [28, 28, 31, 28, 28])
self._validate_smiles_string_tokenization('CCCCCl', [28, 28, 28, 28, 31])
self._validate_smiles_string_tokenization('ClC(CC)CCl',
[31, 28, 2, 28, 28, 3, 28, 31])
self._validate_smiles_string_tokenization(
'ClC(CCCl)CCl', [31, 28, 2, 28, 28, 31, 3, 28, 31])
self._validate_smiles_string_tokenization('BrCCCCCl',
[27, 28, 28, 28, 28, 31])
self._validate_smiles_string_tokenization('ClCCCCBr',
[31, 28, 28, 28, 28, 27])
self._validate_smiles_string_tokenization('[Te][te]',
[81, 71, 83, 81, 71, 83])
def test_check_mol_only_has_atoms(self):
result = [
feature_utils.check_mol_only_has_atoms(mol, ['C'])
for mol in self.test_mols
]
self.assertAllEqual(result, [True, False, False, False])
def test_check_mol_does_not_have_atoms(self):
result = [
feature_utils.check_mol_does_not_have_atoms(
mol, ms_constants.METAL_ATOM_SYMBOLS) for mol in self.test_mols
]
self.assertAllEqual(result, [True, False, True, True])
def test_make_filter_by_substructure(self):
filter_fn = feature_utils.make_filter_by_substructure('steroid')
result = [filter_fn(mol) for mol in self.test_mols]
self.assertAllEqual(result, [False, False, False, True])
def test_convert_spectrum_array_to_string(self):
spectra_array = np.zeros((2, 1000))
spectra_array[0, 3] = 100
spectra_array[1, 39] = 100
spectra_array[1, 21] = 60
expected_spectra_strings = ['3 100', '21 60\n39 100']
result_spectra_strings = []
for idx in range(np.shape(spectra_array)[0]):
result_spectra_strings.append(
feature_utils.convert_spectrum_array_to_string(spectra_array[idx, :]))
self.assertAllEqual(expected_spectra_strings, result_spectra_strings)
if __name__ == '__main__':
tf.test.main()
|
brain-research/deep-molecular-massspec
|
feature_utils_test.py
|
Python
|
apache-2.0
| 3,719
|
[
"RDKit"
] |
5fb9eb4bb1ce1a49e3129246154282acfb002412e870254d7cd7a0fad593dd05
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to display running config of Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_showrun
author: "Dave Kasberg (@dkasberg)"
short_description: Collect the current running configuration on devices running Lenovo CNOS
description:
- This module allows you to view the switch running configuration. It executes the display running-config CLI
command on a switch and returns a file containing the current running configuration of the target network
device. This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the playbook is run.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_showrun.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_showrun. These are written in the main.yml file of the tasks directory.
---
- name: Run show running-config
cnos_showrun:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_showrun_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Running Configuration saved in file"
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
cliCommand = "display running-config"
outputfile = module.params['outputfile']
hostIP = module.params['host']
output = ""
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Send the CLi command
output = output + cnos.waitForDeviceResponse(cliCommand + "\n", "#", 2, remote_conn)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Running Configuration saved in file ")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/lenovo/cnos_showrun.py
|
Python
|
bsd-3-clause
| 4,911
|
[
"VisIt"
] |
37a8ac32fa651a9448cf4008b1276b308c3a3a3474582acd2d9367b92b05be90
|
def cell_array_slicer(data):
""" Iterate over cell components on a vtk cell array
VTK stores the associated point index for each cell in a one
dimensional array based on the following template::
[n, id0, id1, id2, ..., idn, m, id0, ...]
The iterator takes a cell array and returns the point indices for
each cell.
"""
count = 0
collection = []
for value in data:
if count == 0:
collection = []
count = value
else:
collection.append(value)
count -= 1
if count == 0:
yield collection
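# Added usage sketch (not part of the original module): for a packed VTK cell array
# [n, id0, ..., idn, m, id0, ...] as described in the docstring, the iterator yields
# one list of point indices per cell, e.g.
#
#     list(cell_array_slicer([3, 0, 1, 2, 2, 3, 4]))  # -> [[0, 1, 2], [3, 4]]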
|
simphony/simphony-mayavi
|
simphony_mayavi/core/cell_array_tools.py
|
Python
|
bsd-2-clause
| 620
|
[
"VTK"
] |
25a2fa6c80b17a5081dd14703a8501f98f49b9c0b59af21f7cd05953738d1e3e
|
"""\
Create general Channel Proto, pass in name, x and y power, and params
Also, create the library of channels
Might need a few other chan_proto types, such as
inf-tau channels
Ca dep channels
chan_proto requires alpha and beta params for both activation and inactivation
If no inactivation, just send in empty Yparam array.
"""
from __future__ import print_function, division
import moose
import numpy as np
from util import NamedList
SSTauChannelParams = NamedList('SSTauChannelParams', '''
Arate
A_B
A_C
Avhalf
Avslope
taumin
tauVdep
tauPow
tauVhalf
tauVslope''')
AlphaBetaChannelParams = NamedList('AlphaBetaChannelParams', '''
A_rate
A_B
A_C
Avhalf
A_vslope
B_rate
B_B
B_C
Bvhalf
B_vslope''')
ZChannelParams = NamedList('ZChannelParams', 'Kd power tau')
ChannelSettings = NamedList('ChannelSettings', 'Xpow Ypow Zpow Erev name')
def interpolate_values_in_table(tabA,V_0,l=40):
    '''This function interpolates values in the table
    around tabA[V_0]. '''
    import param_chan
V = np.linspace(param_chan.VMIN, param_chan.VMAX, len(tabA))
idx = abs(V-V_0).argmin()
A_min = tabA[idx-l]
V_min = V[idx-l]
A_max = tabA[idx+l]
V_max = V[idx+l]
a = (A_max-A_min)/(V_max-V_min)
b = A_max - a*V_max
tabA[idx-l:idx+l] = V[idx-l:idx+l]*a+b
return tabA
def fix_singularities(Params,Gate):
import param_chan
if Params.A_C < 0:
V_0 = Params.A_vslope*np.log(-Params.A_C)-Params.Avhalf
if V_0 > param_chan.VMIN and V_0 < param_chan.VMAX:
#change values in tableA and tableB, because tableB contains sum of alpha and beta
tabA = interpolate_values_in_table(Gate.tableA,V_0)
tabB = interpolate_values_in_table(Gate.tableB,V_0)
Gate.tableA = tabA
Gate.tableB = tabB
if Params.B_C < 0:
V_0 = Params.B_vslope*np.log(-Params.B_C)-Params.Bvhalf
if V_0 > param_chan.VMIN and V_0 < param_chan.VMAX:
#change values in tableB
tabB = interpolate_values_in_table(Gate.tableB,V_0)
Gate.tableB = tabB
return Gate
#may need a CaV channel if X gate uses alpha,beta and Ygate uses inf tau
#Or, have Y form an option - if in tau, do something like NaF
def chan_proto(chanpath, params):
import param_chan
chan = moose.HHChannel(chanpath)
chan.Xpower = params.channel.Xpow
if params.channel.Xpow > 0:
xGate = moose.HHGate(chan.path + '/gateX')
xGate.setupAlpha(params.X + [param_chan.VDIVS, param_chan.VMIN, param_chan.VMAX])
xGate = fix_singularities(params.X, xGate)
chan.Ypower = params.channel.Ypow
if params.channel.Ypow > 0:
yGate = moose.HHGate(chan.path + '/gateY')
yGate.setupAlpha(params.Y + [param_chan.VDIVS, param_chan.VMIN, param_chan.VMAX])
yGate = fix_singularities(params.Y, yGate)
if params.channel.Zpow > 0:
chan.Zpower = params.channel.Zpow
zgate = moose.HHGate(chan.path + '/gateZ')
ca_array = np.linspace(param_chan.CAMIN, param_chan.CAMAX, param_chan.CADIVS)
zgate.min = param_chan.CAMIN
zgate.max = param_chan.CAMAX
caterm = (ca_array/params.Z.Kd) ** params.Z.power
inf_z = caterm / (1 + caterm)
tau_z = params.Z.tau * np.ones(len(ca_array))
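        # Added note (assumption about the MOOSE HHGate convention): when a gate is
        # filled directly in inf/tau form, tableA holds inf/tau (alpha) and tableB
        # holds 1/tau (alpha + beta), which is what the two assignments below do.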
zgate.tableA = inf_z / tau_z
zgate.tableB = 1 / tau_z
chan.useConcentration = True
chan.Ek = params.channel.Erev
return chan
TypicalOneDalpha = NamedList('TypicalOneDalpha',
'''channel X Y Z=[] calciumPermeable=False calciumPermeable2=False''')
_FUNCTIONS = {
TypicalOneDalpha: chan_proto,
}
#*params... passes the set of values not as a list but as individuals
def make_channel(chanpath, params):
func = _FUNCTIONS[params.__class__]
return func(chanpath, params)
def chanlib():
import param_chan
if not moose.exists('/library'):
moose.Neutral('/library')
#Adding all the channels to the library. *list removes list elements from the list,
#so they can be used as function arguments
chan = [make_channel('/library/'+key, value) for key, value in param_chan.ChanDict.items()]
return chan
|
subhacom/moose-core
|
tests/python/chan_proto.py
|
Python
|
gpl-3.0
| 4,834
|
[
"MOOSE"
] |
af4b535c7659a7b5cc086da050e533c64dfe65b77887755d8a0281dcb3b870f9
|
#
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
import unittest as ut
import espressomd
import numpy as np
from espressomd.electrostatics import P3M, DH, CDH
class ElectrostaticInteractionsTests(ut.TestCase):
# Handle to espresso system
system = espressomd.System()
def paramsMatch(self, inParams, outParams):
"""Check, if the parameters set and gotten back match.
Only check keys present in inParams.
"""
for k in inParams.keys():
if k not in outParams:
print(k, "missing from returned parameters")
return False
if outParams[k] != inParams[k]:
print("Mismatch in parameter ", k, inParams[k], outParams[k])
return False
return True
def setUp(self):
self.system.box_l = 10, 10, 10
self.system.part[0].pos = 0, 0, 0
self.system.part[1].pos = 0.1, 0.1, 0.1
self.system.part[0].q = 1
self.system.part[1].q = -1
def generateTestForElectrostaticInteraction(_interClass, _params):
"""Generates test cases for checking interaction parameters set and gotten back
from Es actually match. Only keys which are present in _params are checked
1st: Interaction parameters as dictionary, i.e., {"k"=1.,"r_0"=0.
2nd: Name of the interaction property to set (i.e. "P3M")
"""
params = _params
interClass = _interClass
def func(self):
# This code is run at the execution of the generated function.
# It will use the state of the variables in the outer function,
# which was there, when the outer function was called
# set Parameter
Inter = interClass(**params)
Inter.validateParams()
Inter._setParamsInEsCore()
# Read them out again
outParams = Inter.getParams()
            self.assertTrue(self.paramsMatch(params, outParams), "Mismatch of parameters.\nParameters set " +
params.__str__() + " vs. output parameters " + outParams.__str__())
return func
test_P3M = generateTestForElectrostaticInteraction(P3M, dict(bjerrum_length=1.0,
epsilon=0.0,
inter=1000,
mesh_off=[
0.5, 0.5, 0.5],
r_cut=2.4,
mesh=[
2, 2, 2],
cao=1,
alpha=12,
accuracy=0.01))
# test_P3M_GPU=
test_DH = generateTestForElectrostaticInteraction(DH, dict(bjerrum_length=1.0,
kappa=2.3,
r_cut=2))
test_CDH = generateTestForElectrostaticInteraction(CDH, dict(bjerrum_length=1.0,
kappa=2.3,
r_cut=2,
r0=1,
r1=2,
eps_int=0.8,
eps_ext=1,
alpha=2))
if __name__ == "__main__":
print("Features: ", espressomd.features())
ut.main()
|
rbardak/espresso
|
testsuite/python/electrostaticInteractions.py
|
Python
|
gpl-3.0
| 4,650
|
[
"ESPResSo"
] |
acbb23120da9f9c51c2798d511c8879edac3030e570f1dce08e74777f7eadc5f
|
r"""
brew install libtiff libjpeg webp little-cms2
sudo easy_install pip
sudo pip install Pillow
"""
import sys, os, json, math, gzip, shutil
import numpy as np
from PIL import ImImagePlugin
from PIL import Image
from vtk import *
from tonic.paraview import data_converter
def extractFloatArrays(directory, ranges):
for root, dirs, files in os.walk(directory):
for name in files:
if '.png' in name:
fieldName = name[name.index('_')+1:-4]
srcFile = os.path.join(root, name)
destFile = os.path.join(root, name[:-4] + '.float32')
imageSize = data_converter.convertImageToFloat(srcFile, destFile, ranges[fieldName])
# Remove image
os.remove(srcFile)
# Compress data
with open(destFile, 'rb') as f_in, gzip.open(destFile + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(destFile)
return imageSize
def createIntensityArray(directory, nbLayers):
outputArray = vtkUnsignedCharArray()
imageSize = 0
reader = vtkPNGReader()
for layerIdx in range(nbLayers):
luminanceImage = os.path.join(directory, str(layerIdx) + '.luminance')
reader.SetFileName(luminanceImage)
reader.Update()
rgbArray = reader.GetOutput().GetPointData().GetArray(0)
# Extract image size and allocate memory
if imageSize == 0:
imageSize = rgbArray.GetNumberOfTuples()
outputArray.SetNumberOfTuples(imageSize * nbLayers)
# Extract each byte
for idx in range(imageSize):
outputArray.SetValue(layerIdx * imageSize + idx, (rgbArray.GetValue(idx * 3)))
# Remove luminance file
os.remove(luminanceImage)
return outputArray
def createOrderFile(directory, nbLayers, intensityArray, width, height):
# Load *.im, sort pixels, save
# Load data
layerImages = []
totalSize = intensityArray.GetNumberOfTuples()
imageSize = totalSize / nbLayers
for layerIdx in range(nbLayers):
imagePath = os.path.join(directory, str(layerIdx) + '.im')
im = Image.open(str(imagePath))
# im.show()
# try:
# input("Press enter to continue ")
# except NameError:
# pass
layerImages.append(np.array(im, np.float32).reshape(im.size[1] * im.size[0]))
# Create destination structure
orderArray = vtkUnsignedCharArray()
orderArray.SetName('order');
orderArray.SetNumberOfTuples(totalSize)
sortedIntensity = vtkUnsignedCharArray()
sortedIntensity.SetName('intensity');
sortedIntensity.SetNumberOfTuples(totalSize)
for pixelIdx in range(imageSize):
x = int(pixelIdx % width)
y = int(pixelIdx / width)
flipYIdx = width * (height - y - 1) + x
# flipYIdx = imageSize - pixelIdx - 1
# flipYIdx = pixelIdx
depthStack = []
for imageArray in layerImages:
depthStack.append((imageArray[flipYIdx], len(depthStack)))
depthStack.sort(key=lambda tup: tup[0])
for destLayerIdx in range(len(depthStack)):
# if depthStack[destLayerIdx][0] > 255:
# orderArray.SetValue((imageSize * destLayerIdx) + pixelIdx, 255)
# sortedIntensity.SetValue((imageSize * destLayerIdx) + pixelIdx, 0)
# else:
sourceLayerIdx = depthStack[destLayerIdx][1]
# Copy Idx
orderArray.SetValue((imageSize * destLayerIdx) + pixelIdx, sourceLayerIdx)
sortedIntensity.SetValue((imageSize * destLayerIdx) + pixelIdx, intensityArray.GetValue((imageSize * sourceLayerIdx) + pixelIdx))
# Write order file
orderFileName = os.path.join(directory,'order.uint8')
with open(orderFileName, 'wb') as f:
f.write(buffer(orderArray))
# Compress data
with open(orderFileName, 'rb') as f_in, gzip.open(orderFileName + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(orderFileName)
# Write intensity file
intensityFileName = os.path.join(directory,'intensity.uint8')
with open(intensityFileName, 'wb') as f:
f.write(buffer(sortedIntensity))
# Compress data
with open(intensityFileName, 'rb') as f_in, gzip.open(intensityFileName + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(intensityFileName)
# Remove IM files
for layerIdx in range(nbLayers):
imagePath = os.path.join(directory, str(layerIdx) + '.im')
os.remove(imagePath)
# =============================================================================
# Start processing dataset
# =============================================================================
convertFileName = os.path.join(sys.argv[-1], 'convert.json')
tonicFileName = os.path.join(sys.argv[-1], 'index.json')
with open(convertFileName, "r") as f:
convertInfo = json.load(f)
for directory in convertInfo['directories']:
# Convert images to float
imageSize = extractFloatArrays(directory, convertInfo['scalars'])
    # Convert luminance to intensity
intensityStack = createIntensityArray(directory, convertInfo['layers'])
# Generate order layer
createOrderFile(directory, convertInfo['layers'], intensityStack, imageSize[0], imageSize[1])
# Update image size inside index.json
with open(tonicFileName, "r") as f:
tonicMeta = json.load(f)
tonicMeta['SortedComposite']['dimensions'] = [ imageSize[0], imageSize[1] ]
print "resolution", imageSize[0], 'x', imageSize[1], '=', (imageSize[0]*imageSize[1])
with open(tonicFileName + '_', 'w') as fw:
fw.write(json.dumps(tonicMeta, indent=4))
os.remove(tonicFileName)
os.rename(tonicFileName + '_', tonicFileName)
os.remove(convertFileName)
|
Kitware/tonic-data-generator
|
python/tonic/cinema/spec-b-converter.py
|
Python
|
bsd-3-clause
| 5,917
|
[
"ParaView",
"VTK"
] |
9f3809926be2ef4604068dba68eb2433b8158d549c1214178e31165e94f6d6b4
|
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
from http import cookies
import dbSession
import dbShared
import cgi
import pymysql
import ghNames
#
# Get current url
try:
url = os.environ['SCRIPT_NAME']
except KeyError:
url = ''
form = cgi.FieldStorage()
# Get Cookies
errorstr = ''
C = cookies.SimpleCookie()
try:
C.load(os.environ['HTTP_COOKIE'])
except KeyError:
errorstr = 'no cookies\n'
if errorstr == '':
try:
currentUser = C['userID'].value
except KeyError:
currentUser = ''
try:
loginResult = C['loginAttempt'].value
except KeyError:
loginResult = 'success'
try:
sid = C['gh_sid'].value
except KeyError:
sid = form.getfirst('gh_sid', '')
else:
currentUser = ''
loginResult = 'success'
sid = form.getfirst('gh_sid', '')
op = form.getfirst('op', '')
friend = form.getfirst('friend', '')
# escape input to prevent sql injection
sid = dbShared.dbInsertSafe(sid)
op = dbShared.dbInsertSafe(op)
friend = dbShared.dbInsertSafe(friend)
# Get a session
logged_state = 0
linkappend = ''
sess = dbSession.getSession(sid)
if (sess != ''):
logged_state = 1
currentUser = sess
linkappend = 'gh_sid=' + sid
# Main program
print('Content-type: text/html\n')
if (logged_state > 0):
try:
conn = dbShared.ghConn()
cursor = conn.cursor()
except Exception:
result = "Error: could not connect to database"
if (cursor):
row = None
sqlStr = ""
if (op != '' and friend != ''):
if (op == "remove"):
# remove friend
sqlStr = "DELETE FROM tUserFriends WHERE userID='" + currentUser + "' AND friendID='" + friend + "';"
result = friend + " removed"
elif (op == "add"):
# add friend
cursor.execute("SELECT added FROM tUserFriends WHERE userID='" + currentUser + "' AND friendID='" + friend + "';")
row = cursor.fetchone()
if row == None:
# insert friend
sqlStr = "INSERT INTO tUserFriends (userID, friendID, added) VALUES ('" + currentUser + "','" + friend + "',NOW());"
result = friend + " added"
else:
result = friend + " is already on your friends list."
            if sqlStr != '':
                cursor.execute(sqlStr)
else:
result = "Error: You must provide an operation and user id"
cursor.close()
else:
result = "Error: No database connection"
conn.close()
else:
result = "Error: You must be logged in to update your friends."
print(result)
if (result.find("Error:") > -1):
sys.exit(500)
else:
sys.exit(200)
|
pwillworth/galaxyharvester
|
html/changeFriend.py
|
Python
|
gpl-3.0
| 3,145
|
[
"Galaxy"
] |
f5820992de0a0fd2ac5cb85ebe906c72c2bca7d024377f936e01011a139923ba
|
"""
This sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.
The Intent Schema, Custom Slots, and Sample Utterances for this skill, as well
as testing instructions are located at http://amzn.to/1LzFrj6
For additional samples, visit the Alexa Skills Kit Getting Started guide at
http://amzn.to/1LGWsLG
"""
from __future__ import print_function
import urllib.request
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session, debug):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': "SessionSpeechlet - " + title,
'content': "SessionSpeechlet - " + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session,
'debug': debug
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = {}
card_title = "Welcome"
speech_output = "I will " + \
"stop transcribing"
reprompt_text = "You can ask me to start or stop transcribing by saying, " \
"start transcribing or stop transcribing."
debug = "stoping reading"
captioning = urllib.request.urlopen("https://1d496ef7.ngrok.io/stop").read()
#print(captioning)
print(speech_output)
should_end_session = True
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session, debug))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you for trying the Alexa Skills for transcribing. " \
"Bye! "
# Setting this to true ends the session and exits the skill.
should_end_session = True
debug = " "
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session, debug))
def create_transcribe_attribute(stopTrans):
return {"stopTrans": stopTrans}
def set_transcribe_in_session(intent, session):
card_title = intent['name']
session_attributes = {}
should_end_session = False
speech_output = "I will " + \
"stop transcribing" + \
". You can ask me to start transcribing anytime. "
reprompt_text = "You can ask me to start or stop transcribing by saying, " \
"start transcribing or stop transcribing."
    debug = "stopping reading"
captioning = urllib.request.urlopen("https://1d496ef7.ngrok.io/stop").read()
#print(captioning)
print(speech_output)
should_end_session = True
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session, debug))
def get_transcribe_from_session(intent, session):
session_attributes = {}
reprompt_text = None
if session.get('attributes', {}) and "stopTrans" in session.get('attributes', {}):
stopTrans = session['attributes']['stopTrans']
speech_output = "You can " + stopTrans + \
". Goodbye."
should_end_session = True
else:
speech_output = "I'm not sure what you mean. " \
"Please try again."
should_end_session = False
debug = " "
# Setting reprompt_text to None signifies that we do not want to reprompt
# the user. If the user does not respond or says something that is not
# understood, the session will end.
return build_response(session_attributes, build_speechlet_response(
intent['name'], speech_output, reprompt_text, should_end_session, debug))
# --------------- Events ------------------ (Alexa is called)
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
print("on_launch requestId=" + launch_request['requestId'] +
", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "stopTransIsIntent":
return set_transcribe_in_session(intent, session)
elif intent_name == "AMAZON.HelpIntent":
return get_welcome_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# add cleanup logic here
# --------------- Main handler ------------------
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
# if (event['session']['application']['applicationId'] !=
# "amzn1.echo-sdk-ams.app.[unique-value-here]"):
# raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session'])
|
ItsNotABugItsAFeature/transcribe
|
alexa/stopTranscribe.py
|
Python
|
mit
| 7,197
|
[
"VisIt"
] |
b85566bc644d64804f8d3af467c2c2c3c453e357ffa7e31e56b634d5ea3b4f4d
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
__author__ = "Bharat Medasani"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "bkmedasani@lbl.gov"
__date__ = "Aug 2, 2013"
import os
import re
import unittest
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.core.periodic_table import Species
from pymatgen.core.structure import Molecule, Structure
from pymatgen.io.cif import CifParser
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.zeopp import (
ZeoCssr,
ZeoVoronoiXYZ,
get_free_sphere_params,
get_high_accuracy_voronoi_nodes,
get_void_volume_surfarea,
get_voronoi_nodes,
)
from pymatgen.util.testing import PymatgenTest
try:
import zeo
except ImportError:
zeo = None
@unittest.skipIf(not zeo, "zeo not present.")
class ZeoCssrTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")
p = Poscar.from_file(filepath)
self.zeocssr = ZeoCssr(p.structure)
def test_str(self):
expected_string = """4.7595 10.4118 6.0672
90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1
24 0
0 Fe4 P4 O16
1 Fe 0.4749 0.2187 0.7500 0 0 0 0 0 0 0 0 0.0000
2 Fe 0.9749 0.2813 0.2500 0 0 0 0 0 0 0 0 0.0000
3 Fe 0.0251 0.7187 0.7500 0 0 0 0 0 0 0 0 0.0000
4 Fe 0.5251 0.7813 0.2500 0 0 0 0 0 0 0 0 0.0000
5 P 0.4182 0.0946 0.2500 0 0 0 0 0 0 0 0 0.0000
6 P 0.9182 0.4054 0.7500 0 0 0 0 0 0 0 0 0.0000
7 P 0.0818 0.5946 0.2500 0 0 0 0 0 0 0 0 0.0000
8 P 0.5818 0.9054 0.7500 0 0 0 0 0 0 0 0 0.0000
9 O 0.7071 0.0434 0.7500 0 0 0 0 0 0 0 0 0.0000
10 O 0.7413 0.0966 0.2500 0 0 0 0 0 0 0 0 0.0000
11 O 0.2854 0.1657 0.0461 0 0 0 0 0 0 0 0 0.0000
12 O 0.2854 0.1657 0.4539 0 0 0 0 0 0 0 0 0.0000
13 O 0.7854 0.3343 0.5461 0 0 0 0 0 0 0 0 0.0000
14 O 0.7854 0.3343 0.9539 0 0 0 0 0 0 0 0 0.0000
15 O 0.2413 0.4034 0.7500 0 0 0 0 0 0 0 0 0.0000
16 O 0.2071 0.4566 0.2500 0 0 0 0 0 0 0 0 0.0000
17 O 0.7929 0.5434 0.7500 0 0 0 0 0 0 0 0 0.0000
18 O 0.7587 0.5966 0.2500 0 0 0 0 0 0 0 0 0.0000
19 O 0.2146 0.6657 0.0461 0 0 0 0 0 0 0 0 0.0000
20 O 0.2146 0.6657 0.4539 0 0 0 0 0 0 0 0 0.0000
21 O 0.7146 0.8343 0.5461 0 0 0 0 0 0 0 0 0.0000
22 O 0.7146 0.8343 0.9539 0 0 0 0 0 0 0 0 0.0000
23 O 0.2587 0.9034 0.7500 0 0 0 0 0 0 0 0 0.0000
24 O 0.2929 0.9566 0.2500 0 0 0 0 0 0 0 0 0.0000"""
self.assertEqual(str(self.zeocssr), expected_string)
def test_from_file(self):
filename = os.path.join(PymatgenTest.TEST_FILES_DIR, "EDI.cssr")
zeocssr = ZeoCssr.from_file(filename)
self.assertIsInstance(zeocssr.structure, Structure)
# @unittest.skipIf(not zeo, "zeo not present.")
class ZeoCssrOxiTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")
p = Poscar.from_file(filepath)
structure = BVAnalyzer().get_oxi_state_decorated_structure(p.structure)
self.zeocssr = ZeoCssr(structure)
def test_str(self):
expected_string = """4.7595 10.4118 6.0672
90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1
24 0
0 Fe4 P4 O16
1 Fe3+ 0.4749 0.2187 0.7500 0 0 0 0 0 0 0 0 0.0000
2 Fe3+ 0.9749 0.2813 0.2500 0 0 0 0 0 0 0 0 0.0000
3 Fe3+ 0.0251 0.7187 0.7500 0 0 0 0 0 0 0 0 0.0000
4 Fe3+ 0.5251 0.7813 0.2500 0 0 0 0 0 0 0 0 0.0000
5 P5+ 0.4182 0.0946 0.2500 0 0 0 0 0 0 0 0 0.0000
6 P5+ 0.9182 0.4054 0.7500 0 0 0 0 0 0 0 0 0.0000
7 P5+ 0.0818 0.5946 0.2500 0 0 0 0 0 0 0 0 0.0000
8 P5+ 0.5818 0.9054 0.7500 0 0 0 0 0 0 0 0 0.0000
9 O2- 0.7071 0.0434 0.7500 0 0 0 0 0 0 0 0 0.0000
10 O2- 0.7413 0.0966 0.2500 0 0 0 0 0 0 0 0 0.0000
11 O2- 0.2854 0.1657 0.0461 0 0 0 0 0 0 0 0 0.0000
12 O2- 0.2854 0.1657 0.4539 0 0 0 0 0 0 0 0 0.0000
13 O2- 0.7854 0.3343 0.5461 0 0 0 0 0 0 0 0 0.0000
14 O2- 0.7854 0.3343 0.9539 0 0 0 0 0 0 0 0 0.0000
15 O2- 0.2413 0.4034 0.7500 0 0 0 0 0 0 0 0 0.0000
16 O2- 0.2071 0.4566 0.2500 0 0 0 0 0 0 0 0 0.0000
17 O2- 0.7929 0.5434 0.7500 0 0 0 0 0 0 0 0 0.0000
18 O2- 0.7587 0.5966 0.2500 0 0 0 0 0 0 0 0 0.0000
19 O2- 0.2146 0.6657 0.0461 0 0 0 0 0 0 0 0 0.0000
20 O2- 0.2146 0.6657 0.4539 0 0 0 0 0 0 0 0 0.0000
21 O2- 0.7146 0.8343 0.5461 0 0 0 0 0 0 0 0 0.0000
22 O2- 0.7146 0.8343 0.9539 0 0 0 0 0 0 0 0 0.0000
23 O2- 0.2587 0.9034 0.7500 0 0 0 0 0 0 0 0 0.0000
24 O2- 0.2929 0.9566 0.2500 0 0 0 0 0 0 0 0 0.0000"""
self.assertEqual(str(self.zeocssr), expected_string)
def test_from_file(self):
filename = os.path.join(PymatgenTest.TEST_FILES_DIR, "EDI_oxistate_decorated.cssr")
zeocssr = ZeoCssr.from_file(filename)
self.assertIsInstance(zeocssr.structure, Structure)
@unittest.skipIf(not zeo, "zeo not present.")
class ZeoVoronoiXYZTest(unittest.TestCase):
def setUp(self):
coords = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
]
prop = [0.4, 0.2, 0.2, 0.2, 0.2]
self.mol = Molecule(["C", "H", "H", "H", "H"], coords, site_properties={"voronoi_radius": prop})
self.xyz = ZeoVoronoiXYZ(self.mol)
def test_str(self):
ans = """5
H4 C1
C 0.000000 0.000000 0.000000 0.400000
H 1.089000 0.000000 0.000000 0.200000
H -0.363000 1.026719 0.000000 0.200000
H -0.363000 -0.513360 -0.889165 0.200000
H -0.363000 -0.513360 0.889165 0.200000"""
self.assertEqual(str(self.xyz), ans)
def test_from_file(self):
filename = os.path.join(PymatgenTest.TEST_FILES_DIR, "EDI_voro.xyz")
vor = ZeoVoronoiXYZ.from_file(filename)
self.assertIsInstance(vor.molecule, Molecule)
@unittest.skipIf(not zeo, "zeo not present.")
class GetVoronoiNodesTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")
p = Poscar.from_file(filepath)
self.structure = p.structure
bv = BVAnalyzer()
valences = bv.get_valences(self.structure)
el = [site.species_string for site in self.structure.sites]
valence_dict = dict(zip(el, valences))
self.rad_dict = {}
for k, v in valence_dict.items():
self.rad_dict[k] = float(Species(k, v).ionic_radius)
assert len(self.rad_dict) == len(self.structure.composition)
def test_get_voronoi_nodes(self):
(
vor_node_struct,
vor_edge_center_struct,
vor_face_center_struct,
) = get_voronoi_nodes(self.structure, self.rad_dict)
self.assertIsInstance(vor_node_struct, Structure)
self.assertIsInstance(vor_edge_center_struct, Structure)
self.assertIsInstance(vor_face_center_struct, Structure)
print(len(vor_node_struct.sites))
print(len(vor_face_center_struct.sites))
@unittest.skip("file free_sph.cif not present")
class GetFreeSphereParamsTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "free_sph.cif")
self.structure = Structure.from_file(filepath)
self.rad_dict = {
"Ge": 0.67,
"P": 0.52,
"S": 1.7,
"La": 1.17,
"Zr": 0.86,
"O": 1.26,
}
def test_get_free_sphere_params(self):
free_sph_params = get_free_sphere_params(self.structure, rad_dict=self.rad_dict)
# Zeo results can change in future. Hence loose comparison
self.assertAlmostEqual(free_sph_params["inc_sph_max_dia"], 2.58251, places=1)
self.assertAlmostEqual(free_sph_params["free_sph_max_dia"], 1.29452, places=1)
self.assertAlmostEqual(free_sph_params["inc_sph_along_free_sph_path_max_dia"], 2.58251, places=1)
@unittest.skipIf(not zeo, "zeo not present.")
class GetHighAccuracyVoronoiNodesTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")
p = Poscar.from_file(filepath)
self.structure = p.structure
bv = BVAnalyzer()
valences = bv.get_valences(self.structure)
el = [site.species_string for site in self.structure.sites]
valence_dict = dict(zip(el, valences))
self.rad_dict = {}
for k, v in valence_dict.items():
self.rad_dict[k] = float(Species(k, v).ionic_radius)
assert len(self.rad_dict) == len(self.structure.composition)
def test_get_voronoi_nodes(self):
# vor_node_struct, vor_ec_struct, vor_fc_struct = \
# get_high_accuracy_voronoi_nodes(self.structure, self.rad_dict)
vor_node_struct = get_high_accuracy_voronoi_nodes(self.structure, self.rad_dict)
self.assertIsInstance(vor_node_struct, Structure)
# self.assertIsInstance(vor_ec_struct, Structure)
# self.assertIsInstance(vor_fc_struct, Structure)
print(len(vor_node_struct.sites))
# print(len(vor_fc_struct.sites))
@unittest.skipIf(not zeo, "zeo not present.")
class GetVoronoiNodesMultiOxiTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")
p = Poscar.from_file(filepath)
self.structure = p.structure
bv = BVAnalyzer()
self.structure = bv.get_oxi_state_decorated_structure(self.structure)
valences = bv.get_valences(self.structure)
radii = []
for i in range(len(valences)):
el = self.structure.sites[i].specie.symbol
radius = Species(el, valences[i]).ionic_radius
radii.append(radius)
el = [site.species_string for site in self.structure.sites]
self.rad_dict = dict(zip(el, radii))
for el in self.rad_dict.keys():
print((el, self.rad_dict[el].real))
def test_get_voronoi_nodes(self):
(
vor_node_struct,
vor_edge_center_struct,
vor_face_center_struct,
) = get_voronoi_nodes(self.structure, self.rad_dict)
self.assertIsInstance(vor_node_struct, Structure)
self.assertIsInstance(vor_edge_center_struct, Structure)
self.assertIsInstance(vor_face_center_struct, Structure)
@unittest.skip("The function is deprecated")
class GetVoidVolumeSurfaceTest(unittest.TestCase):
def setUp(self):
filepath1 = os.path.join(PymatgenTest.TEST_FILES_DIR, "Li2O.cif")
p = CifParser(filepath1).get_structures(False)[0]
bv = BVAnalyzer()
valences = bv.get_valences(p)
el = [site.species_string for site in p.sites]
val_dict = dict(zip(el, valences))
self._radii = {}
for k, v in val_dict.items():
k1 = re.sub(r"[1-9,+,\-]", "", k)
self._radii[k1] = float(Species(k1, v).ionic_radius)
p.remove(0)
self._vac_struct = p
def test_void_volume_surface_area(self):
vol, sa = get_void_volume_surfarea(self._vac_struct, self._radii)
# print "vol: ", vol, "sa: ", sa
self.assertIsInstance(vol, float)
self.assertIsInstance(sa, float)
if __name__ == "__main__":
unittest.main()
|
gmatteo/pymatgen
|
pymatgen/io/tests/test_zeopp.py
|
Python
|
mit
| 11,239
|
[
"VASP",
"pymatgen"
] |
ca6d0f60364eb3f91f123f3c8811b3ffa75065f777959c0611a87b31c1e9b3a4
|
#!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
import re
import logging
from lib.elasticbeanstalk.request import TemplateSpecification
from lib.utility import misc, shell_utils
from scli import api_wrapper, config_file, prompt
from scli.constants import DevToolsEndpoint, DevToolsDefault, DefaultAppSource, \
OptionSettingVPC, OptionSettingEnvironmentType, \
ParameterName as PName, ParameterSource as PSource, ServiceDefault, ServiceEndpoint
from scli.parameter import Parameter
from scli.resources import DevToolsMessage, ValidationMessage
from scli.terminal.base import TerminalBase
log = logging.getLogger('eb')
def generate_endpoint(parameter_pool, region, source, force = False):
parameter_pool.put(Parameter(PName.ServiceEndpoint,
ServiceEndpoint[region],
source))
parameter_pool.put(Parameter(PName.DevToolsEndpoint,
DevToolsEndpoint[region],
source))
def has_default_app(parameter_pool, solution_stack, eb_client = None):
appsource_options = {DefaultAppSource.Namespace : {DefaultAppSource.OptionName}}
if not eb_client:
eb_client = api_wrapper.create_eb_client(parameter_pool)
spec = TemplateSpecification()
    spec.template_source.solution_stack_name = solution_stack
options = api_wrapper.retrieve_configuration_options(eb_client = eb_client,
solution_stack = solution_stack,
options = appsource_options,
template_specification = spec)
for option in options:
if misc.string_equal_ignore_case(DefaultAppSource.Namespace, option.namespace) \
and misc.string_equal_ignore_case(DefaultAppSource.OptionName, option.name):
return True
return False
def trim_vpc_options(parameter_pool, option_settings, option_to_remove):
if OptionSettingVPC.Namespace in option_settings\
and OptionSettingVPC.MagicOptionName in option_settings[OptionSettingVPC.Namespace]\
and not misc.is_blank_string(option_settings[OptionSettingVPC.Namespace]\
[OptionSettingVPC.MagicOptionName]):
# VPC enabled
for namespace in OptionSettingVPC.TrimOption:
for option in OptionSettingVPC.TrimOption[namespace]:
remove_option_setting(option_settings, option_to_remove, namespace, option)
# Reapply DBSubnets if RDS is enabled
if parameter_pool.get_value(PName.RdsEnabled):
option_location = parameter_pool.get_value(PName.OptionSettingFile, False)
ori_option_settings = config_file.load_env_option_setting_file(option_location,
quiet = True)
if OptionSettingVPC.Namespace in ori_option_settings\
and OptionSettingVPC.DBSubnets in ori_option_settings[OptionSettingVPC.Namespace]:
dbsubnets = ori_option_settings[OptionSettingVPC.Namespace][OptionSettingVPC.DBSubnets]
if not misc.is_blank_string(dbsubnets):
add_option_setting(option_settings, option_to_remove,
OptionSettingVPC.Namespace,
OptionSettingVPC.DBSubnets,
dbsubnets)
else:
# VPC disabled
remove_option_namespace(option_settings, option_to_remove, OptionSettingVPC.Namespace)
def apply_environment_type(parameter_pool, template_spec, stack_name, env_name, option_settings, option_to_remove):
# If not specified, skip
envtype = parameter_pool.get_value(PName.EnvironmentType)
if envtype:
# Describe applicable option settings
eb_client = api_wrapper.create_eb_client(parameter_pool)
app_name = parameter_pool.get_value(PName.ApplicationName, False)
if env_name:
raw_option_defs = api_wrapper.retrieve_configuration_options(eb_client=eb_client,
app_name=app_name,
env_name=env_name,
template_specification=template_spec,
options=None)
else:
raw_option_defs = api_wrapper.retrieve_configuration_options(eb_client=eb_client,
app_name=app_name,
solution_stack=stack_name,
template_specification=template_spec,
options=None)
option_defs = set()
for raw_option_def in raw_option_defs:
option_defs.add(raw_option_def.namespace + '-' + raw_option_def.name)
# Return if environment type option is not available
if OptionSettingEnvironmentType.Namespace + '-' + \
OptionSettingEnvironmentType.OptionName not in option_defs:
prompt.result(ValidationMessage.EnvTypeInapplicable.format(envtype))
return
# remove inapplicable option settings
removed = False
for namespace in list(option_settings.keys()):
# TODO Fix this temporary hack to let environment tier options pass through
if namespace == 'aws:elasticbeanstalk:sqsd':
continue
for option_name in list(option_settings[namespace].keys()):
if namespace + '-' + option_name not in option_defs:
remove_option_setting(option_settings, option_to_remove,
namespace, option_name, False)
removed = True
if removed:
prompt.result(ValidationMessage.EnvTypeBlowAwayOptionSettings)
# Set environment type
add_option_setting(option_settings, option_to_remove,
OptionSettingEnvironmentType.Namespace,
OptionSettingEnvironmentType.OptionName,
envtype)
def check_app_version(parameter_pool, eb_client = None):
#TODO: Do we need to blast version info away if this part is strong enough?
if not parameter_pool.has(PName.ApplicationVersionName) \
or parameter_pool.get_source(PName.ApplicationVersionName) == PSource.Default:
version_name = get_head_version(parameter_pool, eb_client=eb_client, quiet=True)
if version_name is not None:
log.info('Found a version from local repository: {0}. Using this version.'.\
format(version_name))
return version_name
else:
# Otherwise try push a new one
if not parameter_pool.get_value(PName.Force) == ServiceDefault.ENABLED\
and not TerminalBase.ask_confirmation(DevToolsMessage.PushLocalHead):
return ServiceDefault.DEFAULT_VERSION_NAME
else:
if shell_utils.git_aws_push(False):
version_name = get_head_version(parameter_pool,
eb_client=eb_client,
quiet=False)
if version_name:
return version_name
return ServiceDefault.DEFAULT_VERSION_NAME
else:
# Verify existence of version
app_name = parameter_pool.get_value(PName.ApplicationName, False)
version_names = api_wrapper.get_all_versions(parameter_pool, app_name, eb_client)
version_name = parameter_pool.get_value(PName.ApplicationVersionName)
if version_name in version_names:
# Assume version is still valid and compatible with current solution stack
return version_name
else:
            # Specified version does not exist on the service; fall back to the default version name.
return ServiceDefault.DEFAULT_VERSION_NAME
def get_head_version(parameter_pool, eb_client = None, quiet = True):
# Get all versions
app_name = parameter_pool.get_value(PName.ApplicationName, False)
version_names = api_wrapper.get_all_versions(parameter_pool, app_name, eb_client)
# Try get local commit HEAD hash
head_hash = shell_utils.get_repo_head_hash(quiet)
if head_hash is None:
return ServiceDefault.DEFAULT_VERSION_NAME
# Try find a version corresponding to local HEAD
version_re = re.compile(DevToolsDefault.VersionNameRe.format(head_hash),re.UNICODE)
timestamp = 0
for version in version_names:
if version_re.match(version):
cur_timestamp = int(version.split(DevToolsDefault.NameDelimiter)[2])
timestamp = cur_timestamp if cur_timestamp > timestamp else timestamp
if timestamp > 0:
# Found a version generated from local repos HEAD
log.info('Found a version generated from local HEAD {0}. Using this version.'.\
format(head_hash))
return DevToolsDefault.VersionNameMask.format(head_hash, timestamp)
else:
return None
# Add/update an option setting of specified value to option setting dict
def add_option_setting(option_settings, option_remove, namespace, option, value):
if namespace not in option_settings:
option_settings[namespace] = dict()
option_settings[namespace][option] = value
if namespace in option_remove and option in option_remove[namespace]:
option_remove[namespace].remove(option)
# Remove an option setting from option setting dict
def remove_option_setting(option_settings, option_remove,
namespace, option, add_to_remove = True):
if namespace in option_settings and option in option_settings[namespace]:
del option_settings[namespace][option]
if len(option_settings[namespace]) < 1:
del option_settings[namespace]
if add_to_remove:
if namespace not in option_remove:
option_remove[namespace] = set()
option_remove[namespace].add(option)
# Remove an entire option namespace from option setting dict
def remove_option_namespace(option_settings, option_remove,
namespace, add_to_remove = True):
if namespace in option_settings:
if add_to_remove:
for option in list(option_settings[namespace].keys()):
remove_option_setting(option_settings, option_remove,
namespace, option, True)
else:
del option_settings[namespace]
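# Illustrative sketch (not part of the original CLI): how the helpers above
# operate on the nested {namespace: {option: value}} settings dict and the
# {namespace: set(option)} removal dict. Namespace/option names are examples.
def _option_setting_example():
    settings = {'aws:autoscaling:asg': {'MinSize': '1'}}
    to_remove = {}
    add_option_setting(settings, to_remove, 'aws:autoscaling:asg', 'MaxSize', '4')
    remove_option_setting(settings, to_remove, 'aws:autoscaling:asg', 'MinSize')
    # settings is now {'aws:autoscaling:asg': {'MaxSize': '4'}}
    # to_remove is now {'aws:autoscaling:asg': {'MinSize'}}
    return settings, to_remove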
# Get definition for a particular option setting
def get_option_def(eb_client, app_name, namespace, option_name,
solution_stack = None, env_name = None):
options = dict()
options[namespace] = set()
options[namespace].add(option_name)
optionDef = api_wrapper.retrieve_configuration_options(eb_client = eb_client,
app_name = app_name,
solution_stack =solution_stack,
options = options)
if len(optionDef) > 0:
return optionDef[0]
else:
return None
|
singlebrook/AWS-ElasticBeanstalk-CLI
|
eb/macosx/python3/lib/elasticbeanstalk/eb_utils.py
|
Python
|
apache-2.0
| 12,340
|
[
"BLAST"
] |
c918b3524103aee3c292f5687e5b2b4c172d43d00673207538eb066837aac921
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ViT-SNGP on JFT-300M."""
from functools import partial # pylint: disable=g-importing-member so standard
import itertools
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
from clu import metric_writers
from clu import parameter_overview
from clu import periodic_actions
from clu import preprocess_spec
import flax
import flax.jax_utils as flax_utils
import jax
import jax.numpy as jnp
from ml_collections.config_flags import config_flags
import numpy as np
import robustness_metrics as rm
import tensorflow as tf
from tensorflow.io import gfile
import uncertainty_baselines as ub
import checkpoint_utils # local file import from baselines.jft
import data_uncertainty_utils # local file import from baselines.jft
import input_utils # local file import from baselines.jft
import ood_utils # local file import from baselines.jft
import preprocess_utils # local file import from baselines.jft
import train_utils # local file import from baselines.jft
# TODO(dusenberrymw): Open-source remaining imports.
fewshot = None
config_flags.DEFINE_config_file(
'config', None, 'Training configuration.', lock_config=True)
flags.DEFINE_string('output_dir', default=None, help='Work unit directory.')
flags.DEFINE_integer(
'num_cores', default=None, help='Unused. How many devices being used.')
flags.DEFINE_boolean(
'use_gpu', default=None, help='Unused. Whether or not running on GPU.')
flags.DEFINE_string('tpu', None,
'Unused. Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS
def get_gp_kwargs(gp_config):
"""Extract keyword argument parameters for the Gaussian process layer."""
covmat_momentum = gp_config.get('covmat_momentum', 0.999)
# Extracts model parameter.
logging.info('gp_config.covmat_momentum = %s', covmat_momentum)
covmat_momentum = None if covmat_momentum < 0. else covmat_momentum
covmat_kwargs = dict(momentum=covmat_momentum)
# Assembles into kwargs dictionary.
gp_layer_kwargs = dict(covmat_kwargs=covmat_kwargs)
return gp_layer_kwargs
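def _gp_kwargs_example():
  """Illustrative only (not part of the original script).

  An empty config keeps the 0.999 momentum default, while a negative momentum
  requests an exact (momentum-free) covariance update, encoded as None.
  """
  assert get_gp_kwargs({}) == {'covmat_kwargs': {'momentum': 0.999}}
  assert get_gp_kwargs({'covmat_momentum': -1.}) == {
      'covmat_kwargs': {'momentum': None}}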
def main(config, output_dir):
seed = config.get('seed', 0)
rng = jax.random.PRNGKey(seed)
tf.random.set_seed(seed)
if config.get('data_dir'):
logging.info('data_dir=%s', config.data_dir)
logging.info('Output dir: %s', output_dir)
save_checkpoint_path = None
if config.get('checkpoint_steps'):
gfile.makedirs(output_dir)
save_checkpoint_path = os.path.join(output_dir, 'checkpoint.npz')
# Create an asynchronous multi-metric writer.
writer = metric_writers.create_default_writer(
output_dir, just_logging=jax.process_index() > 0)
# The pool is used to perform misc operations such as logging in async way.
pool = multiprocessing.pool.ThreadPool()
def write_note(note):
if jax.process_index() == 0:
logging.info('NOTE: %s', note)
write_note('Initializing...')
# Verify settings to make sure no checkpoints are accidentally missed.
if config.get('keep_checkpoint_steps'):
assert config.get('checkpoint_steps'), 'Specify `checkpoint_steps`.'
assert config.keep_checkpoint_steps % config.checkpoint_steps == 0, (
        f'`keep_checkpoint_steps` ({config.keep_checkpoint_steps}) should be '
        f'divisible by `checkpoint_steps` ({config.checkpoint_steps}).')
batch_size = config.batch_size
batch_size_eval = config.get('batch_size_eval', batch_size)
if (batch_size % jax.device_count() != 0 or
batch_size_eval % jax.device_count() != 0):
raise ValueError(f'Batch sizes ({batch_size} and {batch_size_eval}) must '
f'be divisible by device number ({jax.device_count()})')
local_batch_size = batch_size // jax.process_count()
local_batch_size_eval = batch_size_eval // jax.process_count()
logging.info(
'Global batch size %d on %d hosts results in %d local batch size. '
'With %d dev per host (%d dev total), that is a %d per-device batch size.',
batch_size,
jax.process_count(), local_batch_size, jax.local_device_count(),
jax.device_count(), local_batch_size // jax.local_device_count())
write_note('Initializing train dataset...')
rng, train_ds_rng = jax.random.split(rng)
train_ds_rng = jax.random.fold_in(train_ds_rng, jax.process_index())
train_ds = input_utils.get_data(
dataset=config.dataset,
split=config.train_split,
rng=train_ds_rng,
process_batch_size=local_batch_size,
preprocess_fn=preprocess_spec.parse(
spec=config.pp_train, available_ops=preprocess_utils.all_ops()),
shuffle_buffer_size=config.shuffle_buffer_size,
prefetch_size=config.get('prefetch_to_host', 2),
data_dir=config.get('data_dir'))
logging.info('image_size = %s', train_ds.element_spec['image'].shape[1:])
# Start prefetching already.
train_iter = input_utils.start_input_pipeline(
train_ds, config.get('prefetch_to_device', 1))
write_note('Initializing val dataset(s)...')
def _get_val_split(dataset, split, pp_eval, data_dir=None):
# We do ceil rounding such that we include the last incomplete batch.
nval_img = input_utils.get_num_examples(
dataset,
split=split,
process_batch_size=local_batch_size_eval,
drop_remainder=False,
data_dir=data_dir)
val_steps = int(np.ceil(nval_img / batch_size_eval))
logging.info('Running validation for %d steps for %s, %s', val_steps,
dataset, split)
if isinstance(pp_eval, str):
pp_eval = preprocess_spec.parse(
spec=pp_eval, available_ops=preprocess_utils.all_ops())
val_ds = input_utils.get_data(
dataset=dataset,
split=split,
rng=None,
process_batch_size=local_batch_size_eval,
preprocess_fn=pp_eval,
cache=config.get('val_cache', 'batched'),
num_epochs=1,
repeat_after_batching=True,
shuffle=False,
prefetch_size=config.get('prefetch_to_host', 2),
drop_remainder=False,
data_dir=data_dir)
return val_ds
val_ds_splits = {
'val':
_get_val_split(config.dataset, config.val_split, config.pp_eval,
config.get('data_dir'))
}
if config.get('test_split'):
val_ds_splits.update({
'test': _get_val_split(
config.dataset,
split=config.test_split,
pp_eval=config.pp_eval,
data_dir=config.get('data_dir'))
})
if config.get('eval_on_cifar_10h'):
cifar10_to_cifar10h_fn = data_uncertainty_utils.create_cifar10_to_cifar10h_fn(
config.get('data_dir', None))
preprocess_fn = preprocess_spec.parse(
spec=config.pp_eval_cifar_10h, available_ops=preprocess_utils.all_ops())
pp_eval = lambda ex: preprocess_fn(cifar10_to_cifar10h_fn(ex))
val_ds_splits['cifar_10h'] = _get_val_split(
'cifar10',
split=config.get('cifar_10h_split') or 'test',
pp_eval=pp_eval,
data_dir=config.get('data_dir'))
elif config.get('eval_on_imagenet_real'):
imagenet_to_real_fn = data_uncertainty_utils.create_imagenet_to_real_fn()
preprocess_fn = preprocess_spec.parse(
spec=config.pp_eval_imagenet_real,
available_ops=preprocess_utils.all_ops())
pp_eval = lambda ex: preprocess_fn(imagenet_to_real_fn(ex))
val_ds_splits['imagenet_real'] = _get_val_split(
'imagenet2012_real',
split=config.get('imagenet_real_split') or 'validation',
pp_eval=pp_eval,
data_dir=config.get('data_dir'))
ood_ds = {}
if config.get('ood_datasets') and config.get('ood_methods'):
    if config.get('ood_methods'):  # config.ood_methods is not an empty list
logging.info('loading OOD dataset = %s', config.get('ood_datasets'))
ood_ds, ood_ds_names = ood_utils.load_ood_datasets(
config.dataset,
config.ood_datasets,
config.ood_split,
config.pp_eval,
config.pp_eval_ood,
config.ood_methods,
config.train_split,
config.get('data_dir'),
_get_val_split,
)
ntrain_img = input_utils.get_num_examples(
config.dataset,
split=config.train_split,
process_batch_size=local_batch_size,
data_dir=config.get('data_dir'))
steps_per_epoch = int(ntrain_img / batch_size)
if config.get('num_epochs'):
total_steps = int(config.num_epochs * steps_per_epoch)
assert not config.get('total_steps'), 'Set either num_epochs or total_steps'
else:
total_steps = config.total_steps
logging.info('Total train data points: %d', ntrain_img)
logging.info(
'Running for %d steps, that means %f epochs and %d steps per epoch',
total_steps, total_steps * batch_size / ntrain_img, steps_per_epoch)
write_note('Initializing model...')
logging.info('config.model = %s', config.get('model'))
# Specify Gaussian process layer configs.
use_gp_layer = config.get('use_gp_layer', True)
gp_config = config.get('gp_layer', {})
gp_layer_kwargs = get_gp_kwargs(gp_config)
# Process ViT backbone model configs.
vit_kwargs = config.get('model')
model = ub.models.vision_transformer_gp(
num_classes=config.num_classes,
use_gp_layer=use_gp_layer,
vit_kwargs=vit_kwargs,
gp_layer_kwargs=gp_layer_kwargs)
  # We want all parameters to be created in host RAM, not on any device; they'll
  # be sent to the devices later as needed. Otherwise we risk allocating them
  # twice, which we have already run into in two situations.
@partial(jax.jit, backend='cpu')
def init(rng):
image_size = tuple(train_ds.element_spec['image'].shape[2:])
logging.info('image_size = %s', image_size)
dummy_input = jnp.zeros((local_batch_size,) + image_size, jnp.float32)
variables = model.init(rng, dummy_input, train=False)
# Split model parameters into trainable and untrainable collections.
states, params = variables.pop('params')
del variables
# Set bias in the head to a low value, such that loss is small initially.
params = flax.core.unfreeze(params)
if use_gp_layer:
# Modify the head parameter in the GP head.
params['head']['output_layer']['bias'] = jnp.full_like(
params['head']['output_layer']['bias'],
config.get('init_head_bias', 0))
else:
params['head']['bias'] = jnp.full_like(
params['head']['bias'], config.get('init_head_bias', 0))
return params, states
rng, rng_init = jax.random.split(rng)
params_cpu, states_cpu = init(rng_init)
if jax.process_index() == 0:
num_params = sum(p.size for p in jax.tree_flatten(params_cpu)[0])
parameter_overview.log_parameter_overview(params_cpu)
writer.write_scalars(step=0, scalars={'num_params': num_params})
@partial(jax.pmap, axis_name='batch')
def evaluation_fn(params, states, images, labels, mask):
# Ignore the entries with all zero labels for evaluation.
mask *= labels.max(axis=1)
variable_dict = {'params': flax.core.freeze(params), **states}
logits, out = model.apply(
variable_dict,
images,
train=False,
mean_field_factor=gp_config.get('mean_field_factor', -1.))
    # Note that logits and labels are usually of the shape [batch, num_classes].
    # But for OOD data, when num_classes_ood > num_classes_ind, we adjust labels
    # to labels[:, :config.num_classes] purely to match the shape of logits and
    # avoid a shape mismatch. The resulting losses have no meaning for OOD data,
    # because OOD examples do not belong to any in-distribution class.
losses = getattr(train_utils, config.get('loss', 'sigmoid_xent'))(
logits=logits, labels=labels[:, :config.num_classes], reduction=False)
loss = jax.lax.psum(losses * mask, axis_name='batch')
top1_idx = jnp.argmax(logits, axis=1)
# Extracts the label at the highest logit index for each image.
top1_correct = jnp.take_along_axis(labels, top1_idx[:, None], axis=1)[:, 0]
ncorrect = jax.lax.psum(top1_correct * mask, axis_name='batch')
n = jax.lax.psum(mask, axis_name='batch')
metric_args = jax.lax.all_gather([logits, labels, out['pre_logits'], mask],
axis_name='batch')
return ncorrect, loss, n, metric_args
@partial(jax.pmap, axis_name='batch')
def cifar_10h_evaluation_fn(params, states, images, labels, mask):
variable_dict = {'params': flax.core.freeze(params), **states}
logits, out = model.apply(
variable_dict,
images,
train=False,
mean_field_factor=gp_config.get('mean_field_factor', -1.))
losses = getattr(train_utils, config.get('loss', 'softmax_xent'))(
logits=logits, labels=labels, reduction=False)
loss = jax.lax.psum(losses, axis_name='batch')
top1_idx = jnp.argmax(logits, axis=1)
# Extracts the label at the highest logit index for each image.
one_hot_labels = jnp.eye(10)[jnp.argmax(labels, axis=1)]
top1_correct = jnp.take_along_axis(
one_hot_labels, top1_idx[:, None], axis=1)[:, 0]
ncorrect = jax.lax.psum(top1_correct, axis_name='batch')
n = jax.lax.psum(one_hot_labels, axis_name='batch')
metric_args = jax.lax.all_gather([logits, labels, out['pre_logits'], mask],
axis_name='batch')
return ncorrect, loss, n, metric_args
# Setup function for computing representation.
@partial(jax.pmap, axis_name='batch')
def representation_fn(params, images, labels, mask, states):
variable_dict = {'params': flax.core.freeze(params), **states}
_, outputs = model.apply(
variable_dict,
images,
train=False,
mean_field_factor=gp_config.get('mean_field_factor', -1.))
representation = outputs[config.fewshot.representation_layer]
representation = jax.lax.all_gather(representation, 'batch')
labels = jax.lax.all_gather(labels, 'batch')
mask = jax.lax.all_gather(mask, 'batch')
return representation, labels, mask
# Load the optimizer from flax.
opt_name = config.get('optim_name')
write_note(f'Initializing {opt_name} optimizer...')
opt_def = getattr(flax.optim, opt_name)(**config.get('optim', {}))
# We jit this, such that the arrays that are created are created on the same
# device as the input is, in this case the CPU. Else they'd be on device[0].
opt_cpu = jax.jit(opt_def.create)(params_cpu)
weight_decay_rules = config.get('weight_decay', []) or []
rescale_value = config.lr.base if config.get('weight_decay_decouple') else 1.
weight_decay_fn = train_utils.get_weight_decay_fn(
weight_decay_rules=weight_decay_rules, rescale_value=rescale_value)
@partial(jax.pmap, axis_name='batch', donate_argnums=(0,))
def update_fn(opt, states, lr, reset_covmat, images, labels, rng):
"""Update step."""
measurements = {}
# Get device-specific loss rng.
rng, rng_model = jax.random.split(rng, 2)
rng_model_local = jax.random.fold_in(rng_model, jax.lax.axis_index('batch'))
def loss_fn(params, states, images, labels):
# Specify mutable collection to update untrainable GP parameters.
variable_dict = {'params': flax.core.freeze(params), **states}
model_results, updated_states = model.apply(
variable_dict,
images,
train=True,
rngs={'dropout': rng_model_local},
mutable=list(states.keys()),
mean_field_factor=gp_config.get('mean_field_factor', -1.))
logits, _ = model_results
loss = getattr(train_utils, config.get('loss', 'sigmoid_xent'))(
logits=logits, labels=labels)
return loss, updated_states
    # Performs an exact covariance update (i.e., resets the precision matrix at
    # the beginning of a new epoch) if covmat_momentum is a null value.
if use_gp_layer and gp_config.get('covmat_momentum', -1.) < 0:
      # Resets the precision matrix to Identity * ridge_penalty at the beginning
      # of a new epoch. This should be done before accumulating gradients.
ridge_penalty = gp_config.get('ridge_penalty', 1.)
prec_mat_old = states['laplace_covariance']['head']['covmat_layer'][
'precision_matrix']
prec_mat_new = (
(1. - reset_covmat) * prec_mat_old +
reset_covmat * jnp.eye(prec_mat_old.shape[0]) * ridge_penalty)
states = flax.core.unfreeze(states)
states['laplace_covariance']['head']['covmat_layer'][
'precision_matrix'] = prec_mat_new
states = flax.core.freeze(states)
# Implementation considerations compared and summarized at
# https://docs.google.com/document/d/1g3kMEvqu1DOawaflKNyUsIoQ4yIVEoyE5ZlIPkIl4Lc/edit?hl=en#
(l, s), g = train_utils.accumulate_gradient_with_states(
jax.value_and_grad(loss_fn, has_aux=True), opt.target, states, images,
labels, config.get('grad_accum_steps'))
l, g = jax.lax.pmean((l, g), axis_name='batch')
# Log the gradient norm only if we need to compute it anyways (clipping)
# or if we don't use grad_accum_steps, as they interact badly.
do_grad_clip = config.get('grad_clip_norm', -1.) > 0.
if config.get('grad_accum_steps', 1) == 1 or do_grad_clip:
grads, _ = jax.tree_flatten(g)
l2_g = jnp.sqrt(sum([jnp.vdot(p, p) for p in grads]))
measurements['l2_grads'] = l2_g
# Optionally resize the global gradient to a maximum norm. We found this
# useful in some cases across optimizers, hence it's in the main loop.
if do_grad_clip:
g_factor = jnp.minimum(1.0, config.grad_clip_norm / l2_g)
g = jax.tree_map(lambda p: g_factor * p, g)
opt = opt.apply_gradient(g, learning_rate=lr)
opt = opt.replace(target=weight_decay_fn(opt.target, lr))
params, _ = jax.tree_flatten(opt.target)
measurements['l2_params'] = jnp.sqrt(sum([jnp.vdot(p, p) for p in params]))
measurements['reset_covmat'] = reset_covmat
return opt, s, l, rng, measurements
default_reinit_params = ('head/output_layer/kernel', 'head/output_layer/bias',
'head/kernel', 'head/bias')
rng, train_loop_rngs = jax.random.split(rng)
checkpoint_data = checkpoint_utils.maybe_load_checkpoint(
train_loop_rngs=train_loop_rngs,
save_checkpoint_path=save_checkpoint_path,
init_optimizer=opt_cpu,
init_params=params_cpu,
init_fixed_model_states=states_cpu,
default_reinit_params=default_reinit_params,
config=config)
train_loop_rngs = checkpoint_data.train_loop_rngs
opt_cpu = checkpoint_data.optimizer
states_cpu = checkpoint_data.fixed_model_states
accumulated_train_time = checkpoint_data.accumulated_train_time
write_note('Kicking off misc stuff...')
first_step = int(opt_cpu.state.step) # Might be a DeviceArray type.
if first_step == 0 and jax.process_index() == 0:
writer.write_hparams(dict(config))
chrono = train_utils.Chrono(first_step, total_steps, batch_size,
accumulated_train_time)
# Note: switch to ProfileAllHosts() if you need to profile all hosts.
# (Xprof data become much larger and take longer to load for analysis)
profiler = periodic_actions.Profile(
# Create profile after every restart to analyze pre-emption related
# problems and assure we get similar performance in every run.
logdir=output_dir, first_profile=first_step + 10)
# Prepare the learning-rate and pre-fetch it to device to avoid delays.
lr_fn = train_utils.create_learning_rate_schedule(total_steps,
**config.get('lr', {}))
# TODO(dusenberrymw): According to flax docs, prefetching shouldn't be
# necessary for TPUs.
lr_iter = train_utils.prefetch_scalar(
map(lr_fn, range(total_steps)), config.get('prefetch_to_device', 1))
# Prepare the precision matrix resetting schedule, and pre-fetch it to device.
reset_steps = steps_per_epoch * 1
reset_covmat_fn = lambda step: float(step % reset_steps == 0)
reset_covmat_iter = train_utils.prefetch_scalar(
map(reset_covmat_fn, range(first_step, total_steps)),
nprefetch=config.get('prefetch_to_device', 1))
write_note(f'Replicating...\n{chrono.note}')
opt_repl = flax_utils.replicate(opt_cpu)
states_repl = flax_utils.replicate(states_cpu)
write_note(f'Initializing few-shotters...\n{chrono.note}')
fewshotter = None
if 'fewshot' in config and fewshot is not None:
fewshotter = fewshot.FewShotEvaluator(
representation_fn, config.fewshot,
config.fewshot.get('batch_size') or batch_size_eval)
checkpoint_writer = None
# Note: we return the train loss, val loss, and fewshot best l2s for use in
# reproducibility unit tests.
train_loss = -jnp.inf
val_loss = {val_name: -jnp.inf for val_name, _ in val_ds_splits.items()}
fewshot_results = {'dummy': {(0, 1): -jnp.inf}}
write_note(f'First step compilations...\n{chrono.note}')
logging.info('first_step = %s', first_step)
# Advance the iterators if we are restarting from an earlier checkpoint.
# TODO(dusenberrymw): Look into checkpointing dataset state instead.
# Makes sure log_eval_steps is same as steps_per_epoch. This is because
# the precision matrix needs to be updated fully (at the end of each epoch)
# when eval takes place.
log_eval_steps = max(steps_per_epoch, 2)
if first_step > 0:
write_note('Advancing iterators after resuming from a checkpoint...')
lr_iter = itertools.islice(lr_iter, first_step, None)
train_iter = itertools.islice(train_iter, first_step, None)
# Using a python integer for step here, because opt.state.step is allocated
# on TPU during replication.
for step, train_batch, lr_repl, reset_covmat_repl in zip(
range(first_step + 1, total_steps + 1), train_iter, lr_iter,
reset_covmat_iter):
with jax.profiler.TraceAnnotation('train_step', step_num=step, _r=1):
# TODO(jereliu): Expand to allow precision matrix resetting.
(opt_repl, states_repl, loss_value, train_loop_rngs,
extra_measurements) = update_fn(
opt_repl,
states_repl,
lr_repl,
reset_covmat_repl,
train_batch['image'],
train_batch['labels'],
rng=train_loop_rngs)
if jax.process_index() == 0:
profiler(step)
# Checkpoint saving
if train_utils.itstime(
step, config.get('checkpoint_steps'), total_steps, process=0):
write_note('Checkpointing...')
chrono.pause()
train_utils.checkpointing_timeout(checkpoint_writer,
config.get('checkpoint_timeout', 1))
accumulated_train_time = chrono.accum_train_time
# We need to transfer the weights over now or else we risk keeping them
# alive while they'll be updated in a future step, creating hard to debug
# memory errors (see b/160593526). Also, takes device 0's params only.
# For GP layer, we will also do the same for untrainable parameters
# (`states`). This is ok since `random features` are frozen throughout
      # pre-training, and `precision matrix` is a finetuning-specific parameter
# that will be re-learned in the finetuning task.
opt_cpu = jax.tree_map(lambda x: np.array(x[0]), opt_repl)
states_cpu = jax.tree_map(lambda x: np.array(x[0]), states_repl)
# Check whether we want to keep a copy of the current checkpoint.
copy_step = None
if train_utils.itstime(step, config.get('keep_checkpoint_steps'),
total_steps):
write_note('Keeping a checkpoint copy...')
copy_step = step
      # Checkpoint should be a nested dictionary or FLAX dataclasses from
# `flax.struct`. Both can be present in a checkpoint.
checkpoint_data = checkpoint_utils.CheckpointData(
optimizer=opt_cpu,
fixed_model_states=states_cpu,
train_loop_rngs=train_loop_rngs,
accumulated_train_time=accumulated_train_time)
checkpoint_writer = pool.apply_async(
checkpoint_utils.checkpoint_trained_model,
(checkpoint_data, save_checkpoint_path, copy_step))
chrono.resume()
# Report training progress
if train_utils.itstime(
step, config.log_training_steps, total_steps, process=0):
write_note('Reporting training progress...')
train_loss = loss_value[0] # Keep to return for reproducibility tests.
timing_measurements, note = chrono.tick(step)
write_note(note)
train_measurements = {}
train_measurements.update({
'learning_rate': lr_repl[0],
'training_loss': train_loss,
})
train_measurements.update(flax.jax_utils.unreplicate(extra_measurements))
train_measurements.update(timing_measurements)
writer.write_scalars(step, train_measurements)
# Report validation performance
if train_utils.itstime(step, log_eval_steps, total_steps):
write_note('Evaluating on the validation set...')
chrono.pause()
for val_name, val_ds in val_ds_splits.items():
# Sets up evaluation metrics.
ece_num_bins = config.get('ece_num_bins', 15)
auc_num_bins = config.get('auc_num_bins', 1000)
ece = rm.metrics.ExpectedCalibrationError(num_bins=ece_num_bins)
calib_auc = rm.metrics.CalibrationAUC(correct_pred_as_pos_label=False)
# TODO(jereliu): Extend to support soft multi-class probabilities.
oc_auc_0_5 = rm.metrics.OracleCollaborativeAUC(
oracle_fraction=0.005, num_bins=auc_num_bins)
oc_auc_1 = rm.metrics.OracleCollaborativeAUC(
oracle_fraction=0.01, num_bins=auc_num_bins)
oc_auc_2 = rm.metrics.OracleCollaborativeAUC(
oracle_fraction=0.02, num_bins=auc_num_bins)
oc_auc_5 = rm.metrics.OracleCollaborativeAUC(
oracle_fraction=0.05, num_bins=auc_num_bins)
label_diversity = tf.keras.metrics.Mean()
sample_diversity = tf.keras.metrics.Mean()
ged = tf.keras.metrics.Mean()
# Runs evaluation loop.
val_iter = input_utils.start_input_pipeline(
val_ds, config.get('prefetch_to_device', 1))
ncorrect, loss, nseen = 0, 0, 0
for batch in val_iter:
if val_name == 'cifar_10h':
batch_ncorrect, batch_losses, batch_n, batch_metric_args = (
cifar_10h_evaluation_fn(
opt_repl.target, states_repl, batch['image'],
batch['labels'], batch['mask']))
else:
batch_ncorrect, batch_losses, batch_n, batch_metric_args = (
evaluation_fn(opt_repl.target, states_repl, batch['image'],
batch['labels'], batch['mask']))
# All results are a replicated array shaped as follows:
# (local_devices, per_device_batch_size, elem_shape...)
# with each local device's entry being identical as they got psum'd.
# So let's just take the first one to the host as numpy.
ncorrect += np.sum(np.array(batch_ncorrect[0]))
loss += np.sum(np.array(batch_losses[0]))
nseen += np.sum(np.array(batch_n[0]))
if config.get('loss', 'sigmoid_xent') != 'sigmoid_xent':
# Here we parse batch_metric_args to compute uncertainty metrics.
# (e.g., ECE or Calibration AUC).
logits, labels, _, masks = batch_metric_args
            masks = np.array(masks[0], dtype=bool)
logits = np.array(logits[0])
probs = jax.nn.softmax(logits)
# From one-hot to integer labels, as required by ECE.
int_labels = np.argmax(np.array(labels[0]), axis=-1)
int_preds = np.argmax(logits, axis=-1)
confidence = np.max(probs, axis=-1)
for p, c, l, d, m, label in zip(probs, confidence, int_labels,
int_preds, masks, labels[0]):
ece.add_batch(p[m, :], label=l[m])
calib_auc.add_batch(d[m], label=l[m], confidence=c[m])
oc_auc_0_5.add_batch(d[m], label=l[m], custom_binning_score=c[m])
oc_auc_1.add_batch(d[m], label=l[m], custom_binning_score=c[m])
oc_auc_2.add_batch(d[m], label=l[m], custom_binning_score=c[m])
oc_auc_5.add_batch(d[m], label=l[m], custom_binning_score=c[m])
if val_name == 'cifar_10h' or val_name == 'imagenet_real':
batch_label_diversity, batch_sample_diversity, batch_ged = data_uncertainty_utils.generalized_energy_distance(
label[m], p[m, :], config.num_classes)
label_diversity.update_state(batch_label_diversity)
sample_diversity.update_state(batch_sample_diversity)
ged.update_state(batch_ged)
val_loss[val_name] = loss / nseen # Keep for reproducibility tests.
val_measurements = {
f'{val_name}_prec@1': ncorrect / nseen,
f'{val_name}_loss': val_loss[val_name]
}
if config.get('loss', 'sigmoid_xent') != 'sigmoid_xent':
val_measurements[f'{val_name}_ece'] = ece.result()['ece']
val_measurements[f'{val_name}_calib_auc'] = calib_auc.result()[
'calibration_auc']
val_measurements[f'{val_name}_oc_auc_0.5%'] = oc_auc_0_5.result()[
'collaborative_auc']
val_measurements[f'{val_name}_oc_auc_1%'] = oc_auc_1.result()[
'collaborative_auc']
val_measurements[f'{val_name}_oc_auc_2%'] = oc_auc_2.result()[
'collaborative_auc']
val_measurements[f'{val_name}_oc_auc_5%'] = oc_auc_5.result()[
'collaborative_auc']
writer.write_scalars(step, val_measurements)
if val_name == 'cifar_10h' or val_name == 'imagenet_real':
cifar_10h_measurements = {
f'{val_name}_label_diversity': label_diversity.result(),
f'{val_name}_sample_diversity': sample_diversity.result(),
f'{val_name}_ged': ged.result(),
}
writer.write_scalars(step, cifar_10h_measurements)
# OOD eval
    # There are two entries in the ood_ds dict (in-dist, ood), and this
    # section computes metrics using both pieces. This is in contrast to the
    # normal validation eval above, where metrics are evaluated separately for
    # each val split in val_ds.
if ood_ds and config.ood_methods:
def make_sngp_eval_fn(states):
def sngp_eval_fn(params, images, labels, mask):
return evaluation_fn(
params=params,
states=states,
images=images,
labels=labels,
mask=mask)
return sngp_eval_fn
ood_measurements = ood_utils.eval_ood_metrics(
ood_ds,
ood_ds_names,
config.ood_methods,
make_sngp_eval_fn(states_repl),
opt_repl.target,
n_prefetch=config.get('prefetch_to_device', 1))
writer.write_scalars(step, ood_measurements)
chrono.resume()
if 'fewshot' in config and fewshotter is not None:
# Compute few-shot on-the-fly evaluation.
if train_utils.itstime(step, config.fewshot.log_steps, total_steps):
chrono.pause()
write_note(f'Few-shot evaluation...\n{chrono.note}')
# Keep `results` to return for reproducibility tests.
fewshot_results, best_l2 = fewshotter.run_all(
opt_repl.target,
datasets=config.fewshot.datasets,
states=states_repl)
# TODO(dusenberrymw): Remove this once fewshot.py is updated.
def make_writer_measure_fn(step):
def writer_measure(name, value):
writer.write_scalars(step, {name: value})
return writer_measure
fewshotter.walk_results(
make_writer_measure_fn(step), fewshot_results, best_l2)
chrono.resume()
# End of step.
if config.get('testing_failure_step'):
# Break early to simulate infra failures in test cases.
if config.testing_failure_step == step:
break
write_note(f'Done!\n{chrono.note}')
pool.close()
pool.join()
writer.close()
# Return final training loss, validation loss, and fewshot results for
# reproducibility test cases.
return train_loss, val_loss, fewshot_results
if __name__ == '__main__':
# Adds jax flags to the program.
jax.config.config_with_absl()
# TODO(dusenberrymw): Refactor `main` such that there is a `train_eval`
# function that returns values for tests and does not directly access flags,
# and then have `main` return None.
def _main(argv):
del argv
main(FLAGS.config, FLAGS.output_dir)
app.run(_main) # Ignore the returned values from `main`.
|
google/uncertainty-baselines
|
baselines/jft/sngp.py
|
Python
|
apache-2.0
| 33,350
|
[
"Gaussian"
] |
6ad9b38df7084c4ebc5bea2fbe18e346d2771c46febb327ad1731190a4ea7c50
|
"""
====================================================================
Probabilistic predictions with Gaussian process classification (GPC)
====================================================================
This example illustrates the predicted probability of GPC for an RBF kernel
with different choices of the hyperparameters. The first figure shows the
predicted probability of GPC with arbitrarily chosen hyperparameters and with
the hyperparameters corresponding to the maximum log-marginal-likelihood (LML).
While the hyperparameters chosen by optimizing LML have a considerably larger
LML, they perform slightly worse according to the log-loss on test data. The
figure shows that this is because they exhibit a steep change of the class
probabilities at the class boundaries (which is good) but have predicted
probabilities close to 0.5 far away from the class boundaries (which is bad).
This undesirable effect is caused by the Laplace approximation used
internally by GPC.
The second figure shows the log-marginal-likelihood for different choices of
the kernel's hyperparameters, highlighting the two choices of the
hyperparameters used in the first figure by black dots.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import accuracy_score, log_loss
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# Generate data
train_size = 50
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 100)[:, np.newaxis]
y = np.array(X[:, 0] > 2.5, dtype=int)
# Specify Gaussian Processes with fixed and optimized hyperparameters
gp_fix = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0),
optimizer=None)
gp_fix.fit(X[:train_size], y[:train_size])
gp_opt = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
gp_opt.fit(X[:train_size], y[:train_size])
print("Log Marginal Likelihood (initial): %.3f"
% gp_fix.log_marginal_likelihood(gp_fix.kernel_.theta))
print("Log Marginal Likelihood (optimized): %.3f"
% gp_opt.log_marginal_likelihood(gp_opt.kernel_.theta))
print("Accuracy: %.3f (initial) %.3f (optimized)"
% (accuracy_score(y[:train_size], gp_fix.predict(X[:train_size])),
accuracy_score(y[:train_size], gp_opt.predict(X[:train_size]))))
print("Log-loss: %.3f (initial) %.3f (optimized)"
% (log_loss(y[:train_size], gp_fix.predict_proba(X[:train_size])[:, 1]),
log_loss(y[:train_size], gp_opt.predict_proba(X[:train_size])[:, 1])))
# Plot posteriors
plt.figure()
plt.scatter(X[:train_size, 0], y[:train_size], c='k', label="Train data",
edgecolors=(0, 0, 0))
plt.scatter(X[train_size:, 0], y[train_size:], c='g', label="Test data",
edgecolors=(0, 0, 0))
X_ = np.linspace(0, 5, 100)
plt.plot(X_, gp_fix.predict_proba(X_[:, np.newaxis])[:, 1], 'r',
label="Initial kernel: %s" % gp_fix.kernel_)
plt.plot(X_, gp_opt.predict_proba(X_[:, np.newaxis])[:, 1], 'b',
label="Optimized kernel: %s" % gp_opt.kernel_)
plt.xlabel("Feature")
plt.ylabel("Class 1 probability")
plt.xlim(0, 5)
plt.ylim(-0.25, 1.5)
plt.legend(loc="best")
# Plot LML landscape
plt.figure()
theta0 = np.logspace(0, 8, 30)
theta1 = np.logspace(-1, 1, 29)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [[gp_opt.log_marginal_likelihood(np.log([Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
plt.plot(np.exp(gp_fix.kernel_.theta)[0], np.exp(gp_fix.kernel_.theta)[1],
'ko', zorder=10)
plt.plot(np.exp(gp_opt.kernel_.theta)[0], np.exp(gp_opt.kernel_.theta)[1],
'ko', zorder=10)
plt.pcolor(Theta0, Theta1, LML)
plt.xscale("log")
plt.yscale("log")
plt.colorbar()
plt.xlabel("Magnitude")
plt.ylabel("Length-scale")
plt.title("Log-marginal-likelihood")
plt.show()
| chrsrds/scikit-learn | examples/gaussian_process/plot_gpc.py | Python | bsd-3-clause | 3,993 | ["Gaussian"] | 5ab696e817b3ac9bb481c9efb17924a5cc882836e466f7c8ce0cec50cc6f1dcf |
"""
Subpackage with functions to plot all kind of results from runs.
"""
import numpy as np
import math, sys, os
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, MaxNLocator
import scipy.ndimage
import scipy.stats  # needed below for scipy.stats.kde.gaussian_kde
import pycs.gen.util
def mad(xs):
"""
Return the median absolute deviation. I write it myself here instead of importing it from astropy, since that would add another dependency. Works with 1d arrays only.
@todo: for PyCS 3, will use astropy as a default module (good) and use their functions
:param xs: list of values
:return: median absolute deviation
"""
median = np.median(xs)
mad = np.median([np.abs(x-median) for x in xs])
return mad
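# Illustrative sketch (not part of the original module): mad() is robust to
# outliers, unlike the standard deviation.
#
#   >>> mad([1.0, 2.0, 3.0, 100.0])     # -> 1.0
#   >>> np.std([1.0, 2.0, 3.0, 100.0])  # -> ~42.4, dominated by the outlier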
class delaycontainer:
"""
Stores the delay or error bar measurement(s) (one for each curve pair).
This object is usually produced by the plot-functions ``hists`` or ``meanvstrue`` below.
markers : [ 7 | 4 | 5 | 6 | 'o' | 'D' | 'h' | 'H' | '_' | '' | 'None' | ' ' | None | '8' | 'p' | ',' | '+' | '.' | 's' | '*' | 'd' | 3 | 0 | 1 | 2 | '1' | '3' | '4' | '2' | 'v' | '<' | '>' | '^' | '|' | 'x' | '$...$' | tuple | Nx2 array ]
"""
def __init__(self, data, name="No name", objects=None, plotcolour = "black", marker=None):
"""
self.data is a list of dicts, the fields depend on if it's delays or errorbars
* delays : label, mean, med, std
* errorbars : label, tot, sys, ran, bias
"""
self.data = data
self.name = name
self.objects = objects
self.plotcolour = plotcolour
self.marker = marker
self.markersize = 5.0
self.yshift = 0.0 # allows to "group" measurements
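# Illustrative sketch (hypothetical values, not part of the original module):
# a pair of delaycontainer objects holding one delay measurement and its error
# bars, with the dict fields expected by newdelayplot() below.
#
#   delays = delaycontainer(data=[{"label": "AB", "mean": -12.3, "med": -12.1, "std": 1.5}],
#                           name="free-knot splines", objects=["A", "B"], plotcolour="blue")
#   errors = delaycontainer(data=[{"label": "AB", "tot": 1.8, "sys": 0.9, "ran": 1.6, "bias": -0.2}],
#                           name="free-knot splines", objects=["A", "B"], plotcolour="blue")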
def newdelayplot(plotlist, rplot=7.0, displaytext=True, hidedetails=False, showbias=True, showran=True, showerr=True, showlegend=True, text=None, figsize=(10, 6), left = 0.06, right=0.97, top=0.99, bottom=0.08, wspace=0.15, hspace=0.3, txtstep=0.04, majorticksstep=2, filename=None, refshifts=None, refdelays=None, legendfromrefdelays=False, hatches=None, centershifts=None, ymin=0.2, hlines=None, tweakeddisplay=False, blindness=False, horizontaldisplay=False, showxlabelhd=True):
"""
Plots delay measurements from different methods, telescopes, sub-curves, etc in one single plot.
For this I use only ``delaycontainer`` objects, i.e. I don't do any "computation" myself.
:param plotlist: Give me a list of tuples (delays, errorbars), where delays and errorbars are delaycontainer objects as written into pkl files by ``hists`` and ``meanvstrue``.
NEW : the delaycontainer objects in plotlist can handle asymmetric errors (e.g. to seamlessly compare PyCS with other papers' results). Instead of the "tot" key, new "plus" and "minus" keys are used.
:type plotlist: list
:param rplot: radius of delay axis, in days.
:param displaytext: Show labels with technique names and values of delays
:type displaytext: boolean
:param hidedetails: Do not show (ran, sys) in labels
:type hidedetails: boolean
:param refshifts: This is a list of dicts like {"colour":"gray", "shifts":(0, 0, 0, 90)}. Will be plotted as dashed vertical lines.
:type refshifts: list
:param refdelays: a list of tuples (delays, errorbars) to be plotted as shaded vertical zones.
:type refdelays: list
:param legendfromrefdelays: if you want to display the refdelays name in the legend panel
:type legendfromrefdelays: boolean
:param hatches: list of hatch keyword for the refdelays plotting
:type hatches: list
:param showbias: draws a little cross at the position of the delay "corrected" for the bias.
:type showbias: boolean
:param showran: draws "minor" error bar ticks using the random error only.
:type showran: boolean
:param text:
Text that you want to display, in the form : [line1, line2, line3 ...]
where line_i is (x, y, text, kwargs) where kwargs is e.g. {"fontsize":18} and x and y are relative positions (from 0 to 1).
:type text: list
:param blindness: Shift the measurements by their mean, so the displayed value are centered around 0
:type blindness: boolean
:param horizontaldisplay: display the delay panels on a single line. Works only for three-delay containers.
:type horizontaldisplay: boolean
:param showxlabelhd: display or not the x label when horizontal display is True
:type showxlabelhd: boolean
.. warning:: Although the code says I'm plotting mean and std for the measured delays, I might be using median and mad instead! This depends on how ``hists`` was called! Be careful with this...
"""
# Some checks :
objects = plotlist[0][0].objects
for (delays, errors) in plotlist:
if delays.objects != objects or errors.objects != objects:
raise RuntimeError("Don't ask me to overplot stuff from different objects !")
n = len(objects)
nmeas = len(plotlist)
print "Objects : %s" % (", ".join(objects))
if horizontaldisplay and n != 3:
print "Horizontal display works only for three delays, you have %i" % n
print "Switching back to regular display"
horizontaldisplay = False
for (delays, errors) in plotlist:
if delays.plotcolour != errors.plotcolour:
raise RuntimeError("Hmm, plotcolours of delays and errors don't correspond !")
print "Delays : %s <-> Errors : %s" % (delays.name, errors.name)
fig = plt.figure(figsize=figsize)
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
axisNum = 0
print "#" * 80
for i in range(n): # A, B, C, D and so on
for j in range(n):
# print i, j
if (i == 0) or (j == n - 1):
continue # No plot
if not horizontaldisplay:
axisNum += 1
if j >= i:
continue
if horizontaldisplay:
axisNum += 1
ax = plt.subplot(1, n, axisNum)
else:
ax = plt.subplot(n - 1, n - 1, axisNum)
# We will express the delays "i - j"
delaylabel = "%s%s" % (objects[j], objects[i])
print " Delay %s" % (delaylabel)
# General esthetics :
ax.get_yaxis().set_ticks([])
minorLocator = MultipleLocator(1.0)
majorLocator = MultipleLocator(majorticksstep)
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_major_locator(majorLocator)
# To determine the plot range :
paneldelays = []
# Going through plotlist :
if tweakeddisplay:
labelfontsize = 18
else:
labelfontsize = 14
if blindness:
blinddelays = []
for (ipl, (delays, errors)) in enumerate(plotlist):
blinddelays.append([meas for meas in delays.data if meas["label"] == delaylabel][0]["mean"])
blindmean = np.mean(blinddelays)
for (ipl, (delays, errors)) in enumerate(plotlist):
# Getting the delay for this particular panel
delay = [meas for meas in delays.data if meas["label"] == delaylabel][0]
if blindness:
delay["mean"] -= blindmean
error = [meas for meas in errors.data if meas["label"] == delaylabel][0]
paneldelays.append(delay["mean"])
ypos = nmeas - ipl + delays.yshift
# treat two cases: symmetric error ("tot" kw) and asymmetric ("plus" and "minus" kw)
if "tot" in error: # then it is symmetric
xerr = error["tot"]
else:
xerr = np.array([[error["minus"], error["plus"]]]).T
if hasattr(delays, 'elinewidth'):
elinewidth = delays.elinewidth
else:
elinewidth = 1.5
plt.errorbar([delay["mean"]], [ypos], yerr=None, xerr=xerr, fmt='-', ecolor=delays.plotcolour, elinewidth=elinewidth, capsize=3, barsabove=False)
if showran:
plt.errorbar([delay["mean"]], [ypos], yerr=None, xerr=error["ran"], fmt='-',
ecolor=delays.plotcolour, elinewidth=0.5, capsize=2, barsabove=False)
if delays.marker == None or delays.marker == ".":
plt.plot([delay["mean"]], [ypos], marker='o', markersize=delays.markersize,
markeredgecolor=delays.plotcolour, color=delays.plotcolour)
else:
plt.plot([delay["mean"]], [ypos], marker=delays.marker, markersize=delays.markersize,
markeredgecolor=delays.plotcolour, color=delays.plotcolour)
if showbias:
plt.plot([delay["mean"] - error["bias"]], [ypos], marker="x", markersize=delays.markersize,
markeredgecolor=delays.plotcolour, color=delays.plotcolour)
if hidedetails or (error["ran"] < 0.001 and error["sys"] < 0.001): # Then we omit writing them.
if "tot" in error:
delaytext = r"$%+.1f \pm %.1f$" % (delay["mean"], error["tot"])
else:
delaytext = r"$%+.1f^{+%.1f}_{-%.1f}$" % (delay["mean"], error["plus"], error["minus"])
else:
if "tot" in error:
delaytext = r"$%+.1f \pm %.1f\,(%.1f, %.1f)$" % (
delay["mean"], error["tot"], error["ran"], error["sys"])
else: # no sys and random for the asymmetric guys...
delaytext = r"$%+.1f^{+%.1f}_{-%.1f}$" % (delay["mean"], error["plus"], error["minus"])
# if you want to hide the error...
if not showerr:
delaytext = r"$%+.1f$" % delay["mean"]
if n == 2: # For doubles, we include the technique name into the txt :
delaytext = r"%s : " % (delays.name) + delaytext
if displaytext:
if hasattr(delays, 'labelfontsize'):
thislabelfontsize = delays.labelfontsize
else:
thislabelfontsize = labelfontsize
ax.annotate(delaytext, xy=(delay["mean"], ypos + 0.3), color=delays.plotcolour,
horizontalalignment="center", fontsize=thislabelfontsize)
if "tot" in error:
print "%45s : %+6.2f +/- %.2f (%.2f, %.2f)" % (
delays.name, delay["mean"], error["tot"], error["ran"], error["sys"])
else:
print "%45s : %+6.2f + %.2f - %.2f" % (delays.name, delay["mean"], error["plus"], error["minus"])
print "#" * 80
# Now this panel is done. Some general settings :
if centershifts != None:
centerdelay = centershifts[i] - centershifts[j]
else:
centerdelay = np.median(paneldelays)
plt.xlim((centerdelay - rplot, centerdelay + rplot))
plt.ylim((ymin, nmeas + 1.5))
# Blindness display options
if blindness:
xlabel = "Blind delay [day]"
else:
xlabel = "Delay [day]"
# Tweaked display option (should disappear for a uniform display !!)
if tweakeddisplay:
plt.xticks(fontsize=15)
xlabelfontsize = 18
else:
xlabelfontsize = 14
if i == n - 1 and not horizontaldisplay:
plt.xlabel(xlabel, fontsize=xlabelfontsize)
elif horizontaldisplay:
if showxlabelhd:
plt.xlabel(xlabel, fontsize=xlabelfontsize)
else:
ax.get_xaxis().set_ticks([])
if n != 2: # otherwise only one panel, no need
plt.annotate(delaylabel, xy=(0.03, 0.88 - txtstep), xycoords='axes fraction', fontsize=14,
color="black")
if refshifts != None:
for item in refshifts:
refdelay = item["shifts"][i] - item["shifts"][j]
plt.axvline(refdelay, color=item["colour"], linestyle="--", dashes=(3, 3), zorder=-20)
if refdelays != None:
try: # if refdelays are in the form of delays and errors containers:
for (ipl,(delays, errors)) in enumerate(refdelays):
# Getting the delay for this particular panel
delay = [meas for meas in delays.data if meas["label"] == delaylabel][0]
error = [meas for meas in errors.data if meas["label"] == delaylabel][0]
if hatches!=None:
plt.axvspan(delay["mean"]-error["tot"], delay["mean"]+error["tot"], facecolor=delays.plotcolour, alpha=0.25, zorder=-20, edgecolor="none", linewidth=0, hatch=hatches[ipl])
else:
plt.axvspan(delay["mean"]-error["tot"], delay["mean"]+error["tot"], facecolor=delays.plotcolour, alpha=0.25, zorder=-20, edgecolor="none", linewidth=0)
plt.axvline(delay["mean"], color=delays.plotcolour, linestyle="--", dashes=(5, 5), lw=1.0, zorder=-20)
#plt.axvline(delay["mean"], color=item.plotcolour, linestyle="-", lw=2, alpha=0.5, zorder=-20)
except: # then refdelays is a list of flat delays
(delay, errors) = refdelays[axisNum-1]
plt.axvspan(delay-errors[1], delay+errors[0], facecolor="gray", alpha=0.15, zorder=-20, edgecolor="none", linewidth=0)
plt.axvline(delay, color="gray", linestyle='--', dashes=(5, 5), lw=1.0, zorder=-20, alpha=0.4)
if hlines != None:
for hline in hlines:
plt.axhline(hline, lw=0.5, color="gray", zorder=-30)
# The "legend" :
if showlegend:
for (ipl, (delays, errors)) in enumerate(plotlist):
line = "%s" % (delays.name)
if not tweakeddisplay:
plt.figtext(x=right, y=top - txtstep * ipl, s=line, verticalalignment="top",
horizontalalignment="right", color=delays.plotcolour, fontsize=14)
else:
if hasattr(delays, 'legendfontsize'):
lfontsize = delays.legendfontsize
else:
lfontsize = 16
plt.figtext(x=0.75, y=top - txtstep * ipl - 0.1, s=line, verticalalignment="top",
horizontalalignment="center", color=delays.plotcolour,
fontsize=lfontsize) # for 3-delay plots
if legendfromrefdelays:
for (ipl, (delays, errors)) in enumerate(refdelays):
line = "%s" % (delays.name)
plt.figtext(x=right, y=top - txtstep * (ipl + len(plotlist)), s=line, verticalalignment="top",
horizontalalignment="right", color=delays.plotcolour, fontsize=14)
# Generic text :
if text != None:
for line in text:
plt.figtext(x=line[0], y=line[1], s=line[2], **line[3])
if filename == None:
plt.show()
else:
plt.savefig(filename)
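# Illustrative usage sketch (hypothetical pickle names, not part of the original
# module): read back the delaycontainer pickles written by hists(dataout=True)
# and measvstrue(dataout=True), then overplot them.
#
#   dc_delays = pycs.gen.util.readpickle("method1_delays.pkl")
#   dc_errors = pycs.gen.util.readpickle("method1_errorbars.pkl")
#   newdelayplot([(dc_delays, dc_errors)], rplot=7.0, displaytext=True,
#                filename="delayplot.png")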
def newdelayplot2(plotlist, rplot=7.0, displaytext=True, hidedetails=False, showbias=True, showran=True, showlegend=True, text=None, figsize=(10, 6), left = 0.06, right=0.97, top=0.99, bottom=0.08, wspace=0.15, hspace=0.3, txtstep=0.04, majorticksstep=2, filename="screen", refshifts=None, refdelays=None, legendfromrefdelays=False, hatches=None, centershifts=None, ymin=0.2, hlines=None):
"""
Plots delay measurements from different methods, telescopes, sub-curves, etc in one single plot.
For this I use only ``delaycontainer`` objects, i.e. I don't do any "computation" myself.
Difference from newdelayplot is that the previously hatched/shaded regions are plotted as smaller points, without info on the time delays.
:param plotlist: Give me a list of tuples (delays, errorbars), where delays and errorbars are delaycontainer objects as written into pkl files by ``hists`` and ``meanvstrue``.
:type plotlist: list
:param rplot: radius of delay axis, in days.
:param displaytext: Show labels with technique names and values of delays
:type displaytext: boolean
:param hidedetails: Do not show (ran, sys) in labels
:type hidedetails: boolean
:param refshifts: This is a list of dicts like {"colour":"gray", "shifts":(0, 0, 0, 90)}. Will be plotted as dashed vertical lines.
:type refshifts: list
:param refdelays: a list of tuples (delays, errorbars) to be plotted as shaded vertical zones.
:type refdelays: list
:param legendfromrefdelays: if you want to display the refdelays names in the legend panel
:type legendfromrefdelays: boolean
:param hatches: list of hatch keywords for the refdelays plotting
:type hatches: list
:param showbias: draws a little cross at the position of the delay "corrected" for the bias.
:type showbias: boolean
:param showran: draws "minor" error bar ticks using the random error only.
:type showran: boolean
:param text:
Text that you want to display, in the form : [line1, line2, line3 ...]
where line_i is (x, y, text, kwargs) where kwargs is e.g. {"fontsize":18} and x and y are relative positions (from 0 to 1).
:type text: list
"""
# Some checks :
objects = plotlist[0][0].objects
for (delays, errors) in plotlist:
if delays.objects != objects or errors.objects != objects:
raise RuntimeError("Don't ask me to overplot stuff from different objects !")
n = len(objects)
nmeas = len(plotlist)+len(refdelays)/2 +1
print "Objects : %s" % (", ".join(objects))
for (delays, errors) in plotlist:
if delays.plotcolour != errors.plotcolour:
raise RuntimeError("Hmm, plotcolours of delays and errors don't correspond !")
print "Delays : %s <-> Errors : %s" % (delays.name, errors.name)
fig = plt.figure(figsize=figsize)
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
axisNum = 0
print "#"*80
for i in range(n): # A, B, C, D and so on
for j in range(n):
#print i, j
if (i == 0) or (j == n-1) :
continue # No plot
axisNum += 1
if j >= i:
continue
ax = plt.subplot(n-1, n-1, axisNum)
# We will express the delays "i - j"
delaylabel="%s%s" % (objects[j], objects[i])
print " Delay %s" % (delaylabel)
# General esthetics :
ax.get_yaxis().set_ticks([])
minorLocator = MultipleLocator(1.0)
majorLocator = MultipleLocator(majorticksstep)
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_major_locator(majorLocator)
# To determine the plot range :
paneldelays = []
# Going through plotlist :
for (ipl,(delays, errors)) in enumerate(plotlist):
# Getting the delay for this particular panel
delay = [meas for meas in delays.data if meas["label"] == delaylabel][0]
error = [meas for meas in errors.data if meas["label"] == delaylabel][0]
paneldelays.append(delay["mean"])
ypos = nmeas - ipl*1.3 + delays.yshift
plt.errorbar([delay["mean"]], [ypos], yerr=None, xerr=error["tot"], fmt='-', ecolor=delays.plotcolour, elinewidth=delays.markersize/5.0*1.5, capsize=3, barsabove=False)
if showran:
plt.errorbar([delay["mean"]], [ypos], yerr=None, xerr=error["ran"], fmt='-', ecolor=delays.plotcolour, elinewidth=0.5, capsize=2, barsabove=False)
if delays.marker == None or delays.marker == ".":
plt.plot([delay["mean"]], [ypos], marker='o', markersize=delays.markersize, markeredgecolor=delays.plotcolour, color=delays.plotcolour)
else:
plt.plot([delay["mean"]], [ypos], marker=delays.marker, markersize=delays.markersize, markeredgecolor=delays.plotcolour, color="white")
if showbias:
plt.plot([delay["mean"] - error["bias"]], [ypos], marker="x", markersize=delays.markersize, markeredgecolor=delays.plotcolour, color=delays.plotcolour)
if hidedetails or (error["ran"] < 0.001 and error["sys"] < 0.001): # Then we omit writing them.
delaytext = r"$%+.1f \pm %.1f$" % (delay["mean"], error["tot"])
else:
delaytext = r"$%+.1f \pm %.1f\,(%.1f, %.1f)$" % (delay["mean"], error["tot"], error["ran"], error["sys"])
if n==2: # For doubles, we include the technique name into the txt :
delaytext = r"%s : " % (delays.name) + delaytext
if displaytext:
if delays.markersize>5:
ax.annotate(delaytext, xy=(delay["mean"], ypos + 0.3), color = delays.plotcolour, horizontalalignment="center", fontsize=16)
else:
ax.annotate(delaytext, xy=(delay["mean"], ypos + 0.3), color = delays.plotcolour, horizontalalignment="center", fontsize=14)
print "%45s : %+6.2f +/- %.2f (%.2f, %.2f)" % (delays.name, delay["mean"], error["tot"], error["ran"], error["sys"])
# Going through refdelays :
for (ipl,(delays, errors)) in enumerate(refdelays):
# Getting the delay for this particular panel
delay = [meas for meas in delays.data if meas["label"] == delaylabel][0]
error = [meas for meas in errors.data if meas["label"] == delaylabel][0]
paneldelays.append(delay["mean"])
if ipl in [0, 1]:
ypos = nmeas - (ipl/2.5+4.2) + 0.6
elif ipl in [2, 3]:
ypos = nmeas - (ipl/2.5+4.2) + 0.6 -0.4
elif ipl in [4, 5]:
ypos = nmeas - (ipl/2.5+4.2) + 0.6 -0.8
plt.errorbar([delay["mean"]], [ypos], yerr=None, xerr=error["tot"], fmt='-', ecolor=delays.plotcolour, elinewidth=1.0, capsize=3, barsabove=False)
if showran:
plt.errorbar([delay["mean"]], [ypos], yerr=None, xerr=error["ran"], fmt='-', ecolor=delays.plotcolour, elinewidth=0.33, capsize=2, barsabove=False)
if delays.marker == None or delays.marker == ".":
plt.plot([delay["mean"]], [ypos], marker='o', markersize=delays.markersize/1.5, markeredgecolor=delays.plotcolour, color=delays.plotcolour)
else:
plt.plot([delay["mean"]], [ypos], marker=delays.marker, markersize=delays.markersize/1.5, markeredgecolor=delays.plotcolour, color=delays.plotcolour)
if showbias:
plt.plot([delay["mean"] - error["bias"]], [ypos], marker="x", markersize=delays.markersize, markeredgecolor=delays.plotcolour, color=delays.plotcolour)
if hidedetails or (error["ran"] < 0.001 and error["sys"] < 0.001): # Then we omit writing them.
delaytext = r"$%+.1f \pm %.1f$" % (delay["mean"], error["tot"])
else:
delaytext = r"$%+.1f \pm %.1f\,(%.1f, %.1f)$" % (delay["mean"], error["tot"], error["ran"], error["sys"])
if n==2: # For doubles, we include the technique name into the txt :
delaytext = r"%s : " % (delays.name) + delaytext
if displaytext:
pass
#ax.annotate(delaytext, xy=(delay["mean"], ypos + 0.3), color = delays.plotcolour, horizontalalignment="center", fontsize=14)
print "%45s : %+6.2f +/- %.2f (%.2f, %.2f)" % (delays.name, delay["mean"], error["tot"], error["ran"], error["sys"])
if axisNum ==1:
"""
ax.annotate(r"0", xy=(-13.3, 4.1), color = "crimson", horizontalalignment="center", fontsize=16)
ax.annotate(r"1", xy=(-13.3, 2.9), color = "crimson", horizontalalignment="center", fontsize=16)
ax.annotate(r"2", xy=(-13.33, 1.7), color = "crimson", horizontalalignment="center", fontsize=16)
ax.annotate(r"3", xy=(-13.37, 0.5), color = "crimson", horizontalalignment="center", fontsize=16)
"""
"""
ax.annotate(r"$\diamond$", xy=(-13.3, 3.0), color = "crimson", horizontalalignment="center", fontsize=18)
ax.annotate(r"$\dag$", xy=(-13.33, 1.8), color = "crimson", horizontalalignment="center", fontsize=18)
ax.annotate(r"$\bowtie$", xy=(-13.37, 0.6), color = "crimson", horizontalalignment="center", fontsize=18)
"""
print "#"*80
# Now this panel is done. Some general settings :
if centershifts != None:
centerdelay = centershifts[i] - centershifts[j]
else:
centerdelay = np.median(paneldelays)
plt.xlim((centerdelay - rplot, centerdelay + rplot))
plt.ylim((ymin, nmeas+1.5))
if i == n-1:
plt.xlabel("Delay [day]", fontsize=14)
if n != 2: # otherwise only one panel, no need
plt.annotate(delaylabel, xy=(0.03, 0.88-txtstep), xycoords='axes fraction', fontsize=14, color="black")
if refshifts != None:
for item in refshifts:
refdelay = item["shifts"][i] - item["shifts"][j]
plt.axvline(refdelay, color=item["colour"], linestyle="--", dashes=(3, 3), zorder=-20)
if hlines != None:
for hline in hlines:
plt.axhline(hline, lw=0.5, color="gray", zorder=-30)
# The "legend" :
if showlegend:
for (ipl,(delays, errors)) in enumerate(plotlist):
line = "%s" % (delays.name)
plt.figtext(x = right, y = top - txtstep*ipl, s = line, verticalalignment="top", horizontalalignment="right", color=delays.plotcolour, fontsize=16)
"""
if legendfromrefdelays:
for (ipl,(delays, errors)) in enumerate(refdelays):
line = "%s" % (delays.name)
plt.figtext(x = right, y = top - txtstep*(ipl+len(plotlist)), s = line, verticalalignment="top", horizontalalignment="right", color=delays.plotcolour, fontsize=12)
"""
"""
plt.figtext(x = right-0.123, y = top - txtstep*len(plotlist) - 0.025, s = r"$\diamond$", verticalalignment="top", horizontalalignment="right", color="crimson", fontsize=18)
plt.figtext(x = right-0.125, y = top - txtstep*(len(plotlist)+1) - 0.023 , s = r"$\dag$", verticalalignment="top", horizontalalignment="right", color="crimson", fontsize=18)
plt.figtext(x = right-0.12, y = top - txtstep*(len(plotlist)+2) - 0.025, s = r"$\bowtie$", verticalalignment="top", horizontalalignment="right", color="crimson", fontsize=18)
plt.figtext(x = right, y = top - txtstep*len(plotlist) - 0.03, s = "- 2003-2007", verticalalignment="top", horizontalalignment="right", color="crimson", fontsize=13)
plt.figtext(x = right, y = top - txtstep*(len(plotlist)+1) - 0.03 , s = "- 2008-2012", verticalalignment="top", horizontalalignment="right", color="crimson", fontsize=13)
plt.figtext(x = right, y = top - txtstep*(len(plotlist)+2) - 0.03, s = "- 2013-2016", verticalalignment="top", horizontalalignment="right", color="crimson", fontsize=13)
"""
# Generic text :
if text != None:
for line in text:
plt.figtext(x=line[0], y=line[1], s=line[2], **line[3])
if filename=="screen":
plt.show()
else:
plt.savefig(filename)
def normal(x, mu, sigma):
"""
Plain normal distribution.
You can directly apply me on numpy arrays x, mu, sigma.
"""
return (1.0/np.sqrt(2.0*np.pi*sigma*sigma)) * np.exp( - (x - mu)**2/(2*sigma*sigma))
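# Quick sanity-check sketch (not part of the original module): the density
# should integrate to ~1 over a wide enough range.
#
#   x = np.linspace(-10.0, 10.0, 10001)
#   np.trapz(normal(x, 0.0, 1.5), x)   # -> ~1.0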
def hists(rrlist, r=10.0, nbins=100, showqs=True, showallqs=False, qsrange=None, title=None, xtitle=0.5, ytitle=0.95, titlesize=18, niceplot=False, displaytext=True, figsize=(16, 9), left = 0.06, right=0.95, bottom=0.065, top=0.95, wspace=0.2, hspace=0.2, txtstep=0.04, majorticksstep=2, hideyaxis=True, trueshifts=None, filename=None, dataout=False, blindness=False, usemedian=False, outdir = "./"):
"""
Comparing the delay distributions from different run result objects.
:param rrlist: a list of runresults object.
:param r: a range radius for the hists
:param showqs: If True, I overplot the qs as scatter points.
:param dataout: True means that I'll write the pkl file needed to make the delayplot.
:param usemedian: if True, use the median and median absolute deviation instead of mean and std.
.. warning:: To avoid rewriting newdelayplot, if usemedian is True then I write the median and mad in the mean and std fields of the pickles. This is dangerous (and a bit stupid and lazy), but since hists() and newdelayplot() are usually called one after the other it should not create too much confusion.
.. note:: Actually, using median and mad as default estimators might be smarter...? To meditate for PyCS 3.0...
"""
n = rrlist[0].nimages()
labels = rrlist[0].labels
# To get some fixed ranges for the histograms, we will use the center of the histos :
#reftrueshifts = (1.0/len(rrlist)) * np.sum(np.array([rr.getts()["center"] for rr in rrlist]), axis=0)
reftrueshifts = 0.5 * (np.max(np.array([rr.getts()["center"] for rr in rrlist]), axis=0) + np.min(np.array([rr.getts()["center"] for rr in rrlist]), axis=0))
#print reftrueshifts
for rr in rrlist:
if rr.labels != labels:
raise RuntimeError("Don't ask me to overplot runresults of different curves !")
#if not np.all(rr.gettruets()["center"] == reftrueshifts):
# print "Warning : I use the trueshift of the first rr to set the ranges."
rr.trues = rr.gettruets() # To store this, avoids calculating it again and again.
rr.tmpdata = []
fig = plt.figure(figsize=figsize)
#fig.subplots_adjust(left = 0.03, right=0.95, bottom=0.05, top=0.95, wspace=0.2, hspace=0.2)
#Looks good :
#fig.subplots_adjust(left = 0.06, right=0.95, bottom=0.065, top=0.95, wspace=0.2, hspace=0.2)
fig.subplots_adjust(left = left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
axisNum = 0
for i in range(n): # [A, B, C, D]
for j in range(n):
if (i == 0) or (j == n-1) :
continue # No plot
axisNum += 1
if j >= i:
continue
ax = plt.subplot(n-1, n-1, axisNum)
#ax = plt.gca()
# Delay label, used not only for display purposes, but also for the output pkl.
delaylabel="%s%s" % (labels[j], labels[i])
if i == n-1:
if n == 2: # Only one panel -> we write the object names into the xlabel
plt.xlabel("Delay %s%s [day]" % (labels[j], labels[i]), fontsize=14)
else:
plt.xlabel("Delay [day]", fontsize=14)
if showqs:
axscatter = ax.twinx()
# Hide the y ticks :
#ax.get_yaxis().set_ticks([])
# Ranges to plot
reftruedelay = reftrueshifts[i] - reftrueshifts[j]
histrange = (reftruedelay - r, reftruedelay + r)
for irr, rr in enumerate(rrlist):
# We will express the delays "i - j"
if rr.plottrue == True:
delays = rr.truetsarray[:,i] - rr.truetsarray[:,j]
else:
delays = rr.tsarray[:,i] - rr.tsarray[:,j]
meddelay = np.median(delays)
maddelay = mad(delays)
meandelay = np.mean(delays)
stddelay = np.std(delays)
# We save these :
if usemedian:
rr.tmpdata.append({"label":delaylabel, "mean":meddelay, "med":meddelay, "std":maddelay})
else:
rr.tmpdata.append({"label":delaylabel, "mean":meandelay, "med":meddelay, "std":stddelay})
#(counts, bins, patches) = ax.hist(delays, bins=nbins, range=histrange, histtype="step", color=colours[irr % len(colours)], normed=True)
(counts, bins, patches) = ax.hist(delays, bins=nbins, range=histrange, histtype="bar", color=rr.plotcolour, alpha = 0.4, lw=0, normed=True)
if niceplot:
majorLocator = MultipleLocator(majorticksstep)
minorLocator = MultipleLocator(1.0)
ax.xaxis.set_major_locator(majorLocator)
ax.xaxis.set_minor_locator(minorLocator)
ax.yaxis.set_ticks([])
if showqs and not rr.plottrue :
if showallqs:
axscatter.scatter(delays, rr.qs, s=1, facecolor=rr.plotcolour, lw = 0)
else:
axscatter.scatter(delays[::5], rr.qs[::5], s=1, facecolor=rr.plotcolour, lw = 0)
#cmap = colors.LinearSegmentedColormap.from_list('custom',['white', rr.plotcolour],gamma=1.0)
#axscatter.hexbin(delays, rr.qs, gridsize=(5, 2), mincnt=1, cmap=cmap, edgecolor="none")
# extent=(histrange[0], histrange[1], -r, r)
if qsrange:
axscatter.set_ylim(qsrange)
if niceplot:
majorLocator = MultipleLocator(500)
axscatter.yaxis.set_major_locator(majorLocator)
if axisNum == 1:
axscatter.set_ylabel(r"$\chi^2$", fontsize=18)
# We plot the true shifts (if available) as a straight line individually for each rr :
if rr.trues["type"] == "same":
truedelay = rr.trues["center"][i] - rr.trues["center"][j]
plt.axvline(x=truedelay, linewidth=1, linestyle="--", color=rr.plotcolour)
# We compute and display the mean and std of the hist :
#if getattr(rr, "plotgauss", False) == True:
if displaytext == True:
if getattr(rr, "plotgauss", False) == True:
x = np.linspace(histrange[0], histrange[1], 100)
y = normal(x, meandelay, stddelay)
ax.plot(x, y, linestyle="-", color = rr.plotcolour)
if not usemedian:
delaytext = r"%+.1f $\pm$ %.1f" % (meandelay, stddelay)
else:
delaytext = r"%+.1f $\pm$ %.1f" % (meddelay, maddelay)
#print rr.name
#print delaylabel + " " + delaytext
#ax.text(meddelay, np.max(y)/2.0, "%.1f +/- %.1f" % (meddelay, stddelay), horizontalalignment = "center", color = rr.plotcolour)
ax.annotate(delaytext, xy=(0.04, 0.7 - 0.12*irr), xycoords='axes fraction', color = rr.plotcolour, fontsize=10)
plt.xlim(histrange)
# We increase the upper ylim (by 40% for a single panel, 10% otherwise) to leave room for the annotations.
ylims = list(ax.get_ylim())
if n == 2: # single panel
ylims[1] *= 1.4
else:
ylims[1] *= 1.1
ax.set_ylim(ylims)
# hide y axis if wanted to
if hideyaxis:
ax.set_yticks([])
# make the ticks a little bit bigger than default
plt.xticks(fontsize=13)
# enforce blindness if wanted, by modifying the xticks labels (not touching the data)
if blindness:
labels = ax.xaxis.get_ticklabels()
locs = ax.xaxis.get_ticklocs()
meanloc = np.mean(locs)
blindlabels = []
for loc, label in zip(locs, labels):
blindlabels.append(str(loc-meanloc))
ax.xaxis.set_ticklabels(blindlabels)
# Looked ok on big plots :
#plt.annotate(delaylabel, xy=(0.03, 0.88), xycoords='axes fraction', fontsize=12, color="black")
if n != 2: # otherwise we have only one single panel
plt.annotate(delaylabel, xy=(0.05, 0.84), xycoords='axes fraction', fontsize=14, color="black")
if trueshifts != None:
truedelay = trueshifts[i] - trueshifts[j]
plt.axvline(truedelay, color="gray", linestyle="--", dashes=(3, 3), zorder=-20)
if dataout:
for rr in rrlist:
dc = delaycontainer(data = rr.tmpdata, name = rr.name, plotcolour = rr.plotcolour, objects=labels[:])
pycs.gen.util.writepickle(dc, outdir+ "%s_delays.pkl" % (rr.autoname))
rr.tmpdata = None
labelspacetop = 0.0
labelspaceright = 0.0
if n == 2:
labelspacetop = 0.04
labelspaceright = 0.04
for irr, rr in enumerate(rrlist):
if niceplot:
labeltxt = "%s" % (getattr(rr, 'name', 'NoName'))
plt.figtext(x = right - labelspaceright, y = top - labelspacetop - txtstep*irr, s = labeltxt, verticalalignment="top", horizontalalignment="right", color=rr.plotcolour, fontsize=15)
else:
labeltxt = "%s (%s, %i) " % (getattr(rr, 'name', 'NoName'), "Truth" if rr.plottrue else "Measured", rr.tsarray.shape[0])
plt.figtext(x = right - labelspaceright, y = top - labelspacetop - txtstep*irr, s = labeltxt, verticalalignment="top", horizontalalignment="right", color=rr.plotcolour, fontsize=15)
print 'Plotting "%s"' % labeltxt
print " Labels : %s" % (", ".join(rr.labels))
print " Median shifts : %s" % (", ".join(["%.2f" % (np.median(rr.tsarray[:,i])) for i in range(len(rr.labels))]))
print " Std shifts : %s" % (", ".join(["%.2f" % (np.std(rr.tsarray[:,i])) for i in range(len(rr.labels))]))
if title != None:
plt.figtext(x = xtitle, y = ytitle, s = title, horizontalalignment="center", color="black", fontsize=titlesize)
if filename == None:
plt.show()
else:
plt.savefig(filename)
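# Illustrative usage sketch (hypothetical runresults objects and paths, not part
# of the original module): compare the delay distributions of two sets of
# runresults and write the delay pickles later used by newdelayplot().
#
#   hists([rr_copies, rr_mocks], r=10.0, nbins=100, dataout=True,
#         usemedian=False, filename="hists.png", outdir="./plots/")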
def newcovplot(rrlist, r=6, rerr=3, nbins = 10, nbins2d=3, binclip=True, binclipr=10.0, figsize=(13, 13), left=0.06, right=0.97, top=0.97, bottom=0.04, wspace=0.3, hspace=0.3, method='indepbin', minsamples=10, showplots=True, printdetails=True, printcovmat=True, detailplots=False, filepath=None, verbose=True):
#TODO: there is no binclip in depbin ! Should I implement it ?
assert (method in ['depbin', 'indepbin'])
retdict = {} # we put all the intermediate products in a dict that we return
nimages = rrlist[0].nimages()
imginds = np.arange(nimages)
labels = rrlist[0].labels
if nimages == 4: # then it's a quad
covmatsize = 6
elif nimages == 3: # then it's a folded quad
covmatsize = 3
else: # then it's a double
print "This function does not work for doubles"
print "I kindly remind you that the covariance between a variable and itself is called variance, and there are simpler functions to compute that in PyCS. Try newdelayplot for instance."
couplelist = [(i, j) for j in imginds for i in imginds if i > j]
ncouples = len(couplelist)
# print couplelist
tderrsdicts = []
# rrlist is just a list of rr, we treat them one after the other
for rr in rrlist:
# for each rr, we compute the error from the true delay
truetsslist = rr.truetsarray
tsslist = rr.tsarray-truetsslist
for ind, tss in enumerate(tsslist):
tderrs = []
truetds = []
for (i, j) in couplelist:
tderrs.append(tss[i]-tss[j])
truetds.append(truetsslist[ind][i]-truetsslist[ind][j])
tderrsdicts.append({"tderrs": tderrs, "truetds": truetds})
#tderrsdict contains the errors on the true delays, as well as the true delays for each simulation
# figure 1: general covariance plot for each pair of delays. Diagonal elements are the same than newdelayplot, off-diagonal elements are covariance for all the runresults
allcovplot = plt.figure(figsize=figsize)
allcovplot.subplots_adjust(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
# figure 2: covariance computed in each bin, for each pair of delays. Diagonal elements are the same than newdelayplot, off diagonal elements are colored tiles of covariance per true delays bins, with points overplotted.
bincovplot = plt.figure(figsize=figsize)
bincovplot.subplots_adjust(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
axisNum = 0
# create the empty covariance matrix
covmat = []
for ind in range(len(couplelist)):
covmat.append(np.zeros(len(couplelist)))
indepbins = np.zeros(len(couplelist))
depbins = np.zeros(len(couplelist))
rranges = np.zeros(len(couplelist))
retdict["delay"] = {} # dict in a dict !
for ii, i in enumerate(couplelist): # (0, 1), (0, 2) ...
delaylabel="%s%s" % (labels[i[1]], labels[i[0]])
retdict["delay"]["%s" % delaylabel] = {} # dict in a dict in a dict ! dictception !!
xtderrs = [tderrsdict["tderrs"][ii] for tderrsdict in tderrsdicts]
xtruetds = [tderrsdict["truetds"][ii] for tderrsdict in tderrsdicts]
maxx = np.max(xtruetds)
minx = np.min(xtruetds)
### fill the diagonal element
ax1 = allcovplot.add_subplot(ncouples, ncouples, covmatsize*ii + (ii+1))
ax2 = bincovplot.add_subplot(ncouples, ncouples, covmatsize*ii + (ii+1))
majorLocator = MultipleLocator(1.0)
for ax in [ax1, ax2]:
ax.yaxis.set_major_locator(majorLocator)
ax.xaxis.set_major_locator(MaxNLocator(5))
if ii == len(couplelist)-1:
ax1.set_xlabel('True Delay [day]')
ax2.set_ylabel('Measurement error [day]', labelpad=-10)
# way 1 - binning independent of xtruedelays distribution. User choose the plot range. Similar to newdelayplot()
reftrueshifts = np.mean([rr.gettruets()["center"] for rr in rrlist], axis=0)
#reftrueshifts = np.round(rrlist[0].gettruets()["center"])
reftruedelay = reftrueshifts[i[0]] - reftrueshifts[i[1]]
plotrange = (reftruedelay - r, reftruedelay + r)
binlims = np.linspace(plotrange[0], plotrange[1], nbins + 1)
# If we want to compare to newdelayplot():
# xtruetds = truedelays
# xtderrs = resis
# needed for binvals:
xtderrs = np.array(xtderrs)
digitized = np.digitize(xtruetds, binlims)
binvals = [xtderrs[digitized == bini] for bini in range(1, len(binlims))]
binstds = map(np.std, binvals)
binmeans = map(np.mean, binvals)
if binclip:
for (bini, binvalarray) in enumerate(binvals):
keep = np.logical_and(binvalarray < binclipr, binvalarray > -binclipr)
if np.sum(keep == False) != 0:
print "Kicking %i points." % (np.sum(keep == False))
binvals[bini] = binvalarray[keep]
binstds = map(np.std, binvals)
binmeans = map(np.mean, binvals)
syserror = np.max(np.fabs(binmeans))
randerror = np.max(binstds)
toterror = np.sqrt(syserror*syserror + randerror*randerror)
indepbins[ii] = toterror
retdict["delay"]["%s" % delaylabel]["indep"] = {} # dict in a dict in a dict in a dict ! we need to go deeper !!!
retdict["delay"]["%s" % delaylabel]["indep"]["syserror"] = syserror
retdict["delay"]["%s" % delaylabel]["indep"]["randerror"] = randerror
retdict["delay"]["%s" % delaylabel]["indep"]["toterror"] = toterror # that's already in the covariance matrix...
# Plot the result !
line = np.linspace(plotrange[0], plotrange[1], 100)
zeros = np.zeros(100)
width = binlims[1] - binlims[0]
for ax in [ax1, ax2]:
ax.plot(line, zeros, color="black", lw=0.5)
ax.bar(binlims[:-1], binmeans, yerr=binstds, width=width, color=rr.plotcolour, ecolor=rr.plotcolour, error_kw={"capsize":2.5, "capthick":0.5, "markeredgewidth":0.5}, edgecolor=rr.plotcolour, alpha = 0.2)
ax.set_ylim((-rerr, rerr))
if figsize[0] > 8:
ax.annotate(delaylabel, xy=(0.9, 0.05), xycoords='axes fraction', ha="center") # x axis
else:
ax.annotate(delaylabel, xy=(0.78, 0.08), xycoords='axes fraction', ha="center")
ax.set_xlim(plotrange)
majorLocator = MultipleLocator(int(r/2.0)+1)
ax.xaxis.set_major_locator(majorLocator)
ax.set_title(r'sys=%.2f | ran=%.2f' % (syserror, randerror)+'\n'+'tot=%.2f' % toterror, fontsize=10)
# way 2 - binning dependent on the xtruedelays samples: min and max vals corresponds to the extremas of xtruedelays distribution
xbinvals = np.linspace(minx, maxx, num=nbins+1, endpoint=True)
rranges[ii] = maxx-minx
binmeans = []
binstds = []
for indx, xbinval in enumerate(xbinvals[:nbins]):
subsamples = []
for (ind, xtruetd) in enumerate(xtruetds):
if xtruetd > xbinval and xtruetd < xbinvals[indx+1]:
subsamples.append(xtderrs[ind])
binmeans.append(np.mean(subsamples))
binstds.append(np.std(subsamples))
syserror = np.max(np.fabs(binmeans))
randerror = np.max(binstds)
toterror = np.sqrt(syserror*syserror + randerror*randerror)
depbins[ii] = toterror
retdict["delay"]["%s" % delaylabel]["dep"] = {}
retdict["delay"]["%s" % delaylabel]["dep"]["syserror"] = syserror
retdict["delay"]["%s" % delaylabel]["dep"]["randerror"] = randerror
retdict["delay"]["%s" % delaylabel]["dep"]["toterror"] = toterror
# We let the user choose which method he prefers
# Dear user, be CAREFUL with your choice !
if method == 'depbin':
if ii == 0 and verbose : print "You chose a binning depending on the sample values"
covmat[ii][ii] = depbins[ii]
elif method == 'indepbin': # that should be the default value
if ii == 0 and verbose : print "You chose a binning independent of the sample values"
covmat[ii][ii] = indepbins[ii]
### fill the off-diagonal elements
retdict["cov"] = {}
for jj, j in enumerate(couplelist):
axisNum += 1
if (ii == 0) or (jj == ncouples-1) :
continue # No plot
if jj >= ii:
continue
xdelaylabel="%s%s" % (labels[i[1]], labels[i[0]])
ydelaylabel="%s%s" % (labels[j[1]], labels[j[0]])
retdict["cov"]["%s-%s" % (ydelaylabel, xdelaylabel)] = {}
if detailplots:
# figure 3: for each pair, plot the covariance in each bin. One figure per pair
bincovplot2 = plt.figure(figsize=figsize)
bincovplot2.subplots_adjust(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
ytderrs = [tderrsdict["tderrs"][jj] for tderrsdict in tderrsdicts]
ytruetds = [tderrsdict["truetds"][jj] for tderrsdict in tderrsdicts]
ax1 = allcovplot.add_subplot(ncouples, ncouples, axisNum)
ax2 = bincovplot.add_subplot(ncouples, ncouples, axisNum)
majorLocator = MultipleLocator(2.0)
ax1.set_xlim(-rerr, rerr)
ax1.set_ylim(-rerr, rerr)
ax1.xaxis.set_major_locator(majorLocator)
ax1.yaxis.set_major_locator(majorLocator)
ax1.axhline(0, color="black")
ax1.axvline(0, color="black")
ax2.xaxis.set_major_locator(MaxNLocator(3))
ax2.yaxis.set_major_locator(MaxNLocator(3))
if axisNum == ncouples*(ncouples-1) + 1:
ax1.set_xlabel('Measurement error [day]')
ax1.set_ylabel('Measurement error [day]')
ax2.set_xlabel('True delay [day]')
ax2.set_ylabel('True delay [day]')
## binning independent of xtrudelays and ytruedelays distribution. Same plotrange as diagonal elements, but 2d binning
retdict["cov"]["%s-%s" % (ydelaylabel, xdelaylabel)]["indep"] = {}
xbinlims2d = np.linspace(plotrange[0], plotrange[1], nbins2d + 1)
yreftruedelay = reftrueshifts[j[0]] - reftrueshifts[j[1]]
yplotrange = (yreftruedelay - r, yreftruedelay + r)
ybinlims2d = np.linspace(yplotrange[0], yplotrange[1], nbins2d + 1)
xcoordsan=[]
ycoordsan=[]
colorsan=[]
covsindep=[]
for indx, xbinlim in enumerate(xbinlims2d[:nbins2d]):
for indy, ybinlim in enumerate(ybinlims2d[:nbins2d]):
subsamples = []
for (ind, xtruetd), ytruetd in zip(enumerate(xtruetds), ytruetds):
if xtruetd > xbinlim and xtruetd < xbinlims2d[indx+1] and ytruetd > ybinlim and ytruetd < ybinlims2d[indy+1]:
subsamples.append((xtderrs[ind], ytderrs[ind]))
if len(subsamples) > minsamples:
covval = np.cov(subsamples, rowvar=False)[0][1]
colorsan.append("black")
else:
covval = 0
colorsan.append('crimson')
# save the plotting coords, to bold plot the biggest covval later...
xcoordsan.append(xbinlim + (xbinlims2d[indx+1]-xbinlim)/2)
ycoordsan.append(ybinlim + (ybinlims2d[indy+1]-ybinlim)/2)
# colorize the regions according to the covariance value
maxval=0.5
alpha = min(np.abs(covval/maxval), 1.0)
from matplotlib.patches import Rectangle
rect = Rectangle((xbinlim, ybinlim), xbinlims2d[indx+1]-xbinlim, ybinlims2d[indy+1]-ybinlim, color=rrlist[0].plotcolour, alpha=alpha)
ax2.add_patch(rect)
xdelaylabeldet="%s%s [%.1f , %.1f]" % (labels[i[1]], labels[i[0]], xbinlim, xbinlims2d[indx+1])
ydelaylabeldet="%s%s [%.1f , %.1f]" % (labels[j[1]], labels[j[0]], ybinlim, ybinlims2d[indy+1])
retdict["cov"]["%s-%s" % (ydelaylabel, xdelaylabel)]["indep"]["%s-%s" % (ydelaylabeldet, xdelaylabeldet)] = covval
covsindep.append(covval)
if detailplots:
# add an Axes on the figure for each bin, and plot the errors
# mapping the matplotlib subplot index is a bit tricky:
# if we use nbins2dx and nbins2dy: nbins2dx*nbins2dy - (nbins2dx-1-indx) - (nbins2dy*indy)
spind = nbins2d*nbins2d - (nbins2d-1-indx) - (nbins2d*indy)
ax3 = bincovplot2.add_subplot(nbins2d, nbins2d, spind)
ax3.set_xlim(-rerr, rerr)
ax3.set_ylim(-rerr, rerr)
ax3.xaxis.set_major_locator(majorLocator)
ax3.yaxis.set_major_locator(majorLocator)
ax3.axhline(0, color="black")
ax3.axvline(0, color="black")
ax3.set_xlabel('Measurement error [day]')
ax3.set_ylabel('Measurement error [day]')
showdensity = True
bins = 10
if showdensity:
cmap = colors.LinearSegmentedColormap.from_list('custom', ['white', rrlist[0].plotcolour],gamma=1.0)
ax3.hexbin([s[0] for s in subsamples], [s[1] for s in subsamples], gridsize=bins, extent=(-rerr, rerr, -rerr, rerr), mincnt=1, cmap=cmap, edgecolor="none")
showpoints=True
if showpoints:
ax3.scatter([s[0] for s in subsamples], [s[1] for s in subsamples], s=5, facecolor=rrlist[0].plotcolour, lw=0, alpha=0.5)
showcontour=True
if showcontour:
H, xedges, yedges = np.histogram2d([s[0] for s in subsamples], [s[1] for s in subsamples], range=[[-r, r], [-r, r]], bins=(bins, bins))
extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
data = np.vstack((xtderrs, ytderrs))
kde = scipy.stats.kde.gaussian_kde(data)
grid = np.mgrid[-r:r:1j*bins, -r:r:1j*bins]
grid_coords = np.append(grid[0].reshape(-1,1),grid[1].reshape(-1,1),axis=1)
z = kde(grid_coords.T)
z = z.reshape(bins,bins)
levels = [np.max(z)*0.45]
cset = ax3.contour(grid[0], grid[1], z, levels=levels, origin="lower", colors=rrlist[0].plotcolour, extent=extent, linewidth=0.5)
if figsize[0] > 8:
ax3.annotate(xdelaylabeldet, xy=(0.77, 0.05), xycoords='axes fraction', ha="center")
ax3.annotate(ydelaylabeldet, xy=(0.04, 0.90), xycoords='axes fraction', ha="left", rotation=90.0)
if detailplots and filepath != None:
bincovplot2.savefig(os.path.join(filepath, "bincov_%s%s-vs-%s%s.png" % (labels[j[1]], labels[j[0]], labels[i[1]], labels[i[0]])))
mincovindep = np.min(covsindep)
maxcovindep = np.max(covsindep)
if abs(mincovindep) > maxcovindep:
extcovindep = mincovindep
else:
extcovindep = maxcovindep
mind = covsindep.index(extcovindep)
for ind, val in enumerate(covsindep):
if ind == mind:
ax2.annotate("%.2f" % val, xy=(xcoordsan[ind], ycoordsan[ind]), ha="center", va='center', color='darkblue', fontsize=14)
else:
ax2.annotate("%.2f" % val, xy=(xcoordsan[ind], ycoordsan[ind]), ha="center", va='center', color=colorsan[ind])
#plotting ax2 uses the 2d binning
for ind, xbinlim in enumerate(xbinlims2d):
ax2.axvline(xbinlim, linestyle='--', color='black', alpha=0.5)
ax2.axhline(ybinlims2d[ind], linestyle='--', color='black', alpha=0.5)
showpoints=False
if showpoints:
ax2.scatter(xtruetds, ytruetds, s=2, facecolor=rrlist[0].plotcolour, lw=0, alpha=0.1)
ax2.set_xlim(plotrange)
ax2.set_ylim(yplotrange)
# plotting ax1 is pretty basic, that's only the points
retdict["cov"]["%s-%s" % (ydelaylabel, xdelaylabel)]["dep"] = {}
showdensity = True
bins = 10
if showdensity:
cmap = colors.LinearSegmentedColormap.from_list('custom', ['white', rrlist[0].plotcolour],gamma=1.0)
ax1.hexbin(xtderrs, ytderrs, gridsize=bins, extent=(-rerr, rerr, -rerr, rerr), mincnt=1, cmap=cmap, edgecolor="none")
showpoints=False
if showpoints:
ax1.scatter(xtderrs, ytderrs, s=2, facecolor=rrlist[0].plotcolour, lw=0)
showcontour=True
if showcontour:
H, xedges, yedges = np.histogram2d(xtderrs, ytderrs, range=[[-r, r], [-r, r]], bins=(bins, bins))
extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
data = np.vstack((xtderrs, ytderrs))
kde = scipy.stats.kde.gaussian_kde(data)
grid = np.mgrid[-r:r:1j*bins, -r:r:1j*bins]
grid_coords = np.append(grid[0].reshape(-1,1),grid[1].reshape(-1,1),axis=1)
z = kde(grid_coords.T)
z = z.reshape(bins,bins)
levels = [np.max(z)*0.45]
cset = ax1.contour(grid[0], grid[1], z, levels=levels, origin="lower", colors=rrlist[0].plotcolour, extent=extent, linewidth=0.5)
if figsize[0] > 8:
ax1.annotate(xdelaylabel, xy=(0.9, 0.05), xycoords='axes fraction', ha="center") # x axis
ax1.annotate(ydelaylabel, xy=(0.06, 0.85), xycoords='axes fraction', ha="left", rotation=90.0) # y axis
else:
ax1.annotate(xdelaylabel, xy=(0.78, 0.08), xycoords='axes fraction', ha="center") # x axis
ax1.annotate(ydelaylabel, xy=(0.08, 0.76), xycoords='axes fraction', ha="left", rotation=90.0) # y axis
meancov = np.cov([(xtderr, ytderr) for xtderr, ytderr in zip(xtderrs, ytderrs)], rowvar=False)[0][1]
ax2.set_title('%s vs %s | mean = %.2f' % (ydelaylabel, xdelaylabel, meancov), fontsize=10)
## binning dependent of true delays, for comparision
xbinvals = np.linspace(minx, maxx, num=nbins2d+1, endpoint=True)
maxy = np.max(ytruetds)
miny = np.min(ytruetds)
ybinvals = np.linspace(miny, maxy, num=nbins2d+1, endpoint=True)
covsdep=[]
for indx, xbinval in enumerate(xbinvals[:nbins2d]):
for indy, ybinval in enumerate(ybinvals[:nbins2d]):
subsamples = []
for (ind, xtruetd), ytruetd in zip(enumerate(xtruetds), ytruetds):
if xtruetd > xbinval and xtruetd < xbinvals[indx+1] and ytruetd > ybinval and ytruetd < ybinvals[indy+1]:
subsamples.append((xtderrs[ind], ytderrs[ind]))
#TODO: due to the non-uniform sampling of the simulated true tds, some regions of the truetd_x vs truetd_y are rather empty (less than 10 samples). Should we i) increase the number of simulated samples, ii) discard these regions from the analysis, iii) transfer these samples to the nearest bin ?
#print len(subsamples), len(subsamples[0]), subsamples[0]
xdelaylabeldet = "%s%s [%.1f , %.1f]" % (labels[i[1]], labels[i[0]], xbinval, xbinvals[indx+1])
ydelaylabeldet = "%s%s [%.1f , %.1f]" % (labels[j[1]], labels[j[0]], ybinval, ybinvals[indy+1])
if len(subsamples) > minsamples:
covvaldep = np.cov(subsamples, rowvar=False)[0][1]
else:
covvaldep = 0.0
retdict["cov"]["%s-%s" % (ydelaylabel, xdelaylabel)]["dep"]["%s-%s" % (ydelaylabeldet, xdelaylabeldet)] = covvaldep
covsdep.append(covvaldep)
mincovdep = np.min(covsdep)
maxcovdep = np.max(covsdep)
if abs(mincovdep) > maxcovdep:
extcovdep = mincovdep
else:
extcovdep = maxcovdep
# We do NOT want the min or max in the final covmat but the mean on all samples.
# do NOT take the mean of covsdep, some samples are not in !!
covdep = meancov
covindep = meancov
if method == "depbin":
covmat[ii][jj] = covdep
covmat[jj][ii] = covdep
elif method == "indepbin":
covmat[ii][jj] = covindep
covmat[jj][ii] = covindep
if verbose:
# I should definitely improve that display part...
print "-"*15
print i, j
print covdep, covindep
axinv = bincovplot.add_subplot(ncouples, ncouples, 2, frameon=False)
axinv.set_xticklabels([])
axinv.set_yticklabels([])
axinv.set_xticks([])
axinv.set_yticks([])
# and annotate
text = 'True delay plot range: +- %i [days]' % r + '\n\n'
text += 'Measurement error plot range: +- %.1f [days]' % rerr + '\n\n'
text += '1D binning: %i bins' % nbins + '\n\n'
text += '2D binning: %ix%i bins' % (nbins2d, nbins2d) + '\n\n'
text += 'Min. number of samples in 2D binning: %i samples' % minsamples + '\n\n\n\n'
if printdetails:
if len(covmat[0]) == 6:
mylist = [str(e) for e in covmat[0]]+\
[str(e) for e in covmat[1]]+\
[str(e) for e in covmat[2]]+\
[str(e) for e in covmat[3]]+\
[str(e) for e in covmat[4]]+\
[str(e) for e in covmat[5]]
mylist = [float(e) for e in mylist]
else:
print "Cov. matrix display not defined for matrices other than 6x6 !"
printcovmat = False
if printcovmat:
text += ' AB AC AD BC BD CD \n'
text += ' '+'-----'*12+'\n'
text += 'AB | %.2f %.2f %.2f %.2f %.2f %.2f \n |\n'\
'AC | %.2f %.2f %.2f %.2f %.2f %.2f \n |\n' \
'AD | %.2f %.2f %.2f %.2f %.2f %.2f \n |\n' \
'BC | %.2f %.2f %.2f %.2f %.2f %.2f \n |\n' \
'BD | %.2f %.2f %.2f %.2f %.2f %.2f \n |\n' \
'CD | %.2f %.2f %.2f %.2f %.2f %.2f \n |\n' \
% (mylist[0], mylist[1], mylist[2], mylist[3], mylist[4], mylist[5]
, mylist[6], mylist[7], mylist[8], mylist[9], mylist[10], mylist[11]
, mylist[12], mylist[13], mylist[14], mylist[15], mylist[16], mylist[17]
, mylist[18], mylist[19], mylist[20], mylist[21], mylist[22], mylist[23]
, mylist[24], mylist[25], mylist[26], mylist[27], mylist[28], mylist[29]
, mylist[30], mylist[31], mylist[32], mylist[33], mylist[34], mylist[35])
axinv.annotate(text, xy=(0.7 * (ncouples-1), -2.0), xycoords='axes fraction', ha="left")
else:
axinv.annotate(text, xy=(0.7 * (ncouples-1), -1.0), xycoords='axes fraction', ha="left")
retdict["r"] = r
retdict["rerr"] = rerr
retdict["nbins"] = nbins
retdict["nbins2d"] = nbins2d
retdict["minsamples"] = minsamples
if filepath != None:
bincovplot.savefig(os.path.join(filepath, "bincov.png"))
allcovplot.savefig(os.path.join(filepath, "allcov.png"))
else:
if showplots:
plt.show()
# now let's compare indepbins and depbins
if verbose:
print "-"*35
print "nbins = %i" % nbins
print "indepbins - r = %.1f" % r
print "depbins - r(max-min) =", np.mean(rranges)
print "-"*35
print "pair - indepbins - depbins - diff"
print "-"*35
print "AB - %.2f - %.2f - %.1f%%" % (indepbins[0], depbins[0], (max(indepbins[0], depbins[0])-min(indepbins[0], depbins[0])) / max(indepbins[0], depbins[0])*100)
print "AC - %.2f - %.2f - %.1f%%" % (indepbins[1], depbins[1], (max(indepbins[1], depbins[1])-min(indepbins[1], depbins[1])) / max(indepbins[1], depbins[1])*100)
if nimages == 4:
print "BC - %.2f - %.2f - %.1f%%" % (indepbins[3], depbins[3], (max(indepbins[3], depbins[3])-min(indepbins[3], depbins[3])) / max(indepbins[3], depbins[3])*100)
print "AD - %.2f - %.2f - %.1f%%" % (indepbins[2], depbins[2], (max(indepbins[2], depbins[2])-min(indepbins[2], depbins[2])) / max(indepbins[2], depbins[2])*100)
print "BD - %.2f - %.2f - %.1f%%" % (indepbins[4], depbins[4], (max(indepbins[4], depbins[4])-min(indepbins[4], depbins[4])) / max(indepbins[4], depbins[4])*100)
print "CD - %.2f - %.2f - %.1f%%" % (indepbins[5], depbins[5], (max(indepbins[5], depbins[5])-min(indepbins[5], depbins[5])) / max(indepbins[5], depbins[5])*100)
elif nimages == 3:
print "BC - %.2f - %.2f - %.1f%%" % (indepbins[2], depbins[2], (max(indepbins[2], depbins[2])-min(indepbins[2], depbins[2])) / max(indepbins[2], depbins[2])*100)
print "-"*35
retdict["covmat"] = covmat
return retdict
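# For reference, a sketch of the structure of the dict returned by newcovplot()
# (keys as built above; delay labels such as "AB" depend on the curve labels):
#
#   retdict["delay"]["AB"]["indep"] = {"syserror": ..., "randerror": ..., "toterror": ...}
#   retdict["delay"]["AB"]["dep"]   = {"syserror": ..., "randerror": ..., "toterror": ...}
#   retdict["cov"]["AC-AB"]["indep"] = {"<ybinlabel>-<xbinlabel>": covval, ...}
#   retdict["cov"]["AC-AB"]["dep"]   = {"<ybinlabel>-<xbinlabel>": covval, ...}
#   plus the scalars retdict["r"], ["rerr"], ["nbins"], ["nbins2d"], ["minsamples"]
#   and the full matrix retdict["covmat"].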
def measvstrue(rrlist, r=10.0, nbins = 10, plotpoints=True, alphapoints=1.0, plotrods=True, alpharods=0.2, ploterrorbars=True, sidebyside=True, errorrange=None, binclip=False, binclipr=10.0, title=None, xtitle=0.75, ytitle=0.95, titlesize=30, figsize=(10, 6), left = 0.06, right=0.97, top=0.99, bottom=0.08, wspace=0.15, hspace=0.3, txtstep=0.04, majorticksstep=2, displayn=True, filename=None, dataout=False, tweakeddisplay=False, blindness=False, outdir = "./"):
"""
Plots measured delays versus true delays
:param r: radius of simulation input delays to plot (x axis range)
:param nbins: number of bins for the bar plot within this range.
:param plotpoints: should I plot the points (scatter plot) ?
:param plotrods: should I plot the avg within each bin ?
:param ploterrorbars: should I add errorbars upon the bar plot ?
:param sidebyside: should I plot bars side by side, or overplot them ?
:param errorrange: radius of measurement errors to plot (y axis range). You can also give a tuple (low, high), to make asymmetric plots.
:param binclip: should I clip errors larger than binclipr days (catastrophic failures of methods) ?
:param binclipr: see binclip ...
"""
n = rrlist[0].nimages()
labels = rrlist[0].labels
# To get some fixed ranges for the histograms, we will use the first element of rrlist.
reftrueshifts = np.round(rrlist[0].gettruets()["center"])
#@todo: WAAARNING ! Depending on the shape your rrlist (is it a 1x1000 runresults or 50x20 runresults), reftrueshift will have different values, impacting the final determination of the systematic and random error you compute. This can lead to a variation >10% on the final error !!!! DO SOMETHING !!!
#print len(rrlist), rrlist[0].gettruets()["center"]
#sys.exit()
for rr in rrlist:
if rr.labels != labels:
raise RuntimeError("Don't ask me to overplot runresults of different curves !")
#if not np.all(rr.gettruets()["center"] == reftrueshifts):
# print "Warning : I use the trueshift of the first rr to set the ranges."
rr.trues = rr.gettruets() # To store this, avoids calculating it again and again.
rr.tmpdata = []
fig = plt.figure(figsize=figsize)
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
axisNum = 0
for i in range(n): # [A, B, C, D]
for j in range(n):
#print i, j
if (i == 0) or (j == n-1) :
continue # No plot
axisNum += 1
if j >= i:
continue
ax = plt.subplot(n-1, n-1, axisNum)
minorLocator = MultipleLocator(1.0)
majorLocator = MultipleLocator(majorticksstep)
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_major_locator(majorLocator)
if tweakeddisplay:
from matplotlib.ticker import MaxNLocator
locator=MaxNLocator(prune='both', nbins=6)
ax.yaxis.set_major_locator(locator)
ax.yaxis.set_minor_locator(MultipleLocator(1.0))
reftruedelay = reftrueshifts[i] - reftrueshifts[j]
plotrange = (reftruedelay - r, reftruedelay + r)
# Identity line :
line = np.linspace(plotrange[0], plotrange[1], 100)
zeros = np.zeros(100)
plt.plot(line, zeros, color="black", lw=0.5)
# Delay label
delaylabel="%s%s" % (labels[j], labels[i])
# Preparing the bins :
binlims = np.linspace(plotrange[0], plotrange[1], nbins + 1)
print binlims
for irr, rr in enumerate(rrlist): # We go through the different runresult objects
# We will express the delays "i - j"
truedelays = rr.truetsarray[:,i] - rr.truetsarray[:,j]
measdelays = rr.tsarray[:,i] - rr.tsarray[:,j]
resis = measdelays-truedelays
# A simple scatter plot of the residues :
if plotpoints:
ax.scatter(truedelays, resis, s=2, facecolor=rr.plotcolour, lw = 0, alpha=alphapoints)
# We bin those :
digitized = np.digitize(truedelays, binlims)
binvals = [resis[digitized == bini] for bini in range(1, len(binlims))]
binstds = map(np.std, binvals)
binmedians = map(np.median, binvals)
binmeans = map(np.mean, binvals)
if binclip:
for (bini, binvalarray) in enumerate(binvals):
#keep = np.logical_and(binvalarray < (binmedians[bini] + 1*binstds[bini]), binvalarray > (binmedians[bini] - 1*binstds[bini]))
#keep = np.logical_and(binvalarray < np.max(binvalarray), binvalarray > np.min(binvalarray))
keep = np.logical_and(binvalarray < binclipr, binvalarray > -binclipr)
if np.sum(keep == False) != 0:
print "Kicking %i points." % (np.sum(keep == False))
binvals[bini] = binvalarray[keep]
binstds = map(np.std, binvals)
binmedians = map(np.median, binvals)
binmeans = map(np.mean, binvals)
# We save the maximum sys and ran error :
syserror = np.max(np.fabs(binmeans))
randerror = np.max(binstds)
toterror = np.sqrt(syserror*syserror + randerror*randerror)
bias = np.mean(binmeans) # The signed bias
rr.tmpdata.append({
"label":delaylabel,
"sys":syserror,
"ran":randerror,
"tot":toterror,
"bias":bias
})
#binmeans = [np.median(resis[digitized == bini]) for bini in range(1, len(binlims))]
#binstds = [np.std(resis[digitized == bini]) for bini in range(1, len(binlims))]
width = binlims[1] - binlims[0]
if plotrods:
if not sidebyside:
if ploterrorbars:
ax.bar(binlims[:-1] + width/2.0, binmeans, yerr=binstds, width=width, color=rr.plotcolour, ecolor=rr.plotcolour, error_kw={"capsize":2.5, "capthick":0.5, "markeredgewidth":0.5}, edgecolor=rr.plotcolour, alpha = alpharods)
else:
ax.bar(binlims[:-1] + width/2.0, binmeans, width=width, color=rr.plotcolour, edgecolor=rr.plotcolour, alpha = alpharods)
else:
width = width/len(rrlist)
squeezefactor = 1.0
plotwidth = squeezefactor * width
offset = width * (1.0-squeezefactor)/2.0
if ploterrorbars:
ax.bar(binlims[:-1]+ width/2.0 + offset + irr*plotwidth, binmeans, yerr=binstds, width=plotwidth, color=rr.plotcolour, ecolor=rr.plotcolour, error_kw={"capsize":2.5, "capthick":0.5, "markeredgewidth":0.5}, edgecolor=rr.plotcolour, alpha = alpharods, linewidth=0)
else:
ax.bar(binlims[:-1]+ width/2.0 + offset + irr*plotwidth, binmeans, width=plotwidth, color=rr.plotcolour, edgecolor=rr.plotcolour, alpha = alpharods)
# That's it for the different runresult objects, back to the common stuff for this particular panel :
if sidebyside:
for binlim in binlims:
plt.axvline(binlim, lw=0.5, color="#AAAAAA", zorder=-30)
# on all border plots :
#if i == n-1:
# plt.xlabel("Synthetic input delay [day]")
#if j == 0:
# plt.ylabel("Delay measurement error [day]")
# Just on 2 plots :
if tweakeddisplay:
if i == n-1:
plt.xlabel("True delay [day]", fontsize=18)
if j == 0 and i == int(math.floor(n/2.0)):
plt.ylabel("Delay measurement error [day]", fontsize=18, y=-0.10)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
else:
if i == n-1:
plt.xlabel("True delay [day]", fontsize=16)
if j == 0 and i == int(math.floor(n/2.0)):
plt.ylabel("Delay measurement error [day]", fontsize=16)
plt.xticks(fontsize=13)
plt.yticks(fontsize=13)
xmin, xmax = ax.get_xlim()
ax.set_xticks(np.round(np.linspace(xmin, xmax, 10)))
plt.xlim(plotrange)
#plt.ylim(plotrange)
if errorrange != None:
				if hasattr(errorrange, '__iter__'): # then it's a tuple or list
plt.ylim((errorrange[0], errorrange[1]))
else:
plt.ylim((-errorrange, errorrange))
if n != 2: # otherwise we have only 1 delay and panel
plt.annotate(delaylabel, xy=(0.03, 0.88-txtstep), xycoords='axes fraction', fontsize=14, color="black")
# enforce blindness if wanted, by modifying the xticks labels (not touching the data)
if blindness:
labels = ax.xaxis.get_ticklabels()
locs = ax.xaxis.get_ticklocs()
meanloc = np.mean(locs)
blindlabels = []
for loc, label in zip(locs, labels):
blindlabels.append(str(loc-meanloc))
ax.xaxis.set_ticklabels(blindlabels)
# That's it for this panel, back to the total figure :
if dataout:
for rr in rrlist:
dc = delaycontainer(data = rr.tmpdata, name = rr.name, plotcolour = rr.plotcolour, objects=labels[:])
pycs.gen.util.writepickle(dc,outdir+ "%s_errorbars.pkl" % (rr.autoname))
rr.tmpdata = None
labelspacetop = 0.0
labelspaceright = 0.0
if n == 2:
labelspacetop = 0.04
labelspaceright = 0.04
for irr, rr in enumerate(rrlist):
if displayn:
labeltxt = "%s (%i) " % (getattr(rr, 'name', 'NoName'), rr.tsarray.shape[0])
else:
labeltxt = "%s" % (getattr(rr, 'name', 'NoName'))
if not tweakeddisplay:
plt.figtext(x = right - labelspaceright, y = top - labelspacetop - txtstep*irr, s = labeltxt, verticalalignment="top", horizontalalignment="right", color=rr.plotcolour, fontsize=15)
else:
plt.figtext(x = 0.54, y = 0.8325 - txtstep*irr, s = labeltxt, verticalalignment="top", horizontalalignment="left", color=rr.plotcolour, fontsize=17)
if title != None:
#plt.figtext(x = left + (right-left)/2.0, y = ytitle, s = title, horizontalalignment="center", color="black", fontsize=18)
plt.figtext(x = xtitle, y = ytitle, s = title, horizontalalignment="center", color="black", fontsize=titlesize)
if filename==None:
plt.show()
else:
plt.savefig(filename)
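# The sketch below is illustrative only and not part of PyCS: it reproduces, with
# plain numpy, the per-bin statistics computed in the panels above. Residuals are
# binned by true delay, optionally clipped at +/- binclipr days, and the systematic
# error is the largest absolute bin mean while the random error is the largest bin
# standard deviation. The function name and default values are made up for the example.
def _binned_error_sketch(truedelays, measdelays, nbins=10, binclipr=10.0):
	import numpy as np
	resis = np.asarray(measdelays) - np.asarray(truedelays)
	binlims = np.linspace(np.min(truedelays), np.max(truedelays), nbins + 1)
	digitized = np.digitize(truedelays, binlims)
	binvals = [resis[digitized == bini] for bini in range(1, len(binlims))]
	binvals = [b[np.fabs(b) < binclipr] for b in binvals]  # clip catastrophic outliers
	binvals = [b for b in binvals if len(b) > 0]  # drop empty bins
	syserror = max(np.fabs(np.mean(b)) for b in binvals)
	randerror = max(np.std(b) for b in binvals)
	return syserror, randerror, np.sqrt(syserror**2 + randerror**2)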
def covplot(rrlist, showpoints=False, showcontour=True, showdensity=False, fractionalresiduals=False, bins=50, smoothing=0.0, figsize=(12, 12), left=0.02, right=0.98, bottom=0.02, top=0.98, wspace=0.05, hspace=0.05, r=5.0, title=None, txtstep=0.04, filename=None):
"""
Covariance scatter of all measurement errors.
Give me a single runresults object (from a sim, with known true delays).
"""
import scipy.stats
import matplotlib.colors as colors
nimages = rrlist[0].nimages()
imginds = np.arange(nimages)
#nruns = len(rr[0])
labels = rrlist[0].labels
couplelist = [(i, j) for j in imginds for i in imginds if i > j]
ncouples = len(couplelist)
fig = plt.figure(figsize=figsize)
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
axisNum = 0
for ii, i in enumerate(couplelist): # (0, 1), (0, 2) ...
for jj, j in enumerate(couplelist):
if (ii == 0) or (jj == ncouples-1) :
continue # No plot
axisNum += 1
if jj >= ii:
continue
#print i, j, axisNum
ax = plt.subplot(ncouples-1, ncouples-1, axisNum, aspect='equal')
ax.axhline(0, color="black")
ax.axvline(0, color="black")
for rr in rrlist:
#print idelaylabel, " vs ", jdelaylabel
itruedelays = rr.truetsarray[:,i[0]] - rr.truetsarray[:,i[1]]
imeasdelays = rr.tsarray[:,i[0]] - rr.tsarray[:,i[1]]
if fractionalresiduals:
iresis = (imeasdelays - itruedelays)/itruedelays
else:
iresis = imeasdelays - itruedelays
jtruedelays = rr.truetsarray[:,j[0]] - rr.truetsarray[:,j[1]]
jmeasdelays = rr.tsarray[:,j[0]] - rr.tsarray[:,j[1]]
if fractionalresiduals:
jresis = (jmeasdelays - jtruedelays)/jtruedelays
else:
jresis = jmeasdelays - jtruedelays
if showdensity or "diff" in rr.name:
cmap = colors.LinearSegmentedColormap.from_list('custom',['white', rr.plotcolour],gamma=1.0)
#cmap = colors.LinearSegmentedColormap.from_list('custom',[rr.plotcolour, rr.plotcolour],gamma=1.0)
#cmap._init()
#alphas = np.abs(np.linspace(0.0, 0.5, cmap.N))
#cmap._lut[:-3,-1] = alphas
ax.hexbin(iresis, jresis, gridsize=bins, extent=(-r, r, -r, r), mincnt=1, cmap=cmap, edgecolor="none")
if showpoints:
ax.scatter(iresis, jresis, s=2, facecolor=rr.plotcolour, lw = 0)
#ax.hexbin(iresis, jresis, gridsize=20, extent=(-r, r, -r, r))
if showcontour:
"""
H, xedges, yedges = np.histogram2d(iresis, jresis, range=[[-r,r], [-r,r]], bins=(bins, bins))
H = H.transpose()
if smoothing > 0.01:
H = scipy.ndimage.filters.gaussian_filter(H, smoothing, mode='constant', cval=0.0)
extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
#levels = [np.mean(H), np.max(H)/2.0]
#levels = [2.0*np.mean(H), 6.0*np.mean(H)]
#levels = (1.0e4, 1.0e3, 1.0e2, 2.0e1)
levels = [scipy.stats.scoreatpercentile(H.flatten(), 95.45), scipy.stats.scoreatpercentile(H.flatten(), 68.27)]
#levels = [scipy.stats.scoreatpercentile(H.flatten(), 68.27)]
cset = ax.contour(H, levels=levels, origin="lower", colors=rr.plotcolour, extent=extent, linewidth=0.5)
"""
H, xedges, yedges = np.histogram2d(iresis, jresis, range=[[-r,r], [-r,r]], bins=(bins, bins))
extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
data = np.vstack((iresis, jresis))
#print data.shape
kde = scipy.stats.kde.gaussian_kde(data)
grid = np.mgrid[-r:r:1j*bins, -r:r:1j*bins]
grid_coords = np.append(grid[0].reshape(-1,1),grid[1].reshape(-1,1),axis=1)
z = kde(grid_coords.T)
z = z.reshape(bins,bins)
#levels = [scipy.stats.scoreatpercentile(z.flatten(), 95.45)]
levels = [np.max(z)*0.45]
cset = ax.contour(grid[0], grid[1], z, levels=levels, origin="lower", colors=rr.plotcolour, extent=extent, linewidth=0.5)
idelaylabel="%s%s" % (labels[i[1]], labels[i[0]])
jdelaylabel="%s%s" % (labels[j[1]], labels[j[0]])
#ax.set_xlabel(idelaylabel)
#ax.set_ylabel(jdelaylabel)
if figsize[0] > 8:
ax.annotate(idelaylabel, xy=(0.9, 0.05), xycoords='axes fraction', ha="center") # x axis
ax.annotate(jdelaylabel, xy=(0.06, 0.85), xycoords='axes fraction', ha="left", rotation=90.0) # y axis
else:
ax.annotate(idelaylabel, xy=(0.78, 0.08), xycoords='axes fraction', ha="center") # x axis
ax.annotate(jdelaylabel, xy=(0.08, 0.76), xycoords='axes fraction', ha="left", rotation=90.0) # y axis
ax.set_xlim(-r, r)
ax.set_ylim(-r, r)
majorLocator = MultipleLocator(1.0)
ax.xaxis.set_major_locator(majorLocator)
majorLocator = MultipleLocator(1.0)
ax.yaxis.set_major_locator(majorLocator)
ax.set_xticklabels([])
ax.set_yticklabels([])
#ax.annotate(delaytext, xy=(0.03, 0.78 - 3*txtstep*(irr+0.5)), xycoords='axes fraction', color = datarr.plotcolour)
if title != None:
plt.figtext(x = 0.5, y = 0.97, s = title, horizontalalignment="center", color="black", fontsize=18)
#for (irr, rr) in enumerate(rrlist):
# plt.figtext(x = left + 0.25*irr, y = 0.96, s = getattr(rr, 'name', 'NoName'), horizontalalignment="left", color=rr.plotcolour)
for irr, rr in enumerate(rrlist):
labeltxt = "%s" % (getattr(rr, 'name', 'NoName'))
plt.figtext(x = right, y = top - txtstep*irr, s = labeltxt, verticalalignment="top", horizontalalignment="right", color=rr.plotcolour)
if filename==None:
plt.show()
else:
plt.savefig(filename)
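# Illustrative sketch, not part of PyCS: the contour drawn in covplot above comes
# from a kernel density estimate of the joint residual distribution, with a single
# level placed at 45% of the peak density as a rough "core" contour. The function
# name is made up for the example.
def _kde_contour_sketch(ax, iresis, jresis, r=5.0, bins=50, colour="black"):
	import numpy as np
	import scipy.stats
	kde = scipy.stats.gaussian_kde(np.vstack((iresis, jresis)))
	grid = np.mgrid[-r:r:1j*bins, -r:r:1j*bins]
	z = kde(np.vstack((grid[0].ravel(), grid[1].ravel()))).reshape(bins, bins)
	ax.contour(grid[0], grid[1], z, levels=[0.45 * np.max(z)], colors=colour)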
|
COSMOGRAIL/PyCS
|
pycs/sim/plot.py
|
Python
|
gpl-3.0
| 70,485
|
[
"Bowtie"
] |
83581d7c569188bd6a2e91fd7d93404fb14d2f4ff454d3381b2590ff822b1bab
|
"""
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import addnodes, __version__ as sphinx_ver
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.writers.html import SmartyPantsHTMLTranslator
from sphinx.util.console import bold
from sphinx.util.compat import Directive
from sphinx.util.nodes import set_source_info
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
app.add_crossref_type(
directivename="setting",
rolename="setting",
indextemplate="pair: %s; setting",
)
app.add_crossref_type(
directivename="templatetag",
rolename="ttag",
indextemplate="pair: %s; template tag"
)
app.add_crossref_type(
directivename="templatefilter",
rolename="tfilter",
indextemplate="pair: %s; template filter"
)
app.add_crossref_type(
directivename="fieldlookup",
rolename="lookup",
indextemplate="pair: %s; field lookup type",
)
app.add_description_unit(
directivename="django-admin",
rolename="djadmin",
indextemplate="pair: %s; django-admin command",
parse_node=parse_django_admin_node,
)
app.add_description_unit(
directivename="django-admin-option",
rolename="djadminopt",
indextemplate="pair: %s; django-admin command-line option",
parse_node=parse_django_adminopt_node,
)
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
# register the snippet directive
app.add_directive('snippet', SnippetWithFilename)
# register a node for snippet directive so that the xml parser
# knows how to handle the enter/exit parsing event
app.add_node(snippet_with_filename,
html=(visit_snippet, depart_snippet_literal),
latex=(visit_snippet_latex, depart_snippet_latex),
man=(visit_snippet_literal, depart_snippet_literal),
text=(visit_snippet_literal, depart_snippet_literal),
texinfo=(visit_snippet_literal, depart_snippet_literal))
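# Illustrative only, not part of this module: a docs project would typically load
# this extension from its Sphinx conf.py roughly like the (hypothetical) lines
# below, assuming the file lives in docs/_ext/ as in Django's own docs layout.
#
#   import os
#   import sys
#   sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
#   extensions = ["djangodocs"]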
class snippet_with_filename(nodes.literal_block):
"""
Subclass the literal_block to override the visit/depart event handlers
"""
pass
def visit_snippet_literal(self, node):
"""
default literal block handler
"""
self.visit_literal_block(node)
def depart_snippet_literal(self, node):
"""
default literal block handler
"""
self.depart_literal_block(node)
def visit_snippet(self, node):
"""
HTML document generator visit handler
"""
lang = self.highlightlang
linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(node.rawsource, lang,
warn=warner,
linenos=linenos,
**highlight_args)
starttag = self.starttag(node, 'div', suffix='',
CLASS='highlight-%s' % lang)
self.body.append(starttag)
    self.body.append('<div class="snippet-filename">%s</div>\n' % (fname,))
self.body.append(highlighted)
self.body.append('</div>\n')
raise nodes.SkipNode
def visit_snippet_latex(self, node):
"""
Latex document generator visit handler
"""
self.verbatim = ''
def depart_snippet_latex(self, node):
"""
Latex document generator depart handler.
"""
code = self.verbatim.rstrip('\n')
lang = self.hlsettingstack[-1][0]
linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.curfilestack[-1], node.line))
hlcode = self.highlighter.highlight_block(code, lang, warn=warner,
linenos=linenos,
**highlight_args)
self.body.append('\n{\\colorbox[rgb]{0.9,0.9,0.9}'
'{\\makebox[\\textwidth][l]'
'{\\small\\texttt{%s}}}}\n' % (fname,))
if self.table:
hlcode = hlcode.replace('\\begin{Verbatim}',
'\\begin{OriginalVerbatim}')
self.table.has_problematic = True
self.table.has_verbatim = True
hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim}
hlcode = hlcode.rstrip() + '\n'
self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' %
(self.table and 'Original' or ''))
self.verbatim = None
class SnippetWithFilename(Directive):
"""
    The 'snippet' directive, which allows adding the (optional) filename
    of a code snippet to the document. This is modeled after CodeBlock.
"""
has_content = True
optional_arguments = 1
option_spec = {'filename': directives.unchanged_required}
def run(self):
code = '\n'.join(self.content)
literal = snippet_with_filename(code, code)
if self.arguments:
literal['language'] = self.arguments[0]
literal['filename'] = self.options['filename']
set_source_info(self, literal)
return [literal]
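# Illustrative usage of the directive above in a reST source file (the filename
# and snippet content are made up for the example):
#
#   .. snippet:: python
#       :filename: myapp/views.py
#
#       from django.http import HttpResponse
#
#       def index(request):
#           return HttpResponse("Hello")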
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
if len(self.arguments) > 1:
msg = """Only one argument accepted for directive '{directive_name}::'.
Comments should be provided as content,
not as an extra argument.""".format(directive_name=self.name)
raise self.error(msg)
env = self.state.document.settings.env
ret = []
node = addnodes.versionmodified()
ret.append(node)
if self.arguments[0] == env.config.django_next_version:
node['version'] = "Development version"
else:
node['version'] = self.arguments[0]
node['type'] = self.name
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
self._table_row_index = 0 # Needed by Sphinx
self.body.append(self.starttag(node, 'table', CLASS='docutils'))
def depart_table(self, node):
self.compact_p = self.context.pop()
self.body.append('</table>\n')
# <big>? Really?
def visit_desc_parameterlist(self, node):
self.body.append('(')
self.first_param = 1
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
self.body.append(')')
if sphinx_ver < '1.0.8':
#
# Don't apply smartypants to literal blocks
#
def visit_literal_block(self, node):
self.no_smarty += 1
SmartyPantsHTMLTranslator.visit_literal_block(self, node)
def depart_literal_block(self, node):
SmartyPantsHTMLTranslator.depart_literal_block(self, node)
self.no_smarty -= 1
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
    # which is a bit less obvious than I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
# that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
'deprecated': 'Deprecated in Django %s',
'versionchanged': 'Changed in Django %s',
'versionadded': 'New in Django %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
title = "%s%s" % (
self.version_text[node['type']] % node['version'],
":" if len(node) else "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get('ids', [])
node['ids'] = ['s-' + i for i in old_ids]
node['ids'].extend(old_ids)
SmartyPantsHTMLTranslator.visit_section(self, node)
node['ids'] = old_ids
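    # Illustrative only: a section whose docutils id would normally be "models"
    # is rendered with ids ["s-models", "models"], so custom CSS/JS can target
    # the "s-" prefixed anchor without breaking existing "#models" links.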
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env._django_curr_admin_command = command
title = "django-admin.py %s" % sig
signode += addnodes.desc_name(title, title)
return sig
def parse_django_adminopt_node(env, sig, signode):
"""A copy of sphinx.directives.CmdoptionDesc.parse_signature()"""
from sphinx.domains.std import option_desc_re
count = 0
firstname = ''
for m in option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not count:
for m in simple_option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not firstname:
raise ValueError
return firstname
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = 'djangohtml'
def finish(self):
super(DjangoStandaloneHTMLBuilder, self).finish()
self.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatetag" and l == "ref/templates/builtins"],
"tfilters": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatefilter" and l == "ref/templates/builtins"],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
with open(outfilename, 'w') as fp:
fp.write('var django_template_builtins = ')
json.dump(templatebuiltins, fp)
fp.write(';\n')
|
Beeblio/django
|
docs/_ext/djangodocs.py
|
Python
|
bsd-3-clause
| 11,807
|
[
"VisIt"
] |
410c15ea2fec7935d0cc332a2122ddbb46b01ef4d8bb3f1437d952d808e6f517
|
"""
This app is intended to provide the core functionality for tracking user
engagement with content and Kolibri in general. As such, it is intended
to store details of user interactions with content, a summary of those
interactions, interactions with the software in general, as well as user
feedback on the content and the software.
"""
from __future__ import unicode_literals
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from kolibri.auth.constants import role_kinds
from kolibri.auth.models import AbstractFacilityDataModel, Facility, FacilityUser
from kolibri.auth.permissions.base import RoleBasedPermissions
from kolibri.auth.permissions.general import IsOwn
from kolibri.content.content_db_router import default_database_is_attached, get_active_content_database
from kolibri.content.models import UUIDField
from .permissions import AnyoneCanWriteAnonymousLogs
class BaseLogQuerySet(models.QuerySet):
def filter_by_topic(self, topic, content_id_lookup="content_id"):
"""
Filter a set of logs by content_id, using content_ids from all descendants of specified topic.
"""
content_ids = topic.get_descendant_content_ids()
        return self.filter_by_content_ids(content_ids, content_id_lookup=content_id_lookup)
def filter_by_content_ids(self, content_ids, content_id_lookup="content_id"):
"""
Filter a set of logs by content_id, using content_ids from the provided list or queryset.
"""
if default_database_is_attached():
# perform the query using an efficient cross-database join, if possible
return self.using(get_active_content_database()).filter(**{content_id_lookup + "__in": content_ids})
else:
# if the databases can't be joined, convert the content_ids into a list and pass in
return self.filter(**{content_id_lookup + "__in": list(content_ids)})
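    # Illustrative usage, not part of this module ("topic_node" stands for any
    # object exposing get_descendant_content_ids(), as assumed by filter_by_topic):
    #
    #   summaries = ContentSummaryLog.objects.filter_by_topic(topic_node)
    #   sessions = ContentSessionLog.objects.filter_by_content_ids(some_content_ids)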
def log_permissions(user_field):
return (
AnyoneCanWriteAnonymousLogs(field_name=user_field + '_id') |
IsOwn(field_name=user_field + '_id') |
RoleBasedPermissions(
target_field=user_field,
can_be_created_by=(role_kinds.ADMIN,),
can_be_read_by=(role_kinds.ADMIN, role_kinds.COACH),
can_be_updated_by=(role_kinds.ADMIN,),
can_be_deleted_by=(role_kinds.ADMIN,),
)
)
class BaseLogModel(AbstractFacilityDataModel):
permissions = log_permissions("user")
class Meta:
abstract = True
def infer_dataset(self):
if self.user:
return self.user.dataset
else:
facility = Facility.get_default_facility()
assert facility, "Before you can save logs, you must have a facility"
return facility.dataset
objects = BaseLogQuerySet.as_manager()
class ContentSessionLog(BaseLogModel):
"""
This model provides a record of interactions with a content item within a single visit to that content page.
"""
user = models.ForeignKey(FacilityUser, blank=True, null=True)
content_id = UUIDField(db_index=True)
channel_id = UUIDField()
start_timestamp = models.DateTimeField()
end_timestamp = models.DateTimeField(blank=True, null=True)
time_spent = models.FloatField(help_text="(in seconds)", default=0.0, validators=[MinValueValidator(0)])
progress = models.FloatField(default=0, validators=[MinValueValidator(0)])
kind = models.CharField(max_length=200)
extra_fields = models.TextField(default="{}")
class ContentSummaryLog(BaseLogModel):
"""
This model provides a summary of all interactions a user has had with a content item.
"""
user = models.ForeignKey(FacilityUser)
content_id = UUIDField(db_index=True)
channel_id = UUIDField()
start_timestamp = models.DateTimeField()
end_timestamp = models.DateTimeField(blank=True, null=True)
completion_timestamp = models.DateTimeField(blank=True, null=True)
time_spent = models.FloatField(help_text="(in seconds)", default=0.0, validators=[MinValueValidator(0)])
progress = models.FloatField(default=0, validators=[MinValueValidator(0), MaxValueValidator(1)])
kind = models.CharField(max_length=200)
extra_fields = models.TextField(default="{}")
class ContentRatingLog(BaseLogModel):
"""
This model provides a record of user feedback on a content item.
"""
user = models.ForeignKey(FacilityUser, blank=True, null=True)
content_id = UUIDField(db_index=True)
channel_id = UUIDField()
quality = models.IntegerField(blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(5)])
ease = models.IntegerField(blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(5)])
learning = models.IntegerField(blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(5)])
feedback = models.TextField(blank=True)
class UserSessionLog(BaseLogModel):
"""
This model provides a record of a user session in Kolibri.
"""
user = models.ForeignKey(FacilityUser)
channels = models.TextField(blank=True)
start_timestamp = models.DateTimeField(auto_now_add=True)
completion_timestamp = models.DateTimeField(blank=True, null=True)
pages = models.TextField(blank=True)
class MasteryLog(BaseLogModel):
"""
This model provides a summary of a user's engagement with an assessment within a mastery level
"""
permissions = log_permissions("summarylog__user")
# Every MasteryLog is related to the single summary log for the user/content pair
summarylog = models.ForeignKey(ContentSummaryLog, related_name="masterylogs")
# The MasteryLog records the mastery criterion that has been specified for the user.
# It is recorded here to prevent this changing in the middle of a user's engagement
# with an assessment.
mastery_criterion = models.TextField()
start_timestamp = models.DateTimeField()
end_timestamp = models.DateTimeField(blank=True, null=True)
completion_timestamp = models.DateTimeField(blank=True, null=True)
# The integer mastery level that this log is tracking.
mastery_level = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)])
# Has this mastery level been completed?
complete = models.BooleanField(default=False)
def infer_dataset(self):
return self.summarylog.dataset
class AttemptLog(AbstractFacilityDataModel):
"""
This model provides a summary of a user's engagement within a particular interaction with an
item in an assessment
"""
permissions = log_permissions("sessionlog__user")
# Unique identifier within the relevant assessment for the particular question/item
# that this attemptlog is a record of an interaction with.
item = models.CharField(max_length=200)
start_timestamp = models.DateTimeField()
end_timestamp = models.DateTimeField()
completion_timestamp = models.DateTimeField(blank=True, null=True)
# Which mastery log was this attemptlog associated with?
masterylog = models.ForeignKey(MasteryLog, related_name="attemptlogs", blank=True, null=True)
sessionlog = models.ForeignKey(ContentSessionLog, related_name="attemptlogs")
time_spent = models.FloatField(help_text="(in seconds)", default=0.0, validators=[MinValueValidator(0)])
complete = models.BooleanField(default=False)
# How correct was their answer? In simple cases, just 0 or 1.
correct = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(1)])
hinted = models.BooleanField(default=False)
# JSON blob that would allow the learner's answer to be rerendered in the frontend interface
answer = models.TextField()
# A human readable answer that could be rendered directly in coach reports, can be blank.
simple_answer = models.CharField(max_length=200, blank=True)
# A JSON Array with a sequence of JSON objects that describe the history of interaction of the user
# with this assessment item in this attempt.
interaction_history = models.TextField()
def infer_dataset(self):
return self.sessionlog.dataset
|
jayoshih/kolibri
|
kolibri/logger/models.py
|
Python
|
mit
| 8,175
|
[
"VisIt"
] |
9e7e609b0be89458597e1d8363cb7a79dfdf96f065985d9c54fd1fe817ffdd0d
|
# -*- coding: utf-8 -*-
import re
import os
import sys
from os import path
import traceback
#TODO:
# support #else
# support #define
# support constrainttypes
GMXLIB = ["/usr/local/gromacs/share/gromacs/top/","/usr/share/gromacs/top/"]
if("GMXLIB" in os.environ):
GMXLIB = os.environ["GMXLIB"].split(":")
#===============================================================================
class LineEntry(object):
_fieldtypes = ()
_fieldnames = ()
def __init__(self, lineno, line, ifdef_stack):
self.lineno = lineno
self.ifdef_stack = ifdef_stack
if(not hasattr(self, "_min_values")):
self._min_values = len(self._fieldnames)
values = line.split()
assert(len(values) >= self._min_values)
for t, k in zip(self._fieldtypes, self._fieldnames):
self.__dict__[k] = None
if(len(values)>0):
self.__dict__[k] = t(values.pop(0))
self._rest = values
def asline(self):
values = [ self.__dict__[k] for k in self._fieldnames ]
parts = [ str(v) for v in values if v!=None ]
parts += self._rest
return(" ".join(parts))
#===============================================================================
# Copied from gromacs-4.5.3/src/kernel/toppush.c , row 214
#
# Comments on optional fields in the atomtypes section:
#
# The force field format is getting a bit old. For OPLS-AA we needed
# to add a special bonded atomtype, and for Gerrit Groenhofs QM/MM stuff
# we also needed the atomic numbers.
# To avoid making all old or user-generated force fields unusable we
# have introduced both these quantities as optional columns, and do some
# acrobatics to check whether they are present or not.
# This will all look much nicer when we switch to XML... sigh.
#
# Field 0 (mandatory) is the nonbonded type name. (string)
# Field 1 (optional) is the bonded type (string)
# Field 2 (optional) is the atomic number (int)
# Field 3 (mandatory) is the mass (numerical)
# Field 4 (mandatory) is the charge (numerical)
# Field 5 (mandatory) is the particle type (single character)
# This is followed by a number of nonbonded parameters.
#
# The safest way to identify the format is the particle type field.
#
# So, here is what we do:
#
# A. Read in the first six fields as strings
# B. If field 3 (starting from 0) is a single char, we have neither
# bonded_type or atomic numbers.
# C. If field 5 is a single char we have both.
# D. If field 4 is a single char we check field 1. If this begins with
# an alphabetical character we have bonded types, otherwise atomic numbers.
# Some atomtypes sections have more columns, but e.g. diala_quick has a strange 3rd column
class Atomtype(LineEntry):
def __init__(self, lineno, line, ifdef_stack):
self.bondtype = None
self.at_number = None
values = line.split()
if(len(values[3])==1 and values[3].isalpha()):
self._fieldtypes = (str, float, float, str)
self._fieldnames = ("name", "mass", "charge", "ptype")
elif(len(values[5])==1 and values[5].isalpha()):
self._fieldtypes = (str, str, int, float, float, str)
self._fieldnames = ("name", "bondtype", "at_number", "mass", "charge", "ptype")
elif(len(values[4])==1 and values[4].isalpha()):
if(values[1][0].isalpha()):
self._fieldtypes = (str, str, float, float, str)
self._fieldnames = ("name", "bondtype", "mass", "charge", "ptype")
else:
self._fieldtypes = (str, int, float, float, str)
self._fieldnames = ("name", "at_number", "mass", "charge", "ptype")
else:
assert(False)
LineEntry.__init__(self, lineno, line, ifdef_stack)
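# Illustrative only (made-up parameter values): the detection above maps example
# [ atomtypes ] lines to these field layouts:
#
#   "CA        12.011  0.000  A  0.339  0.360"         -> name, mass, charge, ptype
#   "opls_135  CT  6  12.011 -0.180  A  0.350  0.276"  -> name, bondtype, at_number, mass, charge, ptype
#   "opls_135  CT     12.011 -0.180  A  0.350  0.276"  -> name, bondtype, mass, charge, ptype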
#===============================================================================
class Bondtype(LineEntry):
pass
class Default(LineEntry):
pass
class Angletype(LineEntry):
pass
#===============================================================================
class Dihedraltype(LineEntry):
# Copied from gromacs-4.5.3/src/kernel/toppush.c, row 683
#
# This routine accepts dihedraltypes defined from either 2 or 4 atoms.
#
	# We first check for 2 atoms with the 3rd column being an integer
# defining the type. If this isn't the case, we try it with 4 atoms
# and the 5th column defining the dihedral type.
def __init__(self, lineno, line, ifdef_stack):
values = line.split()
if(values[4].isdigit()):
self._fieldtypes = (str, str, str, str, int)
self._fieldnames = ("ai", "aj", "ak", "al", "funct")
elif(values[2].isdigit()):
if(values[3] == '2'): # improper - the two atomtypes are 1,4. Use wildcards for 2,3
self._fieldtypes = (str, str, int)
self._fieldnames = ("ai", "al", "funct")
self.aj = "X" #wildcard
self.ak = "X" #wildcard
else: #proper - the two atomtypes are 2,3. Use wildcards for 1,4 */
self._fieldtypes = (str, str, int)
self._fieldnames = ("aj", "ak", "funct")
self.ai = "X" #wildcard
self.al = "X" #wildcard
else:
assert(False)
LineEntry.__init__(self, lineno, line, ifdef_stack)
#===============================================================================
class Atom(LineEntry):
_fieldtypes = (int , str , int , str , str , int , float , float)
_fieldnames = ("id", "atomtype", "res_nr", "res_name", "name", "cg_nr", "charge", "mass")
_min_values = 2 #TODO: check gromacs code
class Bond(LineEntry):
_fieldtypes = (int, int)
_fieldnames = ("ai", "aj")
class Pair(LineEntry):
_fieldtypes = (int, int)
_fieldnames = ("ai", "aj")
class Angle(LineEntry):
_fieldtypes = (int, int, int)
_fieldnames = ("ai", "aj", "ak")
class Dihedral(LineEntry):
_fieldtypes = (int , int , int , int , int)
_fieldnames = ("ai", "aj", "ak", "al", "funct")
_min_values = 4
class PositionRestraint(LineEntry):
_fieldtypes = (int,)
_fieldnames = ("ai",)
class Molecule(LineEntry):
_fieldtypes = (str , int )
_fieldnames = ("name", "mols")
#===============================================================================
class Section(list):
def __init__(self, name):
list.__init__(self)
self.name = name
def write(self):
if(len(self) == 0):
return("")
output = "[ "+self.name+" ]\n"
curr_ifdef_stack = []
for e in self:
if("\n".join(e.ifdef_stack) != "\n".join(curr_ifdef_stack)):
output += "#endif\n" * len(curr_ifdef_stack)
output += "".join([s+"\n" for s in e.ifdef_stack])
curr_ifdef_stack = e.ifdef_stack
output += e.asline() +"\n"
output += "#endif\n" * len(curr_ifdef_stack) #close left open blocks
return(output+"\n")
#===============================================================================
class Moleculetype(object):
def __init__(self, lineno, line, ifdef_stack):
self.lineno = lineno
self.ifdef_stack = ifdef_stack
self.name = line.split()[0]
self.nrexcl = line.split()[1]
self.atoms = Section("atoms")
self.bonds = Section("bonds")
self.pairs = Section("pairs")
self.angles = Section("angles")
self.dihedrals = Section("dihedrals")
self.position_restraints = Section("position_restraints")
def write(self):
output = "[ moleculetype ]\n"
output += "%s %s\n\n"%(self.name, self.nrexcl)
output += self.atoms.write()
output += self.pairs.write()
output += self.bonds.write()
output += self.angles.write()
output += self.dihedrals.write()
output += self.position_restraints.write()
return(output)
#===============================================================================
class Topology(object):
def write(self):
		# not implemented, yet
assert(len(self.defaults) == 0)
output = ""
output += "".join([s+"\n" for s in self.early_includes])
output += self.atomtypes.write()
output += self.bondtypes.write()
output += self.angletypes.write()
output += self.dihedraltypes.write()
for mt in self.moleculetypes:
output += mt.write()
output += "".join([s+"\n" for s in self.late_includes])
output += "[ system ]\n"
output += "%s\n\n"%self.system
output += self.molecules.write()
return(output)
#---------------------------------------------------------------------------
def __init__(self, rawdata):
self.defaults = []
self.atomtypes = Section("atomtypes")
self.bondtypes = Section("bondtypes")
self.angletypes = Section("angletypes")
self.dihedraltypes = Section("dihedraltypes")
self.moleculetypes = []
self.molecules = Section("molecules")
self.system = ""
self.early_includes = []
self.late_includes = []
curr_section = None # this is the currently open section
ifdef_stack = [] # this is a stack of currently open ifdef/ifndef blocks
rawlines = rawdata.split("\n")
for (i, line) in enumerate(rawlines, start=1):
try:
line = re.sub(";.*$", "", line)
line = line.strip()
if(len(line) == 0): # ignore empty lines
continue
if(line.lower().startswith("#if")):
ifdef_stack = ifdef_stack + [line] # creates a copy of ifdef_stack
continue
if(line.lower().startswith("#endif")):
assert(len(ifdef_stack) > 0) # endif needs prior opened if-block
ifdef_stack = ifdef_stack[:-1] # creates a copy of ifdef_stack
continue
if(line.lower().startswith("#include")):
if(len(self.atomtypes) == 0):
self.early_includes += [line]
else:
self.late_includes += [line]
continue
if(line.startswith("#")):
print("Skiping "+line)
continue
#recognize section headers
m = re.match("\s*\[\s*(\w*)\s*\]\s*", line)
if(m != None):
curr_section = m.group(1)
continue
if(curr_section == None):
					continue # we might ignore an unknown section
#assert(curr_section != None) # there should be an open section by now
if(curr_section == "defaults"):
self.defaults.append(Default(i, line, ifdef_stack))
elif(curr_section == "atomtypes"):
self.atomtypes.append(Atomtype(i,line, ifdef_stack))
elif(curr_section == "bondtypes"):
self.bondtypes.append(Bondtype(i,line, ifdef_stack))
elif(curr_section == "angletypes"):
self.angletypes.append(Angletype(i,line, ifdef_stack))
elif(curr_section == "dihedraltypes"):
self.dihedraltypes.append(Dihedraltype(i,line, ifdef_stack))
elif(curr_section == "moleculetype"):
self.moleculetypes.append(Moleculetype(i,line, ifdef_stack))
elif(curr_section == "atoms"):
self.moleculetypes[-1].atoms.append(Atom(i,line, ifdef_stack))
elif(curr_section == "bonds"):
self.moleculetypes[-1].bonds.append(Bond(i,line, ifdef_stack))
elif(curr_section == "pairs"):
self.moleculetypes[-1].pairs.append(Pair(i,line, ifdef_stack))
elif(curr_section == "angles"):
self.moleculetypes[-1].angles.append(Angle(i,line, ifdef_stack))
elif(curr_section == "dihedrals"):
self.moleculetypes[-1].dihedrals.append(Dihedral(i,line, ifdef_stack))
elif(curr_section == "position_restraints"):
self.moleculetypes[-1].position_restraints.append(PositionRestraint(i, line, ifdef_stack))
elif(curr_section == "system"):
self.system = (self.system+"\n"+line).strip()
elif(curr_section == "molecules"):
self.molecules.append(Molecule(i,line, ifdef_stack))
else:
print("ignoring unkown section: "+curr_section)
curr_section = None # skipt the following lines of this section
# except Exception as e:
# print "exception: ",e
# print "In section %s in line %d"%(curr_section,i)
# print line
# # print("Ignoring strange line:" +line)
#except Exception as e:
except:
print('Latest line: "%s"'%line)
# print "\n".join(rawlines[max(0, i-5): i-1])
# print('Latest line: "%s"'%line)
# print "\n".join(rawlines[max(0, i-5): min(len(rawlines)-1, i+5])
traceback.print_exc()
sys.exit()
assert(len(ifdef_stack) == 0) # all if-blocks should be closed at the end
#===============================================================================
# rejected alternatives:
# the original "cpp" would always resolve ALL includes :-(
# "grompp -pp" needs many additional files to run through without errors
# hence: do it ourselves
def preprocess(filename, includedirs=GMXLIB):
#pylint: disable=W0102
tmp = resolve_includes(filename, includedirs)
output = resolve_defines(tmp)
return(output)
#===============================================================================
def resolve_includes(filename, includedirs, filesloaded=set()):
if(filename in filesloaded):
raise(Exception("circular include"))
filesloaded.add(filename)
rawdata = open(filename).read()
def loadfile(m):
fn = m.group(1)
for d in [path.dirname(filename)] + includedirs:
absfn = path.join(d, fn)
if(path.exists(absfn)):
print("Including %s"%absfn)
return( resolve_includes(absfn, includedirs, filesloaded) )
print("Could not include %s"%fn)
return('\n#include "%s"\n'%fn)
output = re.sub('[\n^]#include\s+"([^"]*)"(?=\s)', loadfile , rawdata) #resolve includes
return(output)
#===============================================================================
def resolve_defines(input_data):
defines = dict()
output = ""
for line in input_data.split("\n"):
if(line.lower().startswith("#define ")):
parts = line.split(None, 2) + [None] #None is for flag-like defines
defines[ parts[1] ] = parts[2]
continue
for (k,v) in defines.items():
if( v!=None ):
line = line.replace(k, v)
output += line+"\n"
return(output)
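# Illustrative only (made-up topology fragment): resolve_defines() drops the
# "#define" lines themselves and substitutes valued defines into all later lines
# (flag-like defines without a value are recorded but not substituted), so
#
#   #define MY_FC 1000
#   bondk MY_FC
#
# becomes
#
#   bondk 1000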
#===============================================================================
#EOF
|
CMD-at-ZIB/ZIBMolPy
|
ZIBMolPy_package/ZIBMolPy/io/topology.py
|
Python
|
lgpl-3.0
| 13,427
|
[
"Gromacs"
] |
674a31dd06b2753a63d216f91561f5c9ef95c98752d478287ff71e7eacee1f1f
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import logging
import logging.handlers
import os
import espressopp
from espressopp.interaction import LennardJones
filename = "log.out"
class Test0Logging(unittest.TestCase) :
def test0Create(self) :
lj = LennardJones(1.0, 2.0, 3.0)
self.assertEqual(lj.epsilon, 1.0)
self.assertEqual(lj.sigma, 2.0)
self.assertEqual(lj.cutoff, 3.0)
# now read the file log.out and find "DEBUG" and "potential"
f = open(filename, "r")
s = f.read()
self.assert_(s.find("_espressopp.interaction.LennardJones") > 0)
self.assert_(s.find("DEBUG") > 0)
self.assert_(s.find("TRACE") < 0)
f.close()
if __name__ == "__main__":
if os.path.exists(filename):
os.remove(filename)
# create logger
log = logging.getLogger("_espressopp.interaction.LennardJones")
log.setLevel(logging.TRACE)
# deactivate propagation of log messages up the hierarchy
log.propagate=0
# create handler
handler = logging.FileHandler(filename)
handler.setLevel(logging.TRACE)
# create formatter
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# add formatter to handler
handler.setFormatter(formatter)
# add handler to logger
log.addHandler(handler)
unittest.main()
|
MrTheodor/espressopp
|
testsuite/unittest/TestEsLogging.py
|
Python
|
gpl-3.0
| 2,198
|
[
"ESPResSo"
] |
3f33ffa0c5bfdc7611c4c8f5f016868316a61edf940c2f813a7e2ec271514a7e
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
===================
Dirac video decoder
===================
This component decodes a stream of video, coded using the Dirac codec, into
frames of YUV video data.
This component is a thin wrapper around the Dirac Python bindings.
Example Usage
-------------
A simple video player::
Pipeline(ReadFileAdapter("diracvideofile.drc", ...other args...),
DiracDecoder(),
RateLimit(framerate),
VideoOverlay()
).activate()
More detail
-----------
Reads a raw dirac data stream, as strings, from the "inbox" inbox.
Sends out frames of decoded video to the "outbox" outbox.
The frames may not be emitted at a constant rate. You may therefore need to
buffer and rate limit them if displaying them.
The decoder will terminate if it receives a shutdownMicroprocess message on
its "control" inbox. The message is passed on out of the "signal" outbox.
It will ignore producerFinished messages.
The decoder is able to work out from the data stream when it has reached the
end of the stream. It then sends a producerFinished message out of the "signal"
outbox and terminates.
For more information see the Dirac Python bindings documentation.
===================
Dirac video encoder
===================
This component encodes frames of YUV video data with the Dirac codec.
This component is a thin wrapper around the Dirac Python bindings.
Example Usage
-------------
Raw video file encoder::
imagesize = (352, 288) # "CIF" size video
Pipeline(ReadFileAdapter("raw352x288video.yuv", ...other args...),
RawYUVFramer(imagesize),
DiracEncoder(preset="CIF"),
WriteFileAdapter("diracvideo.drc")
).activate()
RawYUVFramer is needed to frame raw YUV data into individual video frames.
More detail
-----------
Reads video frames from the "inbox" inbox.
Sends out encoded video data (as strings) in chunks to the "outbox" outbox.
The encoder can be configured with simple presets and/or more detailed encoder
and sequence parameters. Encoder and sequence parameters override those set with
a preset.
For more information see the Dirac Python bindings documentation.
The encoder will terminate if it receives a shutdownMicroprocess or
producerFinished message on its "control" inbox. The message is passed on out of
the "signal" outbox. If the message is producerFinished, then it will also send
any data still waiting to be sent out of the "outbox" outbox, otherwise any
pending data is lost.
The component does not yet support output of instrumentation or locally decoded
frames (the "verbose" option).
=========================
UNCOMPRESSED FRAME FORMAT
=========================
Uncompressed video frames are output by the decoder, as dictionaries. Each
contains the following entries::
{
"yuv" : (y_data, u_data, v_data) # a tuple of strings
"size" : (width, height) # in pixels
"frame_rate" : fps # frames per second
"interlaced" : 0 or not 0 # non-zero if the frame is two interlaced fields
"topfieldfirst" : 0 or not 0 # non-zero the first field comes first in the data
"pixformat" : "YUV420_planar" # format of raw video data
"chroma_size" : (width, height) # in pixels, for the u and v data
}
The encoder expects data in the same format, but only requires "yuv", "size",
and "pixformat".
"""
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdownMicroprocess
from dirac_parser import DiracParser
from dirac_encoder import DiracEncoder as EncoderWrapper
try:
from dirac_parser import dirac_version as _parser_version
except ImportError:
_parser_version = (0,5,4)
try:
from dirac_encoder import dirac_version as _encoder_version
except ImportError:
_encoder_version = (0,5,4)
from Kamaelia.Support.Data.Rationals import rational
def map_chroma_type(chromatype):
"""Maps string names for chrominance data formats to those understood by the Dirac Python bindings."""
if chromatype == "420":
return "YUV420_planar"
else:
raise "Dont know how to deal with this chroma type yet, sorry! - " + chromtype
class DiracDecoder(component):
"""
DiracDecoder() -> new Dirac decoder component
Creates a component that decodes Dirac video.
"""
Inboxes = { "inbox" : "Strings containing an encoded dirac video stream",
"control" : "for shutdown signalling",
}
Outboxes = { "outbox" : "YUV decoded video frames",
"signal" : "for shutdown/completion signalling",
}
def __init__(self):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(DiracDecoder, self).__init__()
self.decoder = DiracParser()
self.inputbuffer = ""
def main(self):
"""Main loop"""
done = False
while not done:
dataShortage = False
while self.dataReady("inbox"):
self.inputbuffer += self.recv("inbox")
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, shutdownMicroprocess):
self.send(msg, "signal")
done=True
try:
frame = self.decoder.getFrame()
frame['pixformat'] = map_chroma_type(frame['chroma_type'])
self.send(frame,"outbox")
except "NEEDDATA":
if self.inputbuffer:
self.decoder.sendBytesForDecode(self.inputbuffer)
self.inputbuffer = ""
else:
                    dataShortage = True
except "SEQINFO":
# sequence info dict in self.decoder.getSeqData()
pass
except "END":
done = True
self.send(producerFinished(self), "signal")
except "STREAMERROR":
print "Stream error"
raise "STREAMERROR"
except "INTERNALFAULT":
print "Internal fault"
raise "INTERNALFAULT"
if dataShortage and not done:
self.pause()
yield 1
class DiracEncoder(component):
"""
DiracEncoder([preset][,verbose][,encParams][,seqParams][,allParams]) -> new Dirac encoder component
Creates a component to encode video using the Dirac codec. Configuration based on
optional preset, optionally overriden by individual encoder and sequence parameters.
All three 'params' arguments are munged together, so do what you like :)
Keyword arguments:
- preset -- "CIF" or "SD576" or "HD720" or "HD1080" (presets for common video formats)
- verbose -- NOT YET IMPLEMENTED (IGNORED)
- encParams -- dict of encoder setup parameters only
- seqParams -- dict of video sequence parameters only
- allParams -- dict of encoder setup parameters, sequence parameters, and source parameters, all munged together
"""
def __init__(self, preset=None, verbose=False, encParams={}, seqParams={}, allParams={}):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(DiracEncoder, self).__init__()
allParams.update(encParams)
allParams.update(seqParams)
if 'frame_rate' in allParams:
allParams['frame_rate'] = rational(allParams['frame_rate'])
if "pix_asr" in allParams:
allParams['pix_asr'] = rational(allParams['pix_asr'])
if _encoder_version == (0,5,4):
self.encoder = EncoderWrapper(preset=preset, bufsize=1024*1024, verbose=verbose, encParams=allParams, seqParams=allParams)
else: # _encoder_version == (0,6,0):
self.encoder = EncoderWrapper(preset=preset, bufsize=1024*1024, verbose=verbose, allParams=allParams)
def main(self):
"""Main loop"""
done = False
msg = None
while not done:
while self.dataReady("inbox"):
frame = self.recv("inbox")
data = "".join(frame['yuv'])
self.encoder.sendFrameForEncode(data)
while 1: # loop until 'needdata' event breaks out of this
try:
bytes = self.encoder.getCompressedData()
self.send(bytes,"outbox")
except "NEEDDATA":
break
except "ENCODERERROR":
print "Encoder Error"
raise "ENCODERERROR"
except "INTERNALFAULT":
print "Internal Fault"
raise "INTERNALFAULT"
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, shutdownMicroprocess):
self.send(msg,"signal")
done=True
elif isinstance(msg, producerFinished):
# write 'end of sequence' data
data = self.encoder.getEndSequence()
self.send(data, "outbox")
yield 1
self.send(msg, "signal")
if not done:
self.pause()
yield 1
__kamaelia_components__ = ( DiracDecoder, DiracEncoder )
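# Illustrative only (sizes and pixel data are made up): a minimal frame dict in
# the uncompressed format documented at the top of this module, as DiracEncoder
# expects it on its "inbox". Real YUV 4:2:0 planes would normally come from
# something like RawYUVFramer rather than being built by hand.
#
#   width, height = 352, 288
#   frame = {
#       "yuv"       : ("\0"*(width*height), "\0"*(width*height/4), "\0"*(width*height/4)),
#       "size"      : (width, height),
#       "pixformat" : "YUV420_planar",
#   }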
|
sparkslabs/kamaelia
|
Sketches/MH/dirac/Dirac.py
|
Python
|
apache-2.0
| 10,508
|
[
"DIRAC"
] |
5c274cebd8c0f5f62962b86fa87062e5745795cd01f2b079a2711ed55760ffb4
|
from mdtraj import element
from mdtraj.testing import assert_raises
def test_immutable():
def f():
element.hydrogen.mass = 1
def g():
element.radium.symbol = 'sdfsdfsdf'
def h():
element.iron.name = 'sdfsdf'
assert_raises(AttributeError, f)
assert_raises(AttributeError, g)
assert_raises(AttributeError, h)
assert element.hydrogen.mass == 1.007947
assert element.radium.symbol == 'Ra'
assert element.iron.name == 'iron'
|
casawa/mdtraj
|
mdtraj/tests/test_element.py
|
Python
|
lgpl-2.1
| 482
|
[
"MDTraj"
] |
aff558ba8135f6fa0fbc518ed288d0671d7174e5ceaee116d1b567c9a2eabd4b
|
########################################################################
# File: RequestExecutingAgent.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/03/12 15:36:47
########################################################################
""" :mod: RequestExecutingAgent
===========================
.. module: RequestExecutingAgent
:synopsis: request executing agent
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
request processing agent
"""
__RCSID__ = '$Id$'
# #
# @file RequestExecutingAgent.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/12 15:36:56
# @brief Definition of RequestExecutingAgent class.
# # imports
import time
# # from DIRAC
from DIRAC import gMonitor, S_OK, S_ERROR, gConfig
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.Utilities.ProcessPool import ProcessPool
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.private.RequestTask import RequestTask
# # agent name
AGENT_NAME = "RequestManagement/RequestExecutingAgent"
class AgentConfigError( Exception ):
""" misconfiguration error """
def __init__( self, msg ):
""" ctor
:param str msg: error string
"""
Exception.__init__( self )
self.msg = msg
def __str__( self ):
""" str op """
return self.msg
########################################################################
class RequestExecutingAgent( AgentModule ):
"""
.. class:: RequestExecutingAgent
request processing agent using ProcessPool, Operation handlers and RequestTask
"""
# # process pool
__processPool = None
# # request cache
__requestCache = {}
# # requests/cycle
__requestsPerCycle = 100
# # minimal nb of subprocess running
__minProcess = 2
# # maximal nb of subprocess executed same time
__maxProcess = 4
# # ProcessPool queue size
__queueSize = 20
# # file timeout
__fileTimeout = 300
# # operation timeout
__operationTimeout = 300
# # ProcessTask default timeout in seconds
__taskTimeout = 900
# # ProcessPool finalization timeout
__poolTimeout = 900
# # ProcessPool sleep time
__poolSleep = 5
# # placeholder for RequestClient instance
__requestClient = None
  # # Size of the bulk when using getRequests. If 0, use getRequest
__bulkRequest = 0
def __init__( self, *args, **kwargs ):
""" c'tor """
# # call base class ctor
AgentModule.__init__( self, *args, **kwargs )
# # ProcessPool related stuff
self.__requestsPerCycle = self.am_getOption( "RequestsPerCycle", self.__requestsPerCycle )
self.log.info( "Requests/cycle = %d" % self.__requestsPerCycle )
self.__minProcess = self.am_getOption( "MinProcess", self.__minProcess )
self.log.info( "ProcessPool min process = %d" % self.__minProcess )
self.__maxProcess = self.am_getOption( "MaxProcess", 4 )
self.log.info( "ProcessPool max process = %d" % self.__maxProcess )
self.__queueSize = self.am_getOption( "ProcessPoolQueueSize", self.__queueSize )
self.log.info( "ProcessPool queue size = %d" % self.__queueSize )
self.__poolTimeout = int( self.am_getOption( "ProcessPoolTimeout", self.__poolTimeout ) )
self.log.info( "ProcessPool timeout = %d seconds" % self.__poolTimeout )
self.__poolSleep = int( self.am_getOption( "ProcessPoolSleep", self.__poolSleep ) )
self.log.info( "ProcessPool sleep time = %d seconds" % self.__poolSleep )
self.__taskTimeout = int( self.am_getOption( "ProcessTaskTimeout", self.__taskTimeout ) )
self.log.info( "ProcessTask timeout = %d seconds" % self.__taskTimeout )
self.__bulkRequest = self.am_getOption( "BulkRequest", 0 )
self.log.info( "Bulk request size = %d" % self.__bulkRequest )
# # keep config path and agent name
self.agentName = self.am_getModuleParam( "fullName" )
self.__configPath = PathFinder.getAgentSection( self.agentName )
# # operation handlers over here
opHandlersPath = "%s/%s" % ( self.__configPath, "OperationHandlers" )
opHandlers = gConfig.getSections( opHandlersPath )
if not opHandlers["OK"]:
self.log.error( opHandlers["Message" ] )
raise AgentConfigError( "OperationHandlers section not found in CS under %s" % self.__configPath )
opHandlers = opHandlers["Value"]
self.timeOuts = dict()
# # handlers dict
self.handlersDict = dict()
for opHandler in opHandlers:
opHandlerPath = "%s/%s/Location" % ( opHandlersPath, opHandler )
opLocation = gConfig.getValue( opHandlerPath, "" )
if not opLocation:
self.log.error( "%s not set for %s operation handler" % ( opHandlerPath, opHandler ) )
continue
self.timeOuts[opHandler] = { "PerFile": self.__fileTimeout, "PerOperation": self.__operationTimeout }
opTimeout = gConfig.getValue( "%s/%s/TimeOut" % ( opHandlersPath, opHandler ), 0 )
if opTimeout:
self.timeOuts[opHandler]["PerOperation"] = opTimeout
fileTimeout = gConfig.getValue( "%s/%s/TimeOutPerFile" % ( opHandlersPath, opHandler ), 0 )
if fileTimeout:
self.timeOuts[opHandler]["PerFile"] = fileTimeout
self.handlersDict[opHandler] = opLocation
self.log.info( "Operation handlers:" )
for item in enumerate ( self.handlersDict.items() ):
opHandler = item[1][0]
self.log.info( "[%s] %s: %s (timeout: %d s + %d s per file)" % ( item[0], item[1][0], item[1][1],
self.timeOuts[opHandler]['PerOperation'],
self.timeOuts[opHandler]['PerFile'] ) )
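    # Illustrative CS layout read by the loop above (the handler name, Location
    # value and timeouts below are made up for the example):
    #
    #   <agent section>/OperationHandlers/ForwardDISET/Location = DIRAC/RequestManagementSystem/Agent/RequestOperations/ForwardDISET
    #   <agent section>/OperationHandlers/ForwardDISET/TimeOut = 600
    #   <agent section>/OperationHandlers/ForwardDISET/TimeOutPerFile = 60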
# # common monitor activity
gMonitor.registerActivity( "Iteration", "Agent Loops",
"RequestExecutingAgent", "Loops/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "Processed", "Request Processed",
"RequestExecutingAgent", "Requests/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "Done", "Request Completed",
"RequestExecutingAgent", "Requests/min", gMonitor.OP_SUM )
# # create request dict
self.__requestCache = dict()
self.FTSMode = self.am_getOption( "FTSMode", False )
def processPool( self ):
""" facade for ProcessPool """
if not self.__processPool:
minProcess = max( 1, self.__minProcess )
maxProcess = max( self.__minProcess, self.__maxProcess )
queueSize = abs( self.__queueSize )
self.log.info( "ProcessPool: minProcess = %d maxProcess = %d queueSize = %d" % ( minProcess,
maxProcess,
queueSize ) )
self.__processPool = ProcessPool( minProcess,
maxProcess,
queueSize,
poolCallback = self.resultCallback,
poolExceptionCallback = self.exceptionCallback )
self.__processPool.daemonize()
return self.__processPool
def requestClient( self ):
""" RequestClient getter """
if not self.__requestClient:
self.__requestClient = ReqClient()
return self.__requestClient
def cacheRequest( self, request ):
""" put request into requestCache
:param Request request: Request instance
"""
count = 5
# Wait a bit as there may be a race condition between RequestTask putting back the request and the callback clearing the cache
while request.RequestID in self.__requestCache:
count -= 1
if not count:
self.requestClient().putRequest( request, useFailoverProxy = False, retryMainService = 2 )
return S_ERROR( "Duplicate request, ignore: %s" % request.RequestID )
time.sleep( 1 )
self.__requestCache[ request.RequestID ] = request
return S_OK()
def putRequest( self, requestID, taskResult = None ):
""" put back :requestID: to RequestClient
:param str requestID: request's id
"""
if requestID in self.__requestCache:
request = self.__requestCache.pop( requestID )
if taskResult and taskResult['OK']:
request = taskResult['Value']
reset = self.requestClient().putRequest( request, useFailoverProxy = False, retryMainService = 2 )
if not reset["OK"]:
return S_ERROR( "putRequest: unable to reset request %s: %s" % ( requestID, reset["Message"] ) )
else:
return S_ERROR( 'Not in cache' )
return S_OK()
def putAllRequests( self ):
""" put back all requests without callback called into requestClient
:param self: self reference
"""
self.log.info( "putAllRequests: will put %s back requests" % len( self.__requestCache ) )
for requestID in self.__requestCache.keys():
reset = self.putRequest( requestID )
if not reset["OK"]:
self.log.error( 'Failed to put request', reset["Message"] )
else:
self.log.debug( "putAllRequests: request %s has been put back with its initial state" % requestID )
return S_OK()
def initialize( self ):
""" initialize agent
"""
return S_OK()
def execute( self ):
""" read requests from RequestClient and enqueue them into ProcessPool """
gMonitor.addMark( "Iteration", 1 )
# # requests (and so tasks) counter
taskCounter = 0
while taskCounter < self.__requestsPerCycle:
self.log.debug( "execute: executing %d request in this cycle" % taskCounter )
requestsToExecute = []
if not self.__bulkRequest:
self.log.info( "execute: ask for a single request" )
getRequest = self.requestClient().getRequest()
if not getRequest["OK"]:
self.log.error( "execute: %s" % getRequest["Message"] )
break
if not getRequest["Value"]:
self.log.info( "execute: no more 'Waiting' requests to process" )
break
requestsToExecute = [getRequest["Value"] ]
else:
numberOfRequest = min( self.__bulkRequest, self.__requestsPerCycle - taskCounter )
self.log.info( "execute: ask for %s requests" % numberOfRequest )
getRequests = self.requestClient().getBulkRequests( numberOfRequest )
if not getRequests["OK"]:
self.log.error( "execute: %s" % getRequests["Message"] )
break
if not getRequests["Value"]:
self.log.info( "execute: no more 'Waiting' requests to process" )
break
for rId in getRequests["Value"]["Failed"]:
self.log.error( "execute: %s" % getRequests["Value"]["Failed"][rId] )
requestsToExecute = getRequests["Value"]["Successful"].values()
self.log.info( "execute: will execute %s requests " % len( requestsToExecute ) )
for request in requestsToExecute:
# # set task id
taskID = request.RequestID
# # save current request in cache
self.cacheRequest( request )
# # serialize to JSON
requestJSON = request.toJSON()
self.log.info( "processPool tasks idle = %s working = %s" % ( self.processPool().getNumIdleProcesses(),
self.processPool().getNumWorkingProcesses() ) )
looping = 0
while True:
if not self.processPool().getFreeSlots():
if not looping:
self.log.info( "No free slots available in processPool, will wait %d seconds to proceed" % self.__poolSleep )
time.sleep( self.__poolSleep )
looping += 1
else:
if looping:
self.log.info( "Free slot found after %d seconds" % looping * self.__poolSleep )
looping = 0
self.log.info( "spawning task for request '%s/%s'" % ( request.RequestID, request.RequestName ) )
timeOut = self.getTimeout( request )
enqueue = self.processPool().createAndQueueTask( RequestTask,
kwargs = { "requestJSON" : requestJSON,
"handlersDict" : self.handlersDict,
"csPath" : self.__configPath,
"agentName": self.agentName },
taskID = taskID,
blocking = True,
usePoolCallbacks = True,
timeOut = timeOut )
if not enqueue["OK"]:
self.log.error( enqueue["Message"] )
else:
self.log.debug( "successfully enqueued task '%s'" % taskID )
# # update monitor
gMonitor.addMark( "Processed", 1 )
# # update request counter
taskCounter += 1
# # task created, a little time kick to proceed
time.sleep( 0.1 )
break
# # clean return
return S_OK()
def getTimeout( self, request ):
""" get timeout for request """
timeout = 0
for op in request:
if op.Status not in ( "Waiting", "Scheduled", 'Queued' ):
continue
if op.Type not in self.timeOuts:
timeout += self.__operationTimeout
else:
perOp = self.timeOuts[op.Type].get( "PerOperation", self.__operationTimeout )
perFiles = self.timeOuts[op.Type].get( "PerFile", self.__fileTimeout ) * len( op )
timeout += perOp + perFiles
self.log.info( "estimated timeOut for request (%s/%s) is %s" % ( request.RequestID, request.RequestName, timeout ) )
return timeout
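  # # Worked example (illustrative only, numbers are hypothetical): a request with
  # # two 'Waiting' operations of a type configured with PerOperation = 600 and
  # # PerFile = 300, holding 3 and 5 files respectively, gets
  # #   timeout = ( 600 + 3 * 300 ) + ( 600 + 5 * 300 ) = 3600 seconds
  # # Operation types missing from self.timeOuts only add the agent-wide
  # # OperationTimeout, with no per-file term.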
def finalize( self ):
""" agent finalization """
if self.__processPool:
self.processPool().finalize( timeout = self.__poolTimeout )
self.putAllRequests()
return S_OK()
def resultCallback( self, taskID, taskResult ):
""" definition of request callback function
:param str taskID: Request.RequestID
:param dict taskResult: task result S_OK(Request)/S_ERROR(Message)
"""
# # clean cache
res = self.putRequest( taskID, taskResult )
self.log.info( "callback: %s result is %s(%s), put %s(%s)" % ( taskID,
"S_OK" if taskResult["OK"] else "S_ERROR",
taskResult["Value"].Status if taskResult["OK"] else taskResult["Message"],
"S_OK" if res['OK'] else 'S_ERROR',
'' if res['OK'] else res['Message'] ) )
def exceptionCallback( self, taskID, taskException ):
""" definition of exception callback function
:param str taskID: Request.RequestID
:param Exception taskException: Exception instance
"""
self.log.error( "exceptionCallback: %s was hit by exception %s" % ( taskID, taskException ) )
self.putRequest( taskID )
|
miloszz/DIRAC
|
RequestManagementSystem/Agent/RequestExecutingAgent.py
|
Python
|
gpl-3.0
| 15,238
|
[
"DIRAC"
] |
87d8baee86ec8bf3090f98d72538712b8955765af574938c53699c354290e5f2
|
"""
Generalized linear models currently support estimation using the one-parameter
exponential families.
References
----------
Gill, Jeff. 2000. Generalized Linear Models: A Unified Approach.
SAGE QASS Series.
Green, PJ. 1984. "Iteratively reweighted least squares for maximum
likelihood estimation, and some robust and resistant alternatives."
Journal of the Royal Statistical Society, Series B, 46, 149-192.
Hardin, J.W. and Hilbe, J.M. 2007. "Generalized Linear Models and
Extensions." 2nd ed. Stata Press, College Station, TX.
McCullagh, P. and Nelder, J.A. 1989. "Generalized Linear Models." 2nd ed.
    Chapman & Hall, Boca Raton.
"""
import numpy as np
from . import families
from statsmodels.tools.decorators import (cache_readonly,
cached_data, cached_value)
from statsmodels.tools.validation import float_like
from statsmodels.compat.pandas import Appender
import statsmodels.base.model as base
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
import statsmodels.regression._tools as reg_tools
import warnings
from statsmodels.graphics._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
# need import in module instead of lazily to copy `__doc__`
from . import _prediction as pred
from statsmodels.genmod._prediction import PredictionResults
from statsmodels.tools.sm_exceptions import (PerfectSeparationError,
DomainWarning,
HessianInversionWarning)
from numpy.linalg.linalg import LinAlgError
__all__ = ['GLM', 'PredictionResults']
def _check_convergence(criterion, iteration, atol, rtol):
return np.allclose(criterion[iteration], criterion[iteration + 1],
atol=atol, rtol=rtol)
# Remove after 0.13 when bic changes to bic llf
class _ModuleVariable:
_value = None
@property
def use_bic_llf(self):
return self._value
def set_use_bic_llf(self, val):
if val not in (True, False, None):
raise ValueError("Must be True, False or None")
self._value = bool(val) if val is not None else val
_use_bic_helper = _ModuleVariable()
SET_USE_BIC_LLF = _use_bic_helper.set_use_bic_llf
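# Illustrative sketch, not part of the original module: how the
# _check_convergence helper above decides that the IRLS loop has converged.
# The function name and the numbers are hypothetical and exist only as an
# example of the atol/rtol semantics.
def _example_check_convergence():
    # Successive criterion values (the deviance by default) that agree
    # within atol are reported as converged; values that still differ
    # are not.  criterion[0] is the np.inf placeholder used by _fit_irls.
    criterion = [np.inf, 125.0321, 125.0321 + 5e-9]
    assert _check_convergence(criterion, 1, atol=1e-8, rtol=0.)
    criterion = [np.inf, 125.0321, 124.9]
    assert not _check_convergence(criterion, 1, atol=1e-8, rtol=0.)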
class GLM(base.LikelihoodModel):
__doc__ = """
Generalized Linear Models
GLM inherits from statsmodels.base.model.LikelihoodModel
Parameters
----------
endog : array_like
        Endogenous response variable. This array can be 1d or 2d.
Binomial family models accept a 2d array with two columns. If
supplied, each observation is expected to be [success, failure].
exog : array_like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by default
and should be added by the user (models specified using a formula
include an intercept by default). See `statsmodels.tools.add_constant`.
family : family class instance
The default is Gaussian. To specify the binomial distribution
family = sm.family.Binomial()
Each family can take a link instance as an argument. See
statsmodels.family.family for more information.
offset : array_like or None
An offset to be included in the model. If provided, must be
an array whose length is the number of rows in exog.
exposure : array_like or None
Log(exposure) will be added to the linear prediction in the model.
Exposure is only valid if the log link is used. If provided, it must be
an array with the same length as endog.
freq_weights : array_like
1d array of frequency weights. The default is None. If None is selected
        or a blank value, then the algorithm will replace it with an array of
        1's of the same length as endog.
WARNING: Using weights is not verified yet for all possible options
and results, see Notes.
var_weights : array_like
1d array of variance (analytic) weights. The default is None. If None
        is selected or a blank value, then the algorithm will replace it with
        an array of 1's of the same length as endog.
WARNING: Using weights is not verified yet for all possible options
and results, see Notes.
%(extra_params)s
Attributes
----------
df_model : float
Model degrees of freedom is equal to p - 1, where p is the number
of regressors. Note that the intercept is not reported as a
degree of freedom.
df_resid : float
        Residual degrees of freedom is equal to the number of observations n
minus the number of regressors p.
endog : ndarray
See Notes. Note that `endog` is a reference to the data so that if
data is already an array and it is changed, then `endog` changes
as well.
exposure : array_like
Include ln(exposure) in model with coefficient constrained to 1. Can
only be used if the link is the logarithm function.
exog : ndarray
See Notes. Note that `exog` is a reference to the data so that if
data is already an array and it is changed, then `exog` changes
as well.
freq_weights : ndarray
See Notes. Note that `freq_weights` is a reference to the data so that
if data is already an array and it is changed, then `freq_weights`
changes as well.
var_weights : ndarray
See Notes. Note that `var_weights` is a reference to the data so that
if data is already an array and it is changed, then `var_weights`
changes as well.
iteration : int
The number of iterations that fit has run. Initialized at 0.
family : family class instance
The distribution family of the model. Can be any family in
statsmodels.families. Default is Gaussian.
mu : ndarray
The mean response of the transformed variable. `mu` is the value of
the inverse of the link function at lin_pred, where lin_pred is the
linear predicted value of the WLS fit of the transformed variable.
`mu` is only available after fit is called. See
statsmodels.families.family.fitted of the distribution family for more
information.
n_trials : ndarray
See Notes. Note that `n_trials` is a reference to the data so that if
data is already an array and it is changed, then `n_trials` changes
as well. `n_trials` is the number of binomial trials and only available
with that distribution. See statsmodels.families.Binomial for more
information.
normalized_cov_params : ndarray
The p x p normalized covariance of the design / exogenous data.
This is approximately equal to (X.T X)^(-1)
offset : array_like
Include offset in model with coefficient constrained to 1.
scale : float
The estimate of the scale / dispersion of the model fit. Only
available after fit is called. See GLM.fit and GLM.estimate_scale
for more information.
scaletype : str
The scaling used for fitting the model. This is only available after
fit is called. The default is None. See GLM.fit for more information.
weights : ndarray
The value of the weights after the last iteration of fit. Only
available after fit is called. See statsmodels.families.family for
the specific distribution weighting functions.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.scotland.load(as_pandas=False)
>>> data.exog = sm.add_constant(data.exog)
Instantiate a gamma family model with the default link function.
>>> gamma_model = sm.GLM(data.endog, data.exog,
... family=sm.families.Gamma())
>>> gamma_results = gamma_model.fit()
>>> gamma_results.params
array([-0.01776527, 0.00004962, 0.00203442, -0.00007181, 0.00011185,
-0.00000015, -0.00051868, -0.00000243])
>>> gamma_results.scale
0.0035842831734919055
>>> gamma_results.deviance
0.087388516416999198
>>> gamma_results.pearson_chi2
0.086022796163805704
>>> gamma_results.llf
-83.017202161073527
See Also
--------
statsmodels.genmod.families.family.Family
:ref:`families`
:ref:`links`
Notes
-----
Only the following combinations make sense for family and link:
============= ===== === ===== ====== ======= === ==== ====== ====== ====
Family ident log logit probit cloglog pow opow nbinom loglog logc
============= ===== === ===== ====== ======= === ==== ====== ====== ====
Gaussian x x x x x x x x x
inv Gaussian x x x
binomial x x x x x x x x x
Poisson x x x
neg binomial x x x x
gamma x x x
Tweedie x x x
============= ===== === ===== ====== ======= === ==== ====== ====== ====
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer to are already
arrays and these arrays are changed, endog and exog will change.
statsmodels supports two separate definitions of weights: frequency weights
and variance weights.
Frequency weights produce the same results as repeating observations by the
frequencies (if those are integers). Frequency weights will keep the number
of observations consistent, but the degrees of freedom will change to
reflect the new weights.
Variance weights (referred to in other packages as analytic weights) are
    used when ``endog`` represents an average or mean. This relies on the
    assumption that the inverse variance scales proportionally to the
weight--an observation that is deemed more credible should have less
variance and therefore have more weight. For the ``Poisson`` family--which
assumes that occurrences scale proportionally with time--a natural practice
would be to use the amount of time as the variance weight and set ``endog``
to be a rate (occurrences per period of time). Similarly, using a
compound Poisson family, namely ``Tweedie``, makes a similar assumption
about the rate (or frequency) of occurrences having variance proportional to
time.
Both frequency and variance weights are verified for all basic results with
nonrobust or heteroscedasticity robust ``cov_type``. Other robust
covariance types have not yet been verified, and at least the small sample
correction is currently not based on the correct total frequency count.
Currently, all residuals are not weighted by frequency, although they may
incorporate ``n_trials`` for ``Binomial`` and ``var_weights``
+---------------+----------------------------------+
| Residual Type | Applicable weights |
+===============+==================================+
| Anscombe | ``var_weights`` |
+---------------+----------------------------------+
| Deviance | ``var_weights`` |
+---------------+----------------------------------+
| Pearson | ``var_weights`` and ``n_trials`` |
+---------------+----------------------------------+
    | Response      | ``n_trials``                     |
+---------------+----------------------------------+
| Working | ``n_trials`` |
+---------------+----------------------------------+
WARNING: Loglikelihood and deviance are not valid in models where
scale is equal to 1 (i.e., ``Binomial``, ``NegativeBinomial``, and
``Poisson``). If variance weights are specified, then results such as
``loglike`` and ``deviance`` are based on a quasi-likelihood
interpretation. The loglikelihood is not correctly specified in this case,
    and statistics based on it, such as AIC or likelihood ratio tests, are not
appropriate.
""" % {'extra_params': base._missing_param_doc}
# Maximum number of endogenous variables when using a formula
_formula_max_endog = 2
def __init__(self, endog, exog, family=None, offset=None,
exposure=None, freq_weights=None, var_weights=None,
missing='none', **kwargs):
if (family is not None) and not isinstance(family.link,
tuple(family.safe_links)):
warnings.warn((f"The {type(family.link).__name__} link function "
"does not respect the domain of the "
f"{type(family).__name__} family."),
DomainWarning)
if exposure is not None:
exposure = np.log(exposure)
if offset is not None: # this should probably be done upstream
offset = np.asarray(offset)
if freq_weights is not None:
freq_weights = np.asarray(freq_weights)
if var_weights is not None:
var_weights = np.asarray(var_weights)
self.freq_weights = freq_weights
self.var_weights = var_weights
super(GLM, self).__init__(endog, exog, missing=missing,
offset=offset, exposure=exposure,
freq_weights=freq_weights,
var_weights=var_weights, **kwargs)
self._check_inputs(family, self.offset, self.exposure, self.endog,
self.freq_weights, self.var_weights)
if offset is None:
delattr(self, 'offset')
if exposure is None:
delattr(self, 'exposure')
self.nobs = self.endog.shape[0]
# things to remove_data
self._data_attr.extend(['weights', 'mu', 'freq_weights',
'var_weights', 'iweights', '_offset_exposure',
'n_trials'])
# register kwds for __init__, offset and exposure are added by super
self._init_keys.append('family')
self._setup_binomial()
# internal usage for recreating a model
if 'n_trials' in kwargs:
self.n_trials = kwargs['n_trials']
# Construct a combined offset/exposure term. Note that
# exposure has already been logged if present.
offset_exposure = 0.
if hasattr(self, 'offset'):
offset_exposure = self.offset
if hasattr(self, 'exposure'):
offset_exposure = offset_exposure + self.exposure
self._offset_exposure = offset_exposure
self.scaletype = None
def initialize(self):
"""
Initialize a generalized linear model.
"""
self.df_model = np.linalg.matrix_rank(self.exog) - 1
if (self.freq_weights is not None) and \
(self.freq_weights.shape[0] == self.endog.shape[0]):
self.wnobs = self.freq_weights.sum()
self.df_resid = self.wnobs - self.df_model - 1
else:
self.wnobs = self.exog.shape[0]
self.df_resid = self.exog.shape[0] - self.df_model - 1
def _check_inputs(self, family, offset, exposure, endog, freq_weights,
var_weights):
# Default family is Gaussian
if family is None:
family = families.Gaussian()
self.family = family
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError("exposure can only be used with the log "
"link function")
elif exposure.shape[0] != endog.shape[0]:
raise ValueError("exposure is not the same length as endog")
if offset is not None:
if offset.shape[0] != endog.shape[0]:
raise ValueError("offset is not the same length as endog")
if freq_weights is not None:
if freq_weights.shape[0] != endog.shape[0]:
raise ValueError("freq weights not the same length as endog")
if len(freq_weights.shape) > 1:
raise ValueError("freq weights has too many dimensions")
# internal flag to store whether freq_weights were not None
self._has_freq_weights = (self.freq_weights is not None)
if self.freq_weights is None:
self.freq_weights = np.ones((endog.shape[0]))
# TODO: check do we want to keep None as sentinel for freq_weights
if np.shape(self.freq_weights) == () and self.freq_weights > 1:
self.freq_weights = (self.freq_weights *
np.ones((endog.shape[0])))
if var_weights is not None:
if var_weights.shape[0] != endog.shape[0]:
raise ValueError("var weights not the same length as endog")
if len(var_weights.shape) > 1:
raise ValueError("var weights has too many dimensions")
# internal flag to store whether var_weights were not None
self._has_var_weights = (var_weights is not None)
if var_weights is None:
self.var_weights = np.ones((endog.shape[0]))
# TODO: check do we want to keep None as sentinel for var_weights
self.iweights = np.asarray(self.freq_weights * self.var_weights)
def _get_init_kwds(self):
# this is a temporary fixup because exposure has been transformed
# see #1609, copied from discrete_model.CountModel
kwds = super(GLM, self)._get_init_kwds()
if 'exposure' in kwds and kwds['exposure'] is not None:
kwds['exposure'] = np.exp(kwds['exposure'])
return kwds
def loglike_mu(self, mu, scale=1.):
"""
Evaluate the log-likelihood for a generalized linear model.
"""
scale = float_like(scale, "scale")
return self.family.loglike(self.endog, mu, self.var_weights,
self.freq_weights, scale)
def loglike(self, params, scale=None):
"""
Evaluate the log-likelihood for a generalized linear model.
"""
scale = float_like(scale, "scale", optional=True)
lin_pred = np.dot(self.exog, params) + self._offset_exposure
expval = self.family.link.inverse(lin_pred)
if scale is None:
scale = self.estimate_scale(expval)
llf = self.family.loglike(self.endog, expval, self.var_weights,
self.freq_weights, scale)
return llf
def score_obs(self, params, scale=None):
"""score first derivative of the loglikelihood for each observation.
Parameters
----------
params : ndarray
parameter at which score is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
Returns
-------
score_obs : ndarray, 2d
The first derivative of the loglikelihood function evaluated at
params for each observation.
"""
scale = float_like(scale, "scale", optional=True)
score_factor = self.score_factor(params, scale=scale)
return score_factor[:, None] * self.exog
def score(self, params, scale=None):
"""score, first derivative of the loglikelihood function
Parameters
----------
params : ndarray
parameter at which score is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
Returns
-------
score : ndarray_1d
The first derivative of the loglikelihood function calculated as
the sum of `score_obs`
"""
scale = float_like(scale, "scale", optional=True)
score_factor = self.score_factor(params, scale=scale)
return np.dot(score_factor, self.exog)
def score_factor(self, params, scale=None):
"""weights for score for each observation
This can be considered as score residuals.
Parameters
----------
params : ndarray
parameter at which score is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
Returns
-------
score_factor : ndarray_1d
A 1d weight vector used in the calculation of the score_obs.
The score_obs are obtained by `score_factor[:, None] * exog`
"""
scale = float_like(scale, "scale", optional=True)
mu = self.predict(params)
if scale is None:
scale = self.estimate_scale(mu)
score_factor = (self.endog - mu) / self.family.link.deriv(mu)
score_factor /= self.family.variance(mu)
score_factor *= self.iweights * self.n_trials
if not scale == 1:
score_factor /= scale
return score_factor
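    # Worked special case (illustrative restatement of the factor above):
    # for a Poisson family with the canonical log link, link.deriv(mu) = 1/mu
    # and variance(mu) = mu, so with unit weights and scale = 1 the factor
    # collapses to (endog - mu), i.e. score = exog.T @ (endog - mu).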
def hessian_factor(self, params, scale=None, observed=True):
"""Weights for calculating Hessian
Parameters
----------
params : ndarray
parameter at which Hessian is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
hessian_factor : ndarray, 1d
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`
"""
# calculating eim_factor
mu = self.predict(params)
if scale is None:
scale = self.estimate_scale(mu)
eim_factor = 1 / (self.family.link.deriv(mu)**2 *
self.family.variance(mu))
eim_factor *= self.iweights * self.n_trials
if not observed:
if not scale == 1:
eim_factor /= scale
return eim_factor
# calculating oim_factor, eim_factor is with scale=1
score_factor = self.score_factor(params, scale=1.)
if eim_factor.ndim > 1 or score_factor.ndim > 1:
raise RuntimeError('something wrong')
tmp = self.family.variance(mu) * self.family.link.deriv2(mu)
tmp += self.family.variance.deriv(mu) * self.family.link.deriv(mu)
tmp = score_factor * tmp
        # correct for duplicate iweights in oim_factor and score_factor
tmp /= self.iweights * self.n_trials
oim_factor = eim_factor * (1 + tmp)
if tmp.ndim > 1:
raise RuntimeError('something wrong')
if not scale == 1:
oim_factor /= scale
return oim_factor
def hessian(self, params, scale=None, observed=None):
"""Hessian, second derivative of loglikelihood function
Parameters
----------
params : ndarray
parameter at which Hessian is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned (default).
If false then the expected information matrix is returned.
Returns
-------
hessian : ndarray
Hessian, i.e. observed information, or expected information matrix.
"""
if observed is None:
if getattr(self, '_optim_hessian', None) == 'eim':
observed = False
else:
observed = True
scale = float_like(scale, "scale", optional=True)
tmp = getattr(self, '_tmp_like_exog', np.empty_like(self.exog, dtype=float))
factor = self.hessian_factor(params, scale=scale, observed=observed)
np.multiply(self.exog.T, factor, out=tmp.T)
return -tmp.T.dot(self.exog)
def information(self, params, scale=None):
"""
Fisher information matrix.
"""
scale = float_like(scale, "scale", optional=True)
return self.hessian(params, scale=scale, observed=False)
def _deriv_mean_dparams(self, params):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
"""
lin_pred = self.predict(params, linear=True)
idl = self.family.link.inverse_deriv(lin_pred)
dmat = self.exog * idl[:, None]
return dmat
def _deriv_score_obs_dendog(self, params, scale=None):
"""derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog. This
            is given by `score_factor0[:, None] * exog` where
`score_factor0` is the score_factor without the residual.
"""
scale = float_like(scale, "scale", optional=True)
mu = self.predict(params)
if scale is None:
scale = self.estimate_scale(mu)
score_factor = 1 / self.family.link.deriv(mu)
score_factor /= self.family.variance(mu)
score_factor *= self.iweights * self.n_trials
if not scale == 1:
score_factor /= scale
return score_factor[:, None] * self.exog
def score_test(self, params_constrained, k_constraints=None,
exog_extra=None, observed=True):
"""score test for restrictions or for omitted variables
The covariance matrix for the score is based on the Hessian, i.e.
observed information matrix or optionally on the expected information
        matrix.
Parameters
----------
params_constrained : array_like
estimated parameter of the restricted model. This can be the
            parameter estimate for the current model when testing for omitted
variables.
k_constraints : int or None
Number of constraints that were used in the estimation of params
restricted relative to the number of exog in the model.
This must be provided if no exog_extra are given. If exog_extra is
not None, then k_constraints is assumed to be zero if it is None.
exog_extra : None or array_like
Explanatory variables that are jointly tested for inclusion in the
model, i.e. omitted variables.
observed : bool
If True, then the observed Hessian is used in calculating the
covariance matrix of the score. If false then the expected
information matrix is used.
Returns
-------
chi2_stat : float
chisquare statistic for the score test
p-value : float
P-value of the score test based on the chisquare distribution.
df : int
Degrees of freedom used in the p-value calculation. This is equal
to the number of constraints.
Notes
-----
not yet verified for case with scale not equal to 1.
"""
if exog_extra is None:
if k_constraints is None:
                raise ValueError('if exog_extra is None, then k_constraints '
                                 'needs to be given')
score = self.score(params_constrained)
hessian = self.hessian(params_constrained, observed=observed)
else:
# exog_extra = np.asarray(exog_extra)
if k_constraints is None:
k_constraints = 0
ex = np.column_stack((self.exog, exog_extra))
k_constraints += ex.shape[1] - self.exog.shape[1]
score_factor = self.score_factor(params_constrained)
score = (score_factor[:, None] * ex).sum(0)
hessian_factor = self.hessian_factor(params_constrained,
observed=observed)
hessian = -np.dot(ex.T * hessian_factor, ex)
from scipy import stats
# TODO check sign, why minus?
chi2stat = -score.dot(np.linalg.solve(hessian, score[:, None]))
pval = stats.chi2.sf(chi2stat, k_constraints)
# return a stats results instance instead? Contrast?
return chi2stat, pval, k_constraints
def _update_history(self, tmp_result, mu, history):
"""
Helper method to update history during iterative fit.
"""
history['params'].append(tmp_result.params)
history['deviance'].append(self.family.deviance(self.endog, mu,
self.var_weights,
self.freq_weights,
self.scale))
return history
def estimate_scale(self, mu):
"""
Estimate the dispersion/scale.
        Type of scale can be chosen in the fit method.
Parameters
----------
mu : ndarray
mu is the mean response estimate
Returns
-------
Estimate of scale
Notes
-----
The default scale for Binomial, Poisson and Negative Binomial
families is 1. The default for the other families is Pearson's
Chi-Square estimate.
See Also
--------
statsmodels.genmod.generalized_linear_model.GLM.fit
"""
if not self.scaletype:
if isinstance(self.family, (families.Binomial, families.Poisson,
families.NegativeBinomial)):
return 1.
else:
return self._estimate_x2_scale(mu)
if isinstance(self.scaletype, float):
return np.array(self.scaletype)
if isinstance(self.scaletype, str):
if self.scaletype.lower() == 'x2':
return self._estimate_x2_scale(mu)
elif self.scaletype.lower() == 'dev':
return (self.family.deviance(self.endog, mu, self.var_weights,
self.freq_weights, 1.) /
(self.df_resid))
else:
raise ValueError("Scale %s with type %s not understood" %
(self.scaletype, type(self.scaletype)))
else:
raise ValueError("Scale %s with type %s not understood" %
(self.scaletype, type(self.scaletype)))
def _estimate_x2_scale(self, mu):
resid = np.power(self.endog - mu, 2) * self.iweights
return np.sum(resid / self.family.variance(mu)) / self.df_resid
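    # Worked example (illustrative, hypothetical numbers): with endog = [2, 0],
    # mu = [1.5, 0.5], unit iweights, a variance function V(mu) = mu**2 and
    # df_resid = 1, the Pearson estimate above is
    #   ((2 - 1.5)**2 / 1.5**2 + (0 - 0.5)**2 / 0.5**2) / 1 = 0.111... + 1.0
    #   = 1.111...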
def estimate_tweedie_power(self, mu, method='brentq', low=1.01, high=5.):
"""
Tweedie specific function to estimate scale and the variance parameter.
The variance parameter is also referred to as p, xi, or shape.
Parameters
----------
mu : array_like
Fitted mean response variable
method : str, defaults to 'brentq'
Scipy optimizer used to solve the Pearson equation. Only brentq
currently supported.
low : float, optional
Low end of the bracketing interval [a,b] to be used in the search
for the power. Defaults to 1.01.
high : float, optional
High end of the bracketing interval [a,b] to be used in the search
for the power. Defaults to 5.
Returns
-------
power : float
The estimated shape or power.
"""
if method == 'brentq':
from scipy.optimize import brentq
def psi_p(power, mu):
scale = ((self.iweights * (self.endog - mu) ** 2 /
(mu ** power)).sum() / self.df_resid)
return (np.sum(self.iweights * ((self.endog - mu) ** 2 /
(scale * (mu ** power)) - 1) *
np.log(mu)) / self.freq_weights.sum())
power = brentq(psi_p, low, high, args=(mu))
else:
raise NotImplementedError('Only brentq can currently be used')
return power
def predict(self, params, exog=None, exposure=None, offset=None,
linear=False):
"""
Return predicted values for a design matrix
Parameters
----------
params : array_like
Parameters / coefficients of a GLM.
exog : array_like, optional
            Design / exogenous data. If exog is None, model exog is used.
exposure : array_like, optional
Exposure time values, only can be used with the log link
function. See notes for details.
offset : array_like, optional
Offset values. See notes for details.
linear : bool
If True, returns the linear predicted values. If False,
returns the value of the inverse of the model's link function at
the linear predicted values.
Returns
-------
An array of fitted values
Notes
-----
Any `exposure` and `offset` provided here take precedence over
the `exposure` and `offset` used in the model fit. If `exog`
is passed as an argument here, then any `exposure` and
`offset` values in the fit will be ignored.
Exposure values must be strictly positive.
"""
# Use fit offset if appropriate
if offset is None and exog is None and hasattr(self, 'offset'):
offset = self.offset
elif offset is None:
offset = 0.
if exposure is not None and not isinstance(self.family.link,
families.links.Log):
raise ValueError("exposure can only be used with the log link "
"function")
# Use fit exposure if appropriate
if exposure is None and exog is None and hasattr(self, 'exposure'):
# Already logged
exposure = self.exposure
elif exposure is None:
exposure = 0.
else:
exposure = np.log(np.asarray(exposure))
if exog is None:
exog = self.exog
linpred = np.dot(exog, params) + offset + exposure
if linear:
return linpred
else:
return self.family.fitted(linpred)
def get_distribution(self, params, scale=1., exog=None, exposure=None,
offset=None):
"""
Return a random number generator for the predictive distribution.
Parameters
----------
params : array_like
The model parameters.
scale : scalar
The scale parameter.
exog : array_like
The predictor variable matrix.
Returns
-------
gen
Frozen random number generator object. Use the ``rvs`` method to
generate random values.
Notes
-----
        Due to the behavior of ``scipy.stats.distributions`` objects, the
returned random number generator must be called with ``gen.rvs(n)``
where ``n`` is the number of observations in the data set used
to fit the model. If any other value is used for ``n``, misleading
results will be produced.
"""
scale = float_like(scale, "scale", optional=True)
fit = self.predict(params, exog, exposure, offset, linear=False)
import scipy.stats.distributions as dist
if isinstance(self.family, families.Gaussian):
return dist.norm(loc=fit, scale=np.sqrt(scale))
elif isinstance(self.family, families.Binomial):
return dist.binom(n=1, p=fit)
elif isinstance(self.family, families.Poisson):
return dist.poisson(mu=fit)
elif isinstance(self.family, families.Gamma):
alpha = fit / float(scale)
return dist.gamma(alpha, scale=scale)
else:
raise ValueError("get_distribution not implemented for %s" %
self.family.name)
def _setup_binomial(self):
# this checks what kind of data is given for Binomial.
# family will need a reference to endog if this is to be removed from
# preprocessing
self.n_trials = np.ones((self.endog.shape[0])) # For binomial
if isinstance(self.family, families.Binomial):
tmp = self.family.initialize(self.endog, self.freq_weights)
self.endog = tmp[0]
self.n_trials = tmp[1]
self._init_keys.append('n_trials')
def fit(self, start_params=None, maxiter=100, method='IRLS', tol=1e-8,
scale=None, cov_type='nonrobust', cov_kwds=None, use_t=None,
full_output=True, disp=False, max_start_irls=3, **kwargs):
"""
Fits a generalized linear model for a given family.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is family-specific and is given by the
``family.starting_mu(endog)``. If start_params is given then the
initial mean will be calculated as ``np.dot(exog, start_params)``.
maxiter : int, optional
Default is 100.
method : str
Default is 'IRLS' for iteratively reweighted least squares.
Otherwise gradient optimization is used.
tol : float
Convergence tolerance. Default is 1e-8.
scale : str or float, optional
`scale` can be 'X2', 'dev', or a float
The default value is None, which uses `X2` for Gamma, Gaussian,
and Inverse Gaussian.
`X2` is Pearson's chi-square divided by `df_resid`.
The default is 1 for the Binomial and Poisson families.
`dev` is the deviance divided by df_resid
cov_type : str
The type of parameter estimate covariance matrix to compute.
cov_kwds : dict-like
Extra arguments for calculating the covariance of the parameter
estimates.
use_t : bool
If True, the Student t-distribution is used for inference.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
            Not used if method is IRLS.
disp : bool, optional
Set to True to print convergence messages. Not used if method is
IRLS.
max_start_irls : int
The number of IRLS iterations used to obtain starting
values for gradient optimization. Only relevant if
`method` is set to something other than 'IRLS'.
atol : float, optional
(available with IRLS fits) The absolute tolerance criterion that
must be satisfied. Defaults to ``tol``. Convergence is attained
when: :math:`rtol * prior + atol > abs(current - prior)`
rtol : float, optional
(available with IRLS fits) The relative tolerance criterion that
must be satisfied. Defaults to 0 which means ``rtol`` is not used.
Convergence is attained when:
:math:`rtol * prior + atol > abs(current - prior)`
tol_criterion : str, optional
(available with IRLS fits) Defaults to ``'deviance'``. Can
optionally be ``'params'``.
wls_method : str, optional
(available with IRLS fits) options are 'lstsq', 'pinv' and 'qr'
specifies which linear algebra function to use for the irls
optimization. Default is `lstsq` which uses the same underlying
svd based approach as 'pinv', but is faster during iterations.
'lstsq' and 'pinv' regularize the estimate in singular and
near-singular cases by truncating small singular values based
on `rcond` of the respective numpy.linalg function. 'qr' is
only valid for cases that are not singular nor near-singular.
optim_hessian : {'eim', 'oim'}, optional
(available with scipy optimizer fits) When 'oim'--the default--the
observed Hessian is used in fitting. 'eim' is the expected Hessian.
This may provide more stable fits, but adds assumption that the
Hessian is correctly specified.
Notes
-----
If method is 'IRLS', then an additional keyword 'attach_wls' is
available. This is currently for internal use only and might change
        in future versions. If 'attach_wls' is true, then the final WLS
instance of the IRLS iteration is attached to the results instance
as `results_wls` attribute.
"""
if isinstance(scale, str):
scale = scale.lower()
if scale not in ("x2", "dev"):
raise ValueError(
"scale must be either X2 or dev when a string."
)
elif scale is not None:
# GH-6627
try:
scale = float(scale)
except Exception as exc:
raise type(exc)(
"scale must be a float if given and no a string."
)
self.scaletype = scale
if method.lower() == "irls":
if cov_type.lower() == 'eim':
cov_type = 'nonrobust'
return self._fit_irls(start_params=start_params, maxiter=maxiter,
tol=tol, scale=scale, cov_type=cov_type,
cov_kwds=cov_kwds, use_t=use_t, **kwargs)
else:
self._optim_hessian = kwargs.get('optim_hessian')
self._tmp_like_exog = np.empty_like(self.exog, dtype=float)
fit_ = self._fit_gradient(start_params=start_params,
method=method,
maxiter=maxiter,
tol=tol, scale=scale,
full_output=full_output,
disp=disp, cov_type=cov_type,
cov_kwds=cov_kwds, use_t=use_t,
max_start_irls=max_start_irls,
**kwargs)
del self._optim_hessian
del self._tmp_like_exog
return fit_
def _fit_gradient(self, start_params=None, method="newton",
maxiter=100, tol=1e-8, full_output=True,
disp=True, scale=None, cov_type='nonrobust',
cov_kwds=None, use_t=None, max_start_irls=3,
**kwargs):
"""
Fits a generalized linear model for a given family iteratively
using the scipy gradient optimizers.
"""
# fix scale during optimization, see #4616
scaletype = self.scaletype
self.scaletype = 1.
if (max_start_irls > 0) and (start_params is None):
irls_rslt = self._fit_irls(start_params=start_params,
maxiter=max_start_irls,
tol=tol, scale=1., cov_type='nonrobust',
cov_kwds=None, use_t=None,
**kwargs)
start_params = irls_rslt.params
del irls_rslt
rslt = super(GLM, self).fit(start_params=start_params, tol=tol,
maxiter=maxiter, full_output=full_output,
method=method, disp=disp, **kwargs)
# reset scaletype to original
self.scaletype = scaletype
mu = self.predict(rslt.params)
scale = self.estimate_scale(mu)
if rslt.normalized_cov_params is None:
cov_p = None
else:
cov_p = rslt.normalized_cov_params / scale
if cov_type.lower() == 'eim':
oim = False
cov_type = 'nonrobust'
else:
oim = True
try:
cov_p = np.linalg.inv(-self.hessian(rslt.params, observed=oim)) / scale
except LinAlgError:
warnings.warn('Inverting hessian failed, no bse or cov_params '
'available', HessianInversionWarning)
cov_p = None
results_class = getattr(self, '_results_class', GLMResults)
results_class_wrapper = getattr(self, '_results_class_wrapper', GLMResultsWrapper)
glm_results = results_class(self, rslt.params,
cov_p,
scale,
cov_type=cov_type, cov_kwds=cov_kwds,
use_t=use_t)
# TODO: iteration count is not always available
history = {'iteration': 0}
if full_output:
glm_results.mle_retvals = rslt.mle_retvals
if 'iterations' in rslt.mle_retvals:
history['iteration'] = rslt.mle_retvals['iterations']
glm_results.method = method
glm_results.fit_history = history
return results_class_wrapper(glm_results)
def _fit_irls(self, start_params=None, maxiter=100, tol=1e-8,
scale=None, cov_type='nonrobust', cov_kwds=None,
use_t=None, **kwargs):
"""
Fits a generalized linear model for a given family using
iteratively reweighted least squares (IRLS).
"""
attach_wls = kwargs.pop('attach_wls', False)
atol = kwargs.get('atol')
rtol = kwargs.get('rtol', 0.)
tol_criterion = kwargs.get('tol_criterion', 'deviance')
wls_method = kwargs.get('wls_method', 'lstsq')
atol = tol if atol is None else atol
endog = self.endog
wlsexog = self.exog
if start_params is None:
start_params = np.zeros(self.exog.shape[1])
mu = self.family.starting_mu(self.endog)
lin_pred = self.family.predict(mu)
else:
lin_pred = np.dot(wlsexog, start_params) + self._offset_exposure
mu = self.family.fitted(lin_pred)
self.scale = self.estimate_scale(mu)
dev = self.family.deviance(self.endog, mu, self.var_weights,
self.freq_weights, self.scale)
if np.isnan(dev):
raise ValueError("The first guess on the deviance function "
"returned a nan. This could be a boundary "
" problem and should be reported.")
# first guess on the deviance is assumed to be scaled by 1.
# params are none to start, so they line up with the deviance
history = dict(params=[np.inf, start_params], deviance=[np.inf, dev])
converged = False
criterion = history[tol_criterion]
# This special case is used to get the likelihood for a specific
# params vector.
if maxiter == 0:
mu = self.family.fitted(lin_pred)
self.scale = self.estimate_scale(mu)
wls_results = lm.RegressionResults(self, start_params, None)
iteration = 0
for iteration in range(maxiter):
self.weights = (self.iweights * self.n_trials *
self.family.weights(mu))
wlsendog = (lin_pred + self.family.link.deriv(mu) * (self.endog-mu)
- self._offset_exposure)
wls_mod = reg_tools._MinimalWLS(wlsendog, wlsexog,
self.weights, check_endog=True,
check_weights=True)
wls_results = wls_mod.fit(method=wls_method)
lin_pred = np.dot(self.exog, wls_results.params)
lin_pred += self._offset_exposure
mu = self.family.fitted(lin_pred)
history = self._update_history(wls_results, mu, history)
self.scale = self.estimate_scale(mu)
if endog.squeeze().ndim == 1 and np.allclose(mu - endog, 0):
msg = "Perfect separation detected, results not available"
raise PerfectSeparationError(msg)
converged = _check_convergence(criterion, iteration + 1, atol,
rtol)
if converged:
break
self.mu = mu
if maxiter > 0: # Only if iterative used
wls_method2 = 'pinv' if wls_method == 'lstsq' else wls_method
wls_model = lm.WLS(wlsendog, wlsexog, self.weights)
wls_results = wls_model.fit(method=wls_method2)
glm_results = GLMResults(self, wls_results.params,
wls_results.normalized_cov_params,
self.scale,
cov_type=cov_type, cov_kwds=cov_kwds,
use_t=use_t)
glm_results.method = "IRLS"
glm_results.mle_settings = {}
glm_results.mle_settings['wls_method'] = wls_method
glm_results.mle_settings['optimizer'] = glm_results.method
if (maxiter > 0) and (attach_wls is True):
glm_results.results_wls = wls_results
history['iteration'] = iteration + 1
glm_results.fit_history = history
glm_results.converged = converged
return GLMResultsWrapper(glm_results)
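    # IRLS step sketch (illustrative restatement of the loop above): each
    # iteration forms the working weights and working response
    #   W = iweights * n_trials * family.weights(mu)
    #   z = lin_pred + link.deriv(mu) * (endog - mu) - offset_exposure
    # solves the weighted least squares problem z ~ exog with weights W,
    # recomputes lin_pred = exog @ params + offset_exposure and
    # mu = family.fitted(lin_pred), and stops once the chosen criterion
    # (deviance or params) satisfies _check_convergence within atol/rtol.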
def fit_regularized(self, method="elastic_net", alpha=0.,
start_params=None, refit=False,
opt_method="bfgs", **kwargs):
r"""
        Return a regularized fit to a generalized linear model.
Parameters
----------
method : {'elastic_net'}
Only the `elastic_net` approach is currently implemented.
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
start_params : array_like
Starting values for `params`.
refit : bool
If True, the model is refit using only the variables that
have non-zero coefficients in the regularized fit. The
refitted model is not regularized.
opt_method : string
The method used for numerical optimization.
**kwargs
Additional keyword arguments used when fitting the model.
Returns
-------
GLMResults
An array or a GLMResults object, same type returned by `fit`.
Notes
-----
The penalty is the ``elastic net`` penalty, which is a
combination of L1 and L2 penalties.
The function that is minimized is:
.. math::
-loglike/n + alpha*((1-L1\_wt)*|params|_2^2/2 + L1\_wt*|params|_1)
where :math:`|*|_1` and :math:`|*|_2` are the L1 and L2 norms.
Post-estimation results are based on the same data used to
select variables, hence may be subject to overfitting biases.
The elastic_net method uses the following keyword arguments:
maxiter : int
Maximum number of iterations
L1_wt : float
Must be in [0, 1]. The L1 penalty has weight L1_wt and the
L2 penalty has weight 1 - L1_wt.
cnvrg_tol : float
Convergence threshold for maximum parameter change after
one sweep through all coefficients.
zero_tol : float
Coefficients below this threshold are treated as zero.
"""
if kwargs.get("L1_wt", 1) == 0:
return self._fit_ridge(alpha, start_params, opt_method)
from statsmodels.base.elastic_net import fit_elasticnet
if method != "elastic_net":
raise ValueError("method for fit_regularied must be elastic_net")
defaults = {"maxiter": 50, "L1_wt": 1, "cnvrg_tol": 1e-10,
"zero_tol": 1e-10}
defaults.update(kwargs)
result = fit_elasticnet(self, method=method,
alpha=alpha,
start_params=start_params,
refit=refit,
**defaults)
self.mu = self.predict(result.params)
self.scale = self.estimate_scale(self.mu)
if not result.converged:
warnings.warn("Elastic net fitting did not converge")
return result
def _fit_ridge(self, alpha, start_params, method):
if start_params is None:
start_params = np.zeros(self.exog.shape[1])
def fun(x):
return -(self.loglike(x) / self.nobs - np.sum(alpha * x**2) / 2)
def grad(x):
return -(self.score(x) / self.nobs - alpha * x)
from scipy.optimize import minimize
from statsmodels.base.elastic_net import (RegularizedResults,
RegularizedResultsWrapper)
mr = minimize(fun, start_params, jac=grad, method=method)
params = mr.x
if not mr.success:
import warnings
ngrad = np.sqrt(np.sum(mr.jac**2))
msg = "GLM ridge optimization may have failed, |grad|=%f" % ngrad
warnings.warn(msg)
results = RegularizedResults(self, params)
results = RegularizedResultsWrapper(results)
return results
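    # Objective restated (illustrative): _fit_ridge minimizes
    #   f(params) = -loglike(params) / nobs + np.sum(alpha * params**2) / 2
    # with gradient -score(params) / nobs + alpha * params, handed to
    # scipy.optimize.minimize using the caller-supplied method
    # (opt_method, 'bfgs' by default in fit_regularized).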
def fit_constrained(self, constraints, start_params=None, **fit_kwds):
"""fit the model subject to linear equality constraints
The constraints are of the form `R params = q`
where R is the constraint_matrix and q is the vector of
constraint_values.
The estimation creates a new model with transformed design matrix,
exog, and converts the results back to the original parameterization.
Parameters
----------
constraints : formula expression or tuple
If it is a tuple, then the constraint needs to be given by two
arrays (constraint_matrix, constraint_value), i.e. (R, q).
Otherwise, the constraints can be given as strings or list of
strings.
see t_test for details
start_params : None or array_like
starting values for the optimization. `start_params` needs to be
given in the original parameter space and are internally
transformed.
**fit_kwds : keyword arguments
fit_kwds are used in the optimization of the transformed model.
Returns
-------
results : Results instance
"""
from patsy import DesignInfo
from statsmodels.base._constraints import (fit_constrained,
LinearConstraints)
# same pattern as in base.LikelihoodModel.t_test
lc = DesignInfo(self.exog_names).linear_constraint(constraints)
R, q = lc.coefs, lc.constants
        # TODO: add start_params option, need access to transformation
# fit_constrained needs to do the transformation
params, cov, res_constr = fit_constrained(self, R, q,
start_params=start_params,
fit_kwds=fit_kwds)
# create dummy results Instance, TODO: wire up properly
res = self.fit(start_params=params, maxiter=0) # we get a wrapper back
res._results.params = params
res._results.cov_params_default = cov
cov_type = fit_kwds.get('cov_type', 'nonrobust')
if cov_type != 'nonrobust':
res._results.normalized_cov_params = cov / res_constr.scale
else:
res._results.normalized_cov_params = None
res._results.scale = res_constr.scale
k_constr = len(q)
res._results.df_resid += k_constr
res._results.df_model -= k_constr
res._results.constraints = LinearConstraints.from_patsy(lc)
res._results.k_constr = k_constr
res._results.results_constrained = res_constr
return res
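# Hedged usage sketch (illustrative only, not part of the original module):
# fitting a GLM subject to a linear equality constraint via fit_constrained.
# The data names y and X and the exog name 'x1' are hypothetical.
def _example_fit_constrained(y, X):
    # Constraint strings are parsed against exog_names (see t_test), so a
    # tuple (R, q) with R params = q is accepted as well.
    model = GLM(y, X, family=families.Poisson())
    res = model.fit_constrained("x1 = 0")
    return res.params, res.constraints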
class GLMResults(base.LikelihoodModelResults):
"""
Class to contain GLM results.
GLMResults inherits from statsmodels.LikelihoodModelResults
Attributes
----------
df_model : float
See GLM.df_model
df_resid : float
See GLM.df_resid
fit_history : dict
Contains information about the iterations. Its keys are `iterations`,
`deviance` and `params`.
model : class instance
Pointer to GLM model instance that called fit.
nobs : float
The number of observations n.
normalized_cov_params : ndarray
See GLM docstring
params : ndarray
The coefficients of the fitted model. Note that interpretation
of the coefficients often depends on the distribution family and the
data.
pvalues : ndarray
The two-tailed p-values for the parameters.
scale : float
The estimate of the scale / dispersion for the model fit.
See GLM.fit and GLM.estimate_scale for more information.
stand_errors : ndarray
The standard errors of the fitted GLM. #TODO still named bse
See Also
--------
statsmodels.base.model.LikelihoodModelResults
"""
def __init__(self, model, params, normalized_cov_params, scale,
cov_type='nonrobust', cov_kwds=None, use_t=None):
super(GLMResults, self).__init__(
model,
params,
normalized_cov_params=normalized_cov_params,
scale=scale)
self.family = model.family
self._endog = model.endog
self.nobs = model.endog.shape[0]
self._freq_weights = model.freq_weights
self._var_weights = model.var_weights
self._iweights = model.iweights
if isinstance(self.family, families.Binomial):
self._n_trials = self.model.n_trials
else:
self._n_trials = 1
self.df_resid = model.df_resid
self.df_model = model.df_model
self._cache = {}
# are these intermediate results needed or can we just
# call the model's attributes?
# for remove data and pickle without large arrays
self._data_attr.extend(['results_constrained', '_freq_weights',
'_var_weights', '_iweights'])
self._data_in_cache.extend(['null', 'mu'])
self._data_attr_model = getattr(self, '_data_attr_model', [])
self._data_attr_model.append('mu')
# robust covariance
from statsmodels.base.covtype import get_robustcov_results
if use_t is None:
self.use_t = False # TODO: class default
else:
self.use_t = use_t
# temporary warning
ct = (cov_type == 'nonrobust') or (cov_type.upper().startswith('HC'))
if self.model._has_freq_weights and not ct:
import warnings
from statsmodels.tools.sm_exceptions import SpecificationWarning
warnings.warn('cov_type not fully supported with freq_weights',
SpecificationWarning)
if self.model._has_var_weights and not ct:
import warnings
from statsmodels.tools.sm_exceptions import SpecificationWarning
warnings.warn('cov_type not fully supported with var_weights',
SpecificationWarning)
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {'description': 'Standard Errors assume that the' +
' covariance matrix of the errors is correctly ' +
'specified.'}
else:
if cov_kwds is None:
cov_kwds = {}
get_robustcov_results(self, cov_type=cov_type, use_self=True,
use_t=use_t, **cov_kwds)
@cached_data
def resid_response(self):
"""
Response residuals. The response residuals are defined as
`endog` - `fittedvalues`
"""
return self._n_trials * (self._endog-self.mu)
@cached_data
def resid_pearson(self):
"""
Pearson residuals. The Pearson residuals are defined as
(`endog` - `mu`)/sqrt(VAR(`mu`)) where VAR is the distribution
specific variance function. See statsmodels.families.family and
statsmodels.families.varfuncs for more information.
"""
return (np.sqrt(self._n_trials) * (self._endog-self.mu) *
np.sqrt(self._var_weights) /
np.sqrt(self.family.variance(self.mu)))
@cached_data
def resid_working(self):
"""
Working residuals. The working residuals are defined as
`resid_response`/link'(`mu`). See statsmodels.family.links for the
derivatives of the link functions. They are defined analytically.
"""
        # Isn't self.resid_response already adjusted by _n_trials?
val = (self.resid_response * self.family.link.deriv(self.mu))
val *= self._n_trials
return val
@cached_data
def resid_anscombe(self):
"""
Anscombe residuals. See statsmodels.families.family for distribution-
specific Anscombe residuals. Currently, the unscaled residuals are
provided. In a future version, the scaled residuals will be provided.
"""
import warnings
warnings.warn('Anscombe residuals currently unscaled. After the 0.12 '
'release, they will be scaled.', category=FutureWarning)
return self.family.resid_anscombe(self._endog, self.fittedvalues,
var_weights=self._var_weights,
scale=1.)
@cached_data
def resid_anscombe_scaled(self):
"""
Scaled Anscombe residuals. See statsmodels.families.family for
distribution-specific Anscombe residuals.
"""
return self.family.resid_anscombe(self._endog, self.fittedvalues,
var_weights=self._var_weights,
scale=self.scale)
@cached_data
def resid_anscombe_unscaled(self):
"""
Unscaled Anscombe residuals. See statsmodels.families.family for
distribution-specific Anscombe residuals.
"""
return self.family.resid_anscombe(self._endog, self.fittedvalues,
var_weights=self._var_weights,
scale=1.)
@cached_data
def resid_deviance(self):
"""
Deviance residuals. See statsmodels.families.family for distribution-
specific deviance residuals.
"""
dev = self.family.resid_dev(self._endog, self.fittedvalues,
var_weights=self._var_weights,
scale=1.)
return dev
@cached_value
def pearson_chi2(self):
"""
Pearson's Chi-Squared statistic is defined as the sum of the squares
of the Pearson residuals.
"""
chisq = (self._endog - self.mu)**2 / self.family.variance(self.mu)
chisq *= self._iweights * self._n_trials
chisqsum = np.sum(chisq)
return chisqsum
@cached_data
def fittedvalues(self):
"""
The estimated mean response.
This is the value of the inverse of the link function at
lin_pred, where lin_pred is the linear predicted value
obtained by multiplying the design matrix by the coefficient
vector.
"""
return self.mu
@cached_data
def mu(self):
"""
See GLM docstring.
"""
return self.model.predict(self.params)
@cache_readonly
def null(self):
"""
Fitted values of the null model
"""
endog = self._endog
model = self.model
exog = np.ones((len(endog), 1))
kwargs = model._get_init_kwds()
kwargs.pop('family')
if hasattr(self.model, '_offset_exposure'):
return GLM(endog, exog, family=self.family,
**kwargs).fit().fittedvalues
else:
# correct if fitted is identical across observations
wls_model = lm.WLS(endog, exog,
weights=self._iweights * self._n_trials)
return wls_model.fit().fittedvalues
@cache_readonly
def deviance(self):
"""
See statsmodels.families.family for the distribution-specific deviance
functions.
"""
return self.family.deviance(self._endog, self.mu, self._var_weights,
self._freq_weights)
@cache_readonly
def null_deviance(self):
"""The value of the deviance function for the model fit with a constant
as the only regressor."""
return self.family.deviance(self._endog, self.null, self._var_weights,
self._freq_weights)
@cache_readonly
def llnull(self):
"""
Log-likelihood of the model fit with a constant as the only regressor
"""
return self.family.loglike(self._endog, self.null,
var_weights=self._var_weights,
freq_weights=self._freq_weights,
scale=self.scale)
def llf_scaled(self, scale=None):
"""
Return the log-likelihood at the given scale, using the
estimated scale if the provided scale is None. In the Gaussian
case with linear link, the concentrated log-likelihood is
returned.
"""
_modelfamily = self.family
if scale is None:
if (isinstance(self.family, families.Gaussian) and
isinstance(self.family.link, families.links.Power) and
(self.family.link.power == 1.)):
# Scale for the concentrated Gaussian log likelihood
# (profile log likelihood with the scale parameter
# profiled out).
scale = (np.power(self._endog - self.mu, 2) * self._iweights).sum()
scale /= self.model.wnobs
else:
scale = self.scale
val = _modelfamily.loglike(self._endog, self.mu,
var_weights=self._var_weights,
freq_weights=self._freq_weights,
scale=scale)
return val
@cached_value
def llf(self):
"""
Value of the loglikelihood function evaluated at params.
See statsmodels.families.family for distribution-specific
loglikelihoods. The result uses the concentrated
log-likelihood if the family is Gaussian and the link is linear,
otherwise it uses the non-concentrated log-likelihood evaluated
at the estimated scale.
"""
return self.llf_scaled()
@cached_value
def aic(self):
"""
Akaike Information Criterion
-2 * `llf` + 2 * (`df_model` + 1)
"""
return -2 * self.llf + 2 * (self.df_model + 1)
@property
def bic(self):
"""
Bayes Information Criterion
`deviance` - `df_resid` * log(`nobs`)
.. warning::
The current definition is based on the deviance rather than the
log-likelihood. This is not consistent with the AIC definition,
and after 0.13 both will make use of the log-likelihood definition.
Notes
-----
The log-likelihood version is defined
-2 * `llf` + (`df_model` + 1)*log(n)
"""
if _use_bic_helper.use_bic_llf not in (True, False):
warnings.warn(
"The bic value is computed using the deviance formula. After "
"0.13 this will change to the log-likelihood based formula. "
"This change has no impact on the relative rank of models "
"compared using BIC. You can directly access the "
"log-likelihood version using the `bic_llf` attribute. You "
"can suppress this message by calling "
"statsmodels.genmod.generalized_linear_model.SET_USE_BIC_LLF "
"with True to get the LLF-based version now or False to retain"
"the deviance version.",
FutureWarning
)
if bool(_use_bic_helper.use_bic_llf):
return self.bic_llf
return self.bic_deviance
@cached_value
def bic_deviance(self):
"""
Bayes Information Criterion
Based on the deviance,
`deviance` - `df_resid` * log(`nobs`)
"""
return (self.deviance -
(self.model.wnobs - self.df_model - 1) *
np.log(self.model.wnobs))
@cached_value
def bic_llf(self):
"""
Bayes Information Criterion
Based on the log-likelihood,
-2 * `llf` + log(n) * (`df_model` + 1)
"""
return -2*self.llf + (self.df_model+1)*np.log(
self.df_model+self.df_resid+1
)
def info_criteria(self, crit, scale=None):
"""Return an information criterion for the model.
Parameters
----------
crit : string
One of 'aic', 'bic', or 'qaic'.
scale : float
The scale parameter estimated using the parent model,
used only for qaic.
Returns the given information criterion value.
Notes
-----
The quasi-Akaike Information criterion (qaic) is -2 *
`llf`/`scale` + 2 * (`df_model` + 1). It may not give
meaningful results except for Poisson and related models.
The QAIC (crit='qaic') must be evaluated with a provided
scale parameter. Two QAIC values are only comparable if they
are calculated using the same scale parameter. The scale
parameter should be estimated using the largest model among
all models being compared.
References
----------
Burnham KP, Anderson DR (2002). Model Selection and Multimodel
Inference; Springer New York.
"""
crit = crit.lower()
if crit == "aic":
return self.aic
elif crit == "bic":
return self.bic
elif crit == "qaic":
f = self.model.family
fl = (families.Poisson, families.NegativeBinomial,
families.Binomial)
if not isinstance(f, fl):
msg = "QAIC is only valid for Binomial, Poisson and "
msg += "Negative Binomial families."
warnings.warn(msg)
llf = self.llf_scaled(scale=1)
return -2 * llf/scale + 2 * (self.df_model + 1)
@Appender(pred.get_prediction_glm.__doc__)
def get_prediction(self, exog=None, exposure=None, offset=None,
transform=True, linear=False,
row_labels=None):
import statsmodels.regression._prediction as linpred
pred_kwds = {'exposure': exposure, 'offset': offset, 'linear': True}
# two calls to get_prediction duplicate exog generation when patsy formulas are used
res_linpred = linpred.get_prediction(self, exog=exog,
transform=transform,
row_labels=row_labels,
pred_kwds=pred_kwds)
pred_kwds['linear'] = False
res = pred.get_prediction_glm(self, exog=exog, transform=transform,
row_labels=row_labels,
linpred=res_linpred,
link=self.model.family.link,
pred_kwds=pred_kwds)
return res
def get_hat_matrix_diag(self, observed=True):
"""
Compute the diagonal of the hat matrix
Parameters
----------
observed : bool
If true, then observed hessian is used in the hat matrix
computation. If false, then the expected hessian is used.
In the case of a canonical link function both are the same.
Returns
-------
hat_matrix_diag : ndarray
The diagonal of the hat matrix computed from the observed
or expected hessian.
"""
weights = self.model.hessian_factor(self.params, observed=observed)
wexog = np.sqrt(weights)[:, None] * self.model.exog
hd = (wexog * np.linalg.pinv(wexog).T).sum(1)
return hd
def get_influence(self, observed=True):
"""
Get an instance of GLMInfluence with influence and outlier measures
Parameters
----------
observed : bool
If true, then observed hessian is used in the hat matrix
computation. If false, then the expected hessian is used.
In the case of a canonical link function both are the same.
Returns
-------
infl : GLMInfluence instance
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.GLMInfluence
"""
from statsmodels.stats.outliers_influence import GLMInfluence
weights = self.model.hessian_factor(self.params, observed=observed)
weights_sqrt = np.sqrt(weights)
wexog = weights_sqrt[:, None] * self.model.exog
wendog = weights_sqrt * self.model.endog
# using get_hat_matrix_diag has duplicated computation
hat_matrix_diag = self.get_hat_matrix_diag(observed=observed)
infl = GLMInfluence(self, endog=wendog, exog=wexog,
resid=self.resid_pearson,
hat_matrix_diag=hat_matrix_diag)
return infl
@Appender(base.LikelihoodModelResults.remove_data.__doc__)
def remove_data(self):
# GLM has alias/reference in result instance
self._data_attr.extend([i for i in self.model._data_attr
if '_data.' not in i])
super(self.__class__, self).remove_data()
# TODO: what are these in results?
self._endog = None
self._freq_weights = None
self._var_weights = None
self._iweights = None
self._n_trials = None
@Appender(_plot_added_variable_doc % {'extra_params_doc': ''})
def plot_added_variable(self, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None,
ax=None):
from statsmodels.graphics.regressionplots import plot_added_variable
fig = plot_added_variable(self, focus_exog,
resid_type=resid_type,
use_glm_weights=use_glm_weights,
fit_kwargs=fit_kwargs, ax=ax)
return fig
@Appender(_plot_partial_residuals_doc % {'extra_params_doc': ''})
def plot_partial_residuals(self, focus_exog, ax=None):
from statsmodels.graphics.regressionplots import plot_partial_residuals
return plot_partial_residuals(self, focus_exog, ax=ax)
@Appender(_plot_ceres_residuals_doc % {'extra_params_doc': ''})
def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,
ax=None):
from statsmodels.graphics.regressionplots import plot_ceres_residuals
return plot_ceres_residuals(self, focus_exog, frac,
cond_means=cond_means, ax=ax)
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the Regression Results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
Names for the exogenous variables, default is `var_##` for ## in
the number of regressors. Must match the number of parameters in
the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Model Family:', [self.family.__class__.__name__]),
('Link Function:', [self.family.link.__class__.__name__]),
('Method:', [self.method]),
('Date:', None),
('Time:', None),
('No. Iterations:',
["%d" % self.fit_history['iteration']]),
]
top_right = [('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None),
('Scale:', ["%#8.5g" % self.scale]),
('Log-Likelihood:', None),
('Deviance:', ["%#8.5g" % self.deviance]),
('Pearson chi2:', ["%#6.3g" % self.pearson_chi2])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
if title is None:
title = "Generalized Linear Model Regression Results"
# create summary tables
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
if hasattr(self, 'constraints'):
smry.add_extra_txt(['Model has been estimated subject to linear '
'equality constraints.'])
return smry
def summary2(self, yname=None, xname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental summary for regression Results
Parameters
----------
yname : str
Name of the dependent variable (optional)
xname : list[str], optional
Names for the exogenous variables, default is `var_##` for ## in
the number of regressors. Must match the number of parameters in
the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format : str
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results
"""
self.method = 'IRLS'
from statsmodels.iolib import summary2
smry = summary2.Summary()
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
if hasattr(self, 'constraints'):
smry.add_text('Model has been estimated subject to linear '
'equality constraints.')
return smry
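# Illustrative check, not part of the statsmodels source: for an unweighted
# fit (all freq/var weights equal to 1), the cached residual attributes above
# tie together -- the sum of squared Pearson residuals reproduces
# ``pearson_chi2`` and the sum of squared deviance residuals reproduces
# ``deviance``.  ``_res`` is a hypothetical name for any fitted GLMResults
# instance, used only for this sketch.
def _residual_identities_sketch(_res):
    import numpy as np
    assert np.allclose((_res.resid_pearson ** 2).sum(), _res.pearson_chi2)
    assert np.allclose((_res.resid_deviance ** 2).sum(), _res.deviance)
    return True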
class GLMResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {
'resid_anscombe': 'rows',
'resid_deviance': 'rows',
'resid_pearson': 'rows',
'resid_response': 'rows',
'resid_working': 'rows'
}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(GLMResultsWrapper, GLMResults)
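# Hypothetical illustration, not taken from the statsmodels source: the QAIC
# convention documented in ``info_criteria`` above -- estimate the
# overdispersion scale once, from the largest model, and reuse that scale when
# scoring smaller nested models.  Data and names here are made up for the
# sketch only.
def _qaic_comparison_sketch():
    import numpy as np
    rng = np.random.default_rng(0)
    x = rng.normal(size=(200, 3))
    y = rng.poisson(np.exp(0.3 * x[:, 0]))
    exog_full = np.column_stack([np.ones(len(y)), x])
    exog_small = exog_full[:, :2]
    big = GLM(y, exog_full, family=families.Poisson()).fit()
    small = GLM(y, exog_small, family=families.Poisson()).fit()
    # Overdispersion scale from the largest model (Pearson chi2 / df_resid).
    scale = big.pearson_chi2 / big.df_resid
    return (small.info_criteria("qaic", scale=scale),
            big.info_criteria("qaic", scale=scale))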
if __name__ == "__main__":
import statsmodels.api as sm
data = sm.datasets.longley.load(as_pandas=False)
# data.exog = add_constant(data.exog)
GLMmod = GLM(data.endog, data.exog).fit()
GLMT = GLMmod.summary()
# GLMT[0].extend_right(GLMT[1])
# print(GLMT[0])
# print(GLMT[2])
GLMTp = GLMmod.summary(title='Test GLM')
"""
From Stata
. webuse beetle
. glm r i.beetle ldose, family(binomial n) link(cloglog)
Iteration 0: log likelihood = -79.012269
Iteration 1: log likelihood = -76.94951
Iteration 2: log likelihood = -76.945645
Iteration 3: log likelihood = -76.945645
Generalized linear models No. of obs = 24
Optimization : ML Residual df = 20
Scale parameter = 1
Deviance = 73.76505595 (1/df) Deviance = 3.688253
Pearson = 71.8901173 (1/df) Pearson = 3.594506
Variance function: V(u) = u*(1-u/n) [Binomial]
Link function : g(u) = ln(-ln(1-u/n)) [Complementary log-log]
AIC = 6.74547
Log likelihood = -76.94564525 BIC = 10.20398
------------------------------------------------------------------------------
| OIM
r | Coef. Std. Err. z P>|z| [95% Conf. Interval]
-------------+----------------------------------------------------------------
beetle |
2 | -.0910396 .1076132 -0.85 0.398 -.3019576 .1198783
3 | -1.836058 .1307125 -14.05 0.000 -2.09225 -1.579867
|
ldose | 19.41558 .9954265 19.50 0.000 17.46458 21.36658
_cons | -34.84602 1.79333 -19.43 0.000 -38.36089 -31.33116
------------------------------------------------------------------------------
"""
|
jseabold/statsmodels
|
statsmodels/genmod/generalized_linear_model.py
|
Python
|
bsd-3-clause
| 84,593
|
[
"Gaussian"
] |
5ab3815e483dd58b2cb7dbd69540b4dabb9734064fc12742999440b72f21d344
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, sys, glob, string
import zipfile
from datetime import date
try:
import json
except:
import simplejson as json
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
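# Illustrative example (not part of the original packager script): replace_vars
# expands $(KEY) tokens in a value using other entries from the parsed
# xcconfig dictionary.  The paths below are placeholders for the sketch.
def _example_replace_vars():
    config = {'TITANIUM_SDK_VERSION': '1.8.2',
              'TITANIUM_SDK': '/Library/Titanium/mobilesdk/osx/$(TITANIUM_SDK_VERSION)'}
    # expands to '/Library/Titanium/mobilesdk/osx/1.8.2'
    return replace_vars(config, config['TITANIUM_SDK'])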
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
sdk = find_sdk(config)
support_dir = os.path.join(sdk,'module','support')
sys.path.append(support_dir)
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file: html})
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','com.plainprograms.streamingmetadata.js')
if not os.path.exists(js_file): return
sdk = find_sdk(config)
iphone_dir = os.path.join(sdk,'iphone')
sys.path.insert(0,iphone_dir)
from compiler import Compiler
path = os.path.basename(js_file)
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
metadata = compiler.make_function_from_file(path,js_file)
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
method = metadata['method']
eq = path.replace('.','_')
method = ' return %s;' % method
f = os.path.join(cwd,'Classes','ComPlainprogramsStreamingmetadataModuleAssets.m')
c = open(f).read()
idx = c.find('return ')
before = c[0:idx]
after = """
}
@end
"""
newc = before + method + after
if newc!=c:
x = open(f,'w')
x.write(newc)
x.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
if not os.path.exists(path): die("missing %s" % path)
f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
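# Example of the colon-delimited manifest format validate_manifest expects
# (illustrative only; the values below are placeholders, not taken from this
# module):
#
#   name: mymodule
#   version: 1.0
#   moduleid: com.example.mymodule
#   description: My module
#   copyright: Copyright (c) 2012 by Your Company
#   license: Specify your license
#   platform: iphone
#   minsdk: 1.8.0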
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README','com.plainprograms.streamingmetadata.js']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[]):
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e)==2 and e[1]=='.pyc':continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
for dn in ('assets','example','platform'):
if os.path.exists(dn):
zip_dir(zf,dn,'%s/%s' % (modulepath,dn),['README'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
|
plainprogrammer/timobile-streaming_metadata
|
build.py
|
Python
|
mit
| 6,493
|
[
"VisIt"
] |
5eaa2ceafce4be6b95725ca0686d448038b5209706ae704188bffdbc908d44f0
|
#!/usr/bin/env python3
"""
weatherBot tests
Copyright 2015-2019 Brian Mitchell under the MIT license
See the GitHub repository: https://github.com/BrianMitchL/weatherBot
"""
import configparser
import datetime
import logging
import os
import pickle
import sys
import unittest
import forecastio
import pytz
import tweepy
import yaml
from testfixtures import LogCapture
from testfixtures import replace
import keys
import models
import utils
import weatherBot
from test_helpers import mocked_forecastio_load_forecast
from test_helpers import mocked_forecastio_load_forecast_error
from test_helpers import mocked_get_tweepy_api
from test_helpers import mocked_tweepy_o_auth_handler
from test_helpers import mocked_requests_get
class TestUtils(unittest.TestCase):
def test_centerpoint(self):
"""Testing finding a centerpoint from a bounding box of locations"""
box = [[-93.207783, 44.89076], [-93.003514, 44.89076], [-93.003514, 44.992279], [-93.207783, 44.992279]]
average = utils.centerpoint(box)
self.assertEqual(average[0], 44.9415195)
self.assertEqual(average[1], -93.1056485)
def test_get_wind_direction(self):
"""Testing if wind direction conversions are successful"""
self.assertEqual(utils.get_wind_direction(0), 'N')
self.assertEqual(utils.get_wind_direction(338), 'N')
self.assertEqual(utils.get_wind_direction(65), 'NE')
self.assertEqual(utils.get_wind_direction(110), 'E')
self.assertEqual(utils.get_wind_direction(150), 'SE')
self.assertEqual(utils.get_wind_direction(200), 'S')
self.assertEqual(utils.get_wind_direction(240), 'SW')
self.assertEqual(utils.get_wind_direction(290), 'W')
self.assertEqual(utils.get_wind_direction(330), 'NW')
self.assertEqual(utils.get_wind_direction(400), 'N')
self.assertEqual(utils.get_wind_direction(-4), 'N')
self.assertEqual(utils.get_wind_direction('five'), '')
def test_get_units(self):
"""Testing getting units from a country/unit identifier"""
self.assertEqual(utils.get_units('us')['unit'], 'us')
self.assertEqual(utils.get_units('us')['nearestStormDistance'], 'mph')
self.assertEqual(utils.get_units('us')['precipIntensity'], 'in/h')
self.assertEqual(utils.get_units('us')['precipIntensityMax'], 'in/h')
self.assertEqual(utils.get_units('us')['precipAccumulation'], 'in')
self.assertEqual(utils.get_units('us')['temperature'], 'F')
self.assertEqual(utils.get_units('us')['temperatureMin'], 'F')
self.assertEqual(utils.get_units('us')['temperatureMax'], 'F')
self.assertEqual(utils.get_units('us')['apparentTemperature'], 'F')
self.assertEqual(utils.get_units('us')['dewPoint'], 'F')
self.assertEqual(utils.get_units('us')['windSpeed'], 'mph')
self.assertEqual(utils.get_units('us')['pressure'], 'mb')
self.assertEqual(utils.get_units('us')['visibility'], 'mi')
self.assertEqual(utils.get_units('ca')['unit'], 'ca')
self.assertEqual(utils.get_units('ca')['nearestStormDistance'], 'km')
self.assertEqual(utils.get_units('ca')['precipIntensity'], 'mm/h')
self.assertEqual(utils.get_units('ca')['precipIntensityMax'], 'mm/h')
self.assertEqual(utils.get_units('ca')['precipAccumulation'], 'cm')
self.assertEqual(utils.get_units('ca')['temperature'], 'C')
self.assertEqual(utils.get_units('ca')['temperatureMin'], 'C')
self.assertEqual(utils.get_units('ca')['temperatureMax'], 'C')
self.assertEqual(utils.get_units('ca')['apparentTemperature'], 'C')
self.assertEqual(utils.get_units('ca')['dewPoint'], 'C')
self.assertEqual(utils.get_units('ca')['windSpeed'], 'km/h')
self.assertEqual(utils.get_units('ca')['pressure'], 'hPa')
self.assertEqual(utils.get_units('ca')['visibility'], 'km')
self.assertEqual(utils.get_units('uk2')['unit'], 'uk2')
self.assertEqual(utils.get_units('uk2')['nearestStormDistance'], 'mi')
self.assertEqual(utils.get_units('uk2')['precipIntensity'], 'mm/h')
self.assertEqual(utils.get_units('uk2')['precipIntensityMax'], 'mm/h')
self.assertEqual(utils.get_units('uk2')['precipAccumulation'], 'cm')
self.assertEqual(utils.get_units('uk2')['temperature'], 'C')
self.assertEqual(utils.get_units('uk2')['temperatureMin'], 'C')
self.assertEqual(utils.get_units('uk2')['temperatureMax'], 'C')
self.assertEqual(utils.get_units('uk2')['apparentTemperature'], 'C')
self.assertEqual(utils.get_units('uk2')['dewPoint'], 'C')
self.assertEqual(utils.get_units('uk2')['windSpeed'], 'mph')
self.assertEqual(utils.get_units('uk2')['pressure'], 'hPa')
self.assertEqual(utils.get_units('uk2')['visibility'], 'mi')
self.assertEqual(utils.get_units('si')['unit'], 'si')
self.assertEqual(utils.get_units('si')['nearestStormDistance'], 'km')
self.assertEqual(utils.get_units('si')['precipIntensity'], 'mm/h')
self.assertEqual(utils.get_units('si')['precipIntensityMax'], 'mm/h')
self.assertEqual(utils.get_units('si')['precipAccumulation'], 'cm')
self.assertEqual(utils.get_units('si')['temperature'], 'C')
self.assertEqual(utils.get_units('si')['temperatureMin'], 'C')
self.assertEqual(utils.get_units('si')['temperatureMax'], 'C')
self.assertEqual(utils.get_units('si')['apparentTemperature'], 'C')
self.assertEqual(utils.get_units('si')['dewPoint'], 'C')
self.assertEqual(utils.get_units('si')['windSpeed'], 'm/s')
self.assertEqual(utils.get_units('si')['pressure'], 'hPa')
self.assertEqual(utils.get_units('si')['visibility'], 'km')
def test_precipitation_intensity(self):
"""Testing getting string description from precipitation intensity"""
self.assertEqual(utils.precipitation_intensity(0.00, 'in/h'), 'none')
self.assertEqual(utils.precipitation_intensity(0.002, 'in/h'), 'very-light')
self.assertEqual(utils.precipitation_intensity(0.017, 'in/h'), 'light')
self.assertEqual(utils.precipitation_intensity(0.1, 'in/h'), 'moderate')
self.assertEqual(utils.precipitation_intensity(0.4, 'in/h'), 'heavy')
self.assertEqual(utils.precipitation_intensity(0.00, 'mm/h'), 'none')
self.assertEqual(utils.precipitation_intensity(0.051, 'mm/h'), 'very-light')
self.assertEqual(utils.precipitation_intensity(0.432, 'mm/h'), 'light')
self.assertEqual(utils.precipitation_intensity(2.540, 'mm/h'), 'moderate')
self.assertEqual(utils.precipitation_intensity(5.08, 'mm/h'), 'heavy')
def test_localize_utc_datetime(self):
"""Testing localizing a plain datetime object to a pytz timezone aware object"""
dt = datetime.datetime.fromtimestamp(1461731335) # datetime.datetime(2016, 4, 26, 23, 28, 55)
timezone_id = 'Europe/Copenhagen'
localized_dt = utils.localize_utc_datetime(timezone_id, dt)
correct_dt = datetime.datetime.fromtimestamp(1461738535) # datetime.datetime(2016, 4, 27, 1, 28, 55)
self.assertEqual(localized_dt, pytz.timezone('Europe/Copenhagen').localize(correct_dt))
def test_datetime_to_utc(self):
"""Testing localize a normal datetime object to timezone id, then convert to UTC"""
dt = datetime.datetime.fromtimestamp(1461738535) # datetime.datetime(2016, 4, 27, 1, 28, 55)
timezone_id = 'Europe/Copenhagen'
utc_dt = utils.datetime_to_utc(timezone_id, dt)
correct_dt = pytz.timezone('Europe/Copenhagen').localize(dt).astimezone(pytz.utc)
self.assertEqual(utc_dt, correct_dt)
def test_parse_time_string(self):
"""Testing parsing string representing time to a Time namedtuple"""
self.assertEqual(utils.parse_time_string('7:00'), utils.Time(hour=7, minute=0))
self.assertEqual(utils.parse_time_string('0:0'), utils.Time(hour=0, minute=0))
self.assertEqual(utils.parse_time_string('000000001:00000'), utils.Time(hour=1, minute=0))
self.assertEqual(utils.parse_time_string('18:00000001'), utils.Time(hour=18, minute=1))
self.assertEqual(utils.parse_time_string('22:59'), utils.Time(hour=22, minute=59))
self.assertEqual(utils.parse_time_string('1:45'), utils.Time(hour=1, minute=45))
self.assertEqual(utils.parse_time_string('0000002:000003'), utils.Time(hour=2, minute=3))
with self.assertRaises(utils.InvalidTimeError):
utils.parse_time_string('12')
with self.assertRaises(utils.InvalidTimeError):
utils.parse_time_string('1:2:3;4')
with self.assertRaises(utils.InvalidTimeError):
utils.parse_time_string('34:')
with self.assertRaises(utils.InvalidTimeError):
utils.parse_time_string(':5')
with self.assertRaises(utils.InvalidTimeError):
utils.parse_time_string(':')
with self.assertRaises(utils.InvalidTimeError):
utils.parse_time_string('not an int:but nice try')
with self.assertRaises(utils.InvalidTimeError):
utils.parse_time_string('34:00')
with self.assertRaises(utils.InvalidTimeError):
utils.parse_time_string('00:65')
with self.assertRaises(utils.InvalidTimeError):
utils.parse_time_string('-46:00')
with self.assertRaises(utils.InvalidTimeError):
utils.parse_time_string('00:-34')
def test_get_times(self):
"""Testing converting a string of times into a list of Time namedtuples"""
raw_simple = '7:00\n12:00\n15:00\n18:00\n22:00'
raw_complex = '0:0\n00000000001:00000\n18:00000001\n22:59\n23:00\n1:45\n00:00\n23:59\n1:01\n01:00\n01:02\n11:32'
list_simple = [utils.Time(hour=7, minute=0),
utils.Time(hour=12, minute=0),
utils.Time(hour=15, minute=0),
utils.Time(hour=18, minute=0),
utils.Time(hour=22, minute=0)]
list_complex = [utils.Time(hour=0, minute=0),
utils.Time(hour=0, minute=0),
utils.Time(hour=1, minute=0),
utils.Time(hour=1, minute=0),
utils.Time(hour=1, minute=1),
utils.Time(hour=1, minute=2),
utils.Time(hour=1, minute=45),
utils.Time(hour=11, minute=32),
utils.Time(hour=18, minute=1),
utils.Time(hour=22, minute=59),
utils.Time(hour=23, minute=0),
utils.Time(hour=23, minute=59)]
self.assertEqual(utils.get_times(raw_simple), list_simple)
self.assertEqual(utils.get_times(raw_complex), list_complex)
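# Minimal sketch of the behaviour the tests above imply for
# utils.parse_time_string and utils.get_times (an assumption, not the real
# weatherBot utils module): parse 'H:MM' strings, reject malformed or
# out-of-range values, and return the parsed times sorted.  The helper names
# and the ValueError used here are hypothetical.
def _parse_time_string_sketch(raw):
    import re
    from collections import namedtuple
    Time = namedtuple('Time', ['hour', 'minute'])
    match = re.fullmatch(r'(\d+):(\d+)', raw.strip())
    if match is None:
        raise ValueError('invalid time: {0!r}'.format(raw))
    hour, minute = int(match.group(1)), int(match.group(2))
    if not (0 <= hour <= 23 and 0 <= minute <= 59):
        raise ValueError('invalid time: {0!r}'.format(raw))
    return Time(hour, minute)

def _get_times_sketch(raw):
    return sorted(_parse_time_string_sketch(line) for line in raw.splitlines())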
class WeatherLocation(unittest.TestCase):
def setUp(self):
self.lat = 55.76
self.lng = 12.49
self.name = 'Lyngby-Taarbæk, Hovedstaden'
self.location = models.WeatherLocation(self.lat, self.lng, self.name)
def test_location(self):
"""Testing that locations are loaded correctly"""
self.assertEqual(self.location.lat, self.lat)
self.assertEqual(self.location.lng, self.lng)
self.assertEqual(self.location.name, self.name)
def test_str(self):
"""Testing that stringifying the object works correctly"""
self.assertEqual('<WeatherLocation: Lyngby-Taarbæk, Hovedstaden at 55.76,12.49>', str(self.location))
def test_repr(self):
"""Testing that __repr__ returns the __str__ representation of the object"""
self.assertEqual('<WeatherLocation: Lyngby-Taarbæk, Hovedstaden at 55.76,12.49>', repr(self.location))
def test_equality(self):
"""Testing equality comparisons"""
location_same = models.WeatherLocation(self.lat, self.lng, self.name)
self.assertEqual(self.location, location_same)
location2 = models.WeatherLocation(20, 16, 'testing')
self.assertNotEqual(self.location, location2)
class WeatherBotAlert(unittest.TestCase):
@replace('requests.get', mocked_requests_get)
def test_init(self, mock_get):
"""Test that a WeatherAlert is loaded correctly"""
forecast = forecastio.manual(os.path.join('fixtures', 'us_alert.json'))
alert = models.WeatherAlert(forecast.alerts()[0])
self.assertEqual('Wind Advisory', alert.title)
self.assertEqual(pytz.utc.localize(datetime.datetime(2016, 10, 18, 4, 4)), alert.time)
self.assertEqual(pytz.utc.localize(datetime.datetime(2016, 10, 20, 19, 0)), alert.expires)
self.assertEqual('https://alerts.weather.gov/cap/wwacapget.php?x=CA12561A519050.WindAdvisory.'
'12561A725D30CA.LOXNPWLOX.9240bcf720aae1b01b10f53f012e61bb', alert.uri)
self.assertEqual('a6bf597275fdf063c76a42b05c3c81ed093701b2344c3c98cfde36875f7a4c3d', alert.sha())
self.assertEqual('<WeatherAlert: Wind Advisory at 2016-10-18 04:04:00+00:00>', str(alert))
@replace('requests.get', mocked_requests_get)
def test_no_expires(self):
"""Test that a WeatherAlert is loaded correctly"""
forecast = forecastio.manual(os.path.join('fixtures', 'ca_alert.json'))
alert = models.WeatherAlert(forecast.alerts()[0])
self.assertEqual('Snowfall Warning', alert.title)
self.assertEqual(pytz.utc.localize(datetime.datetime(2017, 2, 4, 17, 11)), alert.time)
self.assertEqual('https://weather.gc.ca/warnings/report_e.html?ab6', alert.uri)
self.assertEqual('warning', alert.severity)
self.assertEqual('d5c1870d583f95441a41d452355173bad49f60f87c2962f195bd7873f0997d4b', alert.sha())
self.assertEqual('<WeatherAlert: Snowfall Warning at 2017-02-04 17:11:00+00:00>', str(alert))
@replace('requests.get', mocked_requests_get)
def test_expired(self):
"""Test that an alert is expired or active"""
forecast = forecastio.manual(os.path.join('fixtures', 'us_alert.json'))
alert = models.WeatherAlert(forecast.alerts()[0])
self.assertTrue(alert.expired(pytz.utc.localize(datetime.datetime(2017, 10, 18, 4, 4))))
self.assertFalse(alert.expired(pytz.utc.localize(datetime.datetime(2016, 10, 18, 4, 4))))
self.assertFalse(alert.expired(pytz.utc.localize(datetime.datetime(2015, 10, 18, 4, 4))))
forecast = forecastio.manual(os.path.join('fixtures', 'ca_alert.json'))
alert = models.WeatherAlert(forecast.alerts()[0])
self.assertTrue(alert.expired(pytz.utc.localize(datetime.datetime(2017, 2, 7, 17, 12))))
self.assertFalse(alert.expired(pytz.utc.localize(datetime.datetime(2017, 2, 7, 17, 11))))
self.assertFalse(alert.expired(pytz.utc.localize(datetime.datetime(2017, 2, 3, 17, 11))))
class WeatherBotData(unittest.TestCase):
def setUp(self):
with open('strings.yml', 'r') as file_stream:
self.weatherbot_strings = yaml.safe_load(file_stream)
self.location = models.WeatherLocation(55.76, 12.49, 'Lyngby-Taarbæk, Hovedstaden')
@replace('requests.get', mocked_requests_get)
def test_init(self):
"""Testing that weather data is loaded correctly"""
forecast = forecastio.manual(os.path.join('fixtures', 'us.json'))
wd = models.WeatherData(forecast, self.location)
self.assertEqual(wd.units, utils.get_units('us'))
self.assertEqual(wd.windBearing, 'SW')
self.assertEqual(wd.windSpeed, 10.81)
self.assertEqual(wd.apparentTemperature, 50.84)
self.assertEqual(wd.temp, 50.84)
self.assertEqual(wd.humidity, 89)
self.assertEqual(wd.precipIntensity, 0)
self.assertEqual(wd.precipProbability, 0)
self.assertEqual(wd.precipType, 'none')
self.assertEqual(wd.summary, 'Partly Cloudy')
self.assertEqual(wd.icon, 'partly-cloudy-day')
self.assertEqual(wd.location, self.location)
self.assertEqual(wd.timezone, 'Europe/Copenhagen')
self.assertEqual(wd.alerts, [])
self.assertTrue(wd.valid)
self.assertEqual(str(wd),
'<WeatherData: Lyngby-Taarbæk, Hovedstaden(55.76,12.49) at 2016-10-01 05:56:38+00:00>')
@replace('requests.get', mocked_requests_get)
def test_alerts(self):
"""Testing that alerts are loaded correctly into a list"""
location = models.WeatherLocation(34.2, -118.36, 'Los Angeles, CA')
forecast = forecastio.manual(os.path.join('fixtures', 'us_alert.json'))
wd = models.WeatherData(forecast, location)
self.assertEqual(wd.alerts[0].title, 'Wind Advisory')
self.assertEqual(wd.alerts[1].title, 'Beach Hazards Statement')
self.assertEqual(wd.alerts[2].title, 'Red Flag Warning')
@replace('requests.get', mocked_requests_get)
def test_bad_data(self):
"""Testing that bad data will gracefully fail"""
forecast = forecastio.manual(os.path.join('fixtures', 'bad_data_unavailable.json'))
wd = models.WeatherData(forecast, self.location)
self.assertFalse(wd.valid)
forecast = forecastio.manual(os.path.join('fixtures', 'bad_data_temperature.json'))
wd = models.WeatherData(forecast, self.location)
self.assertFalse(wd.valid)
forecast = forecastio.manual(os.path.join('fixtures', 'bad_data_summary.json'))
wd = models.WeatherData(forecast, self.location)
self.assertFalse(wd.valid)
@replace('requests.get', mocked_requests_get)
def test_optional_fields(self):
"""Testing that bad data will gracefully fail"""
forecast = forecastio.manual(os.path.join('fixtures', 'optional_fields.json'))
wd = models.WeatherData(forecast, self.location)
self.assertEqual(wd.precipType, 'rain')
self.assertEqual(wd.windBearing, 'unknown direction')
@replace('requests.get', mocked_requests_get)
def test_json(self):
"""Testing that json() returns a dict containing the response from the Dark Sky API"""
forecast = forecastio.manual(os.path.join('fixtures', 'us.json'))
wd = models.WeatherData(forecast, self.location)
self.assertEqual(wd.json(), forecast.json)
class WeatherBotString(unittest.TestCase):
def setUp(self):
with open('strings.yml', 'r') as file_stream:
self.weatherbot_strings = yaml.safe_load(file_stream)
self.location = models.WeatherLocation(55.76, 12.49, 'Lyngby-Taarbæk, Hovedstaden')
@replace('requests.get', mocked_requests_get)
def test_forecast(self):
"""Testing that forecasts are formatted correctly"""
forecast = forecastio.manual(os.path.join('fixtures', 'us.json'))
wd = models.WeatherData(forecast, self.location)
self.weatherbot_strings['forecast_endings'] = []
wbs = models.WeatherBotString(self.weatherbot_strings)
wbs.set_weather(wd)
forecast_string = wbs.forecast()
self.assertIn(forecast_string, wbs.forecasts)
self.weatherbot_strings['forecast_endings'] = ['Test ending!']
self.weatherbot_strings['forecasts'] = ['The forecast for today is {summary_lower} {high}/{low}.']
wbs = models.WeatherBotString(self.weatherbot_strings)
wbs.set_weather(wd)
forecast_string = wbs.forecast()
self.assertEqual(forecast_string,
'The forecast for today is mostly cloudy throughout the day. 66ºF/50ºF. Test ending!')
@replace('requests.get', mocked_requests_get)
def test_normal(self):
"""Testing that normal events are formatted"""
forecast = forecastio.manual(os.path.join('fixtures', 'us.json'))
wd = models.WeatherData(forecast, self.location)
wbs = models.WeatherBotString(self.weatherbot_strings)
wbs.set_weather(wd)
normal_string = wbs.normal()
self.assertIn(normal_string, wbs.normal_conditions)
@replace('requests.get', mocked_requests_get)
def test_special(self):
"""Testing if special events are triggered"""
forecast_si = forecastio.manual(os.path.join('fixtures', 'si.json'))
forecast_us = forecastio.manual(os.path.join('fixtures', 'us.json'))
forecast_ca = forecastio.manual(os.path.join('fixtures', 'ca.json'))
forecast_uk2 = forecastio.manual(os.path.join('fixtures', 'uk2.json'))
wd = models.WeatherData(forecast_si, self.location)
wbs = models.WeatherBotString(self.weatherbot_strings)
wbs.set_weather(wd)
self.assertEqual('normal', wbs.special().type)
self.assertEqual('', wbs.special().text)
"""Testing if wind-chill type is triggered"""
wd = models.WeatherData(forecast_si, self.location)
wd.apparentTemperature = -34
wbs = models.WeatherBotString(self.weatherbot_strings)
wbs.set_weather(wd)
self.assertEqual('wind-chill', wbs.special().type)
self.assertIn(wbs.special().text, wbs.special_conditions[wbs.special().type])
wd = models.WeatherData(forecast_us, self.location)
wd.apparentTemperature = -30
wbs = models.WeatherBotString(self.weatherbot_strings)
wbs.set_weather(wd)
self.assertEqual('wind-chill', wbs.special().type)
self.assertIn(wbs.special().text, wbs.special_conditions[wbs.special().type])
"""Testing if precip type is triggered"""
wd = models.WeatherData(forecast_si, self.location)
wd.precipProbability = 0.9
wd.precipType = 'rain'
wd.precipIntensity = 10.0
wbs.set_weather(wd)
self.assertEqual('heavy-rain', wbs.special().type)
self.assertIn(wbs.special().text, wbs.precipitations['rain']['heavy'])
wd = models.WeatherData(forecast_us, self.location)
wd.precipProbability = 0.9
wd.precipType = 'rain'
wd.precipIntensity = 1.0
wbs.set_weather(wd)
self.assertEqual('heavy-rain', wbs.special().type)
self.assertIn(wbs.special().text, wbs.precipitations['rain']['heavy'])
wd = models.WeatherData(forecast_us, self.location)
wd.precipProbability = 0.9
wd.precipType = 'none'
wd.precipIntensity = 1.0
wbs.set_weather(wd)
self.assertEqual('normal', wbs.special().type)
self.assertEqual('', wbs.special().text)
"""Testing if medium-wind type is triggered"""
wd = models.WeatherData(forecast_si, self.location)
wd.icon = 'medium-wind'
wbs.set_weather(wd)
self.assertEqual('medium-wind', wbs.special().type)
"""Testing if heavy-wind type is triggered"""
wd = models.WeatherData(forecast_si, self.location)
wd.icon = 'heavy-wind'
wbs.set_weather(wd)
self.assertEqual('heavy-wind', wbs.special().type)
wd = models.WeatherData(forecast_si, self.location)
wd.windSpeed = 15.0
wbs.set_weather(wd)
self.assertEqual('heavy-wind', wbs.special().type)
wd = models.WeatherData(forecast_ca, self.location)
wd.windSpeed = 56.0
wbs.set_weather(wd)
self.assertEqual('heavy-wind', wbs.special().type)
wd = models.WeatherData(forecast_us, self.location)
wd.windSpeed = 35.0
wbs.set_weather(wd)
self.assertEqual('heavy-wind', wbs.special().type)
wd = models.WeatherData(forecast_uk2, self.location)
wd.windSpeed = 35.0
wbs.set_weather(wd)
self.assertEqual('heavy-wind', wbs.special().type)
"""Testing if fog type is triggered"""
wd = models.WeatherData(forecast_si, self.location)
wd.icon = 'fog'
wbs.set_weather(wd)
self.assertEqual('fog', wbs.special().type)
"""Testing if cold type is triggered"""
wd = models.WeatherData(forecast_si, self.location)
wd.temp = -28.0
wbs.set_weather(wd)
self.assertEqual('cold', wbs.special().type)
wd = models.WeatherData(forecast_us, self.location)
wd.temp = -20.0
wbs.set_weather(wd)
self.assertEqual('cold', wbs.special().type)
"""Testing if super-hot type is triggered"""
wd = models.WeatherData(forecast_si, self.location)
wd.temp = 43.0
wbs.set_weather(wd)
self.assertEqual('super-hot', wbs.special().type)
wd = models.WeatherData(forecast_us, self.location)
wd.temp = 110.0
wbs.set_weather(wd)
self.assertEqual('super-hot', wbs.special().type)
"""Testing if hot type is triggered"""
wd = models.WeatherData(forecast_si, self.location)
wd.temp = 37.0
wbs.set_weather(wd)
self.assertEqual('hot', wbs.special().type)
wd = models.WeatherData(forecast_us, self.location)
wd.temp = 100.0
wbs.set_weather(wd)
self.assertEqual('hot', wbs.special().type)
"""Testing if dry type is triggered"""
wd = models.WeatherData(forecast_si, self.location)
wd.humidity = 25.0
wbs.set_weather(wd)
self.assertEqual('dry', wbs.special().type)
@replace('requests.get', mocked_requests_get)
def test_alert(self):
"""Testing that alerts are formatted"""
wbs = models.WeatherBotString(self.weatherbot_strings)
forecast = forecastio.manual(os.path.join('fixtures', 'ca_alert.json'))
location = models.WeatherLocation(50.564167, -111.898889, 'Brooks, Alberta')
wd = models.WeatherData(forecast, location)
wbs.set_weather(wd)
alert = wbs.alert(wd.alerts[0], wd.timezone)
self.assertIn('Snowfall Warning', alert)
self.assertIn('https://weather.gc.ca/warnings/report_e.html?ab6', alert)
@replace('requests.get', mocked_requests_get)
def test_precipitation(self):
"""Testing that precipitation conditions are met"""
wbs = models.WeatherBotString(self.weatherbot_strings)
forecast_us = forecastio.manual(os.path.join('fixtures', 'us.json'))
wd = models.WeatherData(forecast_us, self.location)
wbs.set_weather(wd)
self.assertEqual(wbs.precipitation(), models.Condition(type='none', text=''))
wd.precipIntensity = 0.3
wd.precipProbability = 0.5
wd.precipType = 'rain'
wbs.set_weather(wd)
self.assertEqual(wbs.precipitation(), models.Condition(type='none', text=''))
wd.precipIntensity = 0.3
wd.precipProbability = 1
wd.precipType = 'none'
wbs.set_weather(wd)
self.assertEqual(wbs.precipitation(), models.Condition(type='none', text=''))
wd.precipIntensity = 0
wd.precipProbability = 1
wd.precipType = 'rain'
wbs.set_weather(wd)
self.assertEqual(wbs.precipitation(), models.Condition(type='none', text=''))
wd.precipIntensity = 0
wd.precipProbability = 1
wd.precipType = 'none'
wbs.set_weather(wd)
self.assertEqual(wbs.precipitation(), models.Condition(type='none', text=''))
# testing with a few possible conditions
wd.precipIntensity = 0.3
wd.precipProbability = 1
wd.precipType = 'rain'
wbs.set_weather(wd)
precip = wbs.precipitation()
self.assertEqual(precip.type, 'moderate-rain')
self.assertIn(precip.text, wbs.precipitations['rain']['moderate'])
wd.precipIntensity = 0.4
wd.precipProbability = 0.85
wd.precipType = 'snow'
wbs.set_weather(wd)
precip = wbs.precipitation()
self.assertEqual(precip.type, 'heavy-snow')
self.assertIn(precip.text, wbs.precipitations['snow']['heavy'])
wd.precipIntensity = 0.06
wd.precipProbability = 1
wd.precipType = 'sleet'
wbs.set_weather(wd)
precip = wbs.precipitation()
self.assertEqual(precip.type, 'light-sleet')
self.assertIn(precip.text, wbs.precipitations['sleet']['light'])
wd.precipIntensity = 0.005
wd.precipProbability = 1
wd.precipType = 'rain'
wbs.set_weather(wd)
precip = wbs.precipitation()
self.assertEqual(precip.type, 'very-light-rain')
self.assertIn(precip.text, wbs.precipitations['rain']['very-light'])
@replace('requests.get', mocked_requests_get)
def test_update_weather_data(self):
"""Testing that new weather data is loaded correctly"""
forecast1 = forecastio.manual(os.path.join('fixtures', 'us.json'))
wd1 = models.WeatherData(forecast1, self.location)
forecast2 = forecastio.manual(os.path.join('fixtures', 'us_cincinnati.json'))
wd2 = models.WeatherData(forecast2, self.location)
wbs = models.WeatherBotString(self.weatherbot_strings)
wbs.set_weather(wd1)
self.assertEqual(50.84, wbs.weather_data.apparentTemperature)
self.assertIn('51', wbs.normal())
self.assertIn('66ºF', wbs.forecast())
self.assertEqual('normal', wbs.special().type)
self.assertEqual(0, len(wd1.alerts))
wbs.set_weather(wd2)
self.assertEqual(73.09, wbs.weather_data.apparentTemperature)
self.assertIn('73', wbs.normal())
self.assertIn('78ºF', wbs.forecast())
self.assertEqual('moderate-rain', wbs.special().type)
alert = wbs.alert(wd2.alerts[0], wd2.timezone)
self.assertIn('Severe Thunderstorm Warning', alert)
self.assertIn('Wed, Oct 19 at 19:30:00 EDT', alert)
self.assertIn('https://alerts.weather.gov/cap/wwacapget.php?x=OH12561A63BE38.SevereThunderstormWarning.'
'12561A63C2E8OH.ILNSVSILN.f17bc0b3ead1db18bf60532894d9925e', alert)
@replace('requests.get', mocked_requests_get)
def test_dict(self):
"""Testing that __dict__ returns the correct data"""
forecast = forecastio.manual(os.path.join('fixtures', 'us.json'))
wd = models.WeatherData(forecast, self.location)
wbs = models.WeatherBotString(self.weatherbot_strings)
wbs.set_weather(wd)
self.assertEqual({
'language': wbs.language,
'weather_data': wbs.weather_data,
'forecasts': wbs.forecasts,
'forecast_endings': wbs.forecasts_endings,
'normal_conditions': wbs.normal_conditions,
'special_conditions': wbs.special_conditions,
'precipitations': wbs.precipitations
}, wbs.__dict__())
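# Minimal sketch of the precipitation rule the tests above imply (an
# assumption, not the actual models.WeatherBotString.precipitation
# implementation): report no precipitation unless the type is known, the
# intensity is non-zero, and the probability clears a threshold (assumed here
# to be 0.8); otherwise the condition type is '<intensity-class>-<type>'.
# Uses the utils module already imported at the top of this file.
def _precipitation_sketch(precip_type, intensity, probability, unit='in/h'):
    if precip_type == 'none' or intensity == 0 or probability < 0.8:
        return 'none'
    level = utils.precipitation_intensity(intensity, unit)
    return '{0}-{1}'.format(level, precip_type)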
class TestWB(unittest.TestCase):
def setUp(self):
self.location = models.WeatherLocation(55.76, 12.49, 'Lyngby-Taarbæk, Hovedstaden')
def test_config(self):
"""Testing config file handling"""
equal = {
'basic': {
'dm_errors': False,
'units': 'si',
'tweet_location': False,
'hashtag': '',
'refresh': 300,
'strings': 'fake_path.yml'
},
'scheduled_times': {
'forecast': utils.Time(hour=6, minute=0),
'conditions': [utils.Time(hour=7, minute=0),
utils.Time(hour=12, minute=0),
utils.Time(hour=15, minute=0),
utils.Time(hour=18, minute=0),
utils.Time(hour=22, minute=0)]
},
'default_location': models.WeatherLocation(-79.0, 12.0, 'Just a Test'),
'variable_location': {
'enabled': True,
'user': 'test_user',
'unnamed_location_name': 'Somewhere in deep space'
},
'log': {
'enabled': False,
'log_path': '/tmp/weatherBotTest.log'
},
'throttles': {
'default': 24,
'wind-chill': 23,
'medium-wind': 22,
'heavy-wind': 21,
'fog': 20,
'cold': 19,
'hot': 18,
'dry': 17,
'heavy-rain': 16,
'moderate-rain': 15,
'light-rain': 14,
'very-light-rain': 13,
'heavy-snow': 12,
'moderate-snow': 11,
'light-snow': 10,
'very-light-snow': 9,
'heavy-sleet': 8,
'moderate-sleet': 7,
'light-sleet': 6,
'very-light-sleet': 5,
'heavy-hail': 4,
'moderate-hail': 3,
'light-hail': 2,
'very-light-hail': 1
}
}
conf = configparser.ConfigParser()
conf['basic'] = {
'dm_errors': 'off',
'units': 'si',
'tweet_location': 'no',
'hashtag': '',
'refresh': '300',
'strings': 'fake_path.yml'
}
conf['scheduled times'] = {
'forecast': '6:00',
'conditions': '7:00\n12:00\n15:00\n18:00\n22:00'
}
conf['default location'] = {
'lat': '-79',
'lng': '12',
'name': 'Just a Test'
}
conf['variable location'] = {
'enabled': 'yes',
'user': 'test_user',
'unnamed_location_name': 'Somewhere in deep space'
}
conf['log'] = {
'enabled': '0',
'log_path': '/tmp/weatherBotTest.log'
}
conf['throttles'] = {
'default': '24',
'wind-chill': '23',
'medium-wind': '22',
'heavy-wind': '21',
'fog': '20',
'cold': '19',
'hot': '18',
'dry': '17',
'heavy-rain': '16',
'moderate-rain': '15',
'light-rain': '14',
'very-light-rain': '13',
'heavy-snow': '12',
'moderate-snow': '11',
'light-snow': '10',
'very-light-snow': '9',
'heavy-sleet': '8',
'moderate-sleet': '7',
'light-sleet': '6',
'very-light-sleet': '5',
'heavy-hail': '4',
'moderate-hail': '3',
'light-hail': '2',
'very-light-hail': '1'
}
with open(os.getcwd() + '/weatherBotTest.conf', 'w') as configfile:
conf.write(configfile)
weatherBot.load_config(os.path.abspath('weatherBotTest.conf'))
self.assertDictEqual(weatherBot.CONFIG, equal)
os.remove(os.path.abspath('weatherBotTest.conf'))
def test_logging(self):
"""Testing if the system version is in the log and log file"""
with LogCapture() as l:
logger = logging.getLogger()
logger.info('info')
weatherBot.initialize_logger(True, os.path.abspath('weatherBotTest.log'))
logger.debug('debug')
logger.warning('uh oh')
l.check(('root', 'INFO', 'info'), ('root', 'INFO', 'Starting weatherBot with Python ' + sys.version),
('root', 'DEBUG', 'debug'), ('root', 'WARNING', 'uh oh'))
path = os.path.join(os.getcwd(), 'weatherBotTest.log')
with open(path, 'rb') as path:
data = path.read()
self.assertTrue(bytes(sys.version, 'UTF-8') in data)
self.assertFalse(bytes('debug', 'UTF-8') in data)
self.assertTrue(bytes('uh oh', 'UTF-8') in data)
os.remove(os.path.abspath('weatherBotTest.log'))
@replace('tweepy.OAuthHandler', mocked_tweepy_o_auth_handler)
def test_get_tweepy_api(self):
"""Testing getting a tweepy API object"""
api = weatherBot.get_tweepy_api()
self.assertTrue(type(api) is tweepy.API)
@replace('forecastio.load_forecast', mocked_forecastio_load_forecast)
def test_get_forecast_object(self):
"""Testing getting the forecastio object"""
forecast = weatherBot.get_forecast_object(self.location.lat, self.location.lng, units='us', lang='de')
self.assertEqual(forecast.response.status_code, 200)
self.assertEqual(forecast.json['flags']['units'], 'us')
@replace('forecastio.load_forecast', mocked_forecastio_load_forecast_error)
def test_get_forecast_object_error(self):
"""Testing getting the forecastio object"""
bad_forecast = weatherBot.get_forecast_object(45.5, 123.45)
self.assertEqual(bad_forecast, None)
@replace('weatherBot.get_tweepy_api', mocked_get_tweepy_api)
def test_get_location_from_user_timeline_coordinates(self):
"""Testing getting a location from twitter account's recent tweets using the coordinates property"""
fallback_loc = models.WeatherLocation(4, 3, 'test')
test_loc = models.WeatherLocation(2, 1, 'test')
loc = weatherBot.get_location_from_user_timeline('MorrisMNWeather', fallback_loc)
self.assertTrue(type(loc) is models.WeatherLocation)
self.assertEqual(loc, test_loc)
@replace('weatherBot.get_tweepy_api', mocked_get_tweepy_api)
def test_get_location_from_user_timeline_coordinates_no_place_full_name(self):
"""Testing getting a location from twitter account's recent tweets using the coordinates property
when a place does not exist for that location"""
fallback_loc = models.WeatherLocation(4, 3, 'test')
test_loc = models.WeatherLocation(2.5, 1.5, 'unnamed location')
weatherBot.CONFIG['variable_location']['unnamed_location_name'] = 'unnamed location'
loc = weatherBot.get_location_from_user_timeline('coordsnoplace', fallback_loc)
self.assertTrue(type(loc) is models.WeatherLocation)
self.assertEqual(loc, test_loc)
@replace('weatherBot.get_tweepy_api', mocked_get_tweepy_api)
def test_get_location_from_user_timeline_place(self):
"""Testing getting a location from twitter account's recent tweets using the place bounding box"""
fallback_loc = models.WeatherLocation(4, 3, 'test')
test_loc = models.WeatherLocation(5.0, 4.0, 'cool place')
loc = weatherBot.get_location_from_user_timeline('nocoords', fallback_loc)
self.assertTrue(type(loc) is models.WeatherLocation)
self.assertEqual(loc, test_loc)
@replace('weatherBot.get_tweepy_api', mocked_get_tweepy_api)
def test_get_location_from_user_timeline_empty(self):
"""Testing getting a location from twitter account's recent tweets when there are none"""
fallback_loc = models.WeatherLocation(4, 3, 'test')
self.assertEqual(weatherBot.get_location_from_user_timeline('no tweets', fallback_loc), fallback_loc)
@replace('weatherBot.get_tweepy_api', mocked_get_tweepy_api)
def test_get_location_from_user_timeline_error(self):
"""Testing getting a location from twitter account's recent tweets when there is an error"""
fallback_loc = models.WeatherLocation(4, 3, 'test')
self.assertEqual(weatherBot.get_location_from_user_timeline('error', fallback_loc), fallback_loc)
@replace('weatherBot.get_tweepy_api', mocked_get_tweepy_api)
def test_do_tweet(self):
"""Testing tweeting a test tweet"""
tweet_location = False
variable_location = False
content = 'Just running unit tests, this should disappear...'
hashtag = '#testing'
tweet_content = content + ' ' + hashtag
status = weatherBot.do_tweet(content, self.location, tweet_location, variable_location, hashtag=hashtag)
self.assertEqual(status.text, tweet_content)
@replace('weatherBot.get_tweepy_api', mocked_get_tweepy_api)
def test_do_tweet_long(self):
"""Testing tweeting a test tweet that is over 280 characters"""
tweet_location = False
variable_location = False
content = 'This tweet is over 280 characters.\n' \
'This tweet is over 280 characters.\n' \
'This tweet is over 280 characters.\n' \
'This tweet is over 280 characters.\n' \
'This tweet is over 280 characters.\n' \
'This tweet is over 280 characters.\n' \
'This tweet is over 280 characters.\n' \
'This tweet is over 280 characters.\n' \
'This tweet is over 280 characters.\n' \
'This tweet is over 280 characters.'
hashtag = '#testing'
status = weatherBot.do_tweet(content, self.location, tweet_location, variable_location, hashtag=hashtag)
expected_text = 'This tweet is over 280 characters. ' \
'This tweet is over 280 characters. ' \
'This tweet is over 280 characters. ' \
'This tweet is over 280 characters. ' \
'This tweet is over 280 characters. ' \
'This tweet is over 280 characters. ' \
'This tweet is over 280 characters. ' \
'This tweet is over 280… #testing'
self.assertEqual(status.text, expected_text)
@replace('weatherBot.get_tweepy_api', mocked_get_tweepy_api)
def test_do_tweet_with_locations(self):
"""Testing tweeting a test tweet with location and variable location"""
tweet_location = True
variable_location = True
content = 'Just running unit tests, this should disappear...'
weatherBot.CONFIG['basic']['hashtag'] = ''
tweet_content = self.location.name + ': ' + content
status = weatherBot.do_tweet(content, self.location, tweet_location, variable_location)
self.assertEqual(status.text, tweet_content)
@replace('weatherBot.get_tweepy_api', mocked_get_tweepy_api)
def test_do_tweet_error(self):
"""Testing tweeting a test tweet that should throw and error using keys from env variables"""
tweet_location = False
variable_location = False
content = 'error'
status = weatherBot.do_tweet(content, self.location, tweet_location, variable_location)
self.assertEqual(None, status)
def test_cleanse_throttles(self):
"""Testing that an expired, non-default key will be removed from a dict"""
now = pytz.utc.localize(datetime.datetime(2016, 10, 14, hour=14, minute=42)).astimezone(pytz.utc)
base = {'default': now - datetime.timedelta(hours=2)}
a = base
a['snow-light'] = now + datetime.timedelta(minutes=20)
self.assertDictEqual(a, weatherBot.cleanse_throttles(a, now))
b = base
b['dummy'] = now - datetime.timedelta(hours=3)
self.assertDictEqual(base, weatherBot.cleanse_throttles(b, now))
self.assertDictEqual({}, weatherBot.cleanse_throttles({}, now))
def test_set_cache(self):
"""Testing that set_cache properly saves a dict"""
a = {'test': 123, 'more testing': 'look, a string!'}
weatherBot.set_cache(a, file='testsetcache.p')
with open('testsetcache.p', 'rb') as handle:
self.assertEqual(pickle.load(handle), a)
os.remove('testsetcache.p')
def test_get_cache(self):
"""Testing that get_cache properly gets a cache,
or returns the weatherBot.CACHE global variable if no cache file exists"""
a = {'test': 123, 'more testing': 'look, a string!'}
self.assertEqual(weatherBot.get_cache('testgetcache.p'), weatherBot.CACHE)
with open('testgetcache.p', 'wb') as handle:
pickle.dump(a, handle)
self.assertEqual(weatherBot.get_cache('testgetcache.p'), a)
os.remove('testgetcache.p')
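# Minimal sketch of the cache helpers these tests exercise (an assumption
# about weatherBot.set_cache/get_cache, not the actual implementation):
# pickle the dict to disk, and fall back to an in-memory default when the
# cache file does not exist.  The names and the 'wb_cache.p' filename are
# hypothetical.
def _set_cache_sketch(cache, file='wb_cache.p'):
    import pickle
    with open(file, 'wb') as handle:
        pickle.dump(cache, handle)

def _get_cache_sketch(file='wb_cache.p', fallback=None):
    import os
    import pickle
    if not os.path.exists(file):
        return fallback
    with open(file, 'rb') as handle:
        return pickle.load(handle)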
class TestKeys(unittest.TestCase):
def setUp(self):
self.WEATHERBOT_CONSUMER_KEY = os.environ['WEATHERBOT_CONSUMER_KEY']
self.WEATHERBOT_CONSUMER_SECRET = os.environ['WEATHERBOT_CONSUMER_SECRET']
self.WEATHERBOT_ACCESS_TOKEN = os.environ['WEATHERBOT_ACCESS_TOKEN']
self.WEATHERBOT_ACCESS_TOKEN_SECRET = os.environ['WEATHERBOT_ACCESS_TOKEN_SECRET']
self.WEATHERBOT_DARKSKY_KEY = os.environ['WEATHERBOT_DARKSKY_KEY']
def tearDown(self):
os.environ['WEATHERBOT_CONSUMER_KEY'] = self.WEATHERBOT_CONSUMER_KEY
os.environ['WEATHERBOT_CONSUMER_SECRET'] = self.WEATHERBOT_CONSUMER_SECRET
os.environ['WEATHERBOT_ACCESS_TOKEN'] = self.WEATHERBOT_ACCESS_TOKEN
os.environ['WEATHERBOT_ACCESS_TOKEN_SECRET'] = self.WEATHERBOT_ACCESS_TOKEN_SECRET
os.environ['WEATHERBOT_DARKSKY_KEY'] = self.WEATHERBOT_DARKSKY_KEY
def test_set_twitter_env_vars(self):
"""Test that Twitter environment variables are set to the values in keys.py"""
keys.set_twitter_env_vars()
self.assertIsNotNone(os.environ['WEATHERBOT_CONSUMER_KEY'])
self.assertIsNotNone(os.environ['WEATHERBOT_CONSUMER_SECRET'])
self.assertIsNotNone(os.environ['WEATHERBOT_ACCESS_TOKEN'])
self.assertIsNotNone(os.environ['WEATHERBOT_ACCESS_TOKEN_SECRET'])
del os.environ['WEATHERBOT_CONSUMER_KEY']
del os.environ['WEATHERBOT_CONSUMER_SECRET']
del os.environ['WEATHERBOT_ACCESS_TOKEN']
del os.environ['WEATHERBOT_ACCESS_TOKEN_SECRET']
keys.set_twitter_env_vars()
self.assertEqual(os.getenv('WEATHERBOT_CONSUMER_KEY'), 'xxx')
self.assertEqual(os.getenv('WEATHERBOT_CONSUMER_SECRET'), 'xxx')
self.assertEqual(os.getenv('WEATHERBOT_ACCESS_TOKEN'), 'xxx')
self.assertEqual(os.getenv('WEATHERBOT_ACCESS_TOKEN_SECRET'), 'xxx')
def test_set_darksky_env_vars(self):
"""Test that the Dark Sky environment variable is set to the value in keys.py"""
keys.set_darksky_env_vars()
self.assertIsNotNone(os.environ['WEATHERBOT_DARKSKY_KEY'])
del os.environ['WEATHERBOT_DARKSKY_KEY']
keys.set_darksky_env_vars()
self.assertEqual(os.getenv('WEATHERBOT_DARKSKY_KEY'), 'xxx')
if __name__ == '__main__':
keys.set_twitter_env_vars()
keys.set_darksky_env_vars()
unittest.main()
|
bman4789/weatherBot
|
test.py
|
Python
|
mit
| 45,908
|
[
"Brian"
] |
dda4d914f2ea54c4ec93a170660fe2388d4bed934a9954d31e3c94ae6d87e7ed
|
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Lambda equation of GHF-CCSD(T) with spin-orbital integrals
Ref:
JCP 98, 8718 (1993); DOI:10.1063/1.464480
JCP 147, 044104 (2017); DOI:10.1063/1.4994918
'''
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import ccsd_lambda
from pyscf.cc import gccsd_lambda
def kernel(mycc, eris=None, t1=None, t2=None, l1=None, l2=None,
max_cycle=50, tol=1e-8, verbose=logger.INFO):
return ccsd_lambda.kernel(mycc, eris, t1, t2, l1, l2, max_cycle, tol,
verbose, make_intermediates, update_lambda)
def make_intermediates(mycc, t1, t2, eris):
imds = gccsd_lambda.make_intermediates(mycc, t1, t2, eris)
nocc, nvir = t1.shape
bcei = numpy.asarray(eris.ovvv).conj().transpose(3,2,1,0)
majk = numpy.asarray(eris.ooov).conj().transpose(2,3,0,1)
bcjk = numpy.asarray(eris.oovv).conj().transpose(2,3,0,1)
mo_e = eris.mo_energy
eia = mo_e[:nocc,None] - mo_e[nocc:]
d3 = lib.direct_sum('ia+jb+kc->ijkabc', eia, eia, eia)
t3c =(numpy.einsum('jkae,bcei->ijkabc', t2, bcei) -
numpy.einsum('imbc,majk->ijkabc', t2, majk))
t3c = t3c - t3c.transpose(0,1,2,4,3,5) - t3c.transpose(0,1,2,5,4,3)
t3c = t3c - t3c.transpose(1,0,2,3,4,5) - t3c.transpose(2,1,0,3,4,5)
t3c /= d3
t3d = numpy.einsum('ia,bcjk->ijkabc', t1, bcjk)
t3d += numpy.einsum('ai,jkbc->ijkabc', eris.fock[nocc:,:nocc], t2)
t3d = t3d - t3d.transpose(0,1,2,4,3,5) - t3d.transpose(0,1,2,5,4,3)
t3d = t3d - t3d.transpose(1,0,2,3,4,5) - t3d.transpose(2,1,0,3,4,5)
t3d /= d3
l1_t = numpy.einsum('ijkabc,jkbc->ia', t3c.conj(), eris.oovv) / eia
imds.l1_t = l1_t * .25
m3 = t3c * 2 + t3d
tmp = numpy.einsum('ijkaef,kbfe->ijab', m3.conj(), eris.ovvv) * .5
l2_t = tmp - tmp.transpose(0,1,3,2)
tmp = numpy.einsum('imnabc,mnjc->ijab', m3.conj(), eris.ooov) * .5
l2_t -= tmp - tmp.transpose(1,0,2,3)
l2_t += numpy.einsum('kc,ijkabc->ijab', eris.fock[:nocc,nocc:], t3c.conj())
imds.l2_t = l2_t / lib.direct_sum('ia+jb->ijab', eia, eia)
return imds
def update_lambda(mycc, t1, t2, l1, l2, eris=None, imds=None):
if eris is None: eris = mycc.ao2mo()
if imds is None: imds = make_intermediates(mycc, t1, t2, eris)
l1, l2 = gccsd_lambda.update_lambda(mycc, t1, t2, l1, l2, eris, imds)
l1 += imds.l1_t
l2 += imds.l2_t
return l1, l2
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf import cc
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.spin = 0
mol.build()
mf0 = mf = scf.RHF(mol).run(conv_tol=1.)
mf = scf.addons.convert_to_ghf(mf)
from pyscf.cc import ccsd_t_lambda_slow as ccsd_t_lambda
mycc0 = cc.CCSD(mf0)
eris0 = mycc0.ao2mo()
mycc0.kernel(eris=eris0)
t1 = mycc0.t1
t2 = mycc0.t2
imds = ccsd_t_lambda.make_intermediates(mycc0, t1, t2, eris0)
l1, l2 = ccsd_t_lambda.update_lambda(mycc0, t1, t2, t1, t2, eris0, imds)
l1ref, l2ref = ccsd_t_lambda.update_lambda(mycc0, t1, t2, l1, l2, eris0, imds)
mycc = cc.GCCSD(mf)
eris = mycc.ao2mo()
t1 = mycc.spatial2spin(t1, mycc.mo_coeff.orbspin)
t2 = mycc.spatial2spin(t2, mycc.mo_coeff.orbspin)
l1 = mycc.spatial2spin(l1, mycc.mo_coeff.orbspin)
l2 = mycc.spatial2spin(l2, mycc.mo_coeff.orbspin)
imds = make_intermediates(mycc, t1, t2, eris)
l1, l2 = update_lambda(mycc, t1, t2, l1, l2, eris, imds)
l1 = mycc.spin2spatial(l1, mycc.mo_coeff.orbspin)
l2 = mycc.spin2spatial(l2, mycc.mo_coeff.orbspin)
print(abs(l2[1]-l2[1].transpose(1,0,2,3)-l2[0]).max())
print(abs(l2[1]-l2[1].transpose(0,1,3,2)-l2[0]).max())
print(abs(l1[0]-l1ref).max())
print(abs(l2[1]-l2ref).max())
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.spin = 2
mol.build()
mf0 = mf = scf.UHF(mol).run(conv_tol=1)
mf = scf.addons.convert_to_ghf(mf)
from pyscf.cc import uccsd_t_lambda
mycc0 = cc.CCSD(mf0)
eris0 = mycc0.ao2mo()
mycc0.kernel(eris=eris0)
t1 = mycc0.t1
t2 = mycc0.t2
imds = uccsd_t_lambda.make_intermediates(mycc0, t1, t2, eris0)
l1, l2 = uccsd_t_lambda.update_lambda(mycc0, t1, t2, t1, t2, eris0, imds)
l1ref, l2ref = uccsd_t_lambda.update_lambda(mycc0, t1, t2, l1, l2, eris0, imds)
mycc = cc.GCCSD(mf)
eris = mycc.ao2mo()
t1 = mycc.spatial2spin(t1, mycc.mo_coeff.orbspin)
t2 = mycc.spatial2spin(t2, mycc.mo_coeff.orbspin)
l1 = mycc.spatial2spin(l1, mycc.mo_coeff.orbspin)
l2 = mycc.spatial2spin(l2, mycc.mo_coeff.orbspin)
imds = make_intermediates(mycc, t1, t2, eris)
l1, l2 = update_lambda(mycc, t1, t2, l1, l2, eris, imds)
l1 = mycc.spin2spatial(l1, mycc.mo_coeff.orbspin)
l2 = mycc.spin2spatial(l2, mycc.mo_coeff.orbspin)
print(abs(l1[0]-l1ref[0]).max())
print(abs(l1[1]-l1ref[1]).max())
print(abs(l2[0]-l2ref[0]).max())
print(abs(l2[1]-l2ref[1]).max())
print(abs(l2[2]-l2ref[2]).max())
|
sunqm/pyscf
|
pyscf/cc/gccsd_t_lambda.py
|
Python
|
apache-2.0
| 5,864
|
[
"PySCF"
] |
dad326b75945f817fd50f11c8e4c9ac2eaa7e08fd8dee015cd0d44f1a5cb531b
|
# Copyright (C) 2015-2022: The University of Edinburgh
# Authors: Craig Warren and Antonis Giannopoulos
#
# This file is part of gprMax.
#
# gprMax is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gprMax is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
from colorama import init
from colorama import Fore
from colorama import Style
init()
import numpy as np
from tqdm import tqdm
from gprMax.constants import z0
from gprMax.constants import floattype
from gprMax.exceptions import CmdInputError
from gprMax.geometry_outputs import GeometryView
from gprMax.geometry_outputs import GeometryObjects
from gprMax.materials import Material
from gprMax.materials import PeplinskiSoil
from gprMax.pml import CFSParameter
from gprMax.pml import CFS
from gprMax.receivers import Rx
from gprMax.snapshots import Snapshot
from gprMax.sources import VoltageSource
from gprMax.sources import HertzianDipole
from gprMax.sources import MagneticDipole
from gprMax.sources import TransmissionLine
from gprMax.utilities import round_value
from gprMax.waveforms import Waveform
def process_multicmds(multicmds, G):
"""
Checks the validity of command parameters and creates instances of the
corresponding parameter classes.
Args:
multicmds (dict): Commands that can have multiple instances in the model.
G (class): Grid class instance - holds essential parameters describing the model.
"""
# Check if coordinates are within the bounds of the grid
def check_coordinates(x, y, z, name=''):
try:
G.within_bounds(x=x, y=y, z=z)
except ValueError as err:
s = "'{}: {} ' {} {}-coordinate is not within the model domain".format(cmdname, ' '.join(tmp), name, err.args[0])
raise CmdInputError(s)
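# For orientation, a hedged sketch (not taken from any particular gprMax model
# file) of what an entry of multicmds looks like before it is parsed below:
#     multicmds['#waveform'] = ['ricker 1 1.5e9 my_ricker']
# i.e. each hash command maps to a list of space-separated parameter strings,
# here waveform type, amplitude scaling, centre frequency and identifier.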
# Waveform definitions
cmdname = '#waveform'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 4:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires exactly four parameters')
if tmp[0].lower() not in Waveform.types:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' must have one of the following types {}'.format(','.join(Waveform.types)))
if float(tmp[2]) <= 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires an excitation frequency value of greater than zero')
if any(x.ID == tmp[3] for x in G.waveforms):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' with ID {} already exists'.format(tmp[3]))
w = Waveform()
w.ID = tmp[3]
w.type = tmp[0].lower()
w.amp = float(tmp[1])
w.freq = float(tmp[2])
if G.messages:
print('Waveform {} of type {} with maximum amplitude scaling {:g}, frequency {:g}Hz created.'.format(w.ID, w.type, w.amp, w.freq))
G.waveforms.append(w)
# Voltage source
cmdname = '#voltage_source'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) < 6:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at least six parameters')
# Check polarity & position parameters
polarisation = tmp[0].lower()
if polarisation not in ('x', 'y', 'z'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be x, y, or z')
if '2D TMx' in G.mode and (polarisation == 'y' or polarisation == 'z'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be x in 2D TMx mode')
elif '2D TMy' in G.mode and (polarisation == 'x' or polarisation == 'z'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be y in 2D TMy mode')
elif '2D TMz' in G.mode and (polarisation == 'x' or polarisation == 'y'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be z in 2D TMz mode')
xcoord = G.calculate_coord('x', tmp[1])
ycoord = G.calculate_coord('y', tmp[2])
zcoord = G.calculate_coord('z', tmp[3])
resistance = float(tmp[4])
check_coordinates(xcoord, ycoord, zcoord)
if xcoord < G.pmlthickness['x0'] or xcoord > G.nx - G.pmlthickness['xmax'] or ycoord < G.pmlthickness['y0'] or ycoord > G.ny - G.pmlthickness['ymax'] or zcoord < G.pmlthickness['z0'] or zcoord > G.nz - G.pmlthickness['zmax']:
print(Fore.RED + "WARNING: '" + cmdname + ': ' + ' '.join(tmp) + "'" + ' sources and receivers should not normally be positioned within the PML.' + Style.RESET_ALL)
if resistance < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a source resistance of zero or greater')
# Check if there is a waveformID in the waveforms list
if not any(x.ID == tmp[5] for x in G.waveforms):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' there is no waveform with the identifier {}'.format(tmp[5]))
v = VoltageSource()
v.polarisation = polarisation
v.xcoord = xcoord
v.ycoord = ycoord
v.zcoord = zcoord
v.ID = v.__class__.__name__ + '(' + str(v.xcoord) + ',' + str(v.ycoord) + ',' + str(v.zcoord) + ')'
v.resistance = resistance
v.waveformID = tmp[5]
if len(tmp) > 6:
# Check source start & source remove time parameters
start = float(tmp[6])
stop = float(tmp[7])
if start < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' delay of the initiation of the source should not be less than zero')
if stop < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' time to remove the source should not be less than zero')
if stop - start <= 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' duration of the source should not be zero or less')
v.start = start
if stop > G.timewindow:
v.stop = G.timewindow
else:
v.stop = stop
startstop = ' start time {:g} secs, finish time {:g} secs '.format(v.start, v.stop)
else:
v.start = 0
v.stop = G.timewindow
startstop = ' '
v.calculate_waveform_values(G)
if G.messages:
print('Voltage source with polarity {} at {:g}m, {:g}m, {:g}m, resistance {:.1f} Ohms,'.format(v.polarisation, v.xcoord * G.dx, v.ycoord * G.dy, v.zcoord * G.dz, v.resistance) + startstop + 'using waveform {} created.'.format(v.waveformID))
G.voltagesources.append(v)
# Hertzian dipole
cmdname = '#hertzian_dipole'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) < 5:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at least five parameters')
# Check polarity & position parameters
polarisation = tmp[0].lower()
if polarisation not in ('x', 'y', 'z'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be x, y, or z')
if '2D TMx' in G.mode and (polarisation == 'y' or polarisation == 'z'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be x in 2D TMx mode')
elif '2D TMy' in G.mode and (polarisation == 'x' or polarisation == 'z'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be y in 2D TMy mode')
elif '2D TMz' in G.mode and (polarisation == 'x' or polarisation == 'y'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be z in 2D TMz mode')
xcoord = G.calculate_coord('x', tmp[1])
ycoord = G.calculate_coord('y', tmp[2])
zcoord = G.calculate_coord('z', tmp[3])
check_coordinates(xcoord, ycoord, zcoord)
if xcoord < G.pmlthickness['x0'] or xcoord > G.nx - G.pmlthickness['xmax'] or ycoord < G.pmlthickness['y0'] or ycoord > G.ny - G.pmlthickness['ymax'] or zcoord < G.pmlthickness['z0'] or zcoord > G.nz - G.pmlthickness['zmax']:
print(Fore.RED + "WARNING: '" + cmdname + ': ' + ' '.join(tmp) + "'" + ' sources and receivers should not normally be positioned within the PML.' + Style.RESET_ALL)
# Check if there is a waveformID in the waveforms list
if not any(x.ID == tmp[4] for x in G.waveforms):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' there is no waveform with the identifier {}'.format(tmp[4]))
h = HertzianDipole()
h.polarisation = polarisation
# Set length of dipole to grid size in polarisation direction
if h.polarisation == 'x':
h.dl = G.dx
elif h.polarisation == 'y':
h.dl = G.dy
elif h.polarisation == 'z':
h.dl = G.dz
h.xcoord = xcoord
h.ycoord = ycoord
h.zcoord = zcoord
h.xcoordorigin = xcoord
h.ycoordorigin = ycoord
h.zcoordorigin = zcoord
h.ID = h.__class__.__name__ + '(' + str(h.xcoord) + ',' + str(h.ycoord) + ',' + str(h.zcoord) + ')'
h.waveformID = tmp[4]
if len(tmp) > 5:
# Check source start & source remove time parameters
start = float(tmp[5])
stop = float(tmp[6])
if start < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' delay of the initiation of the source should not be less than zero')
if stop < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' time to remove the source should not be less than zero')
if stop - start <= 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' duration of the source should not be zero or less')
h.start = start
if stop > G.timewindow:
h.stop = G.timewindow
else:
h.stop = stop
startstop = ' start time {:g} secs, finish time {:g} secs '.format(h.start, h.stop)
else:
h.start = 0
h.stop = G.timewindow
startstop = ' '
h.calculate_waveform_values(G)
if G.messages:
if G.mode == '2D':
print('Hertzian dipole is a line source in 2D with polarity {} at {:g}m, {:g}m, {:g}m,'.format(h.polarisation, h.xcoord * G.dx, h.ycoord * G.dy, h.zcoord * G.dz) + startstop + 'using waveform {} created.'.format(h.waveformID))
else:
print('Hertzian dipole with polarity {} at {:g}m, {:g}m, {:g}m,'.format(h.polarisation, h.xcoord * G.dx, h.ycoord * G.dy, h.zcoord * G.dz) + startstop + 'using waveform {} created.'.format(h.waveformID))
G.hertziandipoles.append(h)
# Magnetic dipole
cmdname = '#magnetic_dipole'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) < 5:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at least five parameters')
# Check polarity & position parameters
polarisation = tmp[0].lower()
if polarisation not in ('x', 'y', 'z'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be x, y, or z')
if '2D TMx' in G.mode and (polarisation == 'y' or polarisation == 'z'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be x in 2D TMx mode')
elif '2D TMy' in G.mode and (polarisation == 'x' or polarisation == 'z'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be y in 2D TMy mode')
elif '2D TMz' in G.mode and (polarisation == 'x' or polarisation == 'y'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be z in 2D TMz mode')
xcoord = G.calculate_coord('x', tmp[1])
ycoord = G.calculate_coord('y', tmp[2])
zcoord = G.calculate_coord('z', tmp[3])
check_coordinates(xcoord, ycoord, zcoord)
if xcoord < G.pmlthickness['x0'] or xcoord > G.nx - G.pmlthickness['xmax'] or ycoord < G.pmlthickness['y0'] or ycoord > G.ny - G.pmlthickness['ymax'] or zcoord < G.pmlthickness['z0'] or zcoord > G.nz - G.pmlthickness['zmax']:
print(Fore.RED + "WARNING: '" + cmdname + ': ' + ' '.join(tmp) + "'" + ' sources and receivers should not normally be positioned within the PML.' + Style.RESET_ALL)
# Check if there is a waveformID in the waveforms list
if not any(x.ID == tmp[4] for x in G.waveforms):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' there is no waveform with the identifier {}'.format(tmp[4]))
m = MagneticDipole()
m.polarisation = polarisation
m.xcoord = xcoord
m.ycoord = ycoord
m.zcoord = zcoord
m.xcoordorigin = xcoord
m.ycoordorigin = ycoord
m.zcoordorigin = zcoord
m.ID = m.__class__.__name__ + '(' + str(m.xcoord) + ',' + str(m.ycoord) + ',' + str(m.zcoord) + ')'
m.waveformID = tmp[4]
if len(tmp) > 5:
# Check source start & source remove time parameters
start = float(tmp[5])
stop = float(tmp[6])
if start < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' delay of the initiation of the source should not be less than zero')
if stop < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' time to remove the source should not be less than zero')
if stop - start <= 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' duration of the source should not be zero or less')
m.start = start
if stop > G.timewindow:
m.stop = G.timewindow
else:
m.stop = stop
startstop = ' start time {:g} secs, finish time {:g} secs '.format(m.start, m.stop)
else:
m.start = 0
m.stop = G.timewindow
startstop = ' '
m.calculate_waveform_values(G)
if G.messages:
print('Magnetic dipole with polarity {} at {:g}m, {:g}m, {:g}m,'.format(m.polarisation, m.xcoord * G.dx, m.ycoord * G.dy, m.zcoord * G.dz) + startstop + 'using waveform {} created.'.format(m.waveformID))
G.magneticdipoles.append(m)
# Transmission line
cmdname = '#transmission_line'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) < 6:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at least six parameters')
# Warn about using a transmission line on GPU
if G.gpu is not None:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' A #transmission_line cannot currently be used with GPU solving. Consider using a #voltage_source instead.')
# Check polarity & position parameters
polarisation = tmp[0].lower()
if polarisation not in ('x', 'y', 'z'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be x, y, or z')
if '2D TMx' in G.mode and (polarisation == 'y' or polarisation == 'z'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be x in 2D TMx mode')
elif '2D TMy' in G.mode and (polarisation == 'x' or polarisation == 'z'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be y in 2D TMy mode')
elif '2D TMz' in G.mode and (polarisation == 'x' or polarisation == 'y'):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' polarisation must be z in 2D TMz mode')
xcoord = G.calculate_coord('x', tmp[1])
ycoord = G.calculate_coord('y', tmp[2])
zcoord = G.calculate_coord('z', tmp[3])
resistance = float(tmp[4])
check_coordinates(xcoord, ycoord, zcoord)
if xcoord < G.pmlthickness['x0'] or xcoord > G.nx - G.pmlthickness['xmax'] or ycoord < G.pmlthickness['y0'] or ycoord > G.ny - G.pmlthickness['ymax'] or zcoord < G.pmlthickness['z0'] or zcoord > G.nz - G.pmlthickness['zmax']:
print(Fore.RED + "WARNING: '" + cmdname + ': ' + ' '.join(tmp) + "'" + ' sources and receivers should not normally be positioned within the PML.' + Style.RESET_ALL)
if resistance <= 0 or resistance >= z0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a resistance greater than zero and less than the impedance of free space, i.e. 376.73 Ohms')
# Check if there is a waveformID in the waveforms list
if not any(x.ID == tmp[5] for x in G.waveforms):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' there is no waveform with the identifier {}'.format(tmp[5]))
t = TransmissionLine(G)
t.polarisation = polarisation
t.xcoord = xcoord
t.ycoord = ycoord
t.zcoord = zcoord
t.ID = t.__class__.__name__ + '(' + str(t.xcoord) + ',' + str(t.ycoord) + ',' + str(t.zcoord) + ')'
t.resistance = resistance
t.waveformID = tmp[5]
if len(tmp) > 6:
# Check source start & source remove time parameters
start = float(tmp[6])
stop = float(tmp[7])
if start < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' delay of the initiation of the source should not be less than zero')
if stop < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' time to remove the source should not be less than zero')
if stop - start <= 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' duration of the source should not be zero or less')
t.start = start
if stop > G.timewindow:
t.stop = G.timewindow
else:
t.stop = stop
startstop = ' start time {:g} secs, finish time {:g} secs '.format(t.start, t.stop)
else:
t.start = 0
t.stop = G.timewindow
startstop = ' '
t.calculate_waveform_values(G)
t.calculate_incident_V_I(G)
if G.messages:
print('Transmission line with polarity {} at {:g}m, {:g}m, {:g}m, resistance {:.1f} Ohms,'.format(t.polarisation, t.xcoord * G.dx, t.ycoord * G.dy, t.zcoord * G.dz, t.resistance) + startstop + 'using waveform {} created.'.format(t.waveformID))
G.transmissionlines.append(t)
# Receiver
cmdname = '#rx'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 3 and len(tmp) < 5:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' has an incorrect number of parameters')
# Check position parameters
xcoord = round_value(float(tmp[0]) / G.dx)
ycoord = round_value(float(tmp[1]) / G.dy)
zcoord = round_value(float(tmp[2]) / G.dz)
check_coordinates(xcoord, ycoord, zcoord)
if xcoord < G.pmlthickness['x0'] or xcoord > G.nx - G.pmlthickness['xmax'] or ycoord < G.pmlthickness['y0'] or ycoord > G.ny - G.pmlthickness['ymax'] or zcoord < G.pmlthickness['z0'] or zcoord > G.nz - G.pmlthickness['zmax']:
print(Fore.RED + "WARNING: '" + cmdname + ': ' + ' '.join(tmp) + "'" + ' sources and receivers should not normally be positioned within the PML.' + Style.RESET_ALL)
r = Rx()
r.xcoord = xcoord
r.ycoord = ycoord
r.zcoord = zcoord
r.xcoordorigin = xcoord
r.ycoordorigin = ycoord
r.zcoordorigin = zcoord
# If no ID or outputs are specified, use default
if len(tmp) == 3:
r.ID = r.__class__.__name__ + '(' + str(r.xcoord) + ',' + str(r.ycoord) + ',' + str(r.zcoord) + ')'
for key in Rx.defaultoutputs:
r.outputs[key] = np.zeros(G.iterations, dtype=floattype)
else:
r.ID = tmp[3]
# Get allowable outputs
if G.gpu is not None:
allowableoutputs = Rx.gpu_allowableoutputs
else:
allowableoutputs = Rx.allowableoutputs
# Check and add field output names
for field in tmp[4::]:
if field in allowableoutputs:
r.outputs[field] = np.zeros(G.iterations, dtype=floattype)
else:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' contains an output type that is not allowable. Allowable outputs in current context are {}'.format(allowableoutputs))
if G.messages:
print('Receiver at {:g}m, {:g}m, {:g}m with output component(s) {} created.'.format(r.xcoord * G.dx, r.ycoord * G.dy, r.zcoord * G.dz, ', '.join(r.outputs)))
G.rxs.append(r)
# Receiver array
cmdname = '#rx_array'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 9:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires exactly nine parameters')
xs = G.calculate_coord('x', tmp[0])
ys = G.calculate_coord('y', tmp[1])
zs = G.calculate_coord('z', tmp[2])
xf = G.calculate_coord('x', tmp[3])
yf = G.calculate_coord('y', tmp[4])
zf = G.calculate_coord('z', tmp[5])
dx = G.calculate_coord('x', tmp[6])
dy = G.calculate_coord('y', tmp[7])
dz = G.calculate_coord('z', tmp[8])
check_coordinates(xs, ys, zs, name='lower')
check_coordinates(xf, yf, zf, name='upper')
if xs < G.pmlthickness['x0'] or xf > G.nx - G.pmlthickness['xmax'] or ys < G.pmlthickness['y0'] or yf > G.ny - G.pmlthickness['ymax'] or zs < G.pmlthickness['z0'] or zf > G.nz - G.pmlthickness['zmax']:
print(Fore.RED + "WARNING: '" + cmdname + ': ' + ' '.join(tmp) + "'" + ' sources and receivers should not normally be positioned within the PML.' + Style.RESET_ALL)
if xs > xf or ys > yf or zs > zf:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' the lower coordinates should be less than the upper coordinates')
if dx < 0 or dy < 0 or dz < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' the step size should not be less than zero')
if dx < 1:
if dx == 0:
dx = 1
else:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' the step size should not be less than the spatial discretisation')
if dy < 1:
if dy == 0:
dy = 1
else:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' the step size should not be less than the spatial discretisation')
if dz < 1:
if dz == 0:
dz = 1
else:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' the step size should not be less than the spatial discretisation')
if G.messages:
print('Receiver array {:g}m, {:g}m, {:g}m, to {:g}m, {:g}m, {:g}m with steps {:g}m, {:g}m, {:g}m'.format(xs * G.dx, ys * G.dy, zs * G.dz, xf * G.dx, yf * G.dy, zf * G.dz, dx * G.dx, dy * G.dy, dz * G.dz))
for x in range(xs, xf + 1, dx):
for y in range(ys, yf + 1, dy):
for z in range(zs, zf + 1, dz):
r = Rx()
r.xcoord = x
r.ycoord = y
r.zcoord = z
r.xcoordorigin = x
r.ycoordorigin = y
r.zcoordorigin = z
r.ID = r.__class__.__name__ + '(' + str(x) + ',' + str(y) + ',' + str(z) + ')'
for key in Rx.defaultoutputs:
r.outputs[key] = np.zeros(G.iterations, dtype=floattype)
if G.messages:
print(' Receiver at {:g}m, {:g}m, {:g}m with output component(s) {} created.'.format(r.xcoord * G.dx, r.ycoord * G.dy, r.zcoord * G.dz, ', '.join(r.outputs)))
G.rxs.append(r)
# Snapshot
cmdname = '#snapshot'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 11:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires exactly eleven parameters')
xs = G.calculate_coord('x', tmp[0])
ys = G.calculate_coord('y', tmp[1])
zs = G.calculate_coord('z', tmp[2])
xf = G.calculate_coord('x', tmp[3])
yf = G.calculate_coord('y', tmp[4])
zf = G.calculate_coord('z', tmp[5])
dx = G.calculate_coord('x', tmp[6])
dy = G.calculate_coord('y', tmp[7])
dz = G.calculate_coord('z', tmp[8])
# If number of iterations given
try:
time = int(tmp[9])
# If real floating point value given
except ValueError:
time = float(tmp[9])
if time > 0:
time = round_value((time / G.dt)) + 1
else:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' time value must be greater than zero')
check_coordinates(xs, ys, zs, name='lower')
check_coordinates(xf, yf, zf, name='upper')
if xs >= xf or ys >= yf or zs >= zf:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' the lower coordinates should be less than the upper coordinates')
if dx < 0 or dy < 0 or dz < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' the step size should not be less than zero')
if dx < 1 or dy < 1 or dz < 1:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' the step size should not be less than the spatial discretisation')
if time <= 0 or time > G.iterations:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' time value is not valid')
s = Snapshot(xs, ys, zs, xf, yf, zf, dx, dy, dz, time, tmp[10])
if G.messages:
print('Snapshot from {:g}m, {:g}m, {:g}m, to {:g}m, {:g}m, {:g}m, discretisation {:g}m, {:g}m, {:g}m, at {:g} secs with filename {} created.'.format(xs * G.dx, ys * G.dy, zs * G.dz, xf * G.dx, yf * G.dy, zf * G.dz, dx * G.dx, dy * G.dy, dz * G.dz, s.time * G.dt, s.basefilename))
G.snapshots.append(s)
# Materials
cmdname = '#material'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 5:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires exactly five parameters')
if float(tmp[0]) < 1:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a positive value of one or greater for static (DC) permittivity')
if tmp[1] != 'inf':
se = float(tmp[1])
if se < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a positive value for conductivity')
else:
se = float('inf')
if float(tmp[2]) < 1:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a positive value of one or greater for permeability')
if float(tmp[3]) < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a positive value for magnetic conductivity')
if any(x.ID == tmp[4] for x in G.materials):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' with ID {} already exists'.format(tmp[4]))
# Create a new instance of the Material class material (start index after pec & free_space)
m = Material(len(G.materials), tmp[4])
m.er = float(tmp[0])
m.se = se
m.mr = float(tmp[2])
m.sm = float(tmp[3])
# Set material averaging to False if infinite conductivity, i.e. pec
if m.se == float('inf'):
m.averagable = False
if G.messages:
tqdm.write('Material {} with eps_r={:g}, sigma={:g} S/m; mu_r={:g}, sigma*={:g} Ohm/m created.'.format(m.ID, m.er, m.se, m.mr, m.sm))
# Append the new material object to the materials list
G.materials.append(m)
cmdname = '#add_dispersion_debye'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) < 4:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at least four parameters')
if int(tmp[0]) < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a positive value for number of poles')
poles = int(tmp[0])
materialsrequested = tmp[(2 * poles) + 1:len(tmp)]
# Look up requested materials in existing list of material instances
materials = [y for x in materialsrequested for y in G.materials if y.ID == x]
if len(materials) != len(materialsrequested):
notfound = [x for x in materialsrequested if x not in materials]
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' material(s) {} do not exist'.format(notfound))
for material in materials:
material.type = 'debye'
material.poles = poles
material.averagable = False
for pole in range(1, 2 * poles, 2):
# N.B Not checking if relaxation times are greater than time-step
if float(tmp[pole]) > 0:
material.deltaer.append(float(tmp[pole]))
material.tau.append(float(tmp[pole + 1]))
else:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires positive values for the permittivity difference.')
if material.poles > Material.maxpoles:
Material.maxpoles = material.poles
if G.messages:
tqdm.write('Debye dispersion added to {} with delta_eps_r={}, and tau={} secs created.'.format(material.ID, ', '.join('%4.2f' % deltaer for deltaer in material.deltaer), ', '.join('%4.3e' % tau for tau in material.tau)))
cmdname = '#add_dispersion_lorentz'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) < 5:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at least five parameters')
if int(tmp[0]) < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a positive value for number of poles')
poles = int(tmp[0])
materialsrequested = tmp[(3 * poles) + 1:len(tmp)]
# Look up requested materials in existing list of material instances
materials = [y for x in materialsrequested for y in G.materials if y.ID == x]
if len(materials) != len(materialsrequested):
notfound = [x for x in materialsrequested if x not in materials]
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' material(s) {} do not exist'.format(notfound))
for material in materials:
material.type = 'lorentz'
material.poles = poles
material.averagable = False
for pole in range(1, 3 * poles, 3):
if float(tmp[pole]) > 0 and float(tmp[pole + 1]) > G.dt and float(tmp[pole + 2]) > G.dt:
material.deltaer.append(float(tmp[pole]))
material.tau.append(float(tmp[pole + 1]))
material.alpha.append(float(tmp[pole + 2]))
else:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires positive values for the permittivity difference and frequencies, and associated times that are greater than the time step for the model.')
if material.poles > Material.maxpoles:
Material.maxpoles = material.poles
if G.messages:
tqdm.write('Lorentz dispersion added to {} with delta_eps_r={}, omega={} secs, and gamma={} created.'.format(material.ID, ', '.join('%4.2f' % deltaer for deltaer in material.deltaer), ', '.join('%4.3e' % tau for tau in material.tau), ', '.join('%4.3e' % alpha for alpha in material.alpha)))
cmdname = '#add_dispersion_drude'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) < 5:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at least five parameters')
if int(tmp[0]) < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a positive value for number of poles')
poles = int(tmp[0])
materialsrequested = tmp[(3 * poles) + 1:len(tmp)]
# Look up requested materials in existing list of material instances
materials = [y for x in materialsrequested for y in G.materials if y.ID == x]
if len(materials) != len(materialsrequested):
notfound = [x for x in materialsrequested if x not in materials]
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' material(s) {} do not exist'.format(notfound))
for material in materials:
material.type = 'drude'
material.poles = poles
material.averagable = False
for pole in range(1, 2 * poles, 2):
if float(tmp[pole]) > 0 and float(tmp[pole + 1]) > G.dt:
material.tau.append(float(tmp[pole]))
material.alpha.append(float(tmp[pole + 1]))
else:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires positive values for the frequencies, and associated times that are greater than the time step for the model.')
if material.poles > Material.maxpoles:
Material.maxpoles = material.poles
if G.messages:
tqdm.write('Drude dispersion added to {} with omega={} secs, and gamma={} secs created.'.format(material.ID, ', '.join('%4.3e' % tau for tau in material.tau), ', '.join('%4.3e' % alpha for alpha in material.alpha)))
cmdname = '#soil_peplinski'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 7:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires exactly seven parameters')
if float(tmp[0]) < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a positive value for the sand fraction')
if float(tmp[1]) < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a positive value for the clay fraction')
if float(tmp[2]) < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a positive value for the bulk density')
if float(tmp[3]) < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a positive value for the sand particle density')
if float(tmp[4]) < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a positive value for the lower limit of the water volumetric fraction')
if float(tmp[5]) < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires a positive value for the upper limit of the water volumetric fraction')
if any(x.ID == tmp[6] for x in G.mixingmodels):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' with ID {} already exists'.format(tmp[6]))
# Create a new instance of the Material class material (start index after pec & free_space)
s = PeplinskiSoil(tmp[6], float(tmp[0]), float(tmp[1]), float(tmp[2]), float(tmp[3]), (float(tmp[4]), float(tmp[5])))
if G.messages:
print('Mixing model (Peplinski) used to create {} with sand fraction {:g}, clay fraction {:g}, bulk density {:g}g/cm3, sand particle density {:g}g/cm3, and water volumetric fraction {:g} to {:g} created.'.format(s.ID, s.S, s.C, s.rb, s.rs, s.mu[0], s.mu[1]))
# Append the new material object to the materials list
G.mixingmodels.append(s)
# Geometry views (creates VTK-based geometry files)
cmdname = '#geometry_view'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 11:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires exactly eleven parameters')
xs = G.calculate_coord('x', tmp[0])
ys = G.calculate_coord('y', tmp[1])
zs = G.calculate_coord('z', tmp[2])
xf = G.calculate_coord('x', tmp[3])
yf = G.calculate_coord('y', tmp[4])
zf = G.calculate_coord('z', tmp[5])
dx = G.calculate_coord('x', tmp[6])
dy = G.calculate_coord('y', tmp[7])
dz = G.calculate_coord('z', tmp[8])
check_coordinates(xs, ys, zs, name='lower')
check_coordinates(xf, yf, zf, name='upper')
if xs >= xf or ys >= yf or zs >= zf:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' the lower coordinates should be less than the upper coordinates')
if dx < 0 or dy < 0 or dz < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' the step size should not be less than zero')
if dx > G.nx or dy > G.ny or dz > G.nz:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' the step size should be less than the domain size')
if dx < 1 or dy < 1 or dz < 1:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' the step size should not be less than the spatial discretisation')
if tmp[10].lower() != 'n' and tmp[10].lower() != 'f':
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires type to be either n (normal) or f (fine)')
if tmp[10].lower() == 'f' and (dx * G.dx != G.dx or dy * G.dy != G.dy or dz * G.dz != G.dz):
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires the spatial discretisation for the geometry view to be the same as the model for geometry view of type f (fine)')
# Set type of geometry file
if tmp[10].lower() == 'n':
fileext = '.vti'
else:
fileext = '.vtp'
g = GeometryView(xs, ys, zs, xf, yf, zf, dx, dy, dz, tmp[9], fileext)
if G.messages:
print('Geometry view from {:g}m, {:g}m, {:g}m, to {:g}m, {:g}m, {:g}m, discretisation {:g}m, {:g}m, {:g}m, with filename base {} created.'.format(xs * G.dx, ys * G.dy, zs * G.dz, xf * G.dx, yf * G.dy, zf * G.dz, dx * G.dx, dy * G.dy, dz * G.dz, g.basefilename))
# Append the new GeometryView object to the geometry views list
G.geometryviews.append(g)
# Geometry object(s) output
cmdname = '#geometry_objects_write'
if multicmds[cmdname] is not None:
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 7:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires exactly seven parameters')
xs = G.calculate_coord('x', tmp[0])
ys = G.calculate_coord('y', tmp[1])
zs = G.calculate_coord('z', tmp[2])
xf = G.calculate_coord('x', tmp[3])
yf = G.calculate_coord('y', tmp[4])
zf = G.calculate_coord('z', tmp[5])
check_coordinates(xs, ys, zs, name='lower')
check_coordinates(xf, yf, zf, name='upper')
if xs >= xf or ys >= yf or zs >= zf:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' the lower coordinates should be less than the upper coordinates')
g = GeometryObjects(xs, ys, zs, xf, yf, zf, tmp[6])
if G.messages:
print('Geometry objects in the volume from {:g}m, {:g}m, {:g}m, to {:g}m, {:g}m, {:g}m, will be written to {}, with materials written to {}'.format(xs * G.dx, ys * G.dy, zs * G.dz, xf * G.dx, yf * G.dy, zf * G.dz, g.filename, g.materialsfilename))
# Append the new GeometryView object to the geometry objects to write list
G.geometryobjectswrite.append(g)
# Complex frequency shifted (CFS) PML parameter
cmdname = '#pml_cfs'
if multicmds[cmdname] is not None:
if len(multicmds[cmdname]) > 2:
raise CmdInputError("'" + cmdname + "'" + ' can only be used up to two times, for up to a 2nd order PML')
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 12:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires exactly twelve parameters')
if tmp[0] not in CFSParameter.scalingprofiles.keys() or tmp[4] not in CFSParameter.scalingprofiles.keys() or tmp[8] not in CFSParameter.scalingprofiles.keys():
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' must have scaling type {}'.format(','.join(CFSParameter.scalingprofiles.keys())))
if tmp[1] not in CFSParameter.scalingdirections or tmp[5] not in CFSParameter.scalingdirections or tmp[9] not in CFSParameter.scalingdirections:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' must have scaling direction {}'.format(','.join(CFSParameter.scalingdirections)))
if float(tmp[2]) < 0 or float(tmp[3]) < 0 or float(tmp[6]) < 0 or float(tmp[7]) < 0 or float(tmp[10]) < 0:
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' minimum and maximum scaling values must be positive')
if float(tmp[6]) < 1 and G.pmlformulation == 'HORIPML':
raise CmdInputError("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' minimum scaling value for kappa must be greater than or equal to one')
cfsalpha = CFSParameter()
cfsalpha.ID = 'alpha'
cfsalpha.scalingprofile = tmp[0]
cfsalpha.scalingdirection = tmp[1]
cfsalpha.min = float(tmp[2])
cfsalpha.max = float(tmp[3])
cfskappa = CFSParameter()
cfskappa.ID = 'kappa'
cfskappa.scalingprofile = tmp[4]
cfskappa.scalingdirection = tmp[5]
cfskappa.min = float(tmp[6])
cfskappa.max = float(tmp[7])
cfssigma = CFSParameter()
cfssigma.ID = 'sigma'
cfssigma.scalingprofile = tmp[8]
cfssigma.scalingdirection = tmp[9]
cfssigma.min = float(tmp[10])
if tmp[11] == 'None':
cfssigma.max = None
else:
cfssigma.max = float(tmp[11])
cfs = CFS()
cfs.alpha = cfsalpha
cfs.kappa = cfskappa
cfs.sigma = cfssigma
if G.messages:
print('PML CFS parameters: alpha (scaling: {}, scaling direction: {}, min: {:g}, max: {:g}), kappa (scaling: {}, scaling direction: {}, min: {:g}, max: {:g}), sigma (scaling: {}, scaling direction: {}, min: {:g}, max: {}) created.'.format(cfsalpha.scalingprofile, cfsalpha.scalingdirection, cfsalpha.min, cfsalpha.max, cfskappa.scalingprofile, cfskappa.scalingdirection, cfskappa.min, cfskappa.max, cfssigma.scalingprofile, cfssigma.scalingdirection, cfssigma.min, cfssigma.max))
G.cfs.append(cfs)
|
gprMax/gprMax
|
gprMax/input_cmds_multiuse.py
|
Python
|
gpl-3.0
| 47,381
|
[
"VTK"
] |
d790e52a2bd81d95e751d57f33e404afa0a3d1a8b129e97d9fb9fd7530486edc
|
"""
This module contains the necessary tools to discover and load
the handlers for serving HTTPS
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from tornado.web import url as TornadoURL, RequestHandler
from DIRAC import gConfig, gLogger, S_ERROR, S_OK
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.Base.private.ModuleLoader import ModuleLoader
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
def urlFinder(module):
"""
Tries to guess the URL from the module name.
The URL would be of the form ``/System/Component`` (e.g. ``DataManagement/FileCatalog``)
We search something which looks like ``<...>.<component>System.<...>.<service>Handler``
:param module: full module name (e.g. "DIRAC.something.something")
:returns: the deduced URL or None
"""
sections = module.split('.')
for section in sections:
# This condition is a bit long
# We search for something which looks like <...>.<component>System.<...>.<service>Handler
# If found, we return /<component>/<service>
if section.endswith("System") and sections[-1].endswith("Handler"):
return "/".join(["", section[:-len("System")], sections[-1][:-len("Handler")]])
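# A minimal illustration of the deduction rule above, using a hypothetical
# module name (not read from any configuration):
#     urlFinder("DIRAC.DataManagementSystem.Service.FileCatalogHandler")
# returns "/DataManagement/FileCatalog"; if no "<...>System" section or
# "<...>Handler" tail is present, the loop falls through and None is returned.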
class HandlerManager(object):
"""
This utility class allows loading the handlers, generating the appropriate routes,
and discovering the handlers based on the CS.
In order for a service to be considered as using HTTPS, it must have
``protocol = https`` as an option.
Each of the handlers will have one route associated with it:
* Directly specified as ``LOCATION`` in the handler module
* automatically deduced from the module name, of the form
``System/Component`` (e.g. ``DataManagement/FileCatalog``)
"""
def __init__(self, autoDiscovery=True):
"""
Initialization function; you can set autoDiscovery=False to prevent automatic
discovery of handlers. If disabled, you can use loadHandlersByServiceName() to
load your handlers, or loadHandlerInHandlerManager()
:param autoDiscovery: (default True) Disable the automatic discovery;
can be used to choose which services we want to load.
"""
self.__handlers = {}
self.__objectLoader = ObjectLoader()
self.__autoDiscovery = autoDiscovery
self.loader = ModuleLoader("Service", PathFinder.getServiceSection, RequestHandler, moduleSuffix="Handler")
def __addHandler(self, handlerTuple, url=None):
"""
Function which adds a handler to the list of known handlers
:param handlerTuple: (path, class)
"""
# Check if handler not already loaded
if not url or url not in self.__handlers:
gLogger.debug("Found new handler %s" % (handlerTuple[0]))
# If url is not given, try to discover it
if url is None:
# FIRST TRY: Url is hardcoded
try:
url = handlerTuple[1].LOCATION
# SECOND TRY: URL can be deduced from path
except AttributeError:
gLogger.debug("No location defined for %s, trying to deduce it from the module path" % handlerTuple[0])
url = urlFinder(handlerTuple[0])
# We add "/" if missing at the beginning, e.g. we found "Framework/Service"
# URL can't be relative in Tornado
if url and not url.startswith('/'):
url = "/%s" % url
elif not url:
gLogger.warn("URL not found for %s" % (handlerTuple[0]))
return S_ERROR("URL not found for %s" % (handlerTuple[0]))
# Finally add the URL to handlers
if url not in self.__handlers:
self.__handlers[url] = handlerTuple[1]
gLogger.info("New handler: %s with URL %s" % (handlerTuple[0], url))
else:
gLogger.debug("Handler already loaded %s" % (handlerTuple[0]))
return S_OK()
def discoverHandlers(self):
"""
Force the discovery of the URLs; called automatically when we try to get the handlers for the first time.
You can disable the automatic call with autoDiscovery=False at initialization
"""
gLogger.debug("Trying to auto-discover the handlers for Tornado")
# Look in config
diracSystems = gConfig.getSections('/Systems')
serviceList = []
if diracSystems['OK']:
for system in diracSystems['Value']:
try:
instance = PathFinder.getSystemInstance(system)
services = gConfig.getSections('/Systems/%s/%s/Services' % (system, instance))
if services['OK']:
for service in services['Value']:
newservice = ("%s/%s" % (system, service))
# We search in the CS all handlers which used HTTPS as protocol
isHTTPS = gConfig.getValue('/Systems/%s/%s/Services/%s/Protocol' % (system, instance, service))
if isHTTPS and isHTTPS.lower() == 'https':
serviceList.append(newservice)
# Systems sometimes contain sections that are not related to services...
except RuntimeError:
pass
return self.loadHandlersByServiceName(serviceList)
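# For illustration only, a hypothetical CS layout that the discovery loop above
# would pick up (the names are made up, not real configuration):
#     /Systems/Framework/Production/Services/Hello/Protocol = https
# would append "Framework/Hello" to serviceList.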
def loadHandlersByServiceName(self, servicesNames):
"""
Load a list of handlers from a list of services using the DIRAC ModuleLoader
Use :py:class:`DIRAC.Core.Base.private.ModuleLoader`
:param servicesNames: list of services, e.g. ['Framework/Hello', 'Configuration/Server']
"""
# Use the DIRAC system to load the modules: search the CS if a path is given; if not defined,
# it searches in the place it should be (e.g. in DIRAC/FrameworkSystem/Service)
if not isinstance(servicesNames, list):
servicesNames = [servicesNames]
load = self.loader.loadModules(servicesNames)
if not load['OK']:
return load
for module in self.loader.getModules().values():
url = module['loadName']
# URL can be like https://domain:port/service/name or just service/name
# Here we just want the service name, for tornado
serviceTuple = url.replace('https://', '').split('/')[-2:]
url = "%s/%s" % (serviceTuple[0], serviceTuple[1])
self.__addHandler((module['loadName'], module['classObj']), url)
return S_OK()
def getHandlersURLs(self):
"""
Get all handlers for use in Tornado, as a list of tornado.web.url
If no handlers have been found before, it tries to find them
:returns: a list of URL (not the string with "https://..." but the tornado object)
see http://www.tornadoweb.org/en/stable/web.html#tornado.web.URLSpec
"""
if not self.__handlers and self.__autoDiscovery:
self.__autoDiscovery = False
self.discoverHandlers()
urls = []
for key in self.__handlers:
urls.append(TornadoURL(key, self.__handlers[key]))
return urls
def getHandlersDict(self):
"""
Return the dictionary of all handlers
:returns: dictionary with absolute url as key ("/System/Service")
and tornado.web.url object as value
"""
if not self.__handlers and self.__autoDiscovery:
self.__autoDiscovery = False
res = self.discoverHandlers()
if not res['OK']:
gLogger.error("Could not load handlers", res)
return self.__handlers
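# A minimal usage sketch (illustrative only, not part of the DIRAC sources):
#     handlerManager = HandlerManager()
#     urls = handlerManager.getHandlersURLs()
# the resulting list of tornado.web.url objects can then be passed to a
# tornado.web.Application to serve the discovered handlers over HTTPS.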
|
yujikato/DIRAC
|
src/DIRAC/Core/Tornado/Server/HandlerManager.py
|
Python
|
gpl-3.0
| 7,117
|
[
"DIRAC"
] |
649733668fdfd34985941adde6c59eda3cadf818d44f80dfdf3c43de4422b768
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
# Plotting the results of reservoir_and_water_3_out.csv
import os
import sys
import matplotlib.pyplot as plt
f = open("reservoir_and_water_3_out.csv", "r")
header = f.readline().strip().split(",")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:]]
f.close()
index = {}
for i in range(len(header)):
index[header[i]] = i
years = [d[index["time"]] / 3600.0 / 24.0 / 365.0 for d in data]
all_minerals = ["Albite", "Anhydrite", "Anorthite", "Calcite", "Chalcedony", "Clinochl-7A", "Illite", "K-feldspar", "Kaolinite", "Quartz", "Paragonite", "Phlogopite", "Zoisite", "Laumontite", "mineral"]
cm3 = {}
for mineral in all_minerals:
cm3[mineral] = [x[index["cm3_" + mineral]] for x in data]
change = {}
for mineral in all_minerals:
change[mineral] = [c - cm3[mineral][0] for c in cm3[mineral]]
percentage_change = {}
for mineral in all_minerals:
percentage_change[mineral] = [100 * (c - cm3[mineral][0]) / cm3[mineral][0] for c in cm3[mineral]]
# Plot the absolute changes in mineral volume
sortit = sorted([[change[mineral][-1], mineral] for mineral in all_minerals[:-1]])
plotorder = [m[1] for m in sortit]
plt.figure(0)
for mineral in reversed(plotorder):
plt.semilogx(years, change[mineral], label=mineral)
plt.semilogx(years, change["mineral"], 'k--', label="Sum")
plt.legend()
plt.ylabel("Mineral volume change (cm$^{3}$)")
plt.xlabel("Years")
plt.title(r"Reservoir mineral volume when in contact with Water3 at 70$^{\circ}$C")
plt.tight_layout()
plt.savefig("../../../../geochemistry/doc/content/media/geochemistry/reservoir_and_water_3.png")
# Plot the percentage changes in mineral volume
sortit = sorted([[percentage_change[mineral][-1], mineral] for mineral in all_minerals[:-1]])
plotorder = [m[1] for m in sortit]
plt.figure(1)
for mineral in reversed(plotorder):
plt.semilogx(years, percentage_change[mineral], label=mineral)
plt.semilogx(years, percentage_change["mineral"], 'k--', label="Sum")
plt.ylim(-100, 100)
plt.legend()
plt.ylabel("Percentage volume change (%)")
plt.xlabel("Years")
plt.title(r"Reservoir mineral volume when in contact with Water3 at 70$^{\circ}$C")
plt.tight_layout()
plt.savefig("../../../../geochemistry/doc/content/media/geochemistry/reservoir_and_water_3_percentage.png")
plt.show()
sys.exit(0)
|
harterj/moose
|
modules/combined/examples/geochem-porous_flow/forge/reservoir_and_water_3.py
|
Python
|
lgpl-2.1
| 2,604
|
[
"MOOSE"
] |
0dbf9920762f3cd27cce781c68c80d22dad2356a0e0afa954574cbbbedf16830
|
#!/usr/bin/env python
"""Prevent unwanted files from being added to the source tree."""
import os
import sys
def main():
"""Main entry point."""
paths = sys.argv[1:] or sys.stdin.read().splitlines()
allowed_extensions = (
'.cs',
'.ps1',
'.psm1',
'.py',
)
skip = (
# allowed special cases
'lib/ansible/config/base.yml',
'lib/ansible/config/module_defaults.yml',
)
skip_directories = (
'lib/ansible.egg-info/',
'lib/ansible/galaxy/data/',
)
for path in paths:
if path in skip:
continue
if any(path.startswith(skip_directory) for skip_directory in skip_directories):
continue
if path.startswith('lib/') and not path.startswith('lib/ansible/'):
print('%s: all "lib" content must reside in the "lib/ansible" directory' % path)
continue
ext = os.path.splitext(path)[1]
if ext not in allowed_extensions:
print('%s: extension must be one of: %s' % (path, ', '.join(allowed_extensions)))
if __name__ == '__main__':
main()
|
albertomurillo/ansible
|
test/sanity/code-smell/no-unwanted-files.py
|
Python
|
gpl-3.0
| 1,137
|
[
"Galaxy"
] |
b6458c8e935d8e9a37d4d4db347c04c6f811f677be0e003883854437312d2cd9
|
#!/usr/bin/env python3
import numpy as np
import pylab
import tkinter
import math
import matplotlib.pyplot as plt
import mpmath as mp
hbar=0.658212 # Planck's constant (eV.fs/rad)
J0=0.05 # per-neighbor exchange interaction (eV)
S1 = np.array([1.0, 0.0, 0.0])
S2 = np.array([0.0, 1.0, 0.0])
alpha=0.01 # damping coefficient
pi=math.pi
N=30000 # number of timesteps
dt=0.1 # timestep (fs)
# Rodrigues rotation formula
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise
rotation about the given axis by theta radians
"""
axis = np.asarray(axis)
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
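# Quick sanity check of the Rodrigues formula above (an illustrative addition,
# not part of the original LAMMPS validation script): a counterclockwise
# rotation of the x unit vector by pi/2 about z must give the y unit vector.
assert np.allclose(np.dot(rotation_matrix([0.0, 0.0, 1.0], 0.5 * pi), [1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])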
# calculating precession field of spin Sr
def calc_rot_vector(Sr,Sf):
rot = (J0/hbar)*(Sf-alpha*np.cross(Sf,Sr))/(1.0+alpha**2)
return rot
# second-order ST decomposition as implemented in LAMMPS
for t in range (0,N):
# advance s1 by dt/4
wf1 = calc_rot_vector(S1,S2)
theta=dt*np.linalg.norm(wf1)*0.25
axis=wf1/np.linalg.norm(wf1)
S1 = np.dot(rotation_matrix(axis, theta), S1)
# advance s2 by dt/2
wf2 = calc_rot_vector(S2,S1)
theta=dt*np.linalg.norm(wf2)*0.5
axis=wf2/np.linalg.norm(wf2)
S2 = np.dot(rotation_matrix(axis, theta), S2)
# advance s1 by dt/2
wf1 = calc_rot_vector(S1,S2)
theta=dt*np.linalg.norm(wf1)*0.5
axis=wf1/np.linalg.norm(wf1)
S1 = np.dot(rotation_matrix(axis, theta), S1)
# advance s2 by dt/2
wf2 = calc_rot_vector(S2,S1)
theta=dt*np.linalg.norm(wf2)*0.5
axis=wf2/np.linalg.norm(wf2)
S2 = np.dot(rotation_matrix(axis, theta), S2)
# advance s1 by dt/4
wf1 = calc_rot_vector(S1,S2)
theta=dt*np.linalg.norm(wf1)*0.25
axis=wf1/np.linalg.norm(wf1)
S1 = np.dot(rotation_matrix(axis, theta), S1)
# calc. average magnetization
Sm = (S1+S2)*0.5
# calc. energy
en = -2.0*J0*(np.dot(S1,S2))
# print res. in ps for comparison with LAMMPS
print(t*dt/1000.0,Sm[0],Sm[1],Sm[2],en)
|
pastewka/lammps
|
examples/SPIN/test_problems/validation_damped_exchange/llg_exchange.py
|
Python
|
gpl-2.0
| 2,292
|
[
"LAMMPS"
] |
4d382288625501c53f628de9d9ec347d41216a752808f85164e3766b2dbb876c
|
#!/usr/bin/env python3
from distutils.core import setup
from glob import glob
# Scripts whose names end in a-z or 1-9 (avoids emacs backup files)
scripts = glob('scripts/*[a-z,1-9]')
setup(name='vasputil',
version='master',
description='VASP utilities',
author='Janne Blomqvist',
author_email='Janne.Blomqvist@aalto.fi',
url='https://github.com/jabl/vasputil',
packages=['vasputil', 'vasputil.tests'],
scripts = scripts)
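# Illustrative note (added): with this distutils setup, the package and the glob'd
# scripts would typically be installed via `python setup.py install` or `pip install .`.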
|
jabl/vasputil
|
setup.py
|
Python
|
lgpl-2.1
| 467
|
[
"VASP"
] |
06978cc40a56f730a29c0f060ee59288146e5a6cf87b5302062aa8e8f6c67635
|
import argparse
import os
import string
import sys
__author__ = 'Rob Edwards'
"""
Newick format:
((15:0.04110,((16:0.03869,17:0.11891):0.00888,14:0.10566):0.00609):0.09345,
(((6:0.00000,7:0.00000):0.01537,13:0.00839):0.00229,((1:0.00011,
4:0.00065):0.01946,(11:-0.01252,(10:0.00692,(8:0.00463,(((2:0.00178,
(3:0.00192,5:0.00082):0.00081):0.00254,9:0.00327):0.00157,
12:-0.00216):0.00082):0.00088):0.01351):0.05366):0.01353):0.01429,0:0.00383);
This is a parser that I wrote myself. (Mainly so that other people don't have to install biopython or something
similar). It works for vanilla newick trees, but not for more complex trees (e.g. that have trifurcating branches).
Use at your own risk!
"""
class Node(object):
"""A node object"""
def __init__(self, id):
self.id = id
self.left = None
self.right = None
self.parent = None
self.distance = ""
self.name = ""
self.side = None
class Newick_Tree(object):
def __init__(self):
pass
def count_nodes(self, root):
""" Count the number of nodes in the tree
:param root: The root node
:type root: Node
:return: The number of nodes
:rtype: int
"""
def count(node):
c = 1
if node.left:
c += count(node.left)
if node.right:
c += count(node.right)
return c
return count(root)
def parse(self, tree, verbose=False):
"""
Parse the string given by tree
:param tree: the string to parse
:param verbose: whether to make lots of output
:return:
"""
def process_tree(treestr, pos, node):
# sys.stderr.write("At pos {} tree has depth {}\n".format(pos, Tree().count_nodes(root)))
if treestr[pos] == '(':
pos += 1
# sys.stderr.write("LEFT: When adding node {} tree has depth {}\n".format(pos, Tree().count_nodes(root)))
newnode = Node(pos)
newnode.parent = node
node.left = newnode
newnode.side = "Left"
# sys.stderr.write("ADDED NODE {} to the LEFT\n".format(newnode.id))
return process_tree(treestr, pos, newnode)
elif treestr[pos] == ',':
pos += 1
# sys.stderr.write("RIGHT: When adding node {} tree has depth {}\n".format(pos, Tree().count_nodes(root)))
newnode = Node(pos)
if node.parent.right:
newnode = node.parent
else:
newnode.parent = node.parent
node.parent.right = newnode
newnode.side = 'Right'
# sys.stderr.write("ADDED NODE {} to the RIGHT\n".format(newnode.id))
return process_tree(treestr, pos, newnode)
elif treestr[pos] == ')':
pos += 1
if pos >= len(treestr):
return
while treestr[pos] in string.ascii_letters or treestr[pos] == '_' or treestr[pos] in string.digits or treestr[pos] in '-':
node.name += treestr[pos]
pos += 1
if verbose:
sys.stderr.write("At pos {} set node {} ({}) to {}\n".format(pos, node.id, node.side, node.name))
return process_tree(treestr, pos, node.parent)
elif treestr[pos] == ':':
pos += 1
try:
while treestr[pos] in string.digits or treestr[pos] == '.' or treestr[pos] in ['-', 'e']:
node.distance += treestr[pos]
pos += 1
except TypeError:
raise TypeError("TypeError: CANNOT ADD {} at POS {} to {} in node {}\n".format(treestr[pos], pos, node.distance, node.id))
if verbose:
sys.stderr.write("Set NODE {} dist to {}\n".format(node.id, node.distance))
node.distance = float(node.distance)
return process_tree(treestr, pos, node)
else:
while treestr[pos] in string.ascii_letters or treestr[pos] == '_' or treestr[pos] in string.digits or \
treestr[pos] in '-':
node.name += treestr[pos]
pos += 1
# sys.stderr.write("When adding node {} tree has depth {}\n".format(node.name, Tree().count_nodes(root)))
if verbose:
sys.stderr.write("At pos {} Set node {} ({}) to {}\n".format(pos, node.id, node.side, node.name))
return process_tree(treestr, pos, node)
parent = Node("root")
root = parent
pos = 0
tree = tree.rstrip(';')
pos = process_tree(tree, pos, parent)
if verbose:
sys.stderr.write("TREE HAS DEPTH {}\n".format(Tree().count_nodes(root)))
return parent
def print_tree(self, root):
"""
Print out a tree in newick format.
:param root: The root node of the tree
:type root: Node
:return:
:rtype:
"""
def process_child(node):
toreturn = ''
if node.left or node.right:
toreturn = '('
if node.left and node.right:
toreturn += process_child(node.left) + "," + process_child(node.right)
elif node.left:
toreturn += process_child(node.left)
elif node.right:
toreturn += process_child(node.right)
if node.left and node.right and node.name:
# the root node??
toreturn += ','
elif node.left or node.right:
toreturn += ")"
if node.name:
toreturn += node.name
toreturn += ":{}".format(node.distance)
return toreturn
print(process_child(root) + ");")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse a tree')
parser.add_argument('-t', help='tree file', required=True)
parser.add_argument('-v', help='verbose output during parsing', action='store_true')
args = parser.parse_args()
tre = []
with open(args.t, 'r') as f:
for l in f:
tre.append(l.strip())
    root = Newick_Tree().parse(''.join(tre), args.v)
    print("PARSED\n\nThere are {} nodes\n\n".format(Newick_Tree().count_nodes(root)))
    print("Root has left {} and right {}\n".format(root.left, root.right))
    Newick_Tree().print_tree(root)
|
linsalrob/EdwardsLab
|
roblib/newick.py
|
Python
|
mit
| 6,633
|
[
"Biopython"
] |
b0c78ada944b4742e13003d0414a0afcc01715f1a37c24136c83f68d5b255d2f
|
# -*- coding: utf-8 -*-
import re
import scrapy
from scrapyproject.showingspiders.showing_spider import ShowingSpider
from scrapyproject.items import (ShowingLoader, init_show_booking_loader)
from scrapyproject.utils import UnitedUtil
class UnitedSpider(ShowingSpider):
"""
united site spider.
"""
name = "united"
allowed_domains = ["www.unitedcinemas.jp"]
start_urls = [
'http://www.unitedcinemas.jp/index.html'
]
def parse(self, response):
"""
crawl theater list data first
"""
theater_list = response.xpath(
'//section[@class="rcol searchTheater"]//li')
for theater_element in theater_list:
if theater_element.xpath('./@class').extract_first() == "area":
continue
curr_cinema_url = theater_element.xpath(
'./a/@href').extract_first()
cinema_img = theater_element.xpath('./img/@src').extract_first()
cinema_name = theater_element.xpath('./a/img/@alt').extract_first()
if cinema_img is not None:
if "icon_uc_ss.gif" in cinema_img:
cinema_name = "ユナイテッド・シネマ" + cinema_name
elif "icon_cpx_ss.gif" in cinema_img:
cinema_name = "シネプレックス" + cinema_name
data_proto = ShowingLoader(response=response)
data_proto.add_cinema_name(cinema_name)
cinema_name = data_proto.get_output_value('cinema_name')
data_proto.add_cinema_site(
response.urljoin(curr_cinema_url), cinema_name)
data_proto.add_value('source', self.name)
if not self.is_cinema_crawl([cinema_name]):
continue
cinema_name_en = curr_cinema_url.split('/')[-2]
schedule_url = self.generate_cinema_schedule_url(
cinema_name_en, self.date)
request = scrapy.Request(schedule_url, callback=self.parse_cinema)
request.meta["data_proto"] = data_proto.load_item()
yield request
def generate_cinema_schedule_url(self, cinema_name_en, show_day):
"""
json data url for single cinema, all movies of curr cinema
"""
date = show_day[:4] + '-' + show_day[4:6] + '-' + show_day[6:]
url = 'http://www.unitedcinemas.jp/{cinema_name_en}'\
'/daily.php?date={date}'.format(
cinema_name_en=cinema_name_en, date=date)
return url
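    # Illustrative note (added): for example cinema_name_en='toyosu' and
    # show_day='20170101' would produce
    # 'http://www.unitedcinemas.jp/toyosu/daily.php?date=2017-01-01'
    # ('toyosu' is only a hypothetical slug used to show the URL shape).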
def parse_cinema(self, response):
data_proto = ShowingLoader(response=response)
data_proto.add_value(None, response.meta["data_proto"])
result_list = []
movie_section_list = response.xpath('//ul[@id="dailyList"]/li')
for curr_movie in movie_section_list:
self.parse_movie(response, curr_movie, data_proto, result_list)
for result in result_list:
if result:
yield result
def parse_movie(self, response, curr_movie, data_proto, result_list):
"""
parse movie showing data
"""
title = curr_movie.xpath('./h3/span/a[1]/text()').extract_first()
movie_data_proto = ShowingLoader(response=response)
movie_data_proto.add_value(None, data_proto.load_item())
movie_data_proto.add_title(title=title)
title_list = movie_data_proto.get_title_list()
if not self.is_movie_crawl(title_list):
return
screen_section_list = curr_movie.xpath('./ul/li')
for curr_screen in screen_section_list:
self.parse_screen(response, curr_screen,
movie_data_proto, result_list)
def parse_screen(self, response, curr_screen, data_proto, result_list):
screen_data_proto = ShowingLoader(response=response)
screen_data_proto.add_value(None, data_proto.load_item())
screen_name = curr_screen.xpath('./p/a/img/@alt').extract_first()
screen_name = 'screen' + re.findall(r'\d+', screen_name)[0]
screen_data_proto.add_screen_name(screen_name)
show_section_list = curr_screen.xpath('./ol/li')
for curr_showing in show_section_list:
self.parse_showing(response, curr_showing,
screen_data_proto, result_list)
def parse_showing(self, response, curr_showing, data_proto, result_list):
def parse_time(time_str):
time = time_str.split(":")
return (int(time[0]), int(time[1]))
showing_data_proto = ShowingLoader(response=response)
showing_data_proto.add_value(None, data_proto.load_item())
start_time = curr_showing.xpath(
'./div/ol/li[@class="startTime"]/text()').extract_first()
start_hour, start_minute = parse_time(start_time)
showing_data_proto.add_value('start_time', self.get_time_from_text(
start_hour, start_minute))
end_time = curr_showing.xpath(
'./div/ol/li[@class="endTime"]/text()').extract_first()[1:]
end_hour, end_minute = parse_time(end_time)
showing_data_proto.add_value('end_time', self.get_time_from_text(
end_hour, end_minute))
# handle free order seat type showings
seat_type = curr_showing.xpath(
'./div/ul/li[@class="seatIcon"]/img/@src').extract_first()
showing_data_proto.add_value(
'seat_type', UnitedUtil.standardize_seat_type(seat_type))
# query screen number from database
showing_data_proto.add_total_seat_count()
# check whether need to continue crawl booking data or stop now
if not self.crawl_booking_data:
result_list.append(showing_data_proto.load_item())
return
booking_data_proto = init_show_booking_loader(response=response)
booking_data_proto.add_value('showing', showing_data_proto.load_item())
book_status = curr_showing.xpath(
'./div/ul/li[@class="uolIcon"]//img[1]/@src').extract_first()
booking_data_proto.add_book_status(book_status, util=UnitedUtil)
book_status = booking_data_proto.get_output_value('book_status')
seat_type = showing_data_proto.get_output_value('seat_type')
if (seat_type == 'FreeSeat' or book_status in ['SoldOut', 'NotSold']):
# sold out or not sold
total_seat_count = showing_data_proto.get_output_value(
'total_seat_count')
book_seat_count = (
total_seat_count if book_status == 'SoldOut' else 0)
booking_data_proto.add_value('book_seat_count', book_seat_count)
booking_data_proto.add_time_data()
result_list.append(booking_data_proto.load_item())
return
else:
# normal, need to crawl book number on order page
# we will visit schedule page again to generate independent cookie
# as same cookie will lead to confirm page
url = curr_showing.xpath(
'./div/ul/li[@class="uolIcon"]/a/@href').extract_first()
# determine if next page is 4dx confirm page by title
title = showing_data_proto.get_output_value('title')
if '4DX' in title:
request = scrapy.Request(
url, callback=self.parse_4dx_confirm_page)
else:
request = scrapy.Request(
url, callback=self.parse_normal_showing)
request.meta["data_proto"] = booking_data_proto.load_item()
# use independent cookie to avoid affecting each other
request.meta["cookiejar"] = url
result_list.append(request)
def parse_4dx_confirm_page(self, response):
url = response.xpath('//form/@action').extract_first()
url = response.urljoin(url)
request = scrapy.Request(url, method='POST',
callback=self.parse_normal_showing)
request.meta["data_proto"] = response.meta['data_proto']
yield request
def parse_normal_showing(self, response):
result = init_show_booking_loader(
response=response, item=response.meta["data_proto"])
booked_seat_count = len(response.xpath(
'//img[contains(@src,"lb_non_selected")]'))
result.add_value('book_seat_count', booked_seat_count)
result.add_time_data()
yield result.load_item()
|
gas1121/JapanCinemaStatusSpider
|
scrapyproject/showingspiders/united.py
|
Python
|
mit
| 8,448
|
[
"VisIt"
] |
11247f8a0de75c8c39a7413add487402306af43acf9e5e5bccaa9cbdf2978a35
|
# -*- coding: utf-8 -*-
"""
For an undirected graph with tree characteristics, we can choose any node as the root. The result graph is then a rooted tree. Among all possible rooted trees, those with minimum height are called minimum height trees (MHTs). Given such a graph, write a function to find all the MHTs and return a list of their root labels.
Format
The graph contains n nodes which are labeled from 0 to n - 1. You will be given the number n and a list of undirected edges (each edge is a pair of labels).
You can assume that no duplicate edges will appear in edges. Since all edges are undirected, [0, 1] is the same as [1, 0] and thus will not appear together in edges.
Example 1:
Given n = 4, edges = [[1, 0], [1, 2], [1, 3]]
0
|
1
/ \
2 3
return [1]
Example 2:
Given n = 6, edges = [[0, 3], [1, 3], [2, 3], [4, 3], [5, 4]]
0 1 2
\ | /
3
|
4
|
5
return [3, 4]
Hint:
How many MHTs can a graph have at most?
Note:
(1) According to the definition of tree on Wikipedia: “a tree is an undirected graph in which any two vertices are connected by exactly one path. In other words, any connected graph without simple cycles is a tree.”
(2) The height of a rooted tree is the number of edges on the longest downward path between the root and a leaf.
"""
class Solution(object):
"""
:type n: int
:type edges: List[List[int]]
:rtype: List[int]
"""
def findMinHeightTrees(self, n, edges):
        # the idea is to move from the leaf nodes inward
        # till we end up with either one or two roots,
        # the same idea as a topological sort
# base case
if n == 1: return [0]
        # keep track of the undirected edges
adj = [set() for i in range(n)]
for i, j in edges:
adj[i].add(j)
adj[j].add(i)
# leaves are those nodes that have in-degree of length 1
leaves = [i for i in range(n) if len(adj[i]) == 1]
# do BFS topological sorting
while n > 2:
n -= len(leaves)
# next level to the current leaves
next_leaves = []
            # visit all neighbors of each leaf
for i in leaves:
                # no need to visit all i neighbors, we are only interested
# in the shortest path so any neighbor is valid
j = adj[i].pop()
adj[j].remove(i)
                # new leaf found
if len(adj[j]) == 1:
next_leaves.append(j)
# set next level to be visited
leaves = next_leaves
return leaves
s = Solution()
print s.findMinHeightTrees(4, [[1,0],[1,2],[1,3]])
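# Added check (not in the original submission): Example 2 from the problem
# statement; the expected output is [3, 4].
print s.findMinHeightTrees(6, [[0, 3], [1, 3], [2, 3], [4, 3], [5, 4]])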
|
Ahmed--Mohsen/leetcode
|
minimum_height_trees.py
|
Python
|
mit
| 2,530
|
[
"VisIt"
] |
5538c4bd82f4ad42616b5658f23da8f2d291fd17e172189d96f4a29f7bdc8f0e
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sympy as sp
import re
'''
Calculus methods
'''
def eye2():
return sp.Matrix([[sp.Integer(1), sp.Integer(0)], [sp.Integer(0), sp.Integer(1)]])
def zeroVec2():
return sp.Matrix([sp.Integer(0), sp.Integer(0)])
def gradVec2(u_vec, x, y):
return sp.Matrix([[sp.diff(u_vec[0], x), sp.diff(u_vec[1],x)], [sp.diff(u_vec[0], y), sp.diff(u_vec[1], y)]])
def divTen2(tensor, x, y):
return sp.Matrix([sp.diff(tensor[0,0], x) + sp.diff(tensor[1,0], y), sp.diff(tensor[0, 1], x) + sp.diff(tensor[1,1], y)])
def divVec2(u_vec, x, y):
return sp.diff(u_vec[0], x) + sp.diff(u_vec[1], y)
def gradScalar2(u, x, y):
return sp.Matrix([sp.diff(u, x), sp.diff(u,y)])
def strain_rate(u_vec, x, y):
return gradVec2(u_vec, x, y) + gradVec2(u_vec, x, y).transpose()
def strain_rate_squared_2(u_vec, x, y):
tensor = gradVec2(u_vec, x, y) + gradVec2(u_vec, x, y).transpose()
rv = 0
for i in range(2):
for j in range(2):
rv += tensor[i, j] * tensor[i, j]
return rv
def laplace2(u, x, y):
return sp.diff(sp.diff(u, x), x) + sp.diff(sp.diff(u, y), y)
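# Illustrative note (added): with x, y = sp.var('x y') and u = x**2 + y**2, these
# helpers give gradScalar2(u, x, y) == Matrix([2*x, 2*y]) and laplace2(u, x, y) == 4.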
'''
Kernel operators and corresponding surface integral terms
'''
def L_advection(u, x, y):
ax, ay = sp.var('ax ay')
return sp.Matrix([ax, ay]).transpose() * sp.Matrix([sp.diff(u, x), sp.diff(u, y)])
def L_diffusion(u, x, y):
return -laplace2(u, x, y)
def bc_terms_diffusion(u, nvec, x, y):
return (-nvec.transpose() * gradScalar2(u, x, y))[0,0]
def L_momentum_traction(uvec, p, k, eps, x, y):
cmu = 0.09
mu, rho = sp.var('mu rho')
visc_term = (-mu * divTen2(gradVec2(uvec, x, y) + gradVec2(uvec, x, y).transpose(), x, y)).transpose()
conv_term = rho * uvec.transpose() * gradVec2(uvec, x, y)
pressure_term = gradScalar2(p, x, y).transpose()
turbulent_visc_term = -(divTen2(rho * cmu * k**2 / eps * (gradVec2(uvec, x, y) + gradVec2(uvec, x, y).transpose()), x, y)).transpose()
source = conv_term + visc_term + pressure_term + turbulent_visc_term
return source
def bc_terms_momentum_traction(uvec, nvec, p, k, eps, x, y, symbolic=True, parts=True):
if symbolic:
cmu = sp.var('c_{\mu}')
else:
cmu = 0.09
mu, rho = sp.var('mu rho')
visc_term = (-mu * nvec.transpose() * (gradVec2(uvec, x, y) + gradVec2(uvec, x, y).transpose())).transpose()
if parts:
pressure_term = (nvec.transpose() * eye2() * p).transpose()
else:
pressure_term = zeroVec2()
turbulent_visc_term = -(nvec.transpose() * (rho * cmu * k**2 / eps * (gradVec2(uvec, x, y) + gradVec2(uvec, x, y).transpose()))).transpose()
return visc_term + turbulent_visc_term + pressure_term
def L_momentum_traction_no_turbulence(uvec, p, x, y):
mu, rho = sp.var('mu rho')
visc_term = (-mu * divTen2(gradVec2(uvec, x, y) + gradVec2(uvec, x, y).transpose(), x, y)).transpose()
conv_term = rho * uvec.transpose() * gradVec2(uvec, x, y)
pressure_term = gradScalar2(p, x, y).transpose()
source = conv_term + visc_term + pressure_term
return source
def L_stokes_traction(uvec, p, x, y):
mu, rho = sp.var('mu rho')
visc_term = (-mu * divTen2(gradVec2(uvec, x, y) + gradVec2(uvec, x, y).transpose(), x, y)).transpose()
pressure_term = gradScalar2(p, x, y).transpose()
source = visc_term + pressure_term
return source
def bc_terms_momentum_traction_no_turbulence(uvec, nvec, p, x, y, parts=True):
mu, rho = sp.var('mu rho')
visc_term = (-mu * nvec.transpose() * strain_rate(uvec, x, y)).transpose()
if parts:
pressure_term = (nvec.transpose() * eye2() * p).transpose()
else:
pressure_term = zeroVec2()
return visc_term + pressure_term
def L_momentum_laplace(uvec, p, k, eps, x, y):
cmu = 0.09
mu, rho = sp.var('mu rho')
visc_term = (-mu * divTen2(gradVec2(uvec, x, y), x, y)).transpose()
conv_term = rho * uvec.transpose() * gradVec2(uvec, x, y)
pressure_term = gradScalar2(p, x, y).transpose()
turbulent_visc_term = -(divTen2(rho * cmu * k**2 / eps * (gradVec2(uvec, x, y)), x, y)).transpose()
source = conv_term + visc_term + pressure_term + turbulent_visc_term
return source
def L_momentum_laplace_no_turbulence(uvec, p, x, y):
mu, rho = sp.var('mu rho')
visc_term = (-mu * divTen2(gradVec2(uvec, x, y), x, y)).transpose()
conv_term = rho * uvec.transpose() * gradVec2(uvec, x, y)
pressure_term = gradScalar2(p, x, y).transpose()
source = conv_term + visc_term + pressure_term
return source
def L_stokes(uvec, p, x, y):
mu, rho = sp.var('mu rho')
visc_term = (-mu * divTen2(gradVec2(uvec, x, y), x, y)).transpose()
pressure_term = gradScalar2(p, x, y).transpose()
source = visc_term + pressure_term
return source
def L_pressure(uvec, x, y):
return -divVec2(uvec, x, y)
def L_kin(uvec, k, eps, x, y):
cmu = 0.09
sigk = 1.
sigeps = 1.3
c1eps = 1.44
    c2eps = 1.92
    mu, rho = sp.var('mu rho')
conv_term = rho * uvec.transpose() * gradScalar2(k, x, y)
diff_term = - divVec2((mu + rho * cmu * k**2 / eps / sigk) * gradScalar2(k, x, y), x, y)
creation_term = - rho * cmu * k**2 / eps / 2 * strain_rate_squared_2(uvec, x, y)
destruction_term = rho * eps
terms = [conv_term[0,0], diff_term, creation_term, destruction_term]
L = 0
for term in terms:
L += term
return L
def L_eps(uvec, k, eps, x, y):
cmu = 0.09
sigk = 1.
sigeps = 1.3
c1eps = 1.44
    c2eps = 1.92
    mu, rho = sp.var('mu rho')
conv_term = rho * uvec.transpose() * gradScalar2(eps, x, y)
diff_term = - divVec2((mu + rho * cmu * k**2 / eps / sigeps) * gradScalar2(eps, x, y), x, y)
creation_term = - rho * c1eps * cmu * k / 2 * strain_rate_squared_2(uvec, x, y)
destruction_term = rho * c2eps * eps**2 / k
terms = [conv_term[0,0], diff_term, creation_term, destruction_term]
L = 0
for term in terms:
L += term
return L
def L_coupled_gradient_source(v, x, y):
return (-gradScalar2(v, x, y).transpose() * gradScalar2(v, x, y))[0,0]
def bc_terms_eps(nvec, k, eps, x, y):
cmu = 0.09
sigeps = 1.3
mu, rho = sp.var('mu rho')
return - nvec.transpose() * (mu + rho * cmu * k**2 / eps / sigeps) * gradScalar2(eps, x, y)
'''
Boundary condition operators
'''
def wall_function_momentum_traction(uvec, nvec, p, k, eps, x, y, tau_type, symbolic=True, parts=True):
if symbolic:
cmu = sp.var('c_{\mu}')
yStarPlus = sp.var('y_{\mu}')
else:
cmu = 0.09
yStarPlus = 11.06
if tau_type == "vel":
uvec_norm = sp.sqrt(uvec.transpose() * uvec)[0, 0]
uTau = uvec_norm / yStarPlus
elif tau_type == "kin":
uTau = cmu**.25 * sp.sqrt(k)
else:
raise ValueError("Must either pass 'vel' or 'kin' for tau_type")
mu, rho = sp.var('mu rho')
normal_stress_term = (-nvec.transpose() * mu * strain_rate(uvec, x, y) * nvec * nvec.transpose()).transpose()
tangential_stress_term = uTau / yStarPlus * uvec
muT = rho * cmu * k * k / eps
turbulent_stress_term = (-nvec.transpose() * muT * strain_rate(uvec, x, y)).transpose()
if parts:
pressure_term = (nvec.transpose() * eye2() * p).transpose()
else:
pressure_term = zeroVec2()
return normal_stress_term + tangential_stress_term + turbulent_stress_term + pressure_term
def no_bc_bc(uvec, nvec, p, x, y, parts=True):
mu, rho = sp.var('mu rho')
visc_term = (-mu * nvec.transpose() * strain_rate(uvec, x, y)).transpose()
import pdb; pdb.set_trace()
if parts:
pressure_term = (nvec.transpose() * eye2() * p).transpose()
else:
pressure_term = zeroVec2()
return visc_term + pressure_term
def vacuum(u, nvec):
return u / sp.Integer(2)
def ins_epsilon_wall_function_bc(nvec, k, eps, x, y):
cmu = 0.09
sigEps = 1.3
kappa = 0.41
mu, rho = sp.var('mu rho')
muT = rho * cmu * k**2 / eps
return - (mu + muT / sigEps) * kappa * cmu**.25 * sp.sqrt(k) * eps * rho / muT
def coupled_gradient_bc(nvec, v, x, y):
return (-nvec.transpose() * gradScalar2(v, x, y))[0,0]
def coupled_value_bc(v, x, y):
return -v
'''
Writing utilities
'''
def prep_moose_input(sym_expr):
rep1 = re.sub(r'\*\*',r'^',str(sym_expr))
rep2 = re.sub(r'mu',r'${mu}',rep1)
rep3 = re.sub(r'rho',r'${rho}',rep2)
rep4 = re.sub(r'ax', r'${ax}', rep3)
rep5 = re.sub(r'ay', r'${ay}', rep4)
return rep5
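# Minimal usage sketch (added for illustration; not part of the MOOSE module). The
# manufactured u and p below are arbitrary choices, used only to show how a forcing
# function can be converted into a MOOSE-ready string.
if __name__ == '__main__':
    x, y = sp.var('x y')
    uvec = sp.Matrix([sp.sin(x) * sp.cos(y), -sp.cos(x) * sp.sin(y)])
    p = sp.cos(x) * sp.cos(y)
    print(prep_moose_input(L_momentum_traction_no_turbulence(uvec, p, x, y)[0]))
    print(prep_moose_input(L_pressure(uvec, x, y)))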
|
nuclear-wizard/moose
|
modules/navier_stokes/python/ins_calc_routines.py
|
Python
|
lgpl-2.1
| 8,672
|
[
"MOOSE"
] |
edcba0b09037f729857b1b4c68b75692d48b7b0ab769c79f70b56f3670506219
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# peakfinder.py
"""
Class for finding blobs. Encapsulates a difference of gaussians (DoG)
algorithm and exposes methods to easily interact with the data and
results.
Copyright (c) 2016, David Hoffman
"""
import logging
# need math log too, for arbitrary base
from math import log
import dask
# we need a few extra features from matplot lib
import matplotlib.pyplot as plt
# Get our numerical stuff
import numpy as np
# need pandas for better data containers
import pandas as pd
import tqdm
# plotting
from dphtools.display import display_grid
from dphtools.utils import fft_gaussian_filter, mode, slice_maker
# ndimage imports
from scipy.ndimage import (
gaussian_filter,
maximum_filter,
median_filter,
minimum_filter,
uniform_filter1d,
)
from scipy.ndimage.measurements import find_objects, label
# specialty numpy and scipy imports
from scipy.signal import argrelmax
from scipy.spatial import cKDTree
from skimage._shared.utils import check_nD
# the difference of Gaussians algorithm
from skimage.draw import circle
from skimage.feature.peak import peak_local_max
from skimage.util import img_as_float
# import our 2D gaussian fitting class
from .gauss2d import Gauss2D, Gauss2Dz
logger = logging.getLogger(__name__)
from dask.diagnostics import ProgressBar
class PeakFinder(object):
"""
A class to find peaks in image data and then fit them.
Peak finder takes 2D data that is assumed to be made up of relatively
sparse, approximately gaussian peaks. To estimate the positions of the
peaks the [difference of Gaussians](https://en.wikipedia.org/wiki/Difference_of_Gaussians)
algorithm is used as implemented in `skimage`. Once peaks have been found
they are fit to a Gaussian function using the `Gauss2D` class in this
package. Peak data is saved in a pandas DataFrame
Parameters
----------
data : ndarray
2D data containing sparse gaussian peaks, ideally any background
should be removed prior to construction
sigma : float, optional, default: 1.0
the estimated width of the peaks
"""
def __init__(self, data, sigma=1.0, background="median"):
# some error checking
if not isinstance(data, np.ndarray):
raise TypeError("data is not a numpy array")
if data.ndim != 2:
raise ValueError("The parameter `data` must be a 2-dimensional array")
self._data = data
# make an initial guess of the threshold
if isinstance(background, str):
self.estimate_background(background)
else:
self.thresh = background
self._blobs = None
# estimated width of the blobs
self._blob_sigma = sigma
self._labels = None
# peak coefs from fits
self._fits = None
########################
# PROPERTY DEFINITIONS #
########################
@property
def data(self):
"""
The data contained in the PeakFinder object
"""
# This attribute should be read-only, which means that it should return
# a copy of the data not a pointer.
return self._data
@property
def fits(self):
"""Optimized parameters from the fit"""
# User should not be able to modify this, so return copy
return self._fits.copy()
@property
def blobs(self):
"""Estimated peak locations"""
# User should not be able to modify this, so return copy
# sort blobs by the max amp value, descending
blobs = self._blobs
return blobs[blobs[:, -1].argsort()][::-1]
@blobs.setter
def blobs(self, value):
if not isinstance(value, np.ndarray):
raise TypeError("Blobs must be an ndarray")
if value.ndim != 2:
raise TypeError("Blobs don't have the right dimensions")
if value.shape[-1] != 4:
raise TypeError("Blobs don't have enough variables")
# use a copy so that changes on the outside don't affect the internal
# variable
self._blobs = value.copy()
@property
def labels(self):
"""
Estimated peak locations
"""
# User should not be able to modify this, so return copy
return self._labels.copy()
@property
def thresh(self):
"""Threshold for peak detection"""
return self._thresh
@thresh.setter
def thresh(self, value):
self._thresh = value
@property
def blob_sigma(self):
"""Estimated Peak width"""
return self._blob_sigma
@blob_sigma.setter
def blob_sigma(self, value):
self._blob_sigma = value
###########
# Methods #
###########
def estimate_background(self, method="median"):
"""Estimate the background/threshold of the data
Two methods are available:
- "median" : calculates median value of data as thresh
- "mode" : if the data type is inexact it uses a histogram
to estimate the mode, if the data is an unsigned integer
then it uses `bincount`
The result of the method is that the `thresh` property is set
for the instance.
"""
if method == "median":
self.thresh = np.median(self.data)
elif method == "mode":
if np.issubdtype(self.data.dtype, np.inexact):
hist, bins = np.histogram(self.data.ravel(), "auto")
maxval = hist.argmax()
# choose center of bin, not edges
self.thresh = (bins[maxval] + bins[maxval + 1]) / 2
elif np.issubdtype(self.data.dtype, np.unsignedinteger):
self.thresh = mode(self.data)
else:
raise TypeError("Invalid type for method 'mode' {}".format(self.data.dtype))
else:
raise ValueError("Invalid option for `method`: {}".format(method))
logger.debug("Threshold = {}".format(self.thresh))
def find_blobs(self, method="dog", **kwargs):
"""
Estimate peak locations by using a difference of Gaussians algorithm
Parameters
----------
min_sigma : float
smallest sigma for DOG
Returns
-------
blobs : ndarray
blob parameters ordered as `y`, `x`, `sigma`, `amp`
"""
# cast to float
data = self.data.astype(float)
# take care of the default kwargs with 'good' values
default_kwargs = {
"min_sigma": self.blob_sigma / np.sqrt(1.6),
"max_sigma": self.blob_sigma * np.sqrt(1.6) * 0.9,
"threshold": self.thresh,
}
# update default_kwargs with user passed kwargs
default_kwargs.update(kwargs)
# double check sigmas
if default_kwargs["min_sigma"] >= default_kwargs["max_sigma"]:
default_kwargs["max_sigma"] = default_kwargs["min_sigma"]
# Perform the DOG
if method.lower() == "dog":
# NOTE: the threshold for `blob_dog` is the threshold in scale
# space i.e. the threshold is not intuitively clear.
blobs = better_blob_dog(data, **default_kwargs)
else:
raise NotImplementedError
# if no peaks found alert the user, but don't break their program
if blobs is None or len(blobs) == 0:
logger.warning("No peaks found")
else:
# blobs, as returned, has the third index as the estimated width
# for our application it will be beneficial to have the intensity
# at the estimated center as well
footprint = np.round(self.blob_sigma * 5)
max_img = maximum_filter(data, footprint)
# we just use mode, faster and more accurate for low
# background images.
diff_img = max_img - mode(data.astype(int))
y, x, s = blobs.T
blobs = np.vstack((y, x, s, diff_img[y.astype(int), x.astype(int)])).T
self._blobs = blobs
return self.blobs
def label_blobs(self, diameter=None):
"""
This function will create a labeled image from blobs
essentially it will be circles at each location with diameter of
4 sigma
"""
tolabel = np.zeros_like(self.data)
try:
blobs = self.blobs
except AttributeError:
# try to find blobs
blobs = self.find_blobs()
# if blobs is still none, exit
if blobs is None:
logger.warning("Labels could not be generated")
return None
# Need to make this an ellipse using both sigmas and angle
for blob in blobs:
if diameter is None:
radius = blob[2] * 4
else:
radius = diameter
rr, cc = circle(blob[0], blob[1], radius, self._data.shape)
tolabel[rr, cc] = 1
labels, num_labels = label(tolabel)
if num_labels != len(blobs):
logger.warning("Blobs have melded, fitting may be difficult")
self._labels = labels
return labels
def plot_blob_grid(self, window=11, **kwargs):
"""Display a grid of blobs"""
return display_grid(
{
i: self.data[slice_maker((y, x), window)]
for i, (y, x, s, r) in enumerate(self.blobs)
},
**kwargs
)
def plot_fits(self, window_width, residuals=False, **kwargs):
"""Generate a plot of the found peaks, individually"""
# check if the fitting has been performed yet, warn user if it hasn't
if self._fits is None:
raise RuntimeError("Blobs have not been fit yet, cannot show fits")
else:
fits = self._fits
# pull the labels and the data from the object
data = self.data
# find objects from labelled data
my_objects = [slice_maker(center, window_width) for center in fits[["y0", "x0"]].values]
# generate a nice layout
nb_labels = len(my_objects)
nrows = int(np.ceil(np.sqrt(nb_labels)))
ncols = int(np.ceil(nb_labels / nrows))
fig, axes = plt.subplots(nrows, ncols, figsize=(3 * ncols, 3 * nrows))
for n, (obj, ax) in enumerate(zip(my_objects, axes.ravel())):
ex = (obj[1].start, obj[1].stop - 1, obj[0].stop - 1, obj[0].start)
ax.set_title(n)
ax.grid("off")
# generate the model fit to display, from parameters.
dict_params = dict(fits.loc[n].dropna())
# recenter
dict_params["x0"] -= obj[1].start
dict_params["y0"] -= obj[0].start
params = Gauss2D.dict_to_params(dict_params)
fake_data = Gauss2D.gen_model(data[obj], *params)
if residuals:
ax.matshow(data[obj] - fake_data, extent=ex, **kwargs)
else:
ax.matshow(data[obj], extent=ex, **kwargs)
ax.contour(fake_data, extent=ex, colors="w", origin="image")
# # Remove empty plots
for ax in axes.ravel():
if not (len(ax.images)) and not (len(ax.lines)):
fig.delaxes(ax)
fig.tight_layout()
# return the fig and axes handles to user for later manipulation.
return fig, axes
def filter_blobs(self, minamp=None, maxamp=None):
amps = self.blobs[:, 3]
if maxamp is None:
maxamp = amps.max()
        if minamp is None:
minamp = amps.min()
self.blobs = self.blobs[np.logical_and(maxamp > amps, amps > minamp)]
return self.blobs
def fit_blobs(self, width=10, poly_coefs_df=None, **kwargs):
"""Fit blobs to Gaussian funtion.
Parameters
----------
width : int
The size of the fitting window in pixels
**kwargs is for Gauss2D optimize_params
"""
# If we don't have blobs, find them.
if self._blobs is None:
self.find_blobs()
@dask.delayed
def fitfunc(win, sub_data):
# fit the data as we should
if poly_coefs_df is None:
mypeak = Gauss2D(sub_data)
else:
mypeak = Gauss2Dz(sub_data, poly_coefs_df)
# optimize params
mypeak.optimize_params(**kwargs)
fit_coefs = mypeak.all_params_dict()
# need to place the fit coefs in the right place
fit_coefs["y0"] += win[0].start
fit_coefs["x0"] += win[1].start
# Calc SNR for each peak
fit_coefs["noise"] = mypeak.noise
fit_coefs["SNR"] = fit_coefs["amp"] / fit_coefs["noise"]
return fit_coefs
# iterate through blobs
windows = [slice_maker((int(y), int(x)), width) for y, x, s, r in self.blobs]
data_to_fit = [self.data[win] for win in windows]
peakfits = dask.delayed(
[fitfunc(win, sub_data) for win, sub_data in zip(windows, data_to_fit)]
)
# construct DataFrame
peakfits_df = pd.DataFrame(peakfits.compute())
# internalize DataFrame
self._fits = peakfits_df
# Return it to user
return peakfits_df
fit_blobs.__doc__ += Gauss2D.optimize_params.__doc__
def prune_blobs(self, radius):
"""
Pruner method takes blobs list with the third column replaced by
intensity instead of sigma and then removes the less intense blob
        if it is within `radius` of a more intense blob.
Adapted from _prune_blobs in skimage.feature.blob
Parameters
----------
blobs : ndarray
A 2d array with each row representing 3 values,
`(y, x, intensity)` where `(y, x)` are coordinates
of the blob and `intensity` is the intensity of the
blob (value at (x, y)).
        radius : float
Allowed spacing between blobs
Returns
-------
A : ndarray
`array` with overlapping blobs removed.
"""
# make a copy of blobs otherwise it will be changed
# create the tree
blobs = self.blobs
kdtree = cKDTree(blobs[:, :2])
# query all pairs of points within diameter of each other
list_of_conflicts = list(kdtree.query_pairs(radius))
# sort the collisions by max amplitude of the pair
# we want to deal with collisions between the largest
# blobs and nearest neighbors first:
# Consider the following sceneario in 1D
# A-B-C
# are all the same distance and colliding with amplitudes
# A > B > C
# if we start with the smallest, both B and C will be discarded
# If we start with the largest, only B will be
# Sort in descending order
list_of_conflicts.sort(key=lambda x: max(blobs[x[0], -1], blobs[x[1], -1]), reverse=True)
# indices of pruned blobs
pruned_blobs = set()
# loop through conflicts
for idx_a, idx_b in list_of_conflicts:
# see if we've already pruned one of the pair
if (idx_a not in pruned_blobs) and (idx_b not in pruned_blobs):
# compare based on amplitude
if blobs[idx_a, -1] > blobs[idx_b, -1]:
pruned_blobs.add(idx_b)
else:
pruned_blobs.add(idx_a)
# generate the pruned list
# pruned_blobs_set = {(blobs[i, 0], blobs[i, 1])
# for i in pruned_blobs}
# set internal blobs array to blobs_array[blobs_array[:, 2] > 0]
self._blobs = blobs[[i for i in range(len(blobs)) if i not in pruned_blobs]]
# Return a copy of blobs incase user wants a one-liner
return self.blobs
def remove_edge_blobs(self, distance):
"""Remove blobs that are less than `distance` away from the image
edge"""
# find the maximum limits of the data
ymax, xmax = self._data.shape
        # build a new array filtering out any blobs that are too close to
# the edge of the image
my_blobs = np.array(
[
blob
for blob in self.blobs
if (
(distance < blob[0] < ymax - distance)
and (distance < blob[1] < xmax - distance)
)
]
)
# resort the blobs, largest to smallest
if len(my_blobs) > 0:
my_blobs = my_blobs[my_blobs[:, 3].argsort()]
# set the internals and return them
self._blobs = my_blobs
return self.blobs
def plot_blobs(self, diameter=None, size=6, with_labels=True, **kwargs):
"""Plot the found blobs
Parameters
----------
diameter : numeric
diameter of the circles to draw, if omitted
the diameter will be 4 times the estimated
sigma
size : int
The size of the final plot
**kwargs : key word arguments
Any extra keyword arguments are passed along to plt.matshow
Returns
-------
fig, axs : plt.figure, ndarray of plt.axes
"""
if self.blobs is None:
raise RuntimeError("No blobs have been found")
ny, nx = self.data.shape
fig, ax = plt.subplots(1, 1, figsize=(size, size * ny / nx))
ax.matshow(self.data, **kwargs)
if with_labels:
for i, blob in enumerate(self.blobs):
y, x, s, r = blob
if diameter is None:
diameter = s * 4
c = plt.Circle(
(x, y),
radius=diameter / 2,
color="r",
linewidth=1,
fill=False,
transform=ax.transData,
)
ax.add_patch(c)
if not np.issubdtype(float, self.data.dtype):
r = int(r)
fmtstr = "{}"
else:
fmtstr = "{}:{:.0f}"
ax.annotate(
fmtstr.format(i, r),
xy=(x, y),
xytext=(x + diameter / 2, y + diameter / 2),
textcoords="data",
color="k",
backgroundcolor=(1, 1, 1, 0.5),
xycoords="data",
)
else:
ax.scatter(
self.blobs[:, 1],
self.blobs[:, 0],
s=self.blobs[:, 2] * 10,
marker="o",
facecolor="none",
edgecolor="w",
)
return fig, ax
def better_blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=0.03):
"""Finds blobs in the given grayscale image.
Blobs are found using the Difference of Gaussian (DoG) method [1]_.
For each blob found, the method returns its coordinates and the standard
deviation of the Gaussian kernel that detected the blob.
Parameters
----------
image : ndarray
Input grayscale image, blobs are assumed to be light on dark
background (white on black).
min_sigma : float, optional
The minimum standard deviation for Gaussian Kernel. Keep this low to
detect smaller blobs.
max_sigma : float, optional
The maximum standard deviation for Gaussian Kernel. Keep this high to
detect larger blobs.
sigma_ratio : float, optional
The ratio between the standard deviation of Gaussian Kernels used for
computing the Difference of Gaussians
threshold : float, optional.
The absolute lower bound for scale space maxima. Local maxima smaller
than thresh are ignored. Reduce this to detect blobs with less
intensities.
Returns
-------
A : (n, 3) ndarray
A 2d array with each row representing 3 values, ``(y, x, sigma)``
where ``(y, x)`` are coordinates of the blob and ``sigma`` is the
standard deviation of the Gaussian kernel which detected the blob.
References
----------
    .. [1] http://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach
Notes
-----
    The radius of each blob is approximately :math:`\sqrt{2}\sigma`.
"""
check_nD(image, 2)
image = img_as_float(image)
sigma_ratio = float(sigma_ratio)
# k such that min_sigma*(sigma_ratio**k) > max_sigma
k = int(log(float(max_sigma) / min_sigma, sigma_ratio)) + 1
# a geometric progression of standard deviations for gaussian kernels
sigma_list = np.array([min_sigma * (sigma_ratio ** i) for i in range(k + 1)])
# Use the faster fft_gaussian_filter to speed things up.
gaussian_images = [fft_gaussian_filter(image, s) for s in sigma_list]
# computing difference between two successive Gaussian blurred images
# multiplying with standard deviation provides scale invariance
dog_images = [(gaussian_images[i] - gaussian_images[i + 1]) * sigma_list[i] for i in range(k)]
image_cube = np.dstack(dog_images)
# peak_local_max is looking in the image_cube, so threshold should
# be scaled by differences in sigma, i.e. sigma_ratio
local_maxima = peak_local_max(
image_cube,
threshold_abs=threshold,
footprint=np.ones((3, 3, 3)),
threshold_rel=0.0,
exclude_border=False,
)
if local_maxima.size:
# Convert local_maxima to float64
lm = local_maxima.astype(np.float64)
# Convert the last index to its corresponding scale value
lm[:, 2] = sigma_list[local_maxima[:, 2]]
local_maxima = lm
return local_maxima
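# Illustrative note (added): a typical call, assuming `img` is a 2D ndarray with
# bright, roughly Gaussian spots, would be
#     blobs = better_blob_dog(img, min_sigma=1.0, max_sigma=4.0, threshold=0.02)
# which returns one (y, x, sigma) row per detected spot; the numbers are arbitrary.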
##############################################################################
# Spectral Peak Finding Part #
##############################################################################
class SpectralPeakFinder(object):
"""
A class used to find peaks in data that has one spatial and one spectral
and one time dimension
Data is assumed to have dimensions time (0), space (1), spectral (2)
"""
# NOTE that the way this class is implemented it does not hide any of its
# variables or methods from the user.
def __init__(self, data):
"""
A class designed to find peaks in spectral/spatial/time data
"""
if not isinstance(data, np.ndarray):
raise TypeError("data is not a numpy array")
# this is **VERY** data _un_aware!
# this makes a copy, which means that original data should be safe
# we're casting to a signed 32 bit int which has enough bit depth to
        # accommodate the original data (uint16) but also allows negative
# numbers.
self.data = data.astype(int)
self.peaks = None
def remove_background(self):
"""
Remove background from the data cube.
This method uses a relatively simple algorithm that first takes the
mean along the time dimension and then the median along the spatial
dimension
The assumption here is that peaks are relatively sparse along the
spatial dimension
NOTE: This function mutates the data internally
"""
# pull internal data
data = self.data
# take the median value along the time and spatial dimensions
# keep the dimensions so that broadcasting will work properly
# bg = np.median(data, axis=(0, 1), keepdims=True)
# this is much faster than the above but gives approximately the same
# results
bg = np.median(data.mean(0), 0)
self.data = data - bg
def fix_hot_pixels(self, cutoff=9):
"""
A method to remove "Salt and Pepper" noise from the image stack
This method assumes that hot pixels do not vary much with time and uses
this property to avoid performing a median filter for every time point.
Remember this function mutates the data internally
"""
# pull internal data
data = self.data
# calc the _mean_ projection
# the assumption is that if the hot pixel is in one frame it will be
# in all of them and the whole point of this method is to only perform
# the median filter once
mean_data = data.mean(0)
# do the one median filter, use a 3x3 footprint
# some articles suggest that a 2x2 is fine, but I'm not sure if I buy
# that
# NOTE: that because we're filtering _single_ pixels
mean_data_med = median_filter(mean_data, 3)
# subtract the median filtered data from the unfiltered data
data_minus = mean_data - mean_data_med
# calculate the z-score for each pixel
z_score = (data_minus - data_minus.mean()) / data_minus.std()
# find the points to remove
picked_points = (z_score > cutoff) * mean_data
# remove them from the data
data -= picked_points
# return the number of points removed
return np.count_nonzero(picked_points)
def fix_cosmic_rays(self, width, z_score_cutoff=2.5):
"""
Method to remove cosmic rays from good peaks.
Assumes that cosmic rays only show up for one frame and are *bright*
"""
# calculate the average around the peaks
mean_data_sum = uniform_filter1d(self.data, width, axis=1).sum(2)
z_score = (mean_data_sum.max(0) - mean_data_sum.mean(0)) / mean_data_sum.std(0)
bad_peaks = np.arange(len(z_score))[z_score > z_score_cutoff]
self.peaks = [p for p in self.peaks if p not in bad_peaks]
def calc_FoM(self, width, s_lambda=3, s_time=3, use_max=False):
"""
Calculate the figure of merit (FoM) of a dataset (t, x, and lambda)
        In this case our figure of merit is calculated, for each spatial position,
        as the z-score of the _maximum_ value over the time and spectral dimensions,
        after a moving average of width `width` along the spatial dimension.
Parameters
----------
data : ndarray (NxMxK)
            the array over which to calculate the SNR; assumes that it
has dimensions (time, position, spectrum)
width : int
            the width over which to calculate the average in the spatial
dimension
s_lambda : float (optional)
the width of the gaussian kernel along the spectral dimension
s_time : float (optional)
the width of the gaussian kernel along the time dimension
use_max : bool (optional)
whether to use the max projection or not, will significantly speed
up the calculation but will raise the noise floor in the process.
Returns
-------
FoM : ndarray (NxK)
The calculated figure of merit (FoM)
"""
# before we make another copy we should trash the old one, if it exists
# if we don't do this it can lead to a memory leak.
try:
del self.g_mean_data
except AttributeError:
pass
# First calculate the moving average of the data along the spatial
# dimension cast as float64 for better precision, this is necessary
# for the later gaussian filters, but might as well do it now to avoid
# making more copies of the data than necessary.
if use_max:
data = self.data.max(0, keepdims=True).astype(float)
else:
data = self.data.astype(float)
mean_data = uniform_filter1d(data, width, axis=1)
        # calculate the gaussian blur along the spectral and time dimensions
if s_time == 0 and s_lambda == 0:
g_mean_data = mean_data
else:
g_mean_data = gaussian_filter(mean_data, (s_time, 0, s_lambda))
g_mean_data_mean = g_mean_data.mean(axis=(0, 2))
g_mean_data_std = g_mean_data.std(axis=(0, 2))
g_mean_data_max = g_mean_data.max(axis=(0, 2))
FoM = (g_mean_data_max - g_mean_data_mean) / g_mean_data_std
self.FoM = FoM
self.g_mean_data = g_mean_data
def find_peaks(self, width, cutoff=7, cutoff_high=np.inf, presmooth=0, show=False):
"""
A function that finds peaks in the FoM trace.
"""
# find the local maxima in the SNR trace
# presmooth might make sense here
if presmooth:
FoM = gaussian_filter(self.FoM, presmooth)
width2 = int(2 * presmooth * np.sqrt(2 * np.log(2)))
elif presmooth is None:
FoM = gaussian_filter(self.FoM, width * (np.sqrt(2 * np.log(2))))
width2 = int(2 * width * (2 * np.log(2)))
else:
FoM = self.FoM
width2 = width
peaks = argrelmax(FoM * (FoM > cutoff), order=width)[0]
# here we look to see the *relative* intensity of the peak.
# set up our container
good_peaks = []
for p in peaks:
# find the lower side
pm = max(p - width2, 0)
# find the upper side
pp = min(p + width2, len(FoM) - 1)
# test if peak minus sides is within cutoff
# Below tests a *relative* cutoff
# should test an absolute cutoff as well
if FoM[p] - min(FoM[pm], FoM[pp]) > cutoff:
# if not, add peak
good_peaks.append(p)
# peaks = peaks[FoM[peaks] < cutoff_high]
# Show the peaks?
if show:
fig, ax = plt.subplots(1, 1)
ax.plot(FoM)
ax.plot(good_peaks, FoM[good_peaks], "ro")
ax.axis("tight")
self.peaks = good_peaks
def refine_peaks(self, window_width=8):
"""
A function that refines peaks.
Because of the way the FoM is calculated the highest SNR region isn't
identified because the noise is approximated by the std. This function
        will search the nearby area for a peak (using the smoothed data) and
will return that point instead.
Parameters
----------
window_width : int (optional)
the window in which to search for a peak.
"""
new_peaks = []
# take the max of the data along the time axis
max_data = self.g_mean_data.max(0)
ny, nx = max_data.shape
ny = window_width * 2
# NOTE: this implementation is pretty slow. But I'm not quite sure how
# to speed it up.
for peak in self.peaks:
# find the max
dy, dx = np.unravel_index(
max_data[peak - window_width : peak + window_width].argmax(), (ny, nx)
)
new_peaks.append(peak - window_width + dy)
self.peaks = np.array(new_peaks)
def _plot_peaks_lines(self):
"""
A helper function to plot a max intensity projection with redlines
marking the location of the found peaks.
"""
figmat, axmat = plt.subplots(1, 1, squeeze=True, sharex=True)
axmat.matshow(self.data.max(0))
axmat.set_yticks(self.peaks)
for peak in self.peaks:
axmat.axhline(peak, color="r")
def plot_peaks(self):
"""
A utility function to plot the found peaks.
"""
peaks = self.peaks
FoM = self.FoM
g_mean_data = self.g_mean_data
nz, ny, nx = g_mean_data.shape
# plot the found peaks in the SNR trace
print(g_mean_data.shape)
# self._plot_peaks_lines()
for peak in peaks:
# need to ensure a reasonable ratio
ratio = nz / nx
if ratio < 0.05:
ratio = 0.05
fig, (ax0, ax1) = plt.subplots(
2, 1, squeeze=True, sharex=True, figsize=(12, 12 * ratio * 2)
)
ax0.matshow(g_mean_data[:, peak, :])
ax0.axis("tight")
ax0.set_xticks([])
ax1.plot(g_mean_data[:, peak, :].max(0))
ax1.axis("tight")
fig.suptitle("{}, Max SNR {:.3f}".format(peak, FoM[peak]), y=1, fontsize=14)
fig.tight_layout()
class SpectralPeakFinder1d(SpectralPeakFinder):
"""
A class to find peaks in a single frame.
"""
def __init__(self, data):
# reshape the data so that it can use the previous methods without
# changes
super().__init__(data.reshape(1, *data.shape))
# overload the plot peaks function
def plot_peaks(self):
"""
A utility function to plot the found peaks.
"""
peaks = self.peaks
FoM = self.FoM
g_mean_data = self.g_mean_data
nz, ny, nx = g_mean_data.shape
# plot the found peaks in the SNR trace
self._plot_peaks_lines()
data_dict = {
"{}, Max SNR {:.3f}".format(peak, FoM[peak]): g_mean_data[0, peak, :] for peak in peaks
}
return display_grid(data_dict)
def fix_cosmic_rays(self, *args, **kwargs):
"""
This method is invalid for this type of data
"""
raise ValueError("This method is not valid for 1d data.")
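# Minimal usage sketch (added for illustration; not part of the original module).
# It builds a synthetic frame with two Gaussian spots and runs the DoG blob finder;
# the sigma, background and spot positions are arbitrary.
if __name__ == "__main__":
    yy, xx = np.mgrid[:128, :128]
    fake = np.zeros((128, 128))
    for cy, cx in ((40, 40), (90, 75)):
        fake += np.exp(-((yy - cy) ** 2 + (xx - cx) ** 2) / (2 * 2.0 ** 2))
    pf = PeakFinder(fake, sigma=2.0, background=0.05)
    print(pf.find_blobs())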
|
david-hoffman/peaks
|
peaks/peakfinder.py
|
Python
|
apache-2.0
| 33,428
|
[
"Gaussian"
] |
ff7a81993d468b8f0311ba7c8fae37dcb45bbaf2cef3a6e4fdf5a50909237440
|
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* Copyright (c) Schrodinger, LLC.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
if __name__=='pymol.externing':
import os
import pymol
import string
import parsing
import threading
import cmd
import traceback
from glob import glob
from cmd import _cmd,lock,unlock,Shortcut,QuietException, \
_feedback,fb_module,fb_mask, exp_path, \
DEFAULT_ERROR, DEFAULT_SUCCESS, _raising, is_ok, is_error
def cd(dir="~",complain=1,quiet=1):
'''
DESCRIPTION
"cd" changes the current working directory.
USAGE
cd <path>
SEE ALSO
pwd, ls, system
'''
dir = exp_path(dir)
try:
os.chdir(dir) # raises on error
if not quiet:
print " cd: now in %s"%os.getcwd()
except:
if complain:
traceback.print_exc()
return DEFAULT_SUCCESS
def pwd():
'''
DESCRIPTION
Print current working directory.
USAGE
pwd
SEE ALSO
cd, ls, system
'''
print os.getcwd()
return DEFAULT_SUCCESS
def ls(pattern=None):
'''
DESCRIPTION
List contents of the current working directory.
USAGE
ls [pattern]
dir [pattern]
EXAMPLES
ls
ls *.pml
SEE ALSO
cd, pwd, system
'''
if pattern==None:
pattern = "*"
else:
pattern = exp_path(pattern)
if '*' not in pattern:
lst = glob(os.path.join(pattern, '*'))
else:
lst = []
if not len(lst):
lst = glob(pattern)
if len(lst):
lst.sort()
lst = parsing.list_to_str_list(lst)
for a in lst:
print a
else:
print " ls: Nothing found. Is that a valid path?"
return DEFAULT_SUCCESS
def system(command,async=0,_self=cmd):
'''
DESCRIPTION
"system" executes a command in a subshell under Unix or Windows.
USAGE
system command
PYMOL API
cmd.system(string command,int async=0)
NOTES
async can only be specified from the Python level (not the command language)
if async is 0 (default), then the result code from "system" is returned in r
if async is 1, then the command is run in a separate thread whose object is
returned
SEE ALSO
ls, cd, pwd
'''
r = None
if async:
r = threading.Thread(target=_cmd.system,args=(str(command),1))
r.start()
else:
r = _cmd.system(_self._COb,str(command),0)
return r # special meaning
def paste(_self=cmd): # INTERNAL
r=DEFAULT_SUCCESS
lst = []
if hasattr(pymol,"machine_get_clipboard"):
lst = pymol.machine_get_clipboard()
if len(lst):
new_lst = []
for a in lst:
while len(a):
if ord(a[-1])>32:
break
else:
a=a[:-1]
# if nothing in the queue, this special string is printed; so
# we ignore it
if len(a):
if a=="""PRIMARY selection doesn't exist or form "STRING" not defined""":
new_list = []
else:
new_lst.append(a)
r = _cmd.paste(_self._COb,new_lst)
if _raising(r,_self): raise pymol.CmdException
return r
|
gratefulfrog/lib
|
python/pymol/externing.py
|
Python
|
gpl-2.0
| 4,073
|
[
"PyMOL"
] |
b20e74502fd838eee511518f7ced367087a0ec08caf29fa6158f9cc677cdcb37
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import importlib
from importlib.metadata import version as get_version
from DIRAC import S_OK
from DIRAC.Core.Utilities.Extensions import extensionsByPriority
def getCurrentVersion():
"""Get a string corresponding to the current version of the DIRAC package and all the installed
extension packages
"""
for ext in extensionsByPriority():
try:
return S_OK(importlib.import_module(ext).version)
except (ImportError, AttributeError):
pass
def getVersion():
"""Get a dictionary corresponding to the current version of the DIRAC package and all the installed
extension packages
"""
vDict = {"Extensions": {}}
for ext in extensionsByPriority():
version = get_version(ext)
vDict["Extensions"][ext] = version
return S_OK(vDict)
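# Illustrative note (added): the dictionary returned by getVersion() has the shape
#     {"Extensions": {"DIRAC": "8.0.0", "SomeExtension": "1.2.3"}}
# wrapped in S_OK(); the extension names and version strings here are placeholders.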
|
ic-hep/DIRAC
|
src/DIRAC/Core/Utilities/Version.py
|
Python
|
gpl-3.0
| 952
|
[
"DIRAC"
] |
6cf2d61c050fa9549cbff4a922a53ce6b4c1598318ab30cfb349ad0dc5b3ae5d
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# freeseer - vga/presentation capture software
#
# Copyright (C) 2011, 2013 Free and Open Source Software Learning Centre
# http://fosslc.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For support, questions, suggestions or any other inquiries, visit:
# http://wiki.github.com/Freeseer/freeseer/
from PyQt4.QtCore import QString
from PyQt4.QtCore import SIGNAL
from PyQt4.QtGui import QCheckBox
from PyQt4.QtGui import QComboBox
from PyQt4.QtGui import QDialog
from PyQt4.QtGui import QFont
from PyQt4.QtGui import QFormLayout
from PyQt4.QtGui import QHBoxLayout
from PyQt4.QtGui import QIcon
from PyQt4.QtGui import QLabel
from PyQt4.QtGui import QLineEdit
from PyQt4.QtGui import QPixmap
from PyQt4.QtGui import QPushButton
from PyQt4.QtGui import QWidget
from PyQt4.QtGui import QVBoxLayout
try:
_fromUtf8 = QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
from freeseer.frontend.qtcommon import resource # noqa
class ReportDialog(QDialog):
"""Failure report dialog for Freeseer"""
def __init__(self, parent=None):
QWidget.__init__(self, parent)
icon = QIcon()
icon.addPixmap(QPixmap(_fromUtf8(":/freeseer/logo.png")), QIcon.Normal, QIcon.Off)
self.setWindowIcon(icon)
self.mainWidget = QWidget()
self.mainLayout = QVBoxLayout()
self.setLayout(self.mainLayout)
boldFont = QFont()
boldFont.setBold(True)
self.infoLayout = QFormLayout()
self.mainLayout.addLayout(self.infoLayout)
self.reportLayout = QHBoxLayout()
self.mainLayout.addLayout(self.reportLayout)
self.buttonLayout = QHBoxLayout()
self.mainLayout.addLayout(self.buttonLayout)
        # Talk information
self.titleLabel = QLabel("Title:")
self.titleLabel2 = QLabel()
self.titleLabel2.setFont(boldFont)
self.speakerLabel = QLabel("Speaker:")
self.speakerLabel2 = QLabel()
self.speakerLabel2.setFont(boldFont)
self.eventLabel = QLabel("Event:")
self.eventLabel2 = QLabel()
self.eventLabel2.setFont(boldFont)
self.roomLabel = QLabel("Room:")
self.roomLabel2 = QLabel()
self.roomLabel2.setFont(boldFont)
self.startTimeLabel = QLabel("Start Time:")
self.startTimeLabel2 = QLabel()
self.startTimeLabel2.setFont(boldFont)
self.endTimeLabel = QLabel("End Time:")
self.endTimeLabel2 = QLabel()
self.endTimeLabel2.setFont(boldFont)
self.infoLayout.addRow(self.titleLabel, self.titleLabel2)
self.infoLayout.addRow(self.speakerLabel, self.speakerLabel2)
self.infoLayout.addRow(self.eventLabel, self.eventLabel2)
self.infoLayout.addRow(self.roomLabel, self.roomLabel2)
self.infoLayout.addRow(self.startTimeLabel, self.startTimeLabel2)
self.infoLayout.addRow(self.endTimeLabel, self.endTimeLabel2)
#Report
self.commentLabel = QLabel("Comment")
self.commentEdit = QLineEdit()
self.reportCombo = QComboBox()
# Prototype for report options. Please define these in the
# record.py logic file under retranslate() so that translations
# work.
# self.options = ['No Audio', 'No Video', 'No Audio/Video']
# for i in self.options:
# self.reportCombo.addItem(i)
self.releaseCheckBox = QCheckBox("Release Received")
self.reportLayout.addWidget(self.commentLabel)
self.reportLayout.addWidget(self.commentEdit)
self.reportLayout.addWidget(self.reportCombo)
self.reportLayout.addWidget(self.releaseCheckBox)
#Buttons
self.reportButton = QPushButton("Report")
self.closeButton = QPushButton("Close")
self.buttonLayout.addWidget(self.closeButton)
self.buttonLayout.addWidget(self.reportButton)
self.connect(self.closeButton, SIGNAL("clicked()"), self.close)
|
Freeseer/freeseer
|
src/freeseer/frontend/record/ReportDialog.py
|
Python
|
gpl-3.0
| 4,570
|
[
"VisIt"
] |
aefe8c22b7bf0ae290b0845d2f0a88e6a22141d8433b3cb28dd8c17e9783ca0e
|
import numpy as np
from multiphenotype_utils import (get_continuous_features_as_matrix, add_id, remove_id_and_get_mat,
partition_dataframe_into_binary_and_continuous, divide_idxs_into_batches)
import pandas as pd
import tensorflow as tf
from dimreducer import DimReducer
from general_autoencoder import GeneralAutoencoder
from standard_autoencoder import StandardAutoencoder
from variational_autoencoder import VariationalAutoencoder
class VariationalLaplacianAutoencoder(VariationalAutoencoder):
"""
Implements a variational autoencoder with independent Laplacian priors.
    This code is identical to the Gaussian variational autoencoder except where explicitly noted in comments.
"""
def __init__(self,
**kwargs):
super(VariationalLaplacianAutoencoder, self).__init__(**kwargs)
def encode(self, X):
num_layers = len(self.encoder_layer_sizes)
# Get mu
mu = X
for idx in range(num_layers):
mu = tf.matmul(mu, self.weights['encoder_h%i' % (idx)]) \
+ self.biases['encoder_b%i' % (idx)]
# No non-linearity on the last layer
if idx != num_layers - 1:
mu = self.non_linearity(mu)
Z_mu = mu
# Get sigma (often called b, but we call it sigma to avoid renaming everything).
sigma = X
for idx in range(num_layers):
sigma = tf.matmul(sigma, self.weights['encoder_h%i_sigma' % (idx)]) \
+ self.biases['encoder_b%i_sigma' % (idx)]
# No non-linearity on the last layer
if idx != num_layers - 1:
sigma = self.non_linearity(sigma)
sigma = sigma * self.sigma_scaling # scale so sigma doesn't explode when we exponentiate it.
sigma = tf.exp(sigma)
Z_sigma = sigma
# Important: this deviates from the standard Gaussian autoencoder
# Sample from Laplacian(mu, sigma).
# See https://en.wikipedia.org/wiki/Laplace_distribution#Generating_random_variables_according_to_the_Laplace_distribution
# Z = mu - b * sgn(eps) * ln(1 - 2|eps|) where eps ~ U(-.5, .5)
# add in small constant (1e-8) for numerical stability in sampling; otherwise log can explode.
eps = tf.random_uniform(tf.shape(Z_mu),
dtype=tf.float32,
minval=-.5,
maxval=.5,
seed=self.random_seed)
Z = Z_mu - Z_sigma * tf.sign(eps) * tf.log(1 - 2 * tf.abs(eps) + 1e-8)
return Z, Z_mu, Z_sigma
def set_up_regularization_loss_structure(self):
"""
This function sets up the basic loss structure. Should define self.reg_loss.
"""
self.reg_loss = self.get_regularization_loss(self.Z_mu, self.Z_sigma)
def get_regularization_loss(self, Z_mu, Z_sigma):
# Important: this deviates from the standard Gaussian autoencoder
# We assume that the prior is a Laplacian with sigma = 1, mu = 0.
# to compute q log p: https://www.wolframalpha.com/input/?i=integrate++1+%2F+(2+*+pi)+*+exp(-abs(x+-+7)+%2F+pi)+*+(-abs(x)+%2F+1)+from+-infinity+to+infinity
# to compute q log q: https://www.wolframalpha.com/input/?i=integrate++1+%2F+(2+*+pi)+*+exp(-abs(x+-+7)+%2F+pi)+*+(-abs(x+-+7)+%2F+pi)+from+-infinity+to+infinity
# We want to compute KL(Q, P) and we have
# mu_p = 0, sigma_p = 1. Then:
# KL(Q, P) = -log(sigma) - 1 - (- abs(mu) - sigma * exp(-abs(mu) / sigma))
# KL(Q, P) = -log(sigma) - 1 + abs(mu) + sigma * exp(-abs(mu) / sigma)
# which vanishes, as it should, when sigma = 1, mu = 0.
kl_div_loss = -tf.log(Z_sigma) - 1 + tf.abs(Z_mu) + Z_sigma * tf.exp(-tf.abs(Z_mu) / Z_sigma)
kl_div_loss = tf.reduce_mean(
tf.reduce_sum(
kl_div_loss,
axis=1),
axis=0)
return kl_div_loss
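# --- Illustrative sketch (not part of the original module): a NumPy-only sanity check of the
# Laplace KL term and the inverse-CDF sampler used above. `laplace_kl` and `sample_laplace`
# are hypothetical helpers written just for this check.
if __name__ == "__main__":
    def laplace_kl(mu, sigma):
        # KL(Laplace(mu, sigma) || Laplace(0, 1)); same closed form as in the comments above.
        return -np.log(sigma) - 1 + np.abs(mu) + sigma * np.exp(-np.abs(mu) / sigma)

    def sample_laplace(mu, sigma, size, seed=0):
        # Inverse-CDF sampling: Z = mu - sigma * sgn(eps) * ln(1 - 2|eps|), eps ~ U(-.5, .5).
        eps = np.random.RandomState(seed).uniform(-0.5, 0.5, size)
        return mu - sigma * np.sign(eps) * np.log(1 - 2 * np.abs(eps) + 1e-8)

    assert abs(laplace_kl(0.0, 1.0)) < 1e-12       # the KL term vanishes at the prior
    assert laplace_kl(2.0, 0.5) > 0                # and is positive away from it
    z = sample_laplace(0.0, 1.0, 200000)
    assert abs(np.mean(np.abs(z)) - 1.0) < 0.02    # E|Z| equals the scale b = 1 for Laplace(0, 1)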
|
epierson9/multiphenotype_methods
|
laplacian_variational_autoencoder.py
|
Python
|
mit
| 4,025
|
[
"Gaussian"
] |
37d2cd277e25160b12f2da04f624a9c1ac50bf978935b7735fa48fb8e24a847a
|
from WebAppDIRAC.Lib.WebHandler import WebHandler, WErr, WOK, asyncGen
from DIRAC.Core.DISET.RPCClient import RPCClient
import json
class NotepadHandler(WebHandler):
AUTH_PROPS = "authenticated"
def index(self):
pass
|
chaen/WebAppDIRAC
|
WebApp/handler/NotepadHandler.py
|
Python
|
gpl-3.0
| 228
|
[
"DIRAC"
] |
d3eb1ca7ada8496ff5f1b58558562b95c508b5f2873010a5b93325aa5b121823
|
import os.path as op
import gzip
import string
import logging
import urllib.request
import re
from functools import wraps
from tempfile import NamedTemporaryFile
from collections import defaultdict, OrderedDict
import six
import numpy as np
import Bio
from Bio.PDB import PDBIO, Select, NeighborSearch
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.MMCIFParser import MMCIFParser
from Bio.PDB.Polypeptide import PPBuilder
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq
from . import errors
logger = logging.getLogger(__name__)
A_DICT = {
'A': 'ALA', 'R': 'ARG', 'N': 'ASN', 'D': 'ASP', 'C': 'CYS', 'E': 'GLU',
'Q': 'GLN', 'G': 'GLY', 'H': 'HIS', 'I': 'ILE', 'L': 'LEU', 'K': 'LYS',
'M': 'MET', 'F': 'PHE', 'P': 'PRO', 'S': 'SER', 'T': 'THR', 'W': 'TRP',
'Y': 'TYR', 'V': 'VAL', 'U': 'SEC', 'O': 'PYL',
'B': 'ASX', 'Z': 'GLX', 'J': 'XLE', 'X': 'XAA', '*': 'TER'
}
AAA_DICT = dict([(value, key) for key, value in list(A_DICT.items())])
AAA_DICT['UNK'] = 'X'
AAA_DICT['MSE'] = 'M'
AAA_DICT['CSD'] = 'C'
# Phosphorylated residues
# AAA_DICT['SEP'] = 'S' # PHOSPHOSERINE
# AAA_DICT['TPO'] = 'T' # PHOSPHOTHREONINE
# AAA_DICT['PTR'] = 'Y'  # O-PHOSPHOTYROSINE
# Methylated lysines
AAA_DICT['MLZ'] = 'K'
AAA_DICT['MLY'] = 'K'
AAA_DICT['M3L'] = 'K'
AMINO_ACIDS = list(AAA_DICT.keys())
METHYLATED_LYSINES = ['MLZ', 'MLY', 'M3L']
LYSINE_ATOMS = ['N', 'CA', 'CB', 'CG', 'CD', 'CE', 'NZ', 'C', 'O']
# %% Functions for downloading and parsing pdb files
class MMCIFParserMod(MMCIFParser):
    def __init__(self, temp_dir):
        super(MMCIFParserMod, self).__init__()
        self.temp_dir = temp_dir
def get_structure(self, structure_id, gzip_fh):
"""Altered `get_structure` method which accepts gzip file handles as input."""
with NamedTemporaryFile(mode='w', dir=self.temp_dir) as temp_fh:
temp_fh.writelines(gzip_fh.readlines())
temp_fh.flush()
temp_fh.seek(0)
return super(MMCIFParserMod, self).get_structure(structure_id, temp_fh.name)
def get_pdb_id(pdb_file):
pdb_id = op.basename(pdb_file)
    for ext in [r'\.gz$', r'\.pdb$', r'\.ent$', r'\.cif$', r'^pdb']:
pdb_id = re.sub(ext, '', pdb_id)
if pdb_id.startswith('ent') and len(pdb_id) > 4:
pdb_id = pdb_id[3:]
pdb_id = pdb_id.upper()
return pdb_id.upper()
def get_pdb_file(pdb_id, pdb_database_dir, pdb_type='ent'):
"""Get PDB file from a local mirror of the PDB database."""
if pdb_type == 'ent':
# Original PDB structure.
prefix = 'pdb'
suffix = '.ent.gz'
relative_pdb_file = (
pdb_id[1:3].lower() + '/' + prefix + pdb_id.lower() + suffix
)
elif pdb_type == 'cif':
# mmCIF pdb structure.
prefix = ''
suffix = '.cif.gz'
relative_pdb_file = (
'../mmCIF/' +
pdb_id[1:3].lower() + '/' + prefix + pdb_id.lower() + suffix
)
elif pdb_type == 'pdb':
# The first biological unit.
prefix = ''
suffix = '.pdb1.gz'
relative_pdb_file = (
'../../../biounit/coordinates/divided/' +
pdb_id[1:3].lower() + '/' + prefix + pdb_id.lower() + suffix
)
elif pdb_type == 'raw':
# Just a PDB file in some folder.
relative_pdb_file = ''
else:
raise Exception
pdb_file = op.join(pdb_database_dir, relative_pdb_file)
return pdb_file
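# --- Illustrative note (not part of the original module): for pdb_id='1ABC' the relative paths
# built above would be, e.g.:
#   pdb_type='ent' -> 'ab/pdb1abc.ent.gz'
#   pdb_type='cif' -> '../mmCIF/ab/1abc.cif.gz'
#   pdb_type='pdb' -> '../../../biounit/coordinates/divided/ab/1abc.pdb1.gz'
# each of which is then joined onto pdb_database_dir.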
def download_pdb_file(pdb_id, output_dir):
"""Move PDB structure to the local working directory."""
PDB_URL = 'http://www.rcsb.org/pdb/files/{}.pdb'
PDB_EURO_URL = 'http://www.ebi.ac.uk/pdbe/entry-files/download/pdb{}.ent'
output_pdb_filename = op.join(output_dir, pdb_id + '.pdb')
# If the PDB already exists, do nothing...
if op.isfile(output_pdb_filename):
logger.debug('PDB file {} already exists...'.format(output_pdb_filename))
return output_pdb_filename
# Download the PDB file from the internet...
logger.info('Downloading PDB {}...'.format(pdb_id + '.pdb'))
try:
pdb_url = PDB_URL.format(pdb_id)
logger.debug(pdb_url)
response = urllib.request.urlopen(pdb_url)
except urllib.error.URLError as e:
logger.debug(e)
pdb_url = PDB_EURO_URL.format(pdb_id.lower())
logger.debug(pdb_url)
response = urllib.request.urlopen(pdb_url)
with open(output_pdb_filename, 'wb') as ofh:
ofh.write(response.read())
return output_pdb_filename
def get_pdb_structure(pdb_file, pdb_id=None, quiet=True):
"""Set QUIET to False to output warnings like incomplete chains etc."""
if pdb_id is None:
pdb_id = get_pdb_id(pdb_file)
parser = PDBParser(get_header=True, QUIET=quiet)
if pdb_file.endswith('.gz'):
with gzip.open(pdb_file, 'rt') as ifh:
structure = parser.get_structure(pdb_id, ifh)
else:
structure = parser.get_structure(pdb_id, pdb_file)
# Rename empty chains (i.e. chain.id == ' ')
model = structure[0]
chain_ids = {chain.id for chain in model.child_list}
for chain in model.child_list:
if chain.id in [' ', 'Z']:
chain_ids.remove(chain.id)
chain.id = next(c for c in string.ascii_uppercase if c not in chain_ids)
chain_ids.add(chain.id)
model.child_dict = {chain.id: chain for chain in model.child_list}
return structure
def load_pdb(pdb_file, **kwargs):
return get_pdb_structure(pdb_file, **kwargs)
# %%
def euclidean_distance(a, b):
"""Calculate the Euclidean distance between two lists or tuples of arbitrary length."""
    return np.sqrt(sum((p - q)**2 for p, q in zip(a, b)))
def calculate_distance(atom_1, atom_2, cutoff=None):
"""Calculate the distance between two points in 3D space.
Parameters
----------
cutoff : float, optional
The maximum distance allowable between two points.
"""
if ((type(atom_1) == type(atom_2) == list) or
(type(atom_1) == type(atom_2) == tuple)):
a = atom_1
b = atom_2
elif hasattr(atom_1, 'coord') and hasattr(atom_2, 'coord'):
a = atom_1.coord
b = atom_2.coord
else:
raise Exception('Unsupported format {} {}'.format(type(atom_1), type(atom_2)))
assert(len(a) == 3 and len(b) == 3)
if cutoff is None or all(abs(p - q) <= cutoff for p, q in zip(a, b)):
return euclidean_distance(a, b)
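# --- Illustrative note (not part of the original module): both helpers above accept plain
# 3-tuples as well as Bio.PDB atoms, e.g.
#   euclidean_distance((0, 0, 0), (3, 4, 0))             # -> 5.0
#   calculate_distance((0, 0, 0), (3, 4, 0), cutoff=5)   # -> 5.0 (every per-axis gap <= cutoff)
#   calculate_distance((0, 0, 0), (3, 4, 0), cutoff=2)   # -> None (pre-filtered by the cutoff)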
#
def get_chain_seqres_sequence(chain, aa_only=False):
"""Get the amino acid sequence for the construct coding for the given chain.
    Extracts a sequence from a PDB file. Useful when interested in the
    sequence that was used for crystallization and not the ATOM sequence.
Parameters
----------
aa_only : bool
If aa_only is set to `False`, selenomethionines will be included in the sequence.
See: http://biopython.org/DIST/docs/api/Bio.PDB.Polypeptide-module.html.
"""
sequence = Seq('', IUPAC.protein)
for pb in PPBuilder().build_peptides(chain, aa_only=aa_only):
        sequence += pb.get_sequence()
return sequence
def get_chain_sequence_and_numbering(chain, domain_def_tuple=None, include_hetatms=False):
"""Get the amino acid sequence and a list of residue ids for the given chain.
Parameters
----------
chain : Bio.PDB.Chain.Chain
The chain for which to get the amino acid sequence and numbering.
"""
if domain_def_tuple is not None:
start_resid, end_resid = domain_def_tuple
chain_numbering = []
chain_numbering_extended = []
chain_sequence = []
inside_domain = False
for res in chain:
#
resid = str(res.id[1]) + res.id[2].strip()
if domain_def_tuple is None or resid == start_resid:
inside_domain = True
if inside_domain and (include_hetatms or res.resname in AMINO_ACIDS):
chain_numbering.append(res.id[1])
chain_numbering_extended.append(resid)
chain_sequence.append(AAA_DICT.get(res.resname, '.'))
if domain_def_tuple is not None and resid == end_resid:
inside_domain = False
chain_sequence = ''.join(chain_sequence)
return chain_sequence, chain_numbering_extended
def convert_position_to_resid(chain, positions, domain_def_tuple=None):
"""Convert mutation_domain to mutation_modeller.
In mutation_modeller, the first amino acid in a chain may start
with something other than 1.
"""
__, chain_numbering = get_chain_sequence_and_numbering(
chain, domain_def_tuple)
logger.debug('chain_numbering: {}'.format(chain_numbering))
logger.debug('positions: {}'.format(positions))
return [chain_numbering[p - 1] for p in positions]
def get_structure_sequences(file_or_structure, seqres_sequence=False):
"""Return a dictionary of sequences for a given file or Structure.
Parameters
----------
file_or_structure : str | biopython.Structure | biopython.Model | biopython.Chain
PDB filename or biopython object from which to extract the sequence.
"""
if isinstance(file_or_structure, six.string_types):
structure = get_pdb_structure(file_or_structure)
model = structure[0]
elif isinstance(file_or_structure, Bio.PDB.Structure.Structure):
model = file_or_structure[0]
elif isinstance(file_or_structure, Bio.PDB.Model.Model):
model = file_or_structure
elif isinstance(file_or_structure, Bio.PDB.Chain.Chain):
model = [file_or_structure]
else:
raise Exception(
'Unexpected type {} for input ``file_or_structure`` {}!'
.format(file_or_structure, type(file_or_structure)))
chain_sequences = defaultdict(list)
for chain in model:
if seqres_sequence:
chain_sequence = get_chain_seqres_sequence(chain)
else:
chain_sequence, __ = get_chain_sequence_and_numbering(chain)
chain_sequences[chain.id] = chain_sequence
return chain_sequences
def suppress_logger(fn):
@wraps(fn)
def fn_quiet(*args, **kwargs):
level = logger.level
logger.setLevel(logging.WARNING)
try:
return fn(*args, **kwargs)
finally:
logger.setLevel(level)
return fn_quiet
def convert_aa(aa, quiet=False):
"""Convert amino acids from three letter code to one letter code or vice versa.
.. note:: Deprecated!
Use ``''.join(AAA_DICT[aaa] for aaa in aa)`` and ``''.join(A_DICT[a] for a in aa)``.
"""
if quiet:
return suppress_logger(convert_aa)(aa)
if len(aa) == 3:
try:
return AAA_DICT[aa.upper()]
except KeyError:
if not quiet:
logger.debug('Not a valid amino acid: {}'.format(aa))
return
if len(aa) == 1:
try:
return A_DICT[aa.upper()]
except KeyError:
if not quiet:
logger.debug('Not a valid amino acid: {}'.format(aa))
return
if not quiet:
logger.debug('Not a valid amino acid: {}'.format(aa))
# STANDALONE FUNCTIONS
def get_interactions(model, chain_id, r_cutoff=6):
"""
"""
interactions = {}
for chain_id_2, chain_2 in model.child_dict.items():
if chain_id == chain_id_2:
continue
interactions[chain_id_2] = (
get_interactions_between_chains(model, chain_id, chain_id_2, r_cutoff)
)
return {k: v for (k, v) in interactions.items() if v}
def get_interactions_between_chains(model, chain_id_1, chain_id_2, r_cutoff=6):
"""Calculate interactions between the residues of the two chains.
    An interaction is defined as a pair of residues where at least one pair of atoms
    is closer than r_cutoff.
.. deprecated:: 1.0
Use python:fn:`get_interacting_residues` instead.
It gives you both the residue index and the resnum.
Returns
-------
OrderedDict
Keys are (residue_number, residue_amino_acid) tuples
(e.g. ('0', 'M'), ('1', 'Q'), ...).
Values are lists of (residue_number, residue_amino_acid) tuples.
(e.g. [('0', 'M'), ('1', 'Q'), ...]).
"""
try:
from Bio.PDB import NeighborSearch
except ImportError as e:
logger.warning('Importing Biopython NeighborSearch returned an error: {}'.format(e))
logger.warning('Using the the slow version of the neighbour-finding algorithm...')
return get_interactions_between_chains_slow(model, chain_id_1, chain_id_2, r_cutoff)
# Extract the chains of interest from the model
chain_1 = None
chain_2 = None
for child in model.get_list():
if child.id == chain_id_1:
chain_1 = child
if child.id == chain_id_2:
chain_2 = child
if chain_1 is None or chain_2 is None:
raise Exception('Chains %s and %s were not found in the model' % (chain_id_1, chain_id_2))
ns = NeighborSearch(list(chain_2.get_atoms()))
interactions_between_chains = OrderedDict()
for idx, residue_1 in enumerate(chain_1):
if residue_1.resname in AMINO_ACIDS and residue_1.id[0] == ' ':
resnum_1 = str(residue_1.id[1]) + residue_1.id[2].strip()
resaa_1 = convert_aa(residue_1.get_resname(), quiet=True)
interacting_residues = set()
for atom_1 in residue_1:
interacting_residues.update(ns.search(atom_1.get_coord(), r_cutoff, 'R'))
interacting_resids = []
for residue_2 in interacting_residues:
resnum_2 = str(residue_2.id[1]) + residue_2.id[2].strip()
resaa_2 = convert_aa(residue_2.get_resname(), quiet=True)
if residue_2.resname in AMINO_ACIDS and residue_2.id[0] == ' ':
interacting_resids.append((resnum_2, resaa_2,))
if interacting_resids:
interacting_resids.sort(
key=lambda x: int(''.join([c for c in x[0] if c.isdigit()])))
interactions_between_chains[(resnum_1, resaa_1)] = interacting_resids
return interactions_between_chains
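# --- Illustrative sketch (not part of the original module): typical use of the function above;
# 'structure.pdb' and the chain ids are placeholders.
#   model = get_pdb_structure('structure.pdb')[0]
#   contacts = get_interactions_between_chains(model, 'A', 'B', r_cutoff=6)
#   for (resnum, resaa), partners in contacts.items():
#       print(resnum, resaa, '->', partners)   # e.g. 17 K -> [('42', 'D'), ('43', 'E')]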
def get_interactions_between_chains_slow(model, pdb_chain_1, pdb_chain_2, r_cutoff=5):
"""Calculate interactions between residues in pdb_chain_1 and pdb_chain_2.
    An interaction is defined as a pair of residues where at least one pair of atoms
    is closer than r_cutoff. The default value for r_cutoff is 5 Angstroms.
.. deprecated:: 1.0
Use :func:`get_interacting_residues` instead.
It gives you both the residue index and the resnum.
"""
# Extract the chains of interest from the model
chain_1 = None
chain_2 = None
for child in model.get_list():
if child.id == pdb_chain_1:
chain_1 = child
if child.id == pdb_chain_2:
chain_2 = child
if chain_1 is None or chain_2 is None:
raise Exception(
'Chains %s and %s were not found in the model' % (pdb_chain_1, pdb_chain_2))
interactions_between_chains = OrderedDict()
for idx, residue_1 in enumerate(chain_1):
if residue_1.resname in AMINO_ACIDS and residue_1.id[0] == ' ':
resnum_1 = str(residue_1.id[1]) + residue_1.id[2].strip()
resaa_1 = convert_aa(residue_1.get_resname())
interacting_resids = []
for residue_2 in chain_2:
resnum_2 = str(residue_2.id[1]) + residue_2.id[2].strip()
resaa_2 = convert_aa(residue_2.get_resname())
r_min = None
if residue_2.resname in AMINO_ACIDS and residue_2.id[0] == ' ':
for atom_1 in residue_1:
for atom_2 in residue_2:
r = calculate_distance(atom_1, atom_2, r_cutoff)
if r is not None:
if r_min and r < r_min:
r_min = r
elif not r_min:
r_min = r
if r_min:
interacting_resids.append((resnum_2, resaa_2, r_min,))
if interacting_resids:
interactions_between_chains[(resnum_1, resaa_1)] = interacting_resids
return interactions_between_chains
def chain_is_hetatm(chain):
"""Return True if the chain is made up entirely of HETATMs."""
hetatms = [None] * len(chain)
for i in range(len(chain.child_list)):
res = chain.child_list[i]
hetatms[i] = res.resname not in AAA_DICT
if all(hetatms):
return True
elif not any(hetatms):
return False
else:
# Something went wrong.
sequence, numbering = get_chain_sequence_and_numbering(chain)
message = (
'Some but not all residues in chain {} are hetatms!\n'.format(chain.id) +
'sequence: {}\n'.format(sequence) +
'numbering: {}\n'.format(numbering)
)
logger.debug(message)
        return False
def get_aa_residues(chain):
aa_residues = [residue.id for residue in chain if residue.resname in AAA_DICT]
return aa_residues
def get_interacting_residues(model, r_cutoff=5, skip_hetatm_chains=True):
"""Return residue-residue interactions between all chains in `model`.
Parameters
----------
model : biopython.Model
Model to analyse.
Returns
-------
dict
A dictionary of interactions between chains i (0..n-1) and j (i+1..n).
Keys are (chain_idx, chain_id, residue_idx, residue_resnum, residue_amino_acid) tuples.
        (e.g. (0, 'A', 0, '0', 'M'), (0, 'A', 1, '2', 'K'), ...)
Values are a list of tuples having the same format as the keys.
Examples
--------
You can reverse the order of keys and values like this::
complement = dict()
            for key, values in get_interacting_residues(model).items():
for value in values:
complement.setdefault(value, set()).add(key)
You can get a list of all interacting chains using this command::
{(key[0], value[0])
             for (key, values) in get_interacting_residues(model).items()
for value in values}
"""
from Bio.PDB import NeighborSearch
interactions_between_chains = dict()
# Chain 1
for chain_1_idx, chain_1 in enumerate(model):
if skip_hetatm_chains and chain_is_hetatm(chain_1):
message = (
"Skipping chain_1 with idx {} because it contains only hetatms."
.format(chain_1_idx)
)
logger.debug(message)
continue
chain_1_residue_ids = get_aa_residues(chain_1)
# Chain 2
for j, chain_2 in enumerate(model.child_list[chain_1_idx + 1:]):
chain_2_idx = chain_1_idx + 1 + j
if skip_hetatm_chains and chain_is_hetatm(chain_2):
message = (
"Skipping chain_2 with idx {} because it contains only hetatms."
.format(chain_2_idx)
)
logger.debug(message)
continue
chain_2_residue_ids = get_aa_residues(chain_2)
ns = NeighborSearch(list(chain_2.get_atoms()))
# Residue 1
for residue_1 in chain_1:
try:
residue_1_idx = chain_1_residue_ids.index(residue_1.id)
except ValueError:
continue
residue_1_resnum = str(residue_1.id[1]) + residue_1.id[2].strip()
residue_1_aa = convert_aa(residue_1.resname, quiet=True)
residue_1_key = (
chain_1_idx, chain_1.id, residue_1_idx, residue_1_resnum, residue_1_aa
)
interacting_residues = set()
for atom_1 in residue_1:
interacting_residues.update(ns.search(atom_1.get_coord(), r_cutoff, 'R'))
# Residue 2
interacting_residue_ids = []
for residue_2 in interacting_residues:
try:
residue_2_idx = chain_2_residue_ids.index(residue_2.id)
except ValueError:
continue
residue_2_resnum = str(residue_2.id[1]) + residue_2.id[2].strip()
residue_2_aa = convert_aa(residue_2.get_resname(), quiet=True)
residue_2_key = (
chain_2_idx, chain_2.id, residue_2_idx, residue_2_resnum, residue_2_aa
)
interacting_residue_ids.append(residue_2_key)
if interacting_residue_ids:
interactions_between_chains\
.setdefault(residue_1_key, set())\
.update(interacting_residue_ids)
return interactions_between_chains
def decode_domain_def(domains, merge=True, return_string=False):
"""Return a tuple of tuples of strings, preserving letter numbering (e.g. 10B)."""
if not domains:
return None, None
if domains[-1] == ',':
domains = domains[:-1]
x = domains
if return_string:
domain_fragments = [[r.strip() for r in ro.split(':')] for ro in x.split(',')]
else:
domain_fragments = [[int(r.strip()) for r in ro.split(':')] for ro in x.split(',')]
domain_merged = [domain_fragments[0][0], domain_fragments[-1][-1]]
if merge:
return domain_merged
else:
return domain_fragments
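# --- Illustrative note (not part of the original module): behaviour of decode_domain_def on a
# two-fragment definition.
#   decode_domain_def('1:10,20:45')                       # -> [1, 45] (merged span)
#   decode_domain_def('1:10,20:45', merge=False)          # -> [[1, 10], [20, 45]]
#   decode_domain_def('1:10B,20:45', return_string=True)  # -> ['1', '45'] (keeps letter numbering)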
# Additions for `pipeline_structure`
class SelectChains(Select):
"""Only accept the specified chains when saving."""
def __init__(self, chain_letters, ns_chain_letters=None, ns=None, r_cutoff=None):
self.chain_letters = chain_letters
self.ns_chain_letters = ns_chain_letters
self.ns = ns
self.r_cutoff = r_cutoff
def accept_residue(self, residue):
chain_id = residue.parent.id
if chain_id in self.chain_letters:
return True
elif (self.ns_chain_letters and self.ns) and (chain_id in self.ns_chain_letters):
for atom in residue:
if self.ns.search(atom.get_coord(), self.r_cutoff, 'C'):
return True
return False
class StructureParser:
""".
Attributes
----------
pdb_id : ___
domain_boundaries : list of lists of lists
Elements in the outer list correspond to domains in each chain of the
pdb. Elements of the inner list contain the start and end of each
fragment of each domain. For example, if there is only one chain
with pdb domain boundaries 1-10:20-45, this would correspond to
domain_boundaries [[[1,10],[20,45]]].
"""
def __init__(self, pdb_file, chain_ids=None, domain_defs=[]):
""".
Parameters
----------
pdb_file : str
Full path and filename of the structure.
output_dir : str
Folder where to save extracted structures and sequences.
chain_ids : list
Chains of the structure that should be kept.
"""
self.pdb_id = get_pdb_id(pdb_file)
self.pdb_file = pdb_file
self.input_structure = get_pdb_structure(self.pdb_file, self.pdb_id)
if chain_ids is None:
self.chain_ids = [chain.id for chain in self.input_structure[0].child_list]
elif isinstance(chain_ids, str):
self.chain_ids = chain_ids.split(',')
elif isinstance(chain_ids, list) or isinstance(chain_ids, tuple):
self.chain_ids = list(chain_ids)
else:
raise Exception
self.r_cutoff = 6 # remove hetatms more than x A away from the main chain(s)
self.domain_boundaries = []
for domain_def in domain_defs:
self.domain_boundaries.append(
decode_domain_def(domain_def, merge=False, return_string=True)
)
self.unique_id = ('pdb_id: {}, chain_ids: {}'.format(self.pdb_id, self.chain_ids))
def extract(self):
"""Extract the wanted chains out of the PDB file.
Remove water atoms and selects the domain regions (i.e. selects only those parts
of the domain that are within the domain boundaries specified).
"""
logger.debug('Extracting {}...'.format(self.unique_id))
model = self.input_structure[0] # assuming that model 0 is always the desired one
new_structure = Bio.PDB.Structure.Structure(self.pdb_id)
new_model = Bio.PDB.Model.Model(0)
# Always assigning hetatms to chain 'Z' may lead to undesirable performance
        # when the PDB structure actually has a chain 'Z'.
# As of 2015, there are ~1300 structures with chain 'Z' in the elaspic.domain table.
# TODO: Convert `pdb_chain` tables in the database to use binary collation.
# I think the Bio.PDB module may have to be upgraded too as it currently does not support
# lowercase chain ids.
hetatm_chain_id = 'Z'
hetatm_chain = Bio.PDB.Chain.Chain(hetatm_chain_id)
# Loop over every chain and every residue and make sure that everything is ok
chain_idx = 0
while chain_idx < len(self.chain_ids):
chain_id = self.chain_ids[chain_idx]
chain = model[chain_id]
chain_numbering, domain_start_idxs, domain_end_idxs = (
self._get_domain_def_idxs_for_chain(chain, chain_idx)
)
logger.debug(
'domain_def: %s, domain_start_idxs: %s, domain_end_idxs: %s',
self.domain_boundaries, domain_start_idxs, domain_end_idxs
)
res_idx = 0
while res_idx < len(chain):
res = chain.child_list[res_idx]
original_res_id = res.id
# Move water to the hetatm chain
if res.id[0] == 'W':
self._move_hetatm_to_hetatm_chain(chain, hetatm_chain, res, echo=False)
continue
# # Move heteroatoms to the hetatm chain
# if res.id[0] != ' ':
# self._move_hetatm_to_hetatm_chain(chain, hetatm_chain, res, echo=True)
# continue
# Now treating all unusual amino acids as hetatms
# Convert methylated lysines to regular lysines
if res.resname in METHYLATED_LYSINES:
self._correct_methylated_lysines(res)
# Move hetatms to the hetatm chain
if res.resname not in AMINO_ACIDS:
self._move_hetatm_to_hetatm_chain(chain, hetatm_chain, res)
continue
# Cut each chain to domain boundaries
residue_is_outside_domain = (
self._residue_outside_domain(
chain, chain_numbering, domain_start_idxs, domain_end_idxs, res)
)
if residue_is_outside_domain:
chain.detach_child(original_res_id)
continue
res_idx += 1
if len(chain):
new_model.add(chain)
chain_idx += 1
else:
logger.debug('Chain {} is empty! Removing...'.format(chain.id))
self.chain_ids.remove(chain.id)
# Make sure that the new model is not empty
if not list(new_model.get_atoms()):
raise errors.PDBEmptySequenceError(self.unique_id)
# Remove hetatms if they are > 6A away from the chains of interest.
        self._remove_distant_hetatms(new_model, hetatm_chain)
if hetatm_chain:
logger.debug('Adding hetatm chain of length {}'.format(len(hetatm_chain)))
new_model.add(hetatm_chain)
self.hetatm_chain_id = hetatm_chain_id
else:
self.hetatm_chain_id = None
# If the hetatm chain is not empty, add it to the model
new_structure.add(new_model)
self.structure = new_structure
logger.debug('PDB {} extracted successfully.'.format(self.pdb_id))
self.interactions_between_chains = (
get_interacting_residues(self.structure[0], self.r_cutoff, True)
)
self.interacting_chain_ids = {
(key[1], value[1])
for (key, values) in self.interactions_between_chains.items()
for value in values
}
self.interacting_chain_idxs = {
(key[0], value[0])
for (key, values) in self.interactions_between_chains.items()
for value in values
}
def get_chain_sequence_and_numbering(self, chain_id, *args, **varargs):
"""Call ``get_chain_sequence_and_numbering`` using chain with id ``chain_id``."""
chain = self.structure[0][chain_id]
return get_chain_sequence_and_numbering(chain, *args, **varargs)
def get_chain_seqres_sequence(self, chain_id, *args, **varargs):
"""Call ``get_chain_seqres_sequence`` using chain with id ``chain_id``."""
chain = self.structure[0][chain_id]
return get_chain_seqres_sequence(chain, *args, **varargs)
def save_structure(self, output_dir='', remove_disordered=False):
if remove_disordered:
self._unset_disordered_flags()
io = PDBIO()
io.set_structure(self.structure)
try:
# Save all chains together
outFile = op.join(output_dir, self.pdb_id + ''.join(self.chain_ids) + '.pdb')
io.save(outFile)
if len(self.chain_ids) > 1:
# Save each chain individually
for chain_id in self.chain_ids:
chain = self.structure[0][chain_id]
if chain_is_hetatm(chain):
continue
outFile = op.join(output_dir, self.pdb_id + chain_id + '.pdb')
atom_list = [atom for atom in self.structure[0][chain_id].get_atoms()]
hetatm_chain_ns = NeighborSearch(atom_list)
select = SelectChains(
chain_id, self.hetatm_chain_id, hetatm_chain_ns, self.r_cutoff)
io.save(outFile, select=select)
if len(self.chain_ids) > 2:
# Save each interacting chain pair.
for chain_ids in self.interacting_chain_ids:
outFile = op.join(output_dir, self.pdb_id + ''.join(chain_ids) + '.pdb')
                    atom_list = [
                        atom
                        for chain_id in chain_ids
                        for atom in self.structure[0][chain_id].get_atoms()
                    ]
hetatm_chain_ns = NeighborSearch(atom_list)
select = SelectChains(
chain_ids, self.hetatm_chain_id, hetatm_chain_ns, self.r_cutoff)
io.save(outFile, select=select)
except AttributeError as e:
if remove_disordered:
raise(e)
self.save_structure(output_dir=output_dir, remove_disordered=True)
def save_sequences(self, output_dir=''):
self.chain_numbering_extended_dict = {}
self.chain_sequence_dict = {}
for chain_id in self.chain_ids:
chain_sequence, chain_numbering_extended = (
self.get_chain_sequence_and_numbering(chain_id)
)
self.chain_numbering_extended_dict[chain_id] = chain_numbering_extended
self.chain_sequence_dict[chain_id] = chain_sequence
with open(op.join(output_dir, self.pdb_id + chain_id + '.fasta'), 'w') as f:
f.write('>' + self.pdb_id + chain_id + '\n')
f.write(chain_sequence + '\n')
f.write('\n')
def _get_domain_def_idxs_for_chain(self, chain, chain_idx):
if not self.domain_boundaries or not self.domain_boundaries[chain_idx]:
return None, None, None
__, chain_numbering = get_chain_sequence_and_numbering(chain)
try:
domain_start_idxs, domain_end_idxs = [
tuple(chain_numbering.index(resid) for resid in resids)
for resids in zip(*self.domain_boundaries[chain_idx])]
except Exception as e:
print(str(e))
raise errors.PDBDomainDefsError(self.unique_id)
return chain_numbering, domain_start_idxs, domain_end_idxs
def _correct_methylated_lysines(self, res):
new_resname = 'LYS'
new_resid = (' ', res.id[1], res.id[2])
logger.debug(
'Renaming residue {} {} to {} {}'
.format(res.resname, res.id, new_resname, new_resid))
res.resname = new_resname
res.id = new_resid
atom_idx = 0
while atom_idx < len(res):
atom_id = res.child_list[atom_idx].id
if atom_id not in LYSINE_ATOMS:
logger.debug(
'Removing atom {} from residue {} {}.'.format(atom_id, res.resname, res.id)
)
res.detach_child(atom_id)
else:
atom_idx += 1
def _move_hetatm_to_hetatm_chain(self, chain, hetatm_chain, res, echo=False):
# logger.debug(
# 'Moving hetatm residue {} {} to the hetatm chain'
# .format(res.resname, res.id))
chain.detach_child(res.id)
hetatm_res = res
hetatm_res.id = (hetatm_res.id[0], len(hetatm_chain) + 1, hetatm_res.id[2], )
hetatm_chain.add(hetatm_res)
def _residue_outside_domain(
self, chain, chain_numbering, domain_start_idxs, domain_end_idxs, res):
"""Return `True` if residue ``res`` is outside the domain."""
if domain_start_idxs is None or domain_end_idxs is None:
return False
resid = str(res.id[1]) + res.id[2].strip()
resid_idx = chain_numbering.index(resid)
for domain_start_idx, domain_end_idx in zip(domain_start_idxs, domain_end_idxs):
if resid_idx >= domain_start_idx and resid_idx <= domain_end_idx:
# Residue is inside the domain
return False
# Residue is outside the domain
return True
    def _remove_distant_hetatms(self, new_model, hetatm_chain):
"""Detach hetatms that are more than ``self.r_cutoff`` away from the main chain(s)."""
ns = NeighborSearch(list(new_model.get_atoms()))
hetatm_chain.id = [
c for c in reversed(string.ascii_uppercase) if c not in self.chain_ids][0]
res_idx = 0
while res_idx < len(hetatm_chain):
res_1 = hetatm_chain.child_list[res_idx]
in_contact = False
for atom_1 in res_1:
interacting_residues = ns.search(atom_1.get_coord(), self.r_cutoff, 'R')
if interacting_residues:
# logger.debug(res_1.id)
# logger.debug(interacting_residues)
in_contact = True
if in_contact:
res_idx += 1
continue
# logger.debug('Detaching child: {}'.format(res_1.id))
hetatm_chain.detach_child(res_1.id)
def _unset_disordered_flags(self):
"""Change atom and residue ``disordered`` flag to `False`.
Otherwise, Biopython may crash when saving the PDB structure.
"""
        logger.debug('Setting all residues and atoms marked as disordered to non-disordered')
for m in self.structure:
for c in m:
for r in c:
if r.is_disordered() or r.disordered:
logger.debug(
'Changing disordered_flag on residue {} from {} to 0'
.format(r, r.disordered))
r.disordered = 0
for a in r:
if a.is_disordered() or a.disordered_flag:
logger.debug(
'Changing disordered_flag on atom {} from {} to 0'
.format(a, a.disordered_flag))
a.disordered_flag = 0
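# --- Illustrative sketch (not part of the original module): a minimal end-to-end use of
# StructureParser; the PDB path and chain ids are placeholders.
#   sp = StructureParser('/path/to/pdb1abc.ent.gz', chain_ids='A,B')
#   sp.extract()                         # move waters/hetatms aside, keep the requested chains
#   sp.save_structure(output_dir='.')    # writes 1ABCAB.pdb plus one file per chain
#   sp.save_sequences(output_dir='.')    # writes 1ABCA.fasta and 1ABCB.fasta
#   print(sp.interacting_chain_ids)      # e.g. {('A', 'B')}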
|
ostrokach/elaspic
|
elaspic/structure_tools.py
|
Python
|
mit
| 36,218
|
[
"Biopython"
] |
1cc156a90fea2f6d62ec9e65e27f689e5da65584a65af7085a3af0bf645a93b4
|
####
#Copyright 2011 Samuel Volchenboum, Jonathan Goya, Gene Selkov, Chaim Kirby,
#
#This file is part of Validator.
#
#Validator is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Validator is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Validator. If not, see <http://www.gnu.org/licenses/>.
#####
#!/usr/bin/env python
import sys, os, commands, string, time, subprocess
galaxyhome=os.environ.get('GALAXY_HOME')
if sys.argv[2]=="false": #Input_data1_source=default input
DAT_FILE = galaxyhome + "/tools/proteonics/" + sys.argv[3] #for VM Galaxy
# DAT_FILE = "/meida/Work/galaxy-proteonics/tools/proteonics/" + sys.argv[3] #for test
else:
DAT_FILE = sys.argv[3] #Input_data1_source=uploaded input
if sys.argv[5]=="none": #Input_data2_source=none
CPAS_FILE = ""
elif sys.argv[5]=="default":
# CPAS_FILE = "/meida/Work/galaxy-proteonics/tools/proteonics/" + sys.argv[6] #for test
CPAS_FILE = galaxyhome + "/tools/proteonics/" + sys.argv[6] #for VM Galaxy
else:
CPAS_FILE = sys.argv[6] #Input_data2_source=uploaded input
PEAK_CUTOFF = sys.argv[8]
MS1_CUTOFF = sys.argv[10]
MS2_CUTOFF = sys.argv[12]
MAX_PEAK_DIFF = sys.argv[14]
htmlout = sys.argv[16] #/media/Work/galaxy-globus-crdata/database/files/000/dataset_66.dat
OUTPUT_FOLDER = htmlout.split('.')[0]+'_files' #/media/Work/galaxy-globus-crdata/database/files/000/dataset_66_files
if not os.path.exists(OUTPUT_FOLDER):
os.makedirs(OUTPUT_FOLDER)
else:
pass
print "python","validator_cli.py","--input-file1",DAT_FILE,"--input-file2",CPAS_FILE,"--peak-cutoff",PEAK_CUTOFF,"--ms1-cutoff", MS1_CUTOFF,"--ms2-cutoff", MS2_CUTOFF,"--max-peak-diff", MAX_PEAK_DIFF,"--output-folder", OUTPUT_FOLDER
'''
usage: validator_cli.py [-h] [--input-file1 DAT_FILE]
[--input-file2 CPAS_FILE]
[--peak-cutoff PEAK_CUTOFF]
[--ms1-cutoff MS1_CUTOFF]
[--ms2-cutoff MS2_CUTOFF]
[--max-peak-diff MAX_PEAK_DIFF]
[--output-folder OUTPUT_FOLDER]
'''
galhtmlprefix = """<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Galaxy %s tool output - see http://getgalaxy.org/" />
<title></title>
<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
</head>
<body>
<div class="document">
"""
galhtmlattr = """Galaxy tool %s run at %s</b><br/>"""
galhtmlpostfix = """</div></body></html>\n"""
def timenow():
"""return current time as a string
"""
return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))
def htmloutput(htmlout,outputfolder):
rstyle="""<style type="text/css">
tr.d0 td {background-color: oldlace; color: black;}
tr.d1 td {background-color: aliceblue; color: black;}
</style>"""
res = [rstyle,]
res.append(galhtmlprefix % os.path.basename(sys.argv[0]))
res.append(galhtmlattr % ('validator',timenow()))
flist = [x for x in os.listdir(outputfolder) if not x.startswith('.')]
if len(flist) > 0:
res.append('<b>The following output files were created (click the filename to view/download a copy):</b><hr/>')
res.append('<table>\n')
for i,f in enumerate(flist):
fn = os.path.split(f)[-1]
res.append('<tr><td><a href="%s">%s</a></td></tr>\n' % (fn,fn))
res.append('</table><p/>\n')
res.append(galhtmlpostfix)
outf = open(htmlout,'w')
outf.write(''.join(res))
outf.write('\n')
outf.close()
os.chdir(galaxyhome + "/tools/proteonics/")
if CPAS_FILE == '':
subprocess.call(["python","validator_cli.py","--input-file1",DAT_FILE,"--peak-cutoff",PEAK_CUTOFF,"--ms1-cutoff", MS1_CUTOFF,"--ms2-cutoff", MS2_CUTOFF,"--max-peak-diff", MAX_PEAK_DIFF,"--output-folder", OUTPUT_FOLDER])
else:
subprocess.call(["python","validator_cli.py","--input-file1",DAT_FILE,"--input-file2", CPAS_FILE, "--peak-cutoff",PEAK_CUTOFF,"--ms1-cutoff", MS1_CUTOFF,"--ms2-cutoff", MS2_CUTOFF,"--max-peak-diff", MAX_PEAK_DIFF,"--output-folder", OUTPUT_FOLDER])
htmloutput(htmlout,OUTPUT_FOLDER)
|
chicagohemeonc/Validator-Max
|
code/validator_mediator.py
|
Python
|
gpl-3.0
| 4,744
|
[
"Galaxy"
] |
2cd1ff5952ff3df78d9a2890042ccc5ae2a8357ecd712d34ac7b76e49b0d94ed
|
#!/usr/bin/env python
'''unit testing code for pysam.
Execute in the :file:`tests` directory as it requires the Makefile
and data files located there.
'''
import sys
import os
import shutil
import gzip
import pysam
import unittest
import itertools
import subprocess
import glob
import re
DATADIR = 'tabix_data'
IS_PYTHON3 = sys.version_info[0] >= 3
def myzip_open(infile, mode="r"):
'''open compressed file and decode.'''
def _convert(f):
for l in f:
yield l.decode("ascii")
if IS_PYTHON3:
if mode == "r":
return _convert(gzip.open(infile, "r"))
        else:
            return gzip.open(infile, mode)
    else:
        # Python 2: gzip file objects already yield str lines, so no decoding step is needed
        return gzip.open(infile, mode)
def loadAndConvert(infile, encode=True):
'''load data from infile and convert all fields to bytes.
infile can be either plain or compressed (ending in .gz).
'''
data = []
if infile.endswith(".gz"):
for line in gzip.open(infile):
line = line.decode("ascii")
if line.startswith("#"):
continue
d = line.strip().split("\t")
if encode:
data.append([x.encode("ascii") for x in d])
else:
data.append(d)
else:
with open(infile) as f:
for line in f:
if line.startswith("#"):
continue
d = line.strip().split("\t")
if encode:
data.append([x.encode("ascii") for x in d])
else:
data.append(d)
return data
def splitToBytes(s):
'''split string and return list of bytes.'''
return [x.encode("ascii") for x in s.split("\t")]
def checkBinaryEqual(filename1, filename2):
'''return true if the two files are binary equal.'''
if os.path.getsize(filename1) != os.path.getsize(filename2):
return False
infile1 = open(filename1, "rb")
infile2 = open(filename2, "rb")
d1, d2 = infile1.read(), infile2.read()
found = False
for c1, c2 in zip(d1, d2):
if c1 != c2:
break
else:
found = True
infile1.close()
infile2.close()
return found
class TestIndexing(unittest.TestCase):
filename = os.path.join(DATADIR, "example.gtf.gz")
filename_idx = os.path.join(DATADIR, "example.gtf.gz.tbi")
def setUp(self):
self.tmpfilename = "tmp_%i.gtf.gz" % id(self)
shutil.copyfile(self.filename, self.tmpfilename)
def testIndexPreset(self):
'''test indexing via preset.'''
pysam.tabix_index(self.tmpfilename, preset="gff")
checkBinaryEqual(self.tmpfilename + ".tbi", self.filename_idx)
def tearDown(self):
os.unlink(self.tmpfilename)
os.unlink(self.tmpfilename + ".tbi")
class TestCompression(unittest.TestCase):
filename = os.path.join(DATADIR, "example.gtf.gz")
filename_idx = os.path.join(DATADIR, "example.gtf.gz.tbi")
preset = "gff"
def setUp(self):
self.tmpfilename = "tmp_%i" % id(self)
infile = gzip.open(self.filename, "rb")
outfile = open(self.tmpfilename, "wb")
outfile.write(infile.read())
outfile.close()
infile.close()
def testCompression(self):
'''see also issue 106'''
pysam.tabix_compress(self.tmpfilename, self.tmpfilename + ".gz")
checkBinaryEqual(self.tmpfilename, self.tmpfilename + ".gz")
def testIndexPresetUncompressed(self):
'''test indexing via preset.'''
pysam.tabix_index(self.tmpfilename, preset=self.preset)
# check if uncompressed file has been removed
self.assertEqual(os.path.exists(self.tmpfilename), False)
checkBinaryEqual(self.tmpfilename + ".gz", self.filename)
checkBinaryEqual(self.tmpfilename + ".gz.tbi", self.filename_idx)
def testIndexPresetCompressed(self):
'''test indexing via preset.'''
pysam.tabix_compress(self.tmpfilename, self.tmpfilename + ".gz")
pysam.tabix_index(self.tmpfilename + ".gz", preset=self.preset)
checkBinaryEqual(self.tmpfilename + ".gz", self.filename)
checkBinaryEqual(self.tmpfilename + ".gz.tbi", self.filename_idx)
def tearDown(self):
try:
os.unlink(self.tmpfilename)
os.unlink(self.tmpfilename + ".gz")
os.unlink(self.tmpfilename + ".gz.tbi")
except OSError:
pass
class TestCompressionSam(TestCompression):
filename = os.path.join(DATADIR, "example.sam.gz")
filename_index = os.path.join(DATADIR, "example.sam.gz.tbi")
preset = "sam"
class TestCompressionBed(TestCompression):
filename = os.path.join(DATADIR, "example.bed.gz")
filename_index = os.path.join(DATADIR, "example.bed.gz.tbi")
preset = "bed"
class TestCompressionVCF(TestCompression):
filename = os.path.join(DATADIR, "example.vcf.gz")
filename_index = os.path.join(DATADIR, "example.vcf.gz.tbi")
preset = "vcf"
class TestIteration(unittest.TestCase):
filename = os.path.join(DATADIR, "example.gtf.gz")
def setUp(self):
self.tabix = pysam.Tabixfile(self.filename)
lines = []
inf = gzip.open(self.filename, "rb")
for line in inf:
line = line.decode('ascii')
if line.startswith("#"):
continue
lines.append(line)
inf.close()
# creates index of contig, start, end, adds content without newline.
self.compare = [
(x[0][0], int(x[0][3]), int(x[0][4]), x[1])
for x in [(y.split("\t"), y[:-1]) for y in lines]]
def getSubset(self, contig=None, start=None, end=None):
if contig == None:
# all lines
subset = [x[3] for x in self.compare]
else:
if start != None and end == None:
# until end of contig
subset = [x[3]
for x in self.compare if x[0] == contig and x[2] > start]
elif start == None and end != None:
# from start of contig
subset = [x[3]
for x in self.compare if x[0] == contig and x[1] <= end]
elif start == None and end == None:
subset = [x[3] for x in self.compare if x[0] == contig]
else:
# all within interval
subset = [x[3] for x in self.compare if x[0] == contig and
min(x[2], end) - max(x[1], start) > 0]
return subset
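    # --- Illustrative note (not part of the original test file): the interval case above is a
    # plain overlap check; for a feature spanning (100, 200) and a query start=150, end=300:
    #   min(200, 300) - max(100, 150) = 200 - 150 = 50 > 0, so the feature is included.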
def checkPairwise(self, result, ref):
'''check pairwise results.
'''
result.sort()
ref.sort()
a = set(result)
b = set(ref)
self.assertEqual(len(result), len(ref),
"unexpected number of results: result=%i, expected ref=%i, differences are %s: %s"
% (len(result), len(ref),
a.difference(b),
b.difference(a)))
for x, d in enumerate(list(zip(result, ref))):
self.assertEqual(d[0], d[1],
"unexpected results in pair %i:\n'%s', expected\n'%s'" %
(x,
d[0],
d[1]))
def testAll(self):
result = list(self.tabix.fetch())
ref = self.getSubset()
self.checkPairwise(result, ref)
def testPerContig(self):
for contig in ("chr1", "chr2", "chr1", "chr2"):
result = list(self.tabix.fetch(contig))
ref = self.getSubset(contig)
self.checkPairwise(result, ref)
def testPerContigToEnd(self):
end = None
for contig in ("chr1", "chr2", "chr1", "chr2"):
for start in range(0, 200000, 1000):
result = list(self.tabix.fetch(contig, start, end))
ref = self.getSubset(contig, start, end)
self.checkPairwise(result, ref)
def testPerContigFromStart(self):
start = None
for contig in ("chr1", "chr2", "chr1", "chr2"):
for end in range(0, 200000, 1000):
result = list(self.tabix.fetch(contig, start, end))
ref = self.getSubset(contig, start, end)
self.checkPairwise(result, ref)
    def testPerContigNoneStartEnd(self):
start, end = None, None
for contig in ("chr1", "chr2", "chr1", "chr2"):
result = list(self.tabix.fetch(contig, start, end))
ref = self.getSubset(contig, start, end)
self.checkPairwise(result, ref)
def testPerInterval(self):
start, end = None, None
for contig in ("chr1", "chr2", "chr1", "chr2"):
for start in range(0, 200000, 2000):
for end in range(start, start + 2000, 500):
result = list(self.tabix.fetch(contig, start, end))
ref = self.getSubset(contig, start, end)
self.checkPairwise(result, ref)
def testInvalidIntervals(self):
# invalid intervals (start > end)
self.assertRaises(ValueError, self.tabix.fetch, "chr1", 0, -10)
self.assertRaises(ValueError, self.tabix.fetch, "chr1", 200, 0)
# out of range intervals
self.assertRaises(IndexError, self.tabix.fetch, "chr1", -10, 200)
self.assertRaises(IndexError, self.tabix.fetch, "chr1", -10, -20)
# unknown chromosome
self.assertRaises(KeyError, self.tabix.fetch, "chrUn")
# out of range access
# to be implemented
# self.assertRaises( IndexError, self.tabix.fetch, "chr1", 1000000, 2000000 )
# raise no error for invalid intervals
self.tabix.fetch("chr1", 100, 100)
def testGetContigs(self):
self.assertEqual(sorted(self.tabix.contigs), [b"chr1", b"chr2"])
# check that contigs is read-only
self.assertRaises(
AttributeError, setattr, self.tabix, "contigs", ["chr1", "chr2"])
def testHeader(self):
ref = []
inf = gzip.open(self.filename)
for x in inf:
x = x.decode("ascii")
if not x.startswith("#"):
break
ref.append(x[:-1].encode('ascii'))
inf.close()
header = list(self.tabix.header)
self.assertEqual(ref, header)
def testReopening(self):
'''test repeated opening of the same file.'''
def func1():
# opens any tabix file
inf = pysam.Tabixfile(self.filename)
return
for i in range(10000):
func1()
class TestParser(unittest.TestCase):
filename = os.path.join(DATADIR, "example.gtf.gz")
def setUp(self):
self.tabix = pysam.Tabixfile(self.filename)
self.compare = loadAndConvert(self.filename)
def testRead(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asTuple())):
self.assertEqual(self.compare[x], list(r))
self.assertEqual(len(self.compare[x]), len(r))
# test indexing
for c in range(0, len(r)):
self.assertEqual(self.compare[x][c], r[c])
# test slicing access
for c in range(0, len(r) - 1):
for cc in range(c + 1, len(r)):
self.assertEqual(self.compare[x][c:cc],
r[c:cc])
def testWrite(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asTuple())):
self.assertEqual(self.compare[x], list(r))
c = list(r)
for y in range(len(r)):
r[y] = "test_%05i" % y
c[y] = "test_%05i" % y
self.assertEqual([x.encode("ascii") for x in c], list(r))
self.assertEqual("\t".join(c), str(r))
# check second assignment
for y in range(len(r)):
r[y] = "test_%05i" % y
self.assertEqual([x.encode("ascii") for x in c], list(r))
self.assertEqual("\t".join(c), str(r))
def testUnset(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asTuple())):
self.assertEqual(self.compare[x], list(r))
c = list(r)
e = [x.decode('ascii') for x in r]
for y in range(len(r)):
r[y] = None
c[y] = None
e[y] = ""
self.assertEqual(c, list(r))
self.assertEqual("\t".join(e), str(r))
def testIteratorCompressed(self):
'''test iteration from compressed file.'''
with gzip.open(self.filename) as infile:
for x, r in enumerate(pysam.tabix_iterator(infile, pysam.asTuple())):
self.assertEqual(self.compare[x], list(r))
self.assertEqual(len(self.compare[x]), len(r))
# test indexing
for c in range(0, len(r)):
self.assertEqual(self.compare[x][c], r[c])
# test slicing access
for c in range(0, len(r) - 1):
for cc in range(c + 1, len(r)):
self.assertEqual(self.compare[x][c:cc],
r[c:cc])
def testIteratorUncompressed(self):
'''test iteration from uncompressed file.'''
tmpfilename = 'tmp_testIteratorUncompressed'
infile = gzip.open(self.filename, "rb")
outfile = open(tmpfilename, "wb")
outfile.write(infile.read())
outfile.close()
infile.close()
with open(tmpfilename) as infile:
for x, r in enumerate(pysam.tabix_iterator(infile, pysam.asTuple())):
self.assertEqual(self.compare[x], list(r))
self.assertEqual(len(self.compare[x]), len(r))
# test indexing
for c in range(0, len(r)):
self.assertEqual(self.compare[x][c], r[c])
# test slicing access
for c in range(0, len(r) - 1):
for cc in range(c + 1, len(r)):
self.assertEqual(self.compare[x][c:cc],
r[c:cc])
os.unlink(tmpfilename)
class TestIterators(unittest.TestCase):
filename = os.path.join(DATADIR, "example.gtf.gz")
iterator = pysam.tabix_generic_iterator
parser = pysam.asTuple
is_compressed = False
def setUp(self):
self.tabix = pysam.Tabixfile(self.filename)
self.compare = loadAndConvert(self.filename)
self.tmpfilename_uncompressed = 'tmp_TestIterators'
infile = gzip.open(self.filename, "rb")
outfile = open(self.tmpfilename_uncompressed, "wb")
outfile.write(infile.read())
outfile.close()
infile.close()
def open(self):
if self.is_compressed:
infile = gzip.open(self.filename)
else:
infile = open(self.tmpfilename_uncompressed)
return infile
def testIteration(self):
infile = self.open()
for x, r in enumerate(self.iterator(infile, self.parser())):
self.assertEqual(self.compare[x], list(r))
self.assertEqual(len(self.compare[x]), len(r))
# test indexing
for c in range(0, len(r)):
self.assertEqual(self.compare[x][c], r[c])
# test slicing access
for c in range(0, len(r) - 1):
for cc in range(c + 1, len(r)):
self.assertEqual(self.compare[x][c:cc],
r[c:cc])
def testClosedFile(self):
'''test for error when iterating from closed file.'''
infile = self.open()
infile.close()
# iterating from a closed file should raise a value error
self.assertRaises(ValueError, self.iterator, infile, self.parser())
def testClosedFileIteration(self):
'''test for error when iterating from file that has been closed'''
infile = self.open()
i = self.iterator(infile, self.parser())
        x = next(i)
infile.close()
# Not implemented
#self.assertRaises( ValueError, i.next )
    def tearDown(self):
os.unlink(self.tmpfilename_uncompressed)
class TestIteratorsGenericCompressed(TestIterators):
is_compressed = True
class TestIteratorsFileCompressed(TestIterators):
iterator = pysam.tabix_file_iterator
is_compressed = True
class TestIteratorsFileUncompressed(TestIterators):
iterator = pysam.tabix_file_iterator
is_compressed = False
class TestGTF(TestParser):
def testRead(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asGTF())):
c = self.compare[x]
self.assertEqual(len(c), len(r))
self.assertEqual(list(c), list(r))
self.assertEqual(c, splitToBytes(str(r)))
self.assertTrue(r.gene_id.startswith("ENSG"))
if r.feature != b'gene':
self.assertTrue(r.transcript_id.startswith("ENST"))
self.assertEqual(c[0], r.contig)
class TestBed(unittest.TestCase):
filename = os.path.join(DATADIR, "example.bed.gz")
def setUp(self):
self.tabix = pysam.Tabixfile(self.filename)
self.compare = loadAndConvert(self.filename)
def testRead(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asBed())):
c = self.compare[x]
self.assertEqual(len(c), len(r))
self.assertEqual(c, splitToBytes(str(r)))
self.assertEqual(list(c), list(r))
self.assertEqual(c[0], r.contig)
self.assertEqual(int(c[1]), r.start)
self.assertEqual(int(c[2]), r.end)
def testWrite(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asBed())):
c = self.compare[x]
self.assertEqual(c, splitToBytes(str(r)))
self.assertEqual(list(c), list(r))
r.contig = "test"
self.assertEqual(b"test", r.contig)
self.assertEqual(b"test", r[0])
r.start += 1
self.assertEqual(int(c[1]) + 1, r.start)
self.assertEqual(str(int(c[1]) + 1), r[1].decode("ascii"))
r.end += 1
self.assertEqual(int(c[2]) + 1, r.end)
self.assertEqual(str(int(c[2]) + 1), r[2].decode("ascii"))
class TestVCF(unittest.TestCase):
filename = os.path.join(DATADIR, "example.vcf40")
def setUp(self):
self.tmpfilename = "tmp_%s.vcf" % id(self)
shutil.copyfile(self.filename, self.tmpfilename)
pysam.tabix_index(self.tmpfilename, preset="vcf")
def tearDown(self):
os.unlink(self.tmpfilename + ".gz")
if os.path.exists(self.tmpfilename + ".gz.tbi"):
os.unlink(self.tmpfilename + ".gz.tbi")
class TestVCFFromTabix(TestVCF):
columns = ("contig", "pos", "id",
"ref", "alt", "qual",
"filter", "info", "format")
def setUp(self):
TestVCF.setUp(self)
self.tabix = pysam.Tabixfile(self.tmpfilename + ".gz")
self.compare = loadAndConvert(self.filename)
def testRead(self):
ncolumns = len(self.columns)
for x, r in enumerate(self.tabix.fetch(parser=pysam.asVCF())):
c = self.compare[x]
for y, field in enumerate(self.columns):
# it is ok to have a missing format column
if y == 8 and y == len(c):
continue
if field == "pos":
self.assertEqual(int(c[y]) - 1, getattr(r, field))
self.assertEqual(int(c[y]) - 1, r.pos)
else:
self.assertEqual(c[y], getattr(r, field),
"mismatch in field %s: %s != %s" %
(field, c[y], getattr(r, field)))
if len(c) == 8:
self.assertEqual(0, len(r))
else:
self.assertEqual(len(c), len(r) + ncolumns)
for y in range(len(c) - ncolumns):
self.assertEqual(c[ncolumns + y], r[y])
def testWrite(self):
ncolumns = len(self.columns)
for x, r in enumerate(self.tabix.fetch(parser=pysam.asVCF())):
c = self.compare[x]
# check unmodified string
cmp_string = str(r)
ref_string = "\t".join([x.decode() for x in c])
self.assertEqual(ref_string, cmp_string)
# set fields and compare field-wise
for y, field in enumerate(self.columns):
# it is ok to have a missing format column
if y == 8 and y == len(c):
continue
if field == "pos":
rpos = getattr(r, field)
self.assertEqual(int(c[y]) - 1, rpos)
self.assertEqual(int(c[y]) - 1, r.pos)
# increment pos by 1
setattr(r, field, rpos + 1)
self.assertEqual(getattr(r, field), rpos + 1)
c[y] = str(int(c[y]) + 1)
else:
setattr(r, field, "test_%i" % y)
c[y] = ("test_%i" % y).encode('ascii')
self.assertEqual(c[y], getattr(r, field),
"mismatch in field %s: %s != %s" %
(field, c[y], getattr(r, field)))
if len(c) == 8:
self.assertEqual(0, len(r))
else:
self.assertEqual(len(c), len(r) + ncolumns)
for y in range(len(c) - ncolumns):
c[ncolumns + y] = ("test_%i" % y).encode('ascii')
r[y] = ("test_%i" % y).encode('ascii')
self.assertEqual(c[ncolumns + y], r[y])
class TestVCFFromVCF(TestVCF):
columns = ("chrom", "pos", "id",
"ref", "alt", "qual",
"filter", "info", "format")
# tests failing while parsing
fail_on_parsing = (
(5, "Flag fields should not have a value"),
(9, "aouao"),
(13, "aoeu"),
(18, "Error BAD_NUMBER_OF_PARAMETERS"),
(24, "Error HEADING_NOT_SEPARATED_BY_TABS"))
# tests failing on opening
fail_on_opening = ((24, "Error HEADING_NOT_SEPARATED_BY_TABS"),
)
def setUp(self):
TestVCF.setUp(self)
self.vcf = pysam.VCF()
self.compare = loadAndConvert(self.filename, encode=False)
def testConnecting(self):
fn = os.path.basename(self.filename)
for x, msg in self.fail_on_opening:
if "%i.vcf" % x == fn:
self.assertRaises(ValueError,
self.vcf.connect,
self.tmpfilename + ".gz")
else:
self.vcf.connect(self.tmpfilename + ".gz")
def testParsing(self):
ncolumns = len(self.columns)
fn = os.path.basename(self.filename)
with open(self.filename) as f:
for x, msg in self.fail_on_opening:
if "%i.vcf" % x == fn:
self.assertRaises(ValueError, self.vcf.parse, f)
return
else:
iter = self.vcf.parse(f)
for x, msg in self.fail_on_parsing:
if "%i.vcf" % x == fn:
self.assertRaises(ValueError, list, iter)
break
# python 2.7
# self.assertRaisesRegexp( ValueError, re.compile(msg), self.vcf.parse, f )
else:
# do the actual parsing
for x, r in enumerate(iter):
c = self.compare[x]
for y, field in enumerate(self.columns):
# it is ok to have a missing format column
if y == 8 and y == len(c):
continue
val = r[field]
if field == "pos":
self.assertEqual(int(c[y]) - 1, val)
elif field == "alt":
if c[y] == ".":
# convert . to empty list
self.assertEqual([], val,
"mismatch in field %s: expected %s, got %s" %
(field, c[y], val))
else:
# convert to list
self.assertEqual(c[y].split(","), val,
"mismatch in field %s: expected %s, got %s" %
(field, c[y], val))
elif field == "filter":
if c[y] == "PASS" or c[y] == ".":
# convert PASS to empty list
self.assertEqual([], val,
"mismatch in field %s: expected %s, got %s" %
(field, c[y], val))
else:
# convert to list
self.assertEqual(c[y].split(";"), val,
"mismatch in field %s: expected %s, got %s" %
(field, c[y], val))
elif field == "info":
# tests for info field not implemented
pass
elif field == "qual" and c[y] == ".":
self.assertEqual(-1, val,
"mismatch in field %s: expected %s, got %s" %
(field, c[y], val))
elif field == "format":
# format field converted to list
self.assertEqual(c[y].split(":"), val,
"mismatch in field %s: expected %s, got %s" %
(field, c[y], val))
elif type(val) in (int, float):
if c[y] == ".":
self.assertEqual(None, val,
"mismatch in field %s: expected %s, got %s" %
(field, c[y], val))
else:
self.assertEqual(float(c[y]), float(val),
"mismatch in field %s: expected %s, got %s" %
(field, c[y], val))
else:
self.assertEqual(c[y], val,
"mismatch in field %s: expected %s, got %s" %
(field, c[y], val))
############################################################################
# create a test class for each example vcf file.
# Two test classes are created for each file:
# 1. testing pysam/tabix access
# 2. testing the VCF class
# (see the expanded example after the loop below)
vcf_files = glob.glob(os.path.join(DATADIR, "vcf", "*.vcf"))
for vcf_file in vcf_files:
n = "VCFFromTabixTest_%s" % os.path.basename(vcf_file[:-4])
globals()[n] = type(n, (TestVCFFromTabix,), dict(filename=vcf_file,))
n = "VCFFromVCFTest_%s" % os.path.basename(vcf_file[:-4])
globals()[n] = type(n, (TestVCFFromVCF,), dict(filename=vcf_file,))
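# For a (hypothetical) data file "example.vcf" the loop above is equivalent
# to defining by hand:
#
#   class VCFFromTabixTest_example(TestVCFFromTabix):
#       filename = os.path.join(DATADIR, "vcf", "example.vcf")
#
#   class VCFFromVCFTest_example(TestVCFFromVCF):
#       filename = os.path.join(DATADIR, "vcf", "example.vcf")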
############################################################################
class TestRemoteFileHTTP(unittest.TestCase):
url = "http://genserv.anat.ox.ac.uk/downloads/pysam/test/example.gtf.gz"
region = "chr1:1-1000"
local = os.path.join(DATADIR, "example.gtf.gz")
def testFetchAll(self):
remote_file = pysam.Tabixfile(self.url, "r")
remote_result = list(remote_file.fetch())
local_file = pysam.Tabixfile(self.local, "r")
local_result = list(local_file.fetch())
self.assertEqual(len(remote_result), len(local_result))
for x, y in zip(remote_result, local_result):
self.assertEqual(x, y)
class TestIndexArgument(unittest.TestCase):
filename_src = os.path.join(DATADIR, "example.vcf.gz")
filename_dst = "tmp_example.vcf.gz"
index_src = os.path.join(DATADIR, "example.vcf.gz.tbi")
index_dst = "tmp_index_example.vcf.gz.tbi"
preset = "vcf"
def testFetchAll(self):
shutil.copyfile(self.filename_src, self.filename_dst)
shutil.copyfile(self.index_src, self.index_dst)
same_basename_file = pysam.Tabixfile(
self.filename_src, "r", index=self.index_src)
same_basename_results = list(same_basename_file.fetch())
diff_index_file = pysam.Tabixfile(
self.filename_dst, "r", index=self.index_dst)
diff_index_result = list(diff_index_file.fetch())
self.assertEqual(len(same_basename_results), len(diff_index_result))
for x, y in zip(same_basename_results, diff_index_result):
self.assertEqual(x, y)
if __name__ == "__main__":
unittest.main()
|
genome-vendor/python-pysam
|
tests/tabix_test.py
|
Python
|
mit
| 29,309
|
[
"pysam"
] |
a37a4364a703adcb31fa928f4079b1410f177c94699a8d1fc03f94d243ea858e
|
__author__ = "E. A. Tacao <e.a.tacao |at| estadao.com.br>"
__date__ = "12 Feb 2006, 22:00 GMT-03:00"
__version__ = "0.03"
__doc__ = """
ButtonTreeCtrlPanel is a widget that can hold check buttons, tri-state check
buttons and radio buttons, in any combination, and can display them
hierarchically.
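
A minimal usage sketch (the parent window "frame", the handler "on_changed"
and the item labels below are only illustrative):

    panel = ButtonTreeCtrlPanel(frame)
    group = panel.AddItem("Options")                      # plain container item
    panel.AddItem("Enable logging", parent=group,
                  style=wx.CHK_2STATE, value=True)        # check button
    panel.AddItem("Mode A", parent=group,
                  style=wx.RB_SINGLE, value=True)         # radio button
    panel.EnsureFirstVisible()

    def on_changed(evt):
        # evt.item and evt.val carry the clicked item and its new value
        checked = panel.GetStringItemsChecked(group)

    panel.Bind(EVT_CHANGED, on_changed)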
About:
ButtonTreeCtrlPanel is distributed under the wxWidgets license.
For all kinds of problems, requests, enhancements, bug reports, etc.,
please drop me an e-mail.
For updates please visit <http://j.domaindlx.com/elements28/wxpython/>.
"""
import cStringIO
import wx
from wx.lib.newevent import NewEvent
#----------------------------------------------------------------------------
(ButtonTreeCtrlPanelEvent, EVT_BUTTONTREECTRLPANEL) = NewEvent()
EVT_CHANGED = EVT_BUTTONTREECTRLPANEL
#----------------------------------------------------------------------------
class ButtonTreeCtrlPanel(wx.Panel):
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.WANTS_CHARS):
wx.Panel.__init__(self, parent, id, pos, size, style)
self.tree = wx.TreeCtrl(self, style=wx.TR_NO_LINES|wx.TR_HIDE_ROOT)
il = self.il = wx.ImageList(16, 16)
self.tree.SetImageList(il)
for bl in ["checkbox_checked", "checkbox_unchecked", "checkbox_tri",
"radiobox_checked", "radiobox_unchecked"]:
bitmap = getattr(self.__class__, bl).GetBitmap()
setattr(self, bl, il.Add(bitmap))
bmp = wx.ArtProvider.GetBitmap(wx.ART_FOLDER, wx.ART_TOOLBAR, (16, 16))
self.empty_bitmap = il.Add(bmp)
self.root = self.tree.AddRoot("Root Item for ButtonTreeCtrlPanel")
self.Bind(wx.EVT_SIZE, self.OnSize)
self.tree.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftClicks)
self.tree.Bind(wx.EVT_LEFT_DOWN, self.OnLeftClicks)
self.tree.Bind(wx.EVT_RIGHT_DOWN, self.OnRightClick)
self.allitems = []
wx.CallAfter(self.OnSize)
def _doLogicTest(self, style, value, item):
if style in [wx.CHK_2STATE, wx.CHK_3STATE]:
n = [self.checkbox_unchecked, self.checkbox_checked, \
self.checkbox_tri][value]
self.tree.SetPyData(item, (value, style))
self.tree.SetItemImage(item, n, wx.TreeItemIcon_Normal)
elif style == wx.RB_SINGLE:
if value:
parent = self.tree.GetItemParent(item)
(child, cookie) = self.tree.GetFirstChild(parent)
if self.tree.GetPyData(child):
self.tree.SetPyData(child, (False, wx.RB_SINGLE))
self.tree.SetItemImage(child, self.radiobox_unchecked, \
wx.TreeItemIcon_Normal)
for x in range(1, self.tree.GetChildrenCount(parent, False)):
(child, cookie) = self.tree.GetNextChild(parent, cookie)
if self.tree.GetPyData(child):
self.tree.SetPyData(child, (False, wx.RB_SINGLE))
self.tree.SetItemImage(child, self.radiobox_unchecked, \
wx.TreeItemIcon_Normal)
self.tree.SetPyData(item, (True, wx.RB_SINGLE))
self.tree.SetItemImage(item, self.radiobox_checked, \
wx.TreeItemIcon_Normal)
else:
self.tree.SetPyData(item, (False, wx.RB_SINGLE))
self.tree.SetItemImage(item, self.radiobox_unchecked, \
wx.TreeItemIcon_Normal)
def _getItems(self, parent=None, value=None):
if not parent:
parent = self.root
cil = []
(child, cookie) = self.tree.GetFirstChild(parent)
if child.IsOk():
d = self.tree.GetPyData(child)
if value is None or (d and d[0] == value):
cil.append(child)
for x in range(1, self.tree.GetChildrenCount(parent, False)):
(child, cookie) = self.tree.GetNextChild(parent, cookie)
if child.IsOk():
d = self.tree.GetPyData(child)
if value is None or (d and d[0] == value):
cil.append(child)
return cil
def AddItem(self, label, bmp=None, parent=None, style=None, value=False):
v = None
if bmp:
n = self.il.Add(bmp)
if not parent:
parent = self.root
if style is not None:
v = (value, style)
this_item = self.tree.AppendItem(parent, label)
self.tree.SetPyData(this_item, v)
if v:
self._doLogicTest(style, value, this_item)
else:
if bmp is None:
bmp = self.empty_bitmap
else:
bmp = self.il.Add(bmp)
self.tree.SetItemImage(this_item, bmp, wx.TreeItemIcon_Normal)
self.allitems.append(this_item)
[self.tree.Expand(x) for x in self.allitems]
return this_item
def ExpandItem(self, item):
self.tree.Expand(item)
def CollapseItem(self, item):
self.tree.Collapse(item)
def EnsureFirstVisible(self):
(child, cookie) = self.tree.GetFirstChild(self.root)
if child.IsOk():
self.tree.SelectItem(child)
self.tree.EnsureVisible(child)
def SetItemValue(self, item, value):
data = self.tree.GetPyData(item)
if data:
self._doLogicTest(data[1], value, item)
def GetItemValue(self, item):
data = self.tree.GetPyData(item)
if data:
return data[0]
else:
return None
def GetItemByLabel(self, label, parent=None):
r = None
for item in self._getItems(parent):
if self.tree.GetItemText(item) == label:
r = item; break
return r
def GetAllItems(self):
return self.allitems
def GetRootItems(self):
cil = []
for x in range(0, len(self.allitems)):
d = self.tree.GetPyData(self.allitems[x])
if not d:
cil.append(self.allitems[x])
return cil
def GetStringRootItems(self):
        return [self.tree.GetItemText(x) for x in self.GetRootItems()]
def GetItemsUnchecked(self, parent=None):
return self._getItems(parent, 0)
def GetItemsChecked(self, parent=None):
return self._getItems(parent, 1)
def GetItemsTri(self, parent=None):
return self._getItems(parent, 2)
def GetStringItemsUnchecked(self, parent=None):
return [self.tree.GetItemText(x) \
for x in self.GetItemsUnchecked(parent)]
def GetStringItemsChecked(self, parent=None):
return [self.tree.GetItemText(x) for x in self.GetItemsChecked(parent)]
def GetStringItemsTri(self, parent=None):
return [self.tree.GetItemText(x) for x in self.GetItemsTri(parent)]
def OnRightClick(self, evt):
item, flags = self.tree.HitTest(evt.GetPosition())
self.tree.SelectItem(item)
def OnLeftClicks(self, evt):
item, flags = self.tree.HitTest(evt.GetPosition())
if item:
text, data = self.tree.GetItemText(item), self.tree.GetPyData(item)
if data:
style = data[1]
if style == wx.CHK_2STATE:
value = not data[0]
elif style == wx.CHK_3STATE:
value = data[0] + 1
if value == 3: value = 0
else:
value = True
self._doLogicTest(style, value, item)
                if value != data[0]:
nevt = ButtonTreeCtrlPanelEvent(obj=self, id=self.GetId(),
item=item, val=value)
wx.PostEvent(self, nevt)
evt.Skip()
def OnSize(self, evt=None):
self.tree.SetSize(self.GetClientSize())
# # Images generated by encode_bitmaps.py -----------------------------
from wx.lib.embeddedimage import PyEmbeddedImage
ButtonTreeCtrlPanel.checkbox_unchecked = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAEFJ"
"REFUOI3tkzsOACAUwsrT+9/Yz6yDieJkZKfpAFIknITVBjJAq6XtFhVJ9wxm6iqzrW3wAU8A"
"hiGdTNo2kHvnDr+YDCrzE+JlAAAAAElFTkSuQmCC")
ButtonTreeCtrlPanel.radiobox_checked = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAHFJ"
"REFUOI2tUtESgCAIA+3//1jpqW7R5tkRb8o2GODeulWildhmdqhEzBH49tad4TxbyMQXIQk9"
"BJCcgSpHZ8DaVRZugasCAmOOYJXxT24BQau5lNcoBdCK8m8mtqAILE87YJ7VHP49pJXQ9il/"
"jfIaT195QDiwOHL5AAAAAElFTkSuQmCC")
ButtonTreeCtrlPanel.radiobox_unchecked = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAGdJ"
"REFUOI3NkksSgDAIQ4F6/xtru9LBmHTq4EJ2Hchr+LhHs0pESW1mm0r0Y+/57dGc1Tm2gMKH"
"AEA3QBZjocrRGTC7qoULcP6gCnMuuylv4UcA1h8GmxN1wCAK/O0hzUDLp/w2ylsY3w4wQW9/"
"cegAAAAASUVORK5CYII=")
ButtonTreeCtrlPanel.checkbox_checked = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAGdJ"
"REFUOI2tk1EOgDAIQ1vm/W+s82uJqbAxkW9eU6CQ1lApK9EADgDo19l3QVrjfw5UdVbqNu0g"
"GjMlMNvRS0CbVwt2HQzoCUf7CUfIwK6ANq8u4zoYUOas4QgZGJAgfYl0OcqsvvMNP8koKiUm"
"7JsAAAAASUVORK5CYII=")
ButtonTreeCtrlPanel.checkbox_tri = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAHBJ"
"REFUOI2tk0EOgDAIBJfqq9Sj+mj1aP1We2piCCCKnJnN0GyJUofIpBANoAeAaRzKW/DMF/1n"
"wFOt4bZug2PfxDNdARosBvBlC1YNGnSH52UV30c9wQOLAXzZglWDBj3BaoAXBliRvlQ6XGWK"
"fucKTYUl4c5UOHYAAAAASUVORK5CYII=")
#
##
### eof
|
ktan2020/legacy-automation
|
win/Lib/site-packages/wx-3.0-msw/wx/lib/analogclock/lib_setup/buttontreectrlpanel.py
|
Python
|
mit
| 10,055
|
[
"VisIt"
] |
72d76c07440c2aeb3f5e7975674c127b2853fae6bd35968afe605fa6356c8132
|