| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
#
# A script that opens the neuroConstruct project LarkumEtAl2009 and runs multiple simulations, stimulating each terminal apical branch with a varying number of synapses.
#
# Author: Matteo Farinella
from sys import *
from java.io import File
from java.lang import System
from java.util import ArrayList
from ucl.physiol.neuroconstruct.project import ProjectManager
from ucl.physiol.neuroconstruct.neuron import NeuronFileManager
from ucl.physiol.neuroconstruct.utils import NumberGenerator
from ucl.physiol.neuroconstruct.nmodleditor.processes import ProcessManager
from ucl.physiol.neuroconstruct.simulation import SimulationData
from ucl.physiol.neuroconstruct.gui import SimulationRerunFrame
from ucl.physiol.neuroconstruct.gui.plotter import PlotManager
from ucl.physiol.neuroconstruct.gui.plotter import PlotCanvas
from ucl.physiol.neuroconstruct.dataset import DataSet
from math import *
import time
import shutil
import random
import os
import subprocess
# Load the original project
projName = "LarkumEtAl2009"
projFile = File("/home/matteo/neuroConstruct/models/"+projName+"/"+projName+".ncx")
print "Loading project from file: " + projFile.getAbsolutePath()+", exists: "+ str(projFile.exists())
pm = ProjectManager()
myProject = pm.loadProject(projFile)
simConfig = myProject.simConfigInfo.getSimConfig("Default Simulation Configuration")#
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
numGenerated = myProject.generatedCellPositions.getNumberInAllCellGroups()
simsRunning = []
def updateSimsRunning():
simsFinished = []
for sim in simsRunning:
timeFile = File(myProject.getProjectMainDirectory(), "simulations/"+sim+"/time.dat")
#print "Checking file: "+timeFile.getAbsolutePath() +", exists: "+ str(timeFile.exists())
if (timeFile.exists()):
simsFinished.append(sim)
if(len(simsFinished)>0):
for sim in simsFinished:
simsRunning.remove(sim)
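# Hedged sketch (added for illustration, not part of the original script): a small helper,
# built only from the simsRunning list and updateSimsRunning() above, that blocks until
# fewer than maxSims simulations are still running before another one is launched.
def waitForFreeSimSlot(maxSims):
    updateSimsRunning()
    while len(simsRunning) >= maxSims:
        print "Waiting for a free simulation slot (%i running)..." % len(simsRunning)
        time.sleep(5)
        updateSimsRunning()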
if numGenerated > 0:
print "Generating NEURON scripts..."
myProject.neuronFileManager.setQuitAfterRun(1) # Remove this line to leave the NEURON sim windows open after finishing
myProject.neuronSettings.setCopySimFiles(1) # 1 copies hoc/mod files to PySim_0 etc. and will allow multiple sims to run at once
myProject.neuronSettings.setGraphicsMode(False) # Run NEURON without GUI
# Note same network structure will be used for each!
# Change this number to the number of processors you wish to use on your local machine
maxNumSimultaneousSims = 100
#multiple simulation settings:
prefix = "" #string that will be added to the name of the simulations to identify the simulation set
trials = 10
Nbranches = 28
Configuration = ["NMDAspike input"]
apical_branch = ["apical17","apical18","apical21","apical23","apical24","apical25","apical27","apical28","apical31","apical34","apical35","apical37","apical38","apical44","apical46","apical52","apical53","apical54","apical56","apical57","apical61","apical62","apical65","apical67","apical68","apical69","apical72","apical73"]
apical_ID =[4460,4571,4793,4961,4994,5225,5477,5526,5990,6221,6274,6523,6542,6972,7462,8026,8044,8088,8324,8468,8685,8800,8966,9137,9160,9186,9592,9639]
apical_lenght = [98,69,78,26,34,166,161,49,143,55,87,25,38,73,194,19,22,26,25,129,138,95,42,89,21,62,26,18]
apical_plot = ["pyrCML_apical17_V","pyrCML_apical18_V","pyrCML_apical21_V","pyrCML_apical23_V","pyrCML_apical24_V","pyrCML_apical25_V","pyrCML_apical27_V","pyrCML_apical28_V","pyrCML_apical31_V","pyrCML_apical34_V","pyrCML_apical35_V","pyrCML_apical37_V","pyrCML_apical38_V","pyrCML_apical44_V","pyrCML_apical46_V","pyrCML_apical52_V","pyrCML_apical53_V","pyrCML_apical54_V","pyrCML_apical56_V","pyrCML_apical57_V","pyrCML_apical61_V","pyrCML_apical62_V","pyrCML_apical65_V","pyrCML_apical67_V","pyrCML_apical68_V","pyrCML_apical69_V","pyrCML_apical72_V","pyrCML_apical73_V"]
print "Going to run " +str(int(trials*Nbranches)) + " simulations"
refStored = []
simGroups = ArrayList()
simInputs = ArrayList()
simPlots = ArrayList()
stringConfig = Configuration[0]
print "nConstruct using SIMULATION CONFIGURATION: " +stringConfig
simConfig = myProject.simConfigInfo.getSimConfig(stringConfig)
for y in range(1, len(apical_branch)):
branch = apical_branch[y]
prefix = ""#branch
for x in range(1, 7):
synapses = x*5
for t in range(0,trials):
#empty vectors
simGroups = ArrayList()
simInputs = ArrayList()
simPlots = ArrayList()
stim = myProject.elecInputInfo.getStim("NMDAspike")
location = stim.getSegChooser()
location.setGroup(branch)
location.setNumberOfSegments(synapses)
myProject.elecInputInfo.updateStim(stim)
simGroups.add("pyrCML_group")
simInputs.add(stim.getReference())
simPlots.add(apical_plot[y])
simPlots.add("pyrCML_soma_V")
simConfig.setCellGroups(simGroups)
simConfig.setInputs(simInputs)
simConfig.setPlots(simPlots)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
print "NMDAspike in "+branch+" triggered by "+str(synapses)+" synapses"
#########################################################################################'''
simRef = prefix+"IO_ID"+str(apical_ID[y])+"_"+str(synapses)+"syn_"+str(t)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK #####
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(60) # Wait for sim to be kicked off
#####################'''
### end for t (trials)
### end for x (synapses)
### end for y (branches)
######## Extracting simulations results ###############
y=-1
for sim in refStored:
y=y+1
pullSimFilename = "pullsim.sh"
path = "/home/matteo/neuroConstruct/models/"+projName
print "\n------ Checking directory: " + path +"/simulations"+"/"+sim
pullsimFile = path+"/simulations/"+sim+"/"+pullSimFilename
if os.path.isfile(pullsimFile):
print pullSimFilename+" exists and will be executed..."
process = subprocess.Popen("cd "+path+"/simulations/"+sim+"/"+";./"+pullSimFilename, shell=True, stdout=subprocess.PIPE)
stdout_value = process.communicate()[0]
process.wait()
else:
print "Simulation not finished"
if os.path.isfile(path+"/simulations/"+sim+"/pyrCML_group_0.dat"):
print "Simulation results recovered from remote cluster."
simDir = File(path+"/simulations/"+sim)
newFileSoma = path+"/recordings/"+sim+".soma"
shutil.copyfile(path+"/simulations/"+sim+"/pyrCML_group_0.dat" , newFileSoma)
newFileApical = path+"/recordings/"+sim+".apical"
for ID in apical_ID:
if os.path.isfile(path+"/simulations/"+sim+"/pyrCML_group_0."+str(ID)+".dat"):
shutil.copyfile(path+"/simulations/"+sim+"/pyrCML_group_0."+str(ID)+".dat" , newFileApical)
print "Simulation was successful. "
print "Results saved."
print
else:
print "Simulation failed!"
### '''
| pgleeson/TestArea | models/LarkumEtAl2009/pythonScripts/PNMDAs_singlebranches.py | Python | gpl-2.0 | 8,519 | ["NEURON"] | 65105d5e6a6ed726062aebeae3ad406ee0c9c5fa475a1467162fec955b7b84e1 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class NetcdfFortran(AutotoolsPackage):
"""Fortran interface for NetCDF4"""
homepage = "http://www.unidata.ucar.edu/software/netcdf"
url = "http://www.unidata.ucar.edu/downloads/netcdf/ftp/netcdf-fortran-4.4.3.tar.gz"
version('4.4.4', 'e855c789cd72e1b8bc1354366bf6ac72')
version('4.4.3', 'bfd4ae23a34635b273d3eb0d91cbde9e')
depends_on('netcdf')
@property
def libs(self):
libraries = ['libnetcdff']
# This package installs both shared and static libraries. Permit
# clients to query which one they want.
query_parameters = self.spec.last_query.extra_parameters
shared = 'shared' in query_parameters
return find_libraries(
libraries, root=self.prefix, shared=shared, recurse=True
)
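# Hedged usage note (added; the example spec below is hypothetical): a dependent package
# selects between the shared and static libnetcdff served by the libs property above
# through Spack's spec query parameters, e.g.
#
#     fortran_libs = spec['netcdf-fortran:shared'].libs   # shared variant
#     fortran_libs = spec['netcdf-fortran'].libs           # static, since 'shared' is absent
#     link_line = fortran_libs.link_flags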
| TheTimmy/spack | var/spack/repos/builtin/packages/netcdf-fortran/package.py | Python | lgpl-2.1 | 2,046 | ["NetCDF"] | 4d48de2ae946304626e18e2fd7f511cf189f508803fc9f0a2f61c912c80ec896 |
|
from __future__ import print_function
from pprint import pprint
import sys
sys.path.append( 'external/pycparser' )
from pycparser import c_parser, c_ast, parse_file
# Portable cpp path for Windows and Linux/Unix
CPPPATH = '../utils/cpp.exe' if sys.platform == 'win32' else 'cpp'
class IdVisitor(c_ast.NodeVisitor):
def __init__(self):
self.idList_ = []
def visit_ID(self, node):
self.idList_.append( node.name )
def idDefs(filename):
ast = parse_file(
filename,
use_cpp=True,
cpp_path=CPPPATH,
cpp_args=[ "-nostdinc" ]
)
# c.f. http://stackoverflow.com/questions/10353902/any-way-to-get-the-c-preproccessor-to-ignore-all-includes
v = IdVisitor()
v.visit(ast)
print( v.idList_ )
if __name__ == "__main__":
if len(sys.argv) > 1:
filename = sys.argv[1]
else:
filename = 'c_files/hash.c'
idDefs(filename)
| arunksaha/casescore | pycparser_id.py | Python | bsd-3-clause | 916 | ["VisIt"] | bd7bdc9f0805d908768fbf7de9e3c5d27584d829800239e01f67b92411a2c30a |
|
"""Accessors for NAMD FEP datasets.
"""
from os.path import dirname, join
from glob import glob
from .. import Bunch
def load_tyr2ala():
"""Load the NAMD tyrosine to alanine mutation dataset.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
- 'data' : the data files by alchemical leg
- 'DESCR': the full description of the dataset
"""
module_path = dirname(__file__)
data = {'forward': glob(join(module_path, 'tyr2ala/in-aqua/forward/*.fepout.bz2')),
'backward': glob(join(module_path, 'tyr2ala/in-aqua/backward/*.fepout.bz2'))}
with open(join(module_path, 'tyr2ala', 'descr.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data,
DESCR=fdescr)
def load_idws():
"""Load the NAMD IDWS dataset.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
- 'data' : the data files by alchemical leg
- 'DESCR': the full description of the dataset
"""
module_path = dirname(__file__)
data = {'forward': glob(join(module_path, 'idws', 'idws?.fepout.bz2'))}
with open(join(module_path, 'idws', 'descr.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data,
DESCR=fdescr)
def load_restarted():
"""Load the NAMD IDWS dataset.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
- 'data' : the data files by alchemical leg
- 'DESCR': the full description of the dataset
"""
module_path = dirname(__file__)
data = {'both': glob(join(module_path, 'restarted', 'restarted*.fepout.bz2'))}
with open(join(module_path, 'restarted', 'descr.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data,
DESCR=fdescr)
def load_restarted_reversed():
"""Load the NAMD IDWS dataset, run from lambda = 1 -> 0, with interruptions and restarts.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
- 'data' : the data files by alchemical leg
- 'DESCR': the full description of the dataset
"""
module_path = dirname(__file__)
data = {'both': glob(join(module_path, 'restarted_reversed', 'restarted_reversed*.fepout.bz2'))}
with open(join(module_path, 'restarted_reversed', 'descr.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data,
DESCR=fdescr)
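# Hedged usage sketch (added; not part of the upstream module): how the Bunch returned by
# the accessors above is typically consumed. The file names are whatever glob() found in the
# installed package data, so the listing is illustrative only.
def _demo_print_tyr2ala_files():
    dataset = load_tyr2ala()
    print(dataset.DESCR)                 # full description of the dataset
    for leg in ('forward', 'backward'):  # the two alchemical legs
        for fepout in dataset['data'][leg]:
            print(leg + ": " + fepout)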
| alchemistry/alchemtest | src/alchemtest/namd/access.py | Python | bsd-3-clause | 2,582 | ["NAMD"] | 1b88a67eff778b26c6c9f80e77fc4def5fed5d1ad7b53310fe47114982a70f74 |
|
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class is randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
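# Hedged usage sketch (added; not part of the upstream scikit-learn source): a minimal smoke
# test for make_classification showing the shapes and label range described in the docstring.
def _demo_make_classification():
    X, y = make_classification(n_samples=200, n_features=20, n_informative=5,
                               n_redundant=2, n_classes=3, random_state=0)
    assert X.shape == (200, 20)
    assert y.shape == (200,)
    assert set(np.unique(y)) <= {0, 1, 2}   # integer class labels
    return X, y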
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
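# Hedged usage sketch (added; not in the upstream source): the generative process above yields
# a document-term-like X and, by default, a dense binary indicator Y; this checks the shapes.
def _demo_make_multilabel_classification():
    X, Y = make_multilabel_classification(n_samples=50, n_features=20, n_classes=5,
                                          n_labels=2, random_state=0)
    assert X.shape == (50, 20)
    assert Y.shape == (50, 5)               # dense binary indicator by default
    assert set(np.unique(Y)) <= {0, 1}
    return X, Y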
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
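# Hedged usage sketch (added; not in the upstream source): verifies the decision rule quoted
# in the docstring, y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1.
def _demo_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=100, random_state=0)
    expected = np.where((X ** 2).sum(axis=1) > 9.34, 1.0, -1.0)
    assert np.array_equal(y, expected)
    return X, y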
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
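# Hedged usage sketch (added; not in the upstream source): with coef=True the ground-truth
# linear model is returned, so y can be reconstructed exactly when bias and noise are zero.
def _demo_make_regression():
    X, y, w = make_regression(n_samples=100, n_features=10, n_informative=3,
                              noise=0.0, coef=True, random_state=0)
    assert X.shape == (100, 10) and y.shape == (100,) and w.shape == (10,)
    assert np.allclose(y, np.dot(X, w))   # bias defaults to 0.0 and noise is 0.0
    return X, y, w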
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms. Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
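# Hedged usage sketch (added; not in the upstream source): make_circles and make_moons build
# 2-D toy sets; without noise every make_circles point lies exactly on one of the two circles,
# which is checked here through the radius of each sample.
def _demo_make_circles_moons():
    Xc, yc = make_circles(n_samples=100, factor=0.5, random_state=0)
    radii = np.sqrt((Xc ** 2).sum(axis=1))
    assert np.allclose(radii[yc == 0], 1.0) and np.allclose(radii[yc == 1], 0.5)
    Xm, ym = make_moons(n_samples=100, noise=0.1, random_state=0)
    assert Xm.shape == (100, 2) and set(np.unique(ym)) == {0, 1}
    return Xc, yc, Xm, ym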
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
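# Hedged usage sketch (added; not in the upstream source): with noise=0.0 the Friedman #1
# target can be recomputed exactly from the first five features, per the formula above.
def _demo_make_friedman1():
    X, y = make_friedman1(n_samples=50, n_features=10, noise=0.0, random_state=0)
    expected = (10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2
                + 10 * X[:, 3] + 5 * X[:, 4])
    assert np.allclose(y, expected)
    return X, y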
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
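# Hedged usage sketch (added; not in the upstream source): most of the spectral energy of the
# generated matrix should sit in roughly the first `effective_rank` singular values.
def _demo_make_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=100, n_features=50, effective_rank=5,
                             tail_strength=0.01, random_state=0)
    s = linalg.svd(X, compute_uv=False)
    assert s[0] >= s[-1] >= 0             # singular values come back sorted
    assert s[:10].sum() / s.sum() > 0.9   # the low-rank part dominates
    return X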
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components : int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary : array of shape [n_features, n_components]
The dictionary with normalized components (D).
code : array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
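# Hedged usage sketch (added; not in the upstream source): checks the stated contract Y = DX,
# unit-norm dictionary atoms, and exactly n_nonzero_coefs active entries per column of X.
def _demo_make_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=10, n_components=30, n_features=20,
                                       n_nonzero_coefs=4, random_state=0)
    assert Y.shape == (20, 10) and D.shape == (20, 30) and X.shape == (30, 10)
    assert np.allclose(np.dot(D, X), Y)
    assert np.allclose((D ** 2).sum(axis=0), 1.0)   # normalized dictionary atoms
    assert ((X != 0).sum(axis=0) == 4).all()        # sparsity per sample
    return Y, D, X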
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
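# Hedged usage sketch (added; not in the upstream source): the result should be symmetric with
# strictly positive eigenvalues, which is what "positive-definite" promises.
def _demo_make_spd_matrix():
    A = make_spd_matrix(n_dim=5, random_state=0)
    assert np.allclose(A, A.T)
    assert (linalg.eigvalsh(A) > 0).all()
    return A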
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim : integer, optional (default=1)
The size of the random matrix to generate.
alpha : float between 0 and 1, optional (default=0.95)
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
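# Hedged usage sketch (added; not in the upstream source): with n_samples divisible by
# n_classes the quantile-based labelling above produces exactly equal class sizes.
def _demo_make_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3, random_state=0)
    assert X.shape == (90, 2)
    assert np.bincount(y).tolist() == [30, 30, 30]
    return X, y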
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    # Pass lists rather than generators so np.vstack keeps working on recent numpy.
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
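# Illustrative usage (not part of the original scikit-learn module): a hedged
# sketch showing the shapes make_biclusters returns for a small array.
if __name__ == "__main__":  # pragma: no cover - example only
    data, row_ind, col_ind = make_biclusters(shape=(6, 8), n_clusters=3,
                                             shuffle=False, random_state=0)
    print(data.shape)     # (6, 8): constant-valued blocks on the diagonal
    print(row_ind.shape)  # (3, 6): one boolean row-membership vector per bicluster
    print(col_ind.shape)  # (3, 8): one boolean column-membership vector per bicluster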
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    # Pass lists rather than generators so np.vstack keeps working on recent numpy.
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
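# Illustrative usage (not part of the original scikit-learn module): for a
# checkerboard the indicator arrays enumerate every (row cluster, column
# cluster) pair, so 2 row clusters and 3 column clusters give 6 indicator rows.
if __name__ == "__main__":  # pragma: no cover - example only
    board, row_ind, col_ind = make_checkerboard(shape=(10, 12), n_clusters=(2, 3),
                                                shuffle=False, random_state=0)
    print(row_ind.shape)  # (6, 10)
    print(col_ind.shape)  # (6, 12)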
|
RomainBrault/scikit-learn
|
sklearn/datasets/samples_generator.py
|
Python
|
bsd-3-clause
| 56,766
|
[
"Gaussian"
] |
21a9a21446b5da7145c383581a7ebe99366b3fefe490cc7ded3f332eedc3a2bf
|
import tensorflow as tf
import numpy as np
import pickle
import TrainingFeatureNetInf as TFNI
import sklearn.metrics as metrics
def Classifier(layer_num, neuron_num, Input_feature_shape, Input_label_shape, test_dict):
l_input = len(test_dict[Input_feature_shape][0])
hidden_layer_shape = TFNI.Hidden_layer_shape(layer_num, neuron_num)
classes = 2
#batch_size = 100
    ANN = TFNI.neural_network(Input_feature_shape, l_input, hidden_layer_shape, classes) # Re-define the neural network shape.
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, "FNI.ckpt") # Restore variables from disk.
print("Model restored.")
correct = tf.equal(tf.argmax(ANN , 1), tf.argmax(Input_label_shape, 1)) #tf.equal(Predicted labels, True labels), Returns the truth value of (Predicted labels == True labels) element-wise
accuracy = tf.reduce_mean(tf.cast(correct, "float"))
prediction = tf.argmax(ANN,1).eval(test_dict)
y_true = np.argmax(test_dict[Input_label_shape],1)
precision = metrics.precision_score(np.array(y_true), np.array(prediction))
recall = metrics.recall_score(np.array(y_true), np.array(prediction))
print("predictions:", prediction)
print("Probabilities:", ANN.eval(test_dict))
print("Test Accuracy:", accuracy.eval(test_dict))
print("Precision:", precision)
print("Recall:", recall)
print(tf.trainable_variables())
return prediction, ANN.eval(test_dict), precision, recall
if __name__ == "__main__":
with open("FacebookFeatures of 2500 node pairs for experiment.pickle", 'rb') as pickle_file: # input testing data
_, _, _, test_x, test_y, test_edge_names = pickle.load(pickle_file)
print("Test data example:", test_x[0], test_y[0])
#Define Input tensor shape, features as x: height x Width, labels as y: width.
x_shape = tf.placeholder('float',[None, len(test_x[0])]) #features
y_shape = tf.placeholder('float') #label
feed_dict = {x_shape: test_x, y_shape: test_y}
    Predictions, Probabilities, P, R = Classifier(1, 800, x_shape, y_shape, feed_dict) # layer and neuron counts used to rebuild the classifier
Probabilities = np.array(Probabilities)
print("Output probability array:", Probabilities)
print("Node pairs classified as positive:", Probabilities[Probabilities[:, 0] >= Probabilities[:, 1]])
|
PassiveVision/Feature-Net-Learn
|
FeatureNetInf.py
|
Python
|
gpl-3.0
| 2,269
|
[
"NEURON"
] |
6b4f7bab6443b228cd13a2651d7b43937220c94e781bed1a61cde60671be0fc4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of web2py Web Framework (Copyrighted, 2007-2010).
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>.
License: GPL v2
CONTENT_TYPE dictionary created against freedesktop.org's shared mime info
database version 0.70.
"""
__all__ = ['contenttype']
CONTENT_TYPE = {
'.123': 'application/vnd.lotus-1-2-3',
'.3ds': 'image/x-3ds',
'.3g2': 'video/3gpp',
'.3ga': 'video/3gpp',
'.3gp': 'video/3gpp',
'.3gpp': 'video/3gpp',
'.602': 'application/x-t602',
'.669': 'audio/x-mod',
'.7z': 'application/x-7z-compressed',
'.a': 'application/x-archive',
'.aac': 'audio/mp4',
'.abw': 'application/x-abiword',
'.abw.crashed': 'application/x-abiword',
'.abw.gz': 'application/x-abiword',
'.ac3': 'audio/ac3',
'.ace': 'application/x-ace',
'.adb': 'text/x-adasrc',
'.ads': 'text/x-adasrc',
'.afm': 'application/x-font-afm',
'.ag': 'image/x-applix-graphics',
'.ai': 'application/illustrator',
'.aif': 'audio/x-aiff',
'.aifc': 'audio/x-aiff',
'.aiff': 'audio/x-aiff',
'.al': 'application/x-perl',
'.alz': 'application/x-alz',
'.amr': 'audio/amr',
'.ani': 'application/x-navi-animation',
'.anim[1-9j]': 'video/x-anim',
'.anx': 'application/annodex',
'.ape': 'audio/x-ape',
'.arj': 'application/x-arj',
'.arw': 'image/x-sony-arw',
'.as': 'application/x-applix-spreadsheet',
'.asc': 'text/plain',
'.asf': 'video/x-ms-asf',
'.asp': 'application/x-asp',
'.ass': 'text/x-ssa',
'.asx': 'audio/x-ms-asx',
'.atom': 'application/atom+xml',
'.au': 'audio/basic',
'.avi': 'video/x-msvideo',
'.aw': 'application/x-applix-word',
'.awb': 'audio/amr-wb',
'.awk': 'application/x-awk',
'.axa': 'audio/annodex',
'.axv': 'video/annodex',
'.bak': 'application/x-trash',
'.bcpio': 'application/x-bcpio',
'.bdf': 'application/x-font-bdf',
'.bib': 'text/x-bibtex',
'.bin': 'application/octet-stream',
'.blend': 'application/x-blender',
'.blender': 'application/x-blender',
'.bmp': 'image/bmp',
'.bz': 'application/x-bzip',
'.bz2': 'application/x-bzip',
'.c': 'text/x-csrc',
'.c++': 'text/x-c++src',
'.cab': 'application/vnd.ms-cab-compressed',
'.cb7': 'application/x-cb7',
'.cbr': 'application/x-cbr',
'.cbt': 'application/x-cbt',
'.cbz': 'application/x-cbz',
'.cc': 'text/x-c++src',
'.cdf': 'application/x-netcdf',
'.cdr': 'application/vnd.corel-draw',
'.cer': 'application/x-x509-ca-cert',
'.cert': 'application/x-x509-ca-cert',
'.cgm': 'image/cgm',
'.chm': 'application/x-chm',
'.chrt': 'application/x-kchart',
'.class': 'application/x-java',
'.cls': 'text/x-tex',
'.cmake': 'text/x-cmake',
'.cpio': 'application/x-cpio',
'.cpio.gz': 'application/x-cpio-compressed',
'.cpp': 'text/x-c++src',
'.cr2': 'image/x-canon-cr2',
'.crt': 'application/x-x509-ca-cert',
'.crw': 'image/x-canon-crw',
'.cs': 'text/x-csharp',
'.csh': 'application/x-csh',
'.css': 'text/css',
'.cssl': 'text/css',
'.csv': 'text/csv',
'.cue': 'application/x-cue',
'.cur': 'image/x-win-bitmap',
'.cxx': 'text/x-c++src',
'.d': 'text/x-dsrc',
'.dar': 'application/x-dar',
'.dbf': 'application/x-dbf',
'.dc': 'application/x-dc-rom',
'.dcl': 'text/x-dcl',
'.dcm': 'application/dicom',
'.dcr': 'image/x-kodak-dcr',
'.dds': 'image/x-dds',
'.deb': 'application/x-deb',
'.der': 'application/x-x509-ca-cert',
'.desktop': 'application/x-desktop',
'.dia': 'application/x-dia-diagram',
'.diff': 'text/x-patch',
'.divx': 'video/x-msvideo',
'.djv': 'image/vnd.djvu',
'.djvu': 'image/vnd.djvu',
'.dng': 'image/x-adobe-dng',
'.doc': 'application/msword',
'.docbook': 'application/docbook+xml',
'.docm': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'.dot': 'text/vnd.graphviz',
'.dsl': 'text/x-dsl',
'.dtd': 'application/xml-dtd',
'.dtx': 'text/x-tex',
'.dv': 'video/dv',
'.dvi': 'application/x-dvi',
'.dvi.bz2': 'application/x-bzdvi',
'.dvi.gz': 'application/x-gzdvi',
'.dwg': 'image/vnd.dwg',
'.dxf': 'image/vnd.dxf',
'.e': 'text/x-eiffel',
'.egon': 'application/x-egon',
'.eif': 'text/x-eiffel',
'.el': 'text/x-emacs-lisp',
'.emf': 'image/x-emf',
'.emp': 'application/vnd.emusic-emusic_package',
'.ent': 'application/xml-external-parsed-entity',
'.eps': 'image/x-eps',
'.eps.bz2': 'image/x-bzeps',
'.eps.gz': 'image/x-gzeps',
'.epsf': 'image/x-eps',
'.epsf.bz2': 'image/x-bzeps',
'.epsf.gz': 'image/x-gzeps',
'.epsi': 'image/x-eps',
'.epsi.bz2': 'image/x-bzeps',
'.epsi.gz': 'image/x-gzeps',
'.epub': 'application/epub+zip',
'.erl': 'text/x-erlang',
'.es': 'application/ecmascript',
'.etheme': 'application/x-e-theme',
'.etx': 'text/x-setext',
'.exe': 'application/x-ms-dos-executable',
'.exr': 'image/x-exr',
'.ez': 'application/andrew-inset',
'.f': 'text/x-fortran',
'.f90': 'text/x-fortran',
'.f95': 'text/x-fortran',
'.fb2': 'application/x-fictionbook+xml',
'.fig': 'image/x-xfig',
'.fits': 'image/fits',
'.fl': 'application/x-fluid',
'.flac': 'audio/x-flac',
'.flc': 'video/x-flic',
'.fli': 'video/x-flic',
'.flv': 'video/x-flv',
'.flw': 'application/x-kivio',
'.fo': 'text/x-xslfo',
'.for': 'text/x-fortran',
'.g3': 'image/fax-g3',
'.gb': 'application/x-gameboy-rom',
'.gba': 'application/x-gba-rom',
'.gcrd': 'text/directory',
'.ged': 'application/x-gedcom',
'.gedcom': 'application/x-gedcom',
'.gen': 'application/x-genesis-rom',
'.gf': 'application/x-tex-gf',
'.gg': 'application/x-sms-rom',
'.gif': 'image/gif',
'.glade': 'application/x-glade',
'.gmo': 'application/x-gettext-translation',
'.gnc': 'application/x-gnucash',
'.gnd': 'application/gnunet-directory',
'.gnucash': 'application/x-gnucash',
'.gnumeric': 'application/x-gnumeric',
'.gnuplot': 'application/x-gnuplot',
'.gp': 'application/x-gnuplot',
'.gpg': 'application/pgp-encrypted',
'.gplt': 'application/x-gnuplot',
'.gra': 'application/x-graphite',
'.gsf': 'application/x-font-type1',
'.gsm': 'audio/x-gsm',
'.gtar': 'application/x-tar',
'.gv': 'text/vnd.graphviz',
'.gvp': 'text/x-google-video-pointer',
'.gz': 'application/x-gzip',
'.h': 'text/x-chdr',
'.h++': 'text/x-c++hdr',
'.hdf': 'application/x-hdf',
'.hh': 'text/x-c++hdr',
'.hp': 'text/x-c++hdr',
'.hpgl': 'application/vnd.hp-hpgl',
'.hpp': 'text/x-c++hdr',
'.hs': 'text/x-haskell',
'.htm': 'text/html',
'.html': 'text/html',
'.hwp': 'application/x-hwp',
'.hwt': 'application/x-hwt',
'.hxx': 'text/x-c++hdr',
'.ica': 'application/x-ica',
'.icb': 'image/x-tga',
'.icns': 'image/x-icns',
'.ico': 'image/vnd.microsoft.icon',
'.ics': 'text/calendar',
'.idl': 'text/x-idl',
'.ief': 'image/ief',
'.iff': 'image/x-iff',
'.ilbm': 'image/x-ilbm',
'.ime': 'text/x-imelody',
'.imy': 'text/x-imelody',
'.ins': 'text/x-tex',
'.iptables': 'text/x-iptables',
'.iso': 'application/x-cd-image',
'.iso9660': 'application/x-cd-image',
'.it': 'audio/x-it',
'.j2k': 'image/jp2',
'.jad': 'text/vnd.sun.j2me.app-descriptor',
'.jar': 'application/x-java-archive',
'.java': 'text/x-java',
'.jng': 'image/x-jng',
'.jnlp': 'application/x-java-jnlp-file',
'.jp2': 'image/jp2',
'.jpc': 'image/jp2',
'.jpe': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.jpf': 'image/jp2',
'.jpg': 'image/jpeg',
'.jpr': 'application/x-jbuilder-project',
'.jpx': 'image/jp2',
'.js': 'application/javascript',
'.k25': 'image/x-kodak-k25',
'.kar': 'audio/midi',
'.karbon': 'application/x-karbon',
'.kdc': 'image/x-kodak-kdc',
'.kdelnk': 'application/x-desktop',
'.kexi': 'application/x-kexiproject-sqlite3',
'.kexic': 'application/x-kexi-connectiondata',
'.kexis': 'application/x-kexiproject-shortcut',
'.kfo': 'application/x-kformula',
'.kil': 'application/x-killustrator',
'.kino': 'application/smil',
'.kml': 'application/vnd.google-earth.kml+xml',
'.kmz': 'application/vnd.google-earth.kmz',
'.kon': 'application/x-kontour',
'.kpm': 'application/x-kpovmodeler',
'.kpr': 'application/x-kpresenter',
'.kpt': 'application/x-kpresenter',
'.kra': 'application/x-krita',
'.ksp': 'application/x-kspread',
'.kud': 'application/x-kugar',
'.kwd': 'application/x-kword',
'.kwt': 'application/x-kword',
'.la': 'application/x-shared-library-la',
'.latex': 'text/x-tex',
'.ldif': 'text/x-ldif',
'.lha': 'application/x-lha',
'.lhs': 'text/x-literate-haskell',
'.lhz': 'application/x-lhz',
'.log': 'text/x-log',
'.ltx': 'text/x-tex',
'.lua': 'text/x-lua',
'.lwo': 'image/x-lwo',
'.lwob': 'image/x-lwo',
'.lws': 'image/x-lws',
'.ly': 'text/x-lilypond',
'.lyx': 'application/x-lyx',
'.lz': 'application/x-lzip',
'.lzh': 'application/x-lha',
'.lzma': 'application/x-lzma',
'.lzo': 'application/x-lzop',
'.m': 'text/x-matlab',
'.m15': 'audio/x-mod',
'.m2t': 'video/mpeg',
'.m3u': 'audio/x-mpegurl',
'.m3u8': 'audio/x-mpegurl',
'.m4': 'application/x-m4',
'.m4a': 'audio/mp4',
'.m4b': 'audio/x-m4b',
'.m4v': 'video/mp4',
'.mab': 'application/x-markaby',
'.man': 'application/x-troff-man',
'.mbox': 'application/mbox',
'.md': 'application/x-genesis-rom',
'.mdb': 'application/vnd.ms-access',
'.mdi': 'image/vnd.ms-modi',
'.me': 'text/x-troff-me',
'.med': 'audio/x-mod',
'.metalink': 'application/metalink+xml',
'.mgp': 'application/x-magicpoint',
'.mid': 'audio/midi',
'.midi': 'audio/midi',
'.mif': 'application/x-mif',
'.minipsf': 'audio/x-minipsf',
'.mka': 'audio/x-matroska',
'.mkv': 'video/x-matroska',
'.ml': 'text/x-ocaml',
'.mli': 'text/x-ocaml',
'.mm': 'text/x-troff-mm',
'.mmf': 'application/x-smaf',
'.mml': 'text/mathml',
'.mng': 'video/x-mng',
'.mo': 'application/x-gettext-translation',
'.mo3': 'audio/x-mo3',
'.moc': 'text/x-moc',
'.mod': 'audio/x-mod',
'.mof': 'text/x-mof',
'.moov': 'video/quicktime',
'.mov': 'video/quicktime',
'.movie': 'video/x-sgi-movie',
'.mp+': 'audio/x-musepack',
'.mp2': 'video/mpeg',
'.mp3': 'audio/mpeg',
'.mp4': 'video/mp4',
'.mpc': 'audio/x-musepack',
'.mpe': 'video/mpeg',
'.mpeg': 'video/mpeg',
'.mpg': 'video/mpeg',
'.mpga': 'audio/mpeg',
'.mpp': 'audio/x-musepack',
'.mrl': 'text/x-mrml',
'.mrml': 'text/x-mrml',
'.mrw': 'image/x-minolta-mrw',
'.ms': 'text/x-troff-ms',
'.msi': 'application/x-msi',
'.msod': 'image/x-msod',
'.msx': 'application/x-msx-rom',
'.mtm': 'audio/x-mod',
'.mup': 'text/x-mup',
'.mxf': 'application/mxf',
'.n64': 'application/x-n64-rom',
'.nb': 'application/mathematica',
'.nc': 'application/x-netcdf',
'.nds': 'application/x-nintendo-ds-rom',
'.nef': 'image/x-nikon-nef',
'.nes': 'application/x-nes-rom',
'.nfo': 'text/x-nfo',
'.not': 'text/x-mup',
'.nsc': 'application/x-netshow-channel',
'.nsv': 'video/x-nsv',
'.o': 'application/x-object',
'.obj': 'application/x-tgif',
'.ocl': 'text/x-ocl',
'.oda': 'application/oda',
'.odb': 'application/vnd.oasis.opendocument.database',
'.odc': 'application/vnd.oasis.opendocument.chart',
'.odf': 'application/vnd.oasis.opendocument.formula',
'.odg': 'application/vnd.oasis.opendocument.graphics',
'.odi': 'application/vnd.oasis.opendocument.image',
'.odm': 'application/vnd.oasis.opendocument.text-master',
'.odp': 'application/vnd.oasis.opendocument.presentation',
'.ods': 'application/vnd.oasis.opendocument.spreadsheet',
'.odt': 'application/vnd.oasis.opendocument.text',
'.oga': 'audio/ogg',
'.ogg': 'video/x-theora+ogg',
'.ogm': 'video/x-ogm+ogg',
'.ogv': 'video/ogg',
'.ogx': 'application/ogg',
'.old': 'application/x-trash',
'.oleo': 'application/x-oleo',
'.opml': 'text/x-opml+xml',
'.ora': 'image/openraster',
'.orf': 'image/x-olympus-orf',
'.otc': 'application/vnd.oasis.opendocument.chart-template',
'.otf': 'application/x-font-otf',
'.otg': 'application/vnd.oasis.opendocument.graphics-template',
'.oth': 'application/vnd.oasis.opendocument.text-web',
'.otp': 'application/vnd.oasis.opendocument.presentation-template',
'.ots': 'application/vnd.oasis.opendocument.spreadsheet-template',
'.ott': 'application/vnd.oasis.opendocument.text-template',
'.owl': 'application/rdf+xml',
'.oxt': 'application/vnd.openofficeorg.extension',
'.p': 'text/x-pascal',
'.p10': 'application/pkcs10',
'.p12': 'application/x-pkcs12',
'.p7b': 'application/x-pkcs7-certificates',
'.p7s': 'application/pkcs7-signature',
'.pack': 'application/x-java-pack200',
'.pak': 'application/x-pak',
'.par2': 'application/x-par2',
'.pas': 'text/x-pascal',
'.patch': 'text/x-patch',
'.pbm': 'image/x-portable-bitmap',
'.pcd': 'image/x-photo-cd',
'.pcf': 'application/x-cisco-vpn-settings',
'.pcf.gz': 'application/x-font-pcf',
'.pcf.z': 'application/x-font-pcf',
'.pcl': 'application/vnd.hp-pcl',
'.pcx': 'image/x-pcx',
'.pdb': 'chemical/x-pdb',
'.pdc': 'application/x-aportisdoc',
'.pdf': 'application/pdf',
'.pdf.bz2': 'application/x-bzpdf',
'.pdf.gz': 'application/x-gzpdf',
'.pef': 'image/x-pentax-pef',
'.pem': 'application/x-x509-ca-cert',
'.perl': 'application/x-perl',
'.pfa': 'application/x-font-type1',
'.pfb': 'application/x-font-type1',
'.pfx': 'application/x-pkcs12',
'.pgm': 'image/x-portable-graymap',
'.pgn': 'application/x-chess-pgn',
'.pgp': 'application/pgp-encrypted',
'.php': 'application/x-php',
'.php3': 'application/x-php',
'.php4': 'application/x-php',
'.pict': 'image/x-pict',
'.pict1': 'image/x-pict',
'.pict2': 'image/x-pict',
'.pk': 'application/x-tex-pk',
'.pkipath': 'application/pkix-pkipath',
'.pkr': 'application/pgp-keys',
'.pl': 'application/x-perl',
'.pla': 'audio/x-iriver-pla',
'.pln': 'application/x-planperfect',
'.pls': 'audio/x-scpls',
'.pm': 'application/x-perl',
'.png': 'image/png',
'.pnm': 'image/x-portable-anymap',
'.pntg': 'image/x-macpaint',
'.po': 'text/x-gettext-translation',
'.por': 'application/x-spss-por',
'.pot': 'text/x-gettext-translation-template',
'.ppm': 'image/x-portable-pixmap',
'.pps': 'application/vnd.ms-powerpoint',
'.ppt': 'application/vnd.ms-powerpoint',
'.pptm': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'.ppz': 'application/vnd.ms-powerpoint',
'.prc': 'application/x-palm-database',
'.ps': 'application/postscript',
'.ps.bz2': 'application/x-bzpostscript',
'.ps.gz': 'application/x-gzpostscript',
'.psd': 'image/vnd.adobe.photoshop',
'.psf': 'audio/x-psf',
'.psf.gz': 'application/x-gz-font-linux-psf',
'.psflib': 'audio/x-psflib',
'.psid': 'audio/prs.sid',
'.psw': 'application/x-pocket-word',
'.pw': 'application/x-pw',
'.py': 'text/x-python',
'.pyc': 'application/x-python-bytecode',
'.pyo': 'application/x-python-bytecode',
'.qif': 'image/x-quicktime',
'.qt': 'video/quicktime',
'.qtif': 'image/x-quicktime',
'.qtl': 'application/x-quicktime-media-link',
'.qtvr': 'video/quicktime',
'.ra': 'audio/vnd.rn-realaudio',
'.raf': 'image/x-fuji-raf',
'.ram': 'application/ram',
'.rar': 'application/x-rar',
'.ras': 'image/x-cmu-raster',
'.raw': 'image/x-panasonic-raw',
'.rax': 'audio/vnd.rn-realaudio',
'.rb': 'application/x-ruby',
'.rdf': 'application/rdf+xml',
'.rdfs': 'application/rdf+xml',
'.reg': 'text/x-ms-regedit',
'.rej': 'application/x-reject',
'.rgb': 'image/x-rgb',
'.rle': 'image/rle',
'.rm': 'application/vnd.rn-realmedia',
'.rmj': 'application/vnd.rn-realmedia',
'.rmm': 'application/vnd.rn-realmedia',
'.rms': 'application/vnd.rn-realmedia',
'.rmvb': 'application/vnd.rn-realmedia',
'.rmx': 'application/vnd.rn-realmedia',
'.roff': 'text/troff',
'.rp': 'image/vnd.rn-realpix',
'.rpm': 'application/x-rpm',
'.rss': 'application/rss+xml',
'.rt': 'text/vnd.rn-realtext',
'.rtf': 'application/rtf',
'.rtx': 'text/richtext',
'.rv': 'video/vnd.rn-realvideo',
'.rvx': 'video/vnd.rn-realvideo',
'.s3m': 'audio/x-s3m',
'.sam': 'application/x-amipro',
'.sami': 'application/x-sami',
'.sav': 'application/x-spss-sav',
'.scm': 'text/x-scheme',
'.sda': 'application/vnd.stardivision.draw',
'.sdc': 'application/vnd.stardivision.calc',
'.sdd': 'application/vnd.stardivision.impress',
'.sdp': 'application/sdp',
'.sds': 'application/vnd.stardivision.chart',
'.sdw': 'application/vnd.stardivision.writer',
'.sgf': 'application/x-go-sgf',
'.sgi': 'image/x-sgi',
'.sgl': 'application/vnd.stardivision.writer',
'.sgm': 'text/sgml',
'.sgml': 'text/sgml',
'.sh': 'application/x-shellscript',
'.shar': 'application/x-shar',
'.shn': 'application/x-shorten',
'.siag': 'application/x-siag',
'.sid': 'audio/prs.sid',
'.sik': 'application/x-trash',
'.sis': 'application/vnd.symbian.install',
'.sisx': 'x-epoc/x-sisx-app',
'.sit': 'application/x-stuffit',
'.siv': 'application/sieve',
'.sk': 'image/x-skencil',
'.sk1': 'image/x-skencil',
'.skr': 'application/pgp-keys',
'.slk': 'text/spreadsheet',
'.smaf': 'application/x-smaf',
'.smc': 'application/x-snes-rom',
'.smd': 'application/vnd.stardivision.mail',
'.smf': 'application/vnd.stardivision.math',
'.smi': 'application/x-sami',
'.smil': 'application/smil',
'.sml': 'application/smil',
'.sms': 'application/x-sms-rom',
'.snd': 'audio/basic',
'.so': 'application/x-sharedlib',
'.spc': 'application/x-pkcs7-certificates',
'.spd': 'application/x-font-speedo',
'.spec': 'text/x-rpm-spec',
'.spl': 'application/x-shockwave-flash',
'.spx': 'audio/x-speex',
'.sql': 'text/x-sql',
'.sr2': 'image/x-sony-sr2',
'.src': 'application/x-wais-source',
'.srf': 'image/x-sony-srf',
'.srt': 'application/x-subrip',
'.ssa': 'text/x-ssa',
'.stc': 'application/vnd.sun.xml.calc.template',
'.std': 'application/vnd.sun.xml.draw.template',
'.sti': 'application/vnd.sun.xml.impress.template',
'.stm': 'audio/x-stm',
'.stw': 'application/vnd.sun.xml.writer.template',
'.sty': 'text/x-tex',
'.sub': 'text/x-subviewer',
'.sun': 'image/x-sun-raster',
'.sv4cpio': 'application/x-sv4cpio',
'.sv4crc': 'application/x-sv4crc',
'.svg': 'image/svg+xml',
'.svgz': 'image/svg+xml-compressed',
'.swf': 'application/x-shockwave-flash',
'.sxc': 'application/vnd.sun.xml.calc',
'.sxd': 'application/vnd.sun.xml.draw',
'.sxg': 'application/vnd.sun.xml.writer.global',
'.sxi': 'application/vnd.sun.xml.impress',
'.sxm': 'application/vnd.sun.xml.math',
'.sxw': 'application/vnd.sun.xml.writer',
'.sylk': 'text/spreadsheet',
'.t': 'text/troff',
'.t2t': 'text/x-txt2tags',
'.tar': 'application/x-tar',
'.tar.bz': 'application/x-bzip-compressed-tar',
'.tar.bz2': 'application/x-bzip-compressed-tar',
'.tar.gz': 'application/x-compressed-tar',
'.tar.lzma': 'application/x-lzma-compressed-tar',
'.tar.lzo': 'application/x-tzo',
'.tar.xz': 'application/x-xz-compressed-tar',
'.tar.z': 'application/x-tarz',
'.tbz': 'application/x-bzip-compressed-tar',
'.tbz2': 'application/x-bzip-compressed-tar',
'.tcl': 'text/x-tcl',
'.tex': 'text/x-tex',
'.texi': 'text/x-texinfo',
'.texinfo': 'text/x-texinfo',
'.tga': 'image/x-tga',
'.tgz': 'application/x-compressed-tar',
'.theme': 'application/x-theme',
'.themepack': 'application/x-windows-themepack',
'.tif': 'image/tiff',
'.tiff': 'image/tiff',
'.tk': 'text/x-tcl',
'.tlz': 'application/x-lzma-compressed-tar',
'.tnef': 'application/vnd.ms-tnef',
'.tnf': 'application/vnd.ms-tnef',
'.toc': 'application/x-cdrdao-toc',
'.torrent': 'application/x-bittorrent',
'.tpic': 'image/x-tga',
'.tr': 'text/troff',
'.ts': 'application/x-linguist',
'.tsv': 'text/tab-separated-values',
'.tta': 'audio/x-tta',
'.ttc': 'application/x-font-ttf',
'.ttf': 'application/x-font-ttf',
'.ttx': 'application/x-font-ttx',
'.txt': 'text/plain',
'.txz': 'application/x-xz-compressed-tar',
'.tzo': 'application/x-tzo',
'.ufraw': 'application/x-ufraw',
'.ui': 'application/x-designer',
'.uil': 'text/x-uil',
'.ult': 'audio/x-mod',
'.uni': 'audio/x-mod',
'.uri': 'text/x-uri',
'.url': 'text/x-uri',
'.ustar': 'application/x-ustar',
'.vala': 'text/x-vala',
'.vapi': 'text/x-vala',
'.vcf': 'text/directory',
'.vcs': 'text/calendar',
'.vct': 'text/directory',
'.vda': 'image/x-tga',
'.vhd': 'text/x-vhdl',
'.vhdl': 'text/x-vhdl',
'.viv': 'video/vivo',
'.vivo': 'video/vivo',
'.vlc': 'audio/x-mpegurl',
'.vob': 'video/mpeg',
'.voc': 'audio/x-voc',
'.vor': 'application/vnd.stardivision.writer',
'.vst': 'image/x-tga',
'.wav': 'audio/x-wav',
'.wax': 'audio/x-ms-asx',
'.wb1': 'application/x-quattropro',
'.wb2': 'application/x-quattropro',
'.wb3': 'application/x-quattropro',
'.wbmp': 'image/vnd.wap.wbmp',
'.wcm': 'application/vnd.ms-works',
'.wdb': 'application/vnd.ms-works',
'.wk1': 'application/vnd.lotus-1-2-3',
'.wk3': 'application/vnd.lotus-1-2-3',
'.wk4': 'application/vnd.lotus-1-2-3',
'.wks': 'application/vnd.ms-works',
'.wma': 'audio/x-ms-wma',
'.wmf': 'image/x-wmf',
'.wml': 'text/vnd.wap.wml',
'.wmls': 'text/vnd.wap.wmlscript',
'.wmv': 'video/x-ms-wmv',
'.wmx': 'audio/x-ms-asx',
'.wp': 'application/vnd.wordperfect',
'.wp4': 'application/vnd.wordperfect',
'.wp5': 'application/vnd.wordperfect',
'.wp6': 'application/vnd.wordperfect',
'.wpd': 'application/vnd.wordperfect',
'.wpg': 'application/x-wpg',
'.wpl': 'application/vnd.ms-wpl',
'.wpp': 'application/vnd.wordperfect',
'.wps': 'application/vnd.ms-works',
'.wri': 'application/x-mswrite',
'.wrl': 'model/vrml',
'.wv': 'audio/x-wavpack',
'.wvc': 'audio/x-wavpack-correction',
'.wvp': 'audio/x-wavpack',
'.wvx': 'audio/x-ms-asx',
'.x3f': 'image/x-sigma-x3f',
'.xac': 'application/x-gnucash',
'.xbel': 'application/x-xbel',
'.xbl': 'application/xml',
'.xbm': 'image/x-xbitmap',
'.xcf': 'image/x-xcf',
'.xcf.bz2': 'image/x-compressed-xcf',
'.xcf.gz': 'image/x-compressed-xcf',
'.xhtml': 'application/xhtml+xml',
'.xi': 'audio/x-xi',
'.xla': 'application/vnd.ms-excel',
'.xlc': 'application/vnd.ms-excel',
'.xld': 'application/vnd.ms-excel',
'.xlf': 'application/x-xliff',
'.xliff': 'application/x-xliff',
'.xll': 'application/vnd.ms-excel',
'.xlm': 'application/vnd.ms-excel',
'.xls': 'application/vnd.ms-excel',
'.xlsm': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'.xlt': 'application/vnd.ms-excel',
'.xlw': 'application/vnd.ms-excel',
'.xm': 'audio/x-xm',
'.xmf': 'audio/x-xmf',
'.xmi': 'text/x-xmi',
'.xml': 'application/xml',
'.xpm': 'image/x-xpixmap',
'.xps': 'application/vnd.ms-xpsdocument',
'.xsl': 'application/xml',
'.xslfo': 'text/x-xslfo',
'.xslt': 'application/xml',
'.xspf': 'application/xspf+xml',
'.xul': 'application/vnd.mozilla.xul+xml',
'.xwd': 'image/x-xwindowdump',
'.xyz': 'chemical/x-pdb',
'.xz': 'application/x-xz',
'.w2p': 'application/w2p',
'.z': 'application/x-compress',
'.zabw': 'application/x-abiword',
'.zip': 'application/zip',
'.zoo': 'application/x-zoo',
}
def contenttype(filename, default='text/plain'):
"""
Returns the Content-Type string matching extension of the given filename.
"""
i = filename.rfind('.')
if i>=0:
default = CONTENT_TYPE.get(filename[i:].lower(),default)
j = filename.rfind('.', 0, i)
if j>=0:
default = CONTENT_TYPE.get(filename[j:].lower(),default)
if default.startswith('text/'):
default += '; charset=utf-8'
return default
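# Illustrative usage (not part of the original web2py module): the double-
# extension lookup above lets '.tar.gz' override the plain '.gz' entry, and
# text types get a utf-8 charset appended. Hedged sketch based only on the
# table defined in this file.
if __name__ == '__main__':  # pragma: no cover - example only
    print(contenttype('backup.tar.gz'))  # 'application/x-compressed-tar'
    print(contenttype('notes.txt'))      # 'text/plain; charset=utf-8'
    print(contenttype('README'))         # no extension: falls back to 'text/plain; charset=utf-8'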
|
henkelis/sonospy
|
web2py/gluon/contenttype.py
|
Python
|
gpl-3.0
| 25,110
|
[
"NetCDF"
] |
e6c05d1f3dce61772be8ec38bfce1b82e95360b9a53084513d38af8bb60e5eb9
|
# -*- coding: utf-8 -*-
"""
CreateInflowFileFromLDASRunoff.py
RAPIDpy
Created by Alan D. Snow, 2015
Adapted from CreateInflowFileFromECMWFRunoff.py.
License: BSD-3-Clause
"""
from netCDF4 import Dataset
from .CreateInflowFileFromGriddedRunoff import \
CreateInflowFileFromGriddedRunoff
class CreateInflowFileFromLDASRunoff(CreateInflowFileFromGriddedRunoff):
"""Create Inflow File From LDAS Runoff
Base class for creating RAPID NetCDF input
of water inflow based on LDAS land surface model
runoff and previously created weight table.
"""
land_surface_model_name = "LDAS"
def __init__(self,
lat_dim, # "g0_lat_0",
lon_dim, # "g0_lon_1",
lat_var, # "g0_lat_0",
lon_var, # "g0_lon_1",
runoff_vars): # ["Qsb_GDS0_SFC_ave1h", "Qs_GDS0_SFC_ave1h"],
"""Define the attributes to look for"""
self.dims_oi = [lon_dim, lat_dim]
self.vars_oi = [lon_var, lat_var] + runoff_vars
self.runoff_vars = runoff_vars
self.length_time = {"Hourly": 1}
super(CreateInflowFileFromLDASRunoff, self).__init__()
def data_validation(self, in_nc):
"""Check the necessary dimensions and variables in the
input netcdf data"""
data_nc = Dataset(in_nc)
for dim in self.dims_oi:
if dim not in data_nc.dimensions.keys():
data_nc.close()
raise Exception(self.error_messages[1])
for var in self.vars_oi:
if var not in data_nc.variables.keys():
data_nc.close()
raise Exception(self.error_messages[2])
data_nc.close()
return
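# Illustrative sketch (comments only, not part of the original RAPIDpy module;
# the dimension, variable and file names below are hypothetical assumptions):
# a typical instantiation for an LDAS-style file with surface and sub-surface
# runoff variables might look roughly like
#     inflow_tool = CreateInflowFileFromLDASRunoff(
#         lat_dim="lat", lon_dim="lon",
#         lat_var="lat", lon_var="lon",
#         runoff_vars=["Qs_acc", "Qsb_acc"])
#     inflow_tool.data_validation("ldas_runoff_example.nc")
# data_validation() then simply checks that those dimensions and variables
# exist in the NetCDF file before any inflow computation is attempted.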
|
erdc-cm/RAPIDpy
|
RAPIDpy/inflow/CreateInflowFileFromLDASRunoff.py
|
Python
|
bsd-3-clause
| 1,728
|
[
"NetCDF"
] |
fc67096fe36befc43e22a7bc61f1469a9f64e895b8683c674159b0ac8c7b3bb6
|
"""
FlexGet build and development utilities - unfortunately this file is somewhat messy
"""
from __future__ import print_function
import glob
import os
import shutil
import sys
from paver.easy import environment, task, cmdopts, Bunch, path, call_task, might_call, consume_args
# These 2 packages do magic on import, even though they aren't used explicitly
import paver.virtual
import paver.setuputils
from paver.shell import sh
from paver.setuputils import setup, find_package_data, find_packages
sphinxcontrib = False
try:
from sphinxcontrib import paverutils
sphinxcontrib = True
except ImportError:
pass
sys.path.insert(0, '')
options = environment.options
install_requires = [
'FeedParser>=5.2.1',
# There is a bug in sqlalchemy 0.9.0, see gh#127
'SQLAlchemy >=0.7.5, !=0.9.0, <1.999',
'PyYAML',
# There is a bug in beautifulsoup 4.2.0 that breaks imdb parsing, see http://flexget.com/ticket/2091
'beautifulsoup4>=4.1, !=4.2.0, <4.5',
'html5lib>=0.11',
'PyRSS2Gen',
'pynzb',
'progressbar',
'rpyc',
'jinja2',
# There is a bug in requests 2.4.0 where it leaks urllib3 exceptions
'requests>=1.0, !=2.4.0, <2.99',
'python-dateutil!=2.0, !=2.2',
'jsonschema>=2.0',
'tmdb3',
'path.py',
'guessit>=0.9.3, <0.10.4',
'apscheduler',
'flask>=0.7',
'flask-restful>=0.3.3',
'ordereddict>=1.1',
'flask-restplus==0.7.2',
'cherrypy>=3.7.0',
'flask-assets>=0.11',
'cssmin>=0.2.0',
'flask-compress>=1.2.1',
'flask-login>=0.3.2',
'pyparsing>=2.0.3',
'pyScss>=1.3.4',
'pytvmaze>=1.4.3'
]
if sys.version_info < (2, 7):
# argparse is part of the standard library in python 2.7+
install_requires.append('argparse')
entry_points = {'console_scripts': ['flexget = flexget:main']}
# Provide an alternate exe on windows which does not cause a pop-up when scheduled
if sys.platform.startswith('win'):
entry_points.setdefault('gui_scripts', []).append('flexget-headless = flexget:main')
with open("README.rst") as readme:
long_description = readme.read()
# Populates __version__ without importing the package
__version__ = None
execfile('flexget/_version.py')
if not __version__:
print('Could not find __version__ from flexget/_version.py')
sys.exit(1)
setup(
name='FlexGet',
version=__version__, # release task may edit this
description='FlexGet is a program aimed to automate downloading or processing content (torrents, podcasts, etc.) '
'from different sources like RSS-feeds, html-pages, various sites and more.',
long_description=long_description,
author='Marko Koivusalo',
author_email='marko.koivusalo@gmail.com',
license='MIT',
url='http://flexget.com',
download_url='http://download.flexget.com',
install_requires=install_requires,
packages=find_packages(exclude=['tests']),
package_data=find_package_data('flexget', package='flexget',
exclude=['FlexGet.egg-info', '*.pyc'],
exclude_directories=['node_modules', 'bower_components'],
only_in_packages=False), # NOTE: the exclude does not seem to work
zip_safe=False,
test_suite='nose.collector',
extras_require={
'memusage': ['guppy'],
'NZB': ['pynzb'],
'TaskTray': ['pywin32'],
},
entry_points=entry_points,
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
)
options(
minilib=Bunch(
# 'version' is included as workaround to https://github.com/paver/paver/issues/112, TODO: remove
extra_files=['virtual', 'svn', 'version']
),
virtualenv=Bunch(
paver_command_line='develop'
),
# sphinxcontrib.paverutils
sphinx=Bunch(
docroot='docs',
builddir='build',
builder='html',
confdir='docs'
),
)
def set_init_version(ver):
"""Replaces the version with ``ver`` in _version.py"""
import fileinput
for line in fileinput.FileInput('flexget/_version.py', inplace=1):
if line.startswith('__version__ = '):
line = "__version__ = '%s'\n" % ver
print(line, end='')
@task
def version():
"""Prints the version number of the source"""
print(__version__)
@task
@cmdopts([('dev', None, 'Bumps to new development version instead of release version.')])
def increment_version(options):
"""Increments either release or dev version by 1"""
print('current version: %s' % __version__)
ver_split = __version__.split('.')
dev = options.increment_version.get('dev')
if 'dev' in ver_split[-1]:
if dev:
# If this is already a development version, increment the dev count by 1
ver_split[-1] = 'dev%d' % (int(ver_split[-1].strip('dev') or 0) + 1)
else:
# Just strip off dev tag for next release version
ver_split = ver_split[:-1]
else:
# Increment the revision number by one
if len(ver_split) == 2:
# We don't have a revision number, assume 0
ver_split.append('1')
else:
ver_split[-1] = str(int(ver_split[-1]) + 1)
if dev:
ver_split.append('dev')
new_version = '.'.join(ver_split)
print('new version: %s' % new_version)
set_init_version(new_version)
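# Worked examples for the bump logic above (illustrative comments only, not
# part of the original pavement file):
#   '1.2.197.dev'  with --dev  -> '1.2.197.dev1'  (dev counter incremented)
#   '1.2.197.dev'  release     -> '1.2.197'       (dev tag stripped)
#   '1.2.197'      release     -> '1.2.198'       (revision incremented)
#   '1.2.197'      with --dev  -> '1.2.198.dev'   (next revision, marked dev)
#   '1.2'          release     -> '1.2.1'         (missing revision assumed 0)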
@task
@cmdopts([
('online', None, 'Run online tests')
])
def test(options):
"""Run FlexGet unit tests"""
options.setdefault('test', Bunch())
import nose
from nose.plugins.manager import DefaultPluginManager
cfg = nose.config.Config(plugins=DefaultPluginManager(), verbosity=2)
args = []
# Adding the -v flag makes the tests fail in python 2.7
#args.append('-v')
args.append('--processes=4')
args.append('-x')
if not options.test.get('online'):
args.append('--attr=!online')
args.append('--where=tests')
# Store current path since --where changes it, restore when leaving
cwd = os.getcwd()
try:
return nose.run(argv=args, config=cfg)
finally:
os.chdir(cwd)
@task
def clean():
"""Cleans up the virtualenv"""
for p in ('bin', 'Scripts', 'build', 'dist', 'include', 'lib', 'man',
'share', 'FlexGet.egg-info', 'paver-minilib.zip', 'setup.py'):
pth = path(p)
if pth.isdir():
pth.rmtree()
elif pth.isfile():
pth.remove()
for pkg in set(options.setup.packages) | set(('tests',)):
for filename in glob.glob(pkg.replace('.', os.sep) + "/*.py[oc~]"):
path(filename).remove()
@task
@cmdopts([
('dist-dir=', 'd', 'directory to put final built distributions in'),
('revision=', 'r', 'minor revision number of this build')
])
def sdist(options):
"""Build tar.gz distribution package"""
print('sdist version: %s' % __version__)
# clean previous build
print('Cleaning build...')
for p in ['build']:
pth = path(p)
if pth.isdir():
pth.rmtree()
elif pth.isfile():
pth.remove()
else:
print('Unable to remove %s' % pth)
# remove pre-compiled pycs from tests, I don't know why paver even tries to include them ...
# seems to happen only with sdist though
for pyc in path('tests/').files('*.pyc'):
pyc.remove()
for t in ['minilib', 'generate_setup', 'setuptools.command.sdist']:
call_task(t)
@task
def coverage():
"""Make coverage.flexget.com"""
# --with-coverage --cover-package=flexget --cover-html --cover-html-dir /var/www/flexget_coverage/
import nose
from nose.plugins.manager import DefaultPluginManager
cfg = nose.config.Config(plugins=DefaultPluginManager(), verbosity=2)
argv = ['bin/paver']
argv.extend(['--attr=!online'])
argv.append('--with-coverage')
argv.append('--cover-html')
argv.extend(['--cover-package', 'flexget'])
argv.extend(['--cover-html-dir', '/var/www/flexget_coverage/'])
nose.run(argv=argv, config=cfg)
print('Coverage generated')
@task
@cmdopts([
    ('docs-dir=', 'd', 'directory to put the documentation in')
])
def docs():
if not sphinxcontrib:
print('ERROR: requires sphinxcontrib-paverutils')
sys.exit(1)
from paver import tasks
if not os.path.exists('build'):
os.mkdir('build')
if not os.path.exists(os.path.join('build', 'sphinx')):
os.mkdir(os.path.join('build', 'sphinx'))
setup_section = tasks.environment.options.setdefault("sphinx", Bunch())
setup_section.update(outdir=options.docs.get('docs_dir', 'build/sphinx'))
call_task('sphinxcontrib.paverutils.html')
@task
@might_call('test', 'sdist')
@cmdopts([('no-tests', None, 'skips unit tests')])
def release(options):
"""Run tests then make an sdist if successful."""
if not options.release.get('no_tests'):
if not test():
print('Unit tests did not pass')
sys.exit(1)
print('Making src release')
sdist()
@task
def install_tools():
"""Install development / jenkins tools and dependencies"""
try:
import pip
except ImportError:
print('FATAL: Unable to import pip, please install it and run this again!')
sys.exit(1)
try:
import sphinxcontrib
print('sphinxcontrib INSTALLED')
except ImportError:
pip.main(['install', 'sphinxcontrib-paverutils'])
pip.main(['install', '-r', 'jenkins-requirements.txt'])
@task
def clean_compiled():
for root, dirs, files in os.walk('flexget'):
for name in files:
fqn = os.path.join(root, name)
if fqn[-3:] == 'pyc' or fqn[-3:] == 'pyo' or fqn[-5:] == 'cover':
print('Deleting %s' % fqn)
os.remove(fqn)
@task
@consume_args
def pep8(args):
try:
import pep8
    except ImportError:
print('Run bin/paver install_tools')
sys.exit(1)
# Ignoring certain errors
ignore = [
'E711', 'E712', # These are comparisons to singletons i.e. == False, and == None. We need these for sqlalchemy.
'W291', 'W293', 'E261',
'E128' # E128 continuation line under-indented for visual indent
]
styleguide = pep8.StyleGuide(show_source=True, ignore=ignore, repeat=1, max_line_length=120,
parse_argv=args)
styleguide.input_dir('flexget')
@task
@cmdopts([
('file=', 'f', 'name of the requirements file to create')
])
def requirements(options):
filename = options.requirements.get('file', 'requirements.txt')
with open(filename, mode='w') as req_file:
req_file.write('\n'.join(options.install_requires))
@task
def build_webui():
cwd = os.path.join('flexget', 'ui')
# Cleanup previous builds
    for folder in ['bower_components', 'node_modules']:
folder = os.path.join(cwd, folder)
if os.path.exists(folder):
shutil.rmtree(folder)
# Install npm packages
sh(['npm', 'install'], cwd=cwd)
    # Install bower components
sh(['bower', 'install'], cwd=cwd)
# Build the ui
sh('gulp', cwd=cwd)
|
tsnoam/Flexget
|
pavement.py
|
Python
|
mit
| 11,572
|
[
"GULP"
] |
997e5ba2985f9d20c8a8da6e22d24e2f66b400c4fbf58f4a890f1062065a9aac
|
"""
Tool Input Translation.
"""
import logging
from galaxy.util.bunch import Bunch
log = logging.getLogger( __name__ )
class ToolInputTranslator( object ):
"""
Handles Tool input translation.
This is used for data source tools
>>> from galaxy.util import Params
>>> from elementtree.ElementTree import XML
>>> translator = ToolInputTranslator.from_element( XML(
... '''
... <request_param_translation>
... <request_param galaxy_name="URL_method" remote_name="URL_method" missing="post" />
... <request_param galaxy_name="URL" remote_name="URL" missing="" >
... <append_param separator="&" first_separator="?" join="=">
... <value name="_export" missing="1" />
... <value name="GALAXY_URL" missing="0" />
... </append_param>
... </request_param>
... <request_param galaxy_name="dbkey" remote_name="db" missing="?" />
... <request_param galaxy_name="organism" remote_name="org" missing="unknown species" />
... <request_param galaxy_name="table" remote_name="hgta_table" missing="unknown table" />
... <request_param galaxy_name="description" remote_name="hgta_regionType" missing="no description" />
... <request_param galaxy_name="data_type" remote_name="hgta_outputType" missing="tabular" >
... <value_translation>
... <value galaxy_value="tabular" remote_value="primaryTable" />
... <value galaxy_value="tabular" remote_value="selectedFields" />
... <value galaxy_value="wig" remote_value="wigData" />
... <value galaxy_value="interval" remote_value="tab" />
... <value galaxy_value="html" remote_value="hyperlinks" />
... <value galaxy_value="fasta" remote_value="sequence" />
... </value_translation>
... </request_param>
... </request_param_translation>
... ''' ) )
>>> params = Params( { 'db':'hg17', 'URL':'URL_value', 'org':'Human', 'hgta_outputType':'primaryTable' } )
>>> translator.translate( params )
>>> print params
{'hgta_outputType': 'primaryTable', 'data_type': 'tabular', 'table': 'unknown table', 'URL': 'URL_value?GALAXY_URL=0&_export=1', 'org': 'Human', 'URL_method': 'post', 'db': 'hg17', 'organism': 'Human', 'dbkey': 'hg17', 'description': 'no description'}
"""
@classmethod
def from_element( cls, elem ):
"""Loads the proper filter by the type attribute of elem"""
rval = ToolInputTranslator()
for req_param in elem.findall( "request_param" ):
# req_param tags must look like <request_param galaxy_name="dbkey" remote_name="GENOME" missing="" />
#trans_list = []
remote_name = req_param.get( "remote_name" )
galaxy_name = req_param.get( "galaxy_name" )
missing = req_param.get( "missing" )
value_trans = {}
append_param = None
value_trans_elem = req_param.find( 'value_translation' )
            if value_trans_elem is not None:
for value_elem in value_trans_elem.findall( 'value' ):
remote_value = value_elem.get( "remote_value" )
galaxy_value = value_elem.get( "galaxy_value" )
if None not in [ remote_value, galaxy_value ]:
value_trans[ remote_value ] = galaxy_value
append_param_elem = req_param.find( "append_param" )
            if append_param_elem is not None:
separator = append_param_elem.get( 'separator', ',' )
first_separator = append_param_elem.get( 'first_separator', None )
join_str = append_param_elem.get( 'join', '=' )
append_dict = {}
for value_elem in append_param_elem.findall( 'value' ):
value_name = value_elem.get( 'name' )
value_missing = value_elem.get( 'missing' )
if None not in [ value_name, value_missing ]:
append_dict[ value_name ] = value_missing
append_param = Bunch( separator = separator, first_separator = first_separator, join_str = join_str, append_dict = append_dict )
rval.param_trans_dict[ remote_name ] = Bunch( galaxy_name = galaxy_name, missing = missing, value_trans = value_trans, append_param = append_param )
return rval
def __init__( self ):
self.param_trans_dict = {}
def translate( self, params ):
"""
update params in-place
"""
for remote_name, translator in self.param_trans_dict.iteritems():
galaxy_name = translator.galaxy_name #NB: if a param by name galaxy_name is provided, it is always thrown away unless galaxy_name == remote_name
value = params.get( remote_name, translator.missing ) #get value from input params, or use default value specified in tool config
if translator.value_trans and value in translator.value_trans:
value = translator.value_trans[ value ]
if translator.append_param:
for param_name, missing_value in translator.append_param.append_dict.iteritems():
param_value = params.get( param_name, missing_value )
if translator.append_param.first_separator and translator.append_param.first_separator not in value:
sep = translator.append_param.first_separator
else:
sep = translator.append_param.separator
value += '%s%s%s%s' % ( sep, param_name, translator.append_param.join_str, param_value )
params.update( { galaxy_name: value } )
|
volpino/Yeps-EURAC
|
lib/galaxy/tools/parameters/input_translation.py
|
Python
|
mit
| 5,691
|
[
"Galaxy"
] |
a57bea74bf9960b23aedac49aa7a4225cecac3119ef0c8f6fb3cc677c1b370ce
|
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 21817 $
# Date: $Date: 2005-07-21 13:39:57 -0700 (Thu, 21 Jul 2005) $
# Copyright: This module has been placed in the public domain.
"""
Parser for Python modules. Requires Python 2.2 or higher.
The `parse_module()` function takes a module's text and file name,
runs it through the module parser (using compiler.py and tokenize.py)
and produces a parse tree of the source code, using the nodes as found
in pynodes.py. For example, given this module (x.py)::
# comment
'''Docstring'''
'''Additional docstring'''
__docformat__ = 'reStructuredText'
a = 1
'''Attribute docstring'''
class C(Super):
'''C's docstring'''
class_attribute = 1
'''class_attribute's docstring'''
def __init__(self, text=None):
'''__init__'s docstring'''
self.instance_attribute = (text * 7
+ ' whaddyaknow')
'''instance_attribute's docstring'''
def f(x, # parameter x
y=a*5, # parameter y
*args): # parameter args
'''f's docstring'''
return [x + item for item in args]
f.function_attribute = 1
'''f.function_attribute's docstring'''
The module parser will produce this module documentation tree::
<module_section filename="test data">
<docstring>
Docstring
<docstring lineno="5">
Additional docstring
<attribute lineno="7">
<object_name>
__docformat__
<expression_value lineno="7">
'reStructuredText'
<attribute lineno="9">
<object_name>
a
<expression_value lineno="9">
1
<docstring lineno="10">
Attribute docstring
<class_section lineno="12">
<object_name>
C
<class_base>
Super
<docstring lineno="12">
C's docstring
<attribute lineno="16">
<object_name>
class_attribute
<expression_value lineno="16">
1
<docstring lineno="17">
class_attribute's docstring
<method_section lineno="19">
<object_name>
__init__
<docstring lineno="19">
__init__'s docstring
<parameter_list lineno="19">
<parameter lineno="19">
<object_name>
self
<parameter lineno="19">
<object_name>
text
<parameter_default lineno="19">
None
<attribute lineno="22">
<object_name>
self.instance_attribute
<expression_value lineno="22">
(text * 7 + ' whaddyaknow')
<docstring lineno="24">
instance_attribute's docstring
<function_section lineno="27">
<object_name>
f
<docstring lineno="27">
f's docstring
<parameter_list lineno="27">
<parameter lineno="27">
<object_name>
x
<comment>
# parameter x
<parameter lineno="27">
<object_name>
y
<parameter_default lineno="27">
a * 5
<comment>
# parameter y
<parameter excess_positional="1" lineno="27">
<object_name>
args
<comment>
# parameter args
<attribute lineno="33">
<object_name>
f.function_attribute
<expression_value lineno="33">
1
<docstring lineno="34">
f.function_attribute's docstring
(Comments are not implemented yet.)
compiler.parse() provides most of what's needed for this doctree, and
"tokenize" can be used to get the rest. We can determine the line
number from the compiler.parse() AST, and the TokenParser.rhs(lineno)
method provides the rest.
The Docutils Python reader component will transform this module doctree into a
Python-specific Docutils doctree, and then a `stylist transform`_ will
further transform it into a generic doctree. Namespaces will have to be
compiled for each of the scopes, but I'm not certain at what stage of
processing.
It's very important to keep all docstring processing out of this, so that it's
completely generic and not tool-specific.
> Why perform all of those transformations? Why not go from the AST to a
> generic doctree? Or, even from the AST to the final output?
I want the docutils.readers.python.moduleparser.parse_module() function to
produce a standard documentation-oriented tree that can be used by any tool.
We can develop it together without having to compromise on the rest of our
design (i.e., HappyDoc doesn't have to be made to work like Docutils, and
vice-versa). It would be a higher-level version of what compiler.py provides.
The Python reader component transforms this generic AST into a Python-specific
doctree (it knows about modules, classes, functions, etc.), but this is
specific to Docutils and cannot be used by HappyDoc or others. The stylist
transform does the final layout, converting Python-specific structures
("class" sections, etc.) into a generic doctree using primitives (tables,
sections, lists, etc.). This generic doctree does *not* know about Python
structures any more. The advantage is that this doctree can be handed off to
any of the output writers to create any output format we like.
The latter two transforms are separate because I want to be able to have
multiple independent layout styles (multiple runtime-selectable "stylist
transforms"). Each of the existing tools (HappyDoc, pydoc, epydoc, Crystal,
etc.) has its own fixed format. I personally don't like the tables-based
format produced by these tools, and I'd like to be able to customize the
format easily. That's the goal of stylist transforms, which are independent
from the Reader component itself. One stylist transform could produce
HappyDoc-like output, another could produce output similar to module docs in
the Python library reference manual, and so on.
It's for exactly this reason:
>> It's very important to keep all docstring processing out of this, so that
>> it's completely generic and not tool-specific.
... but it goes past docstring processing. It's also important to keep style
decisions and tool-specific data transforms out of this module parser.
Issues
======
* At what point should namespaces be computed? Should they be part of the
basic AST produced by the ASTVisitor walk, or generated by another tree
traversal?
* At what point should a distinction be made between local variables &
instance attributes in __init__ methods?
* Docstrings are getting their lineno from their parents. Should the
TokenParser find the real line no's?
* Comments: include them? How and when? Only full-line comments, or
parameter comments too? (See function "f" above for an example.)
* Module could use more docstrings & refactoring in places.
"""
__docformat__ = 'reStructuredText'
import sys
import compiler
import compiler.ast
import tokenize
import token
from compiler.consts import OP_ASSIGN
from compiler.visitor import ASTVisitor
from types import StringType, UnicodeType, TupleType
from docutils.readers.python import pynodes
from docutils.nodes import Text
def parse_module(module_text, filename):
"""Return a module documentation tree from `module_text`."""
ast = compiler.parse(module_text)
token_parser = TokenParser(module_text)
visitor = ModuleVisitor(filename, token_parser)
compiler.walk(ast, visitor, walker=visitor)
return visitor.module
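# Illustrative usage (comments only, not part of the original module): calling
# parse_module() on a tiny source produces the pseudo-XML doctree layout shown
# in the module docstring above, e.g.
#     tree = parse_module('"""Docstring"""\n\na = 1\n', 'demo.py')
#     print tree.pformat()   # pformat() is assumed to come from docutils.nodes
# which would print a <module_section> containing a <docstring> and an
# <attribute> node for "a".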
class BaseVisitor(ASTVisitor):
def __init__(self, token_parser):
ASTVisitor.__init__(self)
self.token_parser = token_parser
self.context = []
self.documentable = None
def default(self, node, *args):
self.documentable = None
#print 'in default (%s)' % node.__class__.__name__
#ASTVisitor.default(self, node, *args)
def default_visit(self, node, *args):
#print 'in default_visit (%s)' % node.__class__.__name__
ASTVisitor.default(self, node, *args)
class DocstringVisitor(BaseVisitor):
def visitDiscard(self, node):
if self.documentable:
self.visit(node.expr)
def visitConst(self, node):
if self.documentable:
if type(node.value) in (StringType, UnicodeType):
self.documentable.append(make_docstring(node.value, node.lineno))
else:
self.documentable = None
def visitStmt(self, node):
self.default_visit(node)
class AssignmentVisitor(DocstringVisitor):
def visitAssign(self, node):
visitor = AttributeVisitor(self.token_parser)
compiler.walk(node, visitor, walker=visitor)
if visitor.attributes:
self.context[-1].extend(visitor.attributes)
if len(visitor.attributes) == 1:
self.documentable = visitor.attributes[0]
else:
self.documentable = None
class ModuleVisitor(AssignmentVisitor):
def __init__(self, filename, token_parser):
AssignmentVisitor.__init__(self, token_parser)
self.filename = filename
self.module = None
def visitModule(self, node):
self.module = module = pynodes.module_section()
module['filename'] = self.filename
append_docstring(module, node.doc, node.lineno)
self.context.append(module)
self.documentable = module
self.visit(node.node)
self.context.pop()
def visitImport(self, node):
self.context[-1] += make_import_group(names=node.names,
lineno=node.lineno)
self.documentable = None
def visitFrom(self, node):
self.context[-1].append(
make_import_group(names=node.names, from_name=node.modname,
lineno=node.lineno))
self.documentable = None
def visitFunction(self, node):
visitor = FunctionVisitor(self.token_parser,
function_class=pynodes.function_section)
compiler.walk(node, visitor, walker=visitor)
self.context[-1].append(visitor.function)
def visitClass(self, node):
visitor = ClassVisitor(self.token_parser)
compiler.walk(node, visitor, walker=visitor)
self.context[-1].append(visitor.klass)
class AttributeVisitor(BaseVisitor):
def __init__(self, token_parser):
BaseVisitor.__init__(self, token_parser)
self.attributes = pynodes.class_attribute_section()
def visitAssign(self, node):
# Don't visit the expression itself, just the attribute nodes:
for child in node.nodes:
self.dispatch(child)
expression_text = self.token_parser.rhs(node.lineno)
expression = pynodes.expression_value()
expression.append(Text(expression_text))
for attribute in self.attributes:
attribute.append(expression)
def visitAssName(self, node):
self.attributes.append(make_attribute(node.name,
lineno=node.lineno))
def visitAssTuple(self, node):
attributes = self.attributes
self.attributes = []
self.default_visit(node)
n = pynodes.attribute_tuple()
n.extend(self.attributes)
n['lineno'] = self.attributes[0]['lineno']
attributes.append(n)
self.attributes = attributes
#self.attributes.append(att_tuple)
def visitAssAttr(self, node):
self.default_visit(node, node.attrname)
def visitGetattr(self, node, suffix):
self.default_visit(node, node.attrname + '.' + suffix)
def visitName(self, node, suffix):
self.attributes.append(make_attribute(node.name + '.' + suffix,
lineno=node.lineno))
class FunctionVisitor(DocstringVisitor):
in_function = 0
def __init__(self, token_parser, function_class):
DocstringVisitor.__init__(self, token_parser)
self.function_class = function_class
def visitFunction(self, node):
if self.in_function:
self.documentable = None
# Don't bother with nested function definitions.
return
self.in_function = 1
self.function = function = make_function_like_section(
name=node.name,
lineno=node.lineno,
doc=node.doc,
function_class=self.function_class)
self.context.append(function)
self.documentable = function
self.parse_parameter_list(node)
self.visit(node.code)
self.context.pop()
def parse_parameter_list(self, node):
parameters = []
special = []
argnames = list(node.argnames)
if node.kwargs:
special.append(make_parameter(argnames[-1], excess_keyword=1))
argnames.pop()
if node.varargs:
special.append(make_parameter(argnames[-1],
excess_positional=1))
argnames.pop()
defaults = list(node.defaults)
defaults = [None] * (len(argnames) - len(defaults)) + defaults
function_parameters = self.token_parser.function_parameters(
node.lineno)
#print >>sys.stderr, function_parameters
for argname, default in zip(argnames, defaults):
if type(argname) is TupleType:
parameter = pynodes.parameter_tuple()
for tuplearg in argname:
parameter.append(make_parameter(tuplearg))
argname = normalize_parameter_name(argname)
else:
parameter = make_parameter(argname)
if default:
n_default = pynodes.parameter_default()
n_default.append(Text(function_parameters[argname]))
parameter.append(n_default)
parameters.append(parameter)
if parameters or special:
special.reverse()
parameters.extend(special)
parameter_list = pynodes.parameter_list()
parameter_list.extend(parameters)
self.function.append(parameter_list)
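# A minimal standalone sketch (assumed example, not part of the original module)
# of the default-alignment step used in parse_parameter_list() above: defaults are
# left-padded with None so they pair up with the trailing parameter names.
def _example_align_defaults(argnames=('a', 'b', 'c', 'd'), defaults=(1, 2)):
    argnames = list(argnames)
    defaults = list(defaults)
    defaults = [None] * (len(argnames) - len(defaults)) + defaults
    return zip(argnames, defaults)  # [('a', None), ('b', None), ('c', 1), ('d', 2)]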
class ClassVisitor(AssignmentVisitor):
in_class = 0
def __init__(self, token_parser):
AssignmentVisitor.__init__(self, token_parser)
self.bases = []
def visitClass(self, node):
if self.in_class:
self.documentable = None
# Don't bother with nested class definitions.
return
self.in_class = 1
#import mypdb as pdb
#pdb.set_trace()
for base in node.bases:
self.visit(base)
self.klass = klass = make_class_section(node.name, self.bases,
doc=node.doc,
lineno=node.lineno)
self.context.append(klass)
self.documentable = klass
self.visit(node.code)
self.context.pop()
def visitGetattr(self, node, suffix=None):
if suffix:
name = node.attrname + '.' + suffix
else:
name = node.attrname
self.default_visit(node, name)
def visitName(self, node, suffix=None):
if suffix:
name = node.name + '.' + suffix
else:
name = node.name
self.bases.append(name)
def visitFunction(self, node):
if node.name == '__init__':
visitor = InitMethodVisitor(self.token_parser,
function_class=pynodes.method_section)
compiler.walk(node, visitor, walker=visitor)
else:
visitor = FunctionVisitor(self.token_parser,
function_class=pynodes.method_section)
compiler.walk(node, visitor, walker=visitor)
self.context[-1].append(visitor.function)
class InitMethodVisitor(FunctionVisitor, AssignmentVisitor): pass
class TokenParser:
def __init__(self, text):
self.text = text + '\n\n'
self.lines = self.text.splitlines(1)
self.generator = tokenize.generate_tokens(iter(self.lines).next)
self.next()
def __iter__(self):
return self
def next(self):
self.token = self.generator.next()
self.type, self.string, self.start, self.end, self.line = self.token
return self.token
def goto_line(self, lineno):
while self.start[0] < lineno:
self.next()
        return self.token
def rhs(self, lineno):
"""
Return a whitespace-normalized expression string from the right-hand
side of an assignment at line `lineno`.
"""
self.goto_line(lineno)
while self.string != '=':
self.next()
self.stack = None
while self.type != token.NEWLINE and self.string != ';':
if self.string == '=' and not self.stack:
self.tokens = []
self.stack = []
self._type = None
self._string = None
self._backquote = 0
else:
self.note_token()
self.next()
self.next()
text = ''.join(self.tokens)
return text.strip()
closers = {')': '(', ']': '[', '}': '{'}
openers = {'(': 1, '[': 1, '{': 1}
del_ws_prefix = {'.': 1, '=': 1, ')': 1, ']': 1, '}': 1, ':': 1, ',': 1}
no_ws_suffix = {'.': 1, '=': 1, '(': 1, '[': 1, '{': 1}
def note_token(self):
if self.type == tokenize.NL:
return
del_ws = self.del_ws_prefix.has_key(self.string)
append_ws = not self.no_ws_suffix.has_key(self.string)
if self.openers.has_key(self.string):
self.stack.append(self.string)
if (self._type == token.NAME
or self.closers.has_key(self._string)):
del_ws = 1
elif self.closers.has_key(self.string):
assert self.stack[-1] == self.closers[self.string]
self.stack.pop()
elif self.string == '`':
if self._backquote:
del_ws = 1
assert self.stack[-1] == '`'
self.stack.pop()
else:
append_ws = 0
self.stack.append('`')
self._backquote = not self._backquote
if del_ws and self.tokens and self.tokens[-1] == ' ':
del self.tokens[-1]
self.tokens.append(self.string)
self._type = self.type
self._string = self.string
if append_ws:
self.tokens.append(' ')
def function_parameters(self, lineno):
"""
Return a dictionary mapping parameters to defaults
(whitespace-normalized strings).
"""
self.goto_line(lineno)
while self.string != 'def':
self.next()
while self.string != '(':
self.next()
name = None
default = None
parameter_tuple = None
self.tokens = []
parameters = {}
self.stack = [self.string]
self.next()
while 1:
if len(self.stack) == 1:
if parameter_tuple:
# Just encountered ")".
#print >>sys.stderr, 'parameter_tuple: %r' % self.tokens
name = ''.join(self.tokens).strip()
self.tokens = []
parameter_tuple = None
if self.string in (')', ','):
if name:
if self.tokens:
default_text = ''.join(self.tokens).strip()
else:
default_text = None
parameters[name] = default_text
self.tokens = []
name = None
default = None
if self.string == ')':
break
elif self.type == token.NAME:
if name and default:
self.note_token()
else:
assert name is None, (
'token=%r name=%r parameters=%r stack=%r'
% (self.token, name, parameters, self.stack))
name = self.string
#print >>sys.stderr, 'name=%r' % name
elif self.string == '=':
assert name is not None, 'token=%r' % (self.token,)
assert default is None, 'token=%r' % (self.token,)
assert self.tokens == [], 'token=%r' % (self.token,)
default = 1
self._type = None
self._string = None
self._backquote = 0
elif name:
self.note_token()
elif self.string == '(':
parameter_tuple = 1
self._type = None
self._string = None
self._backquote = 0
self.note_token()
else: # ignore these tokens:
assert (self.string in ('*', '**', '\n')
or self.type == tokenize.COMMENT), (
'token=%r' % (self.token,))
else:
self.note_token()
self.next()
return parameters
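# A minimal standalone sketch (assumed example, not part of the original module)
# showing what TokenParser.rhs() returns: the whitespace-normalized right-hand side
# of the assignment found at the given line of the source text.
def _example_token_parser_rhs():
    source = "a = (1 +2,\n     'x' )\n"
    return TokenParser(source).rhs(1)  # -> "(1 + 2, 'x')"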
def make_docstring(doc, lineno):
n = pynodes.docstring()
if lineno:
# Really, only module docstrings don't have a line
# (@@: but maybe they should)
n['lineno'] = lineno
n.append(Text(doc))
return n
def append_docstring(node, doc, lineno):
if doc:
node.append(make_docstring(doc, lineno))
def make_class_section(name, bases, lineno, doc):
n = pynodes.class_section()
n['lineno'] = lineno
n.append(make_object_name(name))
for base in bases:
b = pynodes.class_base()
b.append(make_object_name(base))
n.append(b)
append_docstring(n, doc, lineno)
return n
def make_object_name(name):
n = pynodes.object_name()
n.append(Text(name))
return n
def make_function_like_section(name, lineno, doc, function_class):
n = function_class()
n['lineno'] = lineno
n.append(make_object_name(name))
append_docstring(n, doc, lineno)
return n
def make_import_group(names, lineno, from_name=None):
n = pynodes.import_group()
n['lineno'] = lineno
if from_name:
n_from = pynodes.import_from()
n_from.append(Text(from_name))
n.append(n_from)
for name, alias in names:
n_name = pynodes.import_name()
n_name.append(Text(name))
if alias:
n_alias = pynodes.import_alias()
n_alias.append(Text(alias))
n_name.append(n_alias)
n.append(n_name)
return n
def make_class_attribute(name, lineno):
n = pynodes.class_attribute()
n['lineno'] = lineno
n.append(Text(name))
return n
def make_attribute(name, lineno):
n = pynodes.attribute()
n['lineno'] = lineno
n.append(make_object_name(name))
return n
def make_parameter(name, excess_keyword=0, excess_positional=0):
"""
    excess_keyword and excess_positional must each be either 1 or 0,
    and they cannot both be 1.
"""
n = pynodes.parameter()
n.append(make_object_name(name))
assert not excess_keyword or not excess_positional
if excess_keyword:
n['excess_keyword'] = 1
if excess_positional:
n['excess_positional'] = 1
return n
def trim_docstring(text):
"""
Trim indentation and blank lines from docstring text & return it.
See PEP 257.
"""
if not text:
return text
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = text.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxint
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxint:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
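# A minimal standalone sketch (assumed example, not part of the original module)
# of the PEP 257 behaviour of trim_docstring() above: the first line is stripped
# and the common indentation of the remaining lines is removed.
def _example_trim_docstring():
    raw = """First line.

        Indented body line.
        """
    return trim_docstring(raw)  # -> 'First line.\n\nIndented body line.'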
def normalize_parameter_name(name):
"""
Converts a tuple like ``('a', ('b', 'c'), 'd')`` into ``'(a, (b, c), d)'``
"""
if type(name) is TupleType:
return '(%s)' % ', '.join([normalize_parameter_name(n) for n in name])
else:
return name
if __name__ == '__main__':
import sys
args = sys.argv[1:]
if args[0] == '-v':
filename = args[1]
module_text = open(filename).read()
ast = compiler.parse(module_text)
visitor = compiler.visitor.ExampleASTVisitor()
compiler.walk(ast, visitor, walker=visitor, verbose=1)
else:
filename = args[0]
content = open(filename).read()
print parse_module(content, filename).pformat()
|
garinh/cs
|
docs/support/docutils/readers/python/moduleparser.py
|
Python
|
lgpl-2.1
| 25,839
|
[
"CRYSTAL",
"VisIt"
] |
08d3c62ff75d5f437b09c60f334c59ba346e54c2733d6860865fdf5338b731d0
|
"""
Basic SparseCFProjection with associated sparse CFs and output,
response, and learning functions. If the sparse component cannot be
imported, SparseCFProjection will fall back to a basic dense CFProjection.
The CFSPOF and CFSPLF plugin functions allow any single CF output function to
be applied to the sparse CFs, but may suffer a serious performance
loss. For real work, such functions should be implemented at the
Cython or C++ level.
"""
import numpy as np
import math
from scipy.ndimage.filters import gaussian_filter
import param
import imagen as ig
from copy import copy
import topo
from topo.base.cf import CFProjection, NullCFError, _create_mask, simple_vectorize
from topo.submodel import Model
from imagen import patterngenerator
from imagen.patterngenerator import PatternGenerator
from topo.base.functionfamily import TransferFn, IdentityTF
from topo.base.functionfamily import LearningFn, Hebbian
from topo.base.functionfamily import ResponseFn, DotProduct
from topo.base.sheetcoords import Slice
from topo.submodel import register_submodel_decorators
use_sparse = True
try:
    import sparse
except ImportError:
    use_sparse = False
sparse_type = np.float32
class CFSPLF_Plugin(param.Parameterized):
"""CFSPLearningFunction applying the specified single_cf_fn to each Sparse CF."""
single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),doc="""
Accepts a LearningFn that will be applied to each CF individually.""")
def constant_sum_connection_rate(self,n_units,learning_rate):
"""
Return the learning rate for a single connection assuming that
the total rate is to be divided evenly among all the units in
the connection field.
"""
return float(learning_rate)/n_units
def __call__(self, projection, **params):
"""Apply the specified single_cf_fn to every sparse CF."""
single_connection_learning_rate = self.constant_sum_connection_rate(projection.n_units,projection.learning_rate)
# avoid evaluating these references each time in the loop
single_cf_fn = self.single_cf_fn
for cf in projection.flatcfs:
temp_weights = cf.weights
single_cf_fn(cf.get_input_matrix(projection.src.activity),
projection.dest.activity.flat[cf.oned_idx], temp_weights,
single_connection_learning_rate)
temp_weights *= cf.mask
cf.weights = temp_weights
class CFSPOF_Plugin(param.Parameterized):
"""
Applies the specified single_cf_fn to each SparseCF in the SparseCFProjection.
"""
single_cf_fn = param.ClassSelector(TransferFn,default=IdentityTF(),
doc="Accepts a TransferFn that will be applied to each CF individually.")
def __call__(self, projection, **params):
if type(self.single_cf_fn) is not IdentityTF:
single_cf_fn = self.single_cf_fn
for cf in projection.flatcfs:
temp_weights = cf.weights
single_cf_fn(cf.weights)
cf.weights = temp_weights
del cf.norm_total
class CFSPOF_Prune(CFSPOF_Plugin):
"""
Prunes specified percentage of connections from CFs in SparseCFProjection
at specified interval.
"""
interval = param.Number(default=1000,bounds=(0,None),doc="""
Time interval at which pruning step will be applied.""")
percentile = param.Number(default=10.0,bounds=(0,100),doc="""
Percentile boundary below which connections will be pruned.""")
def __call__(self, projection, **params):
time = math.ceil(topo.sim.time())
if (time == 0):
if not hasattr(self,"initial_conns"):
self.initial_conns = {}
self.initial_conns[projection.name] = projection.n_conns()
elif (time % self.interval) == 0:
for cf in projection.flatcfs:
dim1,dim2 = cf.weights.shape
temp_weights = cf.weights
percentile = np.percentile(temp_weights[temp_weights.nonzero()],self.percentile)
temp_weights[np.where(temp_weights<=percentile)] = 0.0
cf.weights = temp_weights
projection.weights.prune()
self.message("%s has %f%% of initial connections" % (projection.name, (float(projection.n_conns())/self.initial_conns[projection.name])*100))
class CFSPOF_SproutRetract(CFSPOF_Plugin):
"""
Sprouting and retraction weights output function. At a preset time
interval, the function removes and adds connections based on a
piecewise function, which determines the number of connections to
alter and the sprouting and retraction ratios, eventually allowing
connections to converge on the target_sparsity. The function
ensures the full turnover_rate is applied at the maximal distances
from the target sparsity, i.e. at 0% and 100% density. As the
projection approaches the target sparsity, it will asymptote, but a
residual turnover will ensure that a fixed amount of connections
will continue to sprout and retract.
Retraction deletes the x lowest weights, while sprouting applies a
convolution with a Gaussian kernel to the existing connections,
growing connections at locations with the highest probabilities.
Still experimental and not scientifically validated.
"""
interval = param.Number(default=1000,bounds=(0,None),doc="""
Time interval between sprout/retract steps.""")
residual_turnover = param.Number(default=0.01,bounds=(0,1.0),doc="""
Constant turnover rate independent of current sparsity.""")
turnover_rate = param.Number(default=0.1,bounds=(0,1.0),doc="""
Percentage of weights to change per interval, assuming
currently fully dense and target is fully sparse.""")
target_sparsity = param.Number(default=0.15,bounds=(0,1.0),doc="""
Sparsity level at which sprouting and retraction cancel out.""")
kernel_sigma = param.Number(default=1.0,bounds=(0.0,10.0),doc="""
Gaussian spatial variance for weights to diffuse per interval.""")
disk_mask = param.Boolean(default=True,doc="""
Limits connection sprouting to a disk.""")
def __call__(self, projection, **params):
time = math.ceil(topo.sim.time())
if self.disk_mask:
self.disk = ig.Disk(size=1.0,smoothing=0.0)
# Get CF and src sheet shapes
cf_x,cf_y = projection.dest.activity.shape
src_x,src_y = projection.src.activity.shape
# Initialize sparse triplet arrays
y_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
x_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
val_array = np.zeros((src_x*src_y*cf_y),dtype=sparse_type)
# Create new sparse matrix to accumulate into
sum_sparse = sparse.csarray_float(projection.src.activity.shape,projection.dest.activity.shape)
# Counters for logging
sprout_sum = 0; prune_sum = 0; unit_total = 0
self.mask_total = 0
if (time == 0):
if not hasattr(self,"initial_conns"):
self.initial_conns = {}
self.initial_conns[projection.name] = projection.n_conns()
elif (time % self.interval) == 0:
idx=0
for cidx,cf in enumerate(projection.flatcfs):
temp_weights = cf.weights
dense_unit_mask = (1.0 - (temp_weights>0.0))
dim1,dim2 = temp_weights.shape
sprout_count,prune_idx,nnz = self.calc_ratios(temp_weights)
self.prune(temp_weights,prune_idx)
nnz_pp = np.count_nonzero(temp_weights)
prune_sum += (nnz_pp-nnz)
if sprout_count:
self.sprout(temp_weights,dense_unit_mask,sprout_count)
nnz_ps = np.count_nonzero(temp_weights)
sprout_sum += nnz_ps - nnz_pp
unit_total += nnz_ps
# Populate sparse array chunk
temp_sparse = sparse.csarray_float(projection.src.activity.shape,projection.dest.activity.shape)
x1,x2,y1,y2 = cf.input_sheet_slice.tolist()
for cnx in range(dim1):
val_array[idx:idx+dim2] = temp_weights[cnx,:]
x_val = (x1+cnx) * src_y + y1
x_array[idx:idx+dim2] = range(x_val,x_val+dim2)
y_array[idx:idx+dim2] = cidx
idx += dim2
# Populate combined sparse array with sparse array chunk
if (cidx+1)%cf_y == 0:
nnz_idx = val_array.nonzero()
temp_sparse.setTriplets(x_array[nnz_idx],y_array[nnz_idx],val_array[nnz_idx])
sum_sparse += temp_sparse
x_array *= 0; y_array *= 0; val_array *= 0.0
idx=0
projection.weights = sum_sparse
del temp_sparse, sum_sparse
projection.weights.compress()
def sprout(self, temp_weights, mask, sprout_count):
"""
Applies a Gaussian blur to the existing connection field,
selecting the n units with the highest probabilities to sprout
new connections, where n is set by the sprout_count. New
connections are initialized at the minimal strength of the
current CF.
"""
dim1,dim2 = temp_weights.shape
init_weight = temp_weights[temp_weights.nonzero()].min()
blurred_weights = gaussian_filter(temp_weights, sigma=self.kernel_sigma)
blurred_weights = (blurred_weights - blurred_weights.min()) / blurred_weights.max()
sprout_prob_map = (blurred_weights * np.random.rand(dim1,dim2)) * mask
if self.disk_mask:
sprout_prob_map *= self.disk(xdensity=dim2,ydensity=dim1)
sprout_inds = np.unravel_index(np.argsort(sprout_prob_map.flatten())[-sprout_count:],(dim1,dim2))
temp_weights[sprout_inds] = init_weight
def prune(self, temp_weights, prune_idx):
"""
Retracts n connections with the lowest weights, where n is
determined by the piecewise linear function in the calc_ratios
method.
"""
sorted_weights = np.sort(temp_weights.flatten())
threshold = sorted_weights[prune_idx]
temp_weights[temp_weights < threshold] = 0.0
def calc_ratios(self,temp_weights):
"""
Uses a piecewise linear function to determine the unit
proportion of sprouting and retraction and the associated
turnover rates.
Above the target sparsity the sprout/retract ratio scales
linearly up to maximal density, i.e. at full density 100% of
the turnover is put into retraction while at full sparsity
all the turnover is put into sprouting new connections. At
the target density sprouting and retraction are equal.
        The turnover is also determined by the piecewise
linear function. At maximal distance from the target sparsity,
i.e. at full sparsity or density, the full turnover rate will
be used and as the target sparsity is approached from either
side this term decays to zero. Therefore, a residual turnover
is introduced to ensure that even at the target sparsity some
connections continue to sprout and retract.
"""
dim1,dim2 = temp_weights.shape
if self.disk_mask:
masked_units = len(self.disk(xdensity=dim2,ydensity=dim1).nonzero()[0])
else:
masked_units = dim1*dim2
self.mask_total += masked_units
max_units = dim1*dim2
nnz = np.count_nonzero(temp_weights)
cf_sparsity = nnz / float(masked_units)
delta_sparsity = cf_sparsity - self.target_sparsity
if delta_sparsity > 0:
relative_sparsity = delta_sparsity/(1.0 - self.target_sparsity)
else:
relative_sparsity = delta_sparsity/self.target_sparsity
# Total number of units to modify, broken down into units for pruning and sprouting
delta_units = (abs(self.turnover_rate * relative_sparsity) + self.residual_turnover) * masked_units
prune_factor = 0.5 + (0.5*relative_sparsity)
prune_count = int(delta_units * prune_factor)
prune_idx = (max_units-nnz)+prune_count
sprout_count = int(delta_units * (1-prune_factor))
return sprout_count, prune_idx, nnz
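# A minimal standalone sketch (assumed example, not part of the original module) of
# the piecewise bookkeeping in CFSPOF_SproutRetract.calc_ratios above, applied to a
# plain dense weight array.  It returns sprout and prune counts rather than the
# sorted-array prune index used by the class, and the parameter values are
# illustrative assumptions only.
def _sprout_retract_counts_sketch(weights, target_sparsity=0.15,
                                  turnover_rate=0.1, residual_turnover=0.01):
    masked_units = weights.size                    # no disk mask in this sketch
    nnz = np.count_nonzero(weights)
    density = nnz / float(masked_units)
    delta = density - target_sparsity
    # Scale the imbalance by the distance to full density (above target) or
    # full sparsity (below target), so the turnover peaks at the extremes.
    if delta > 0:
        relative = delta / (1.0 - target_sparsity)
    else:
        relative = delta / target_sparsity
    delta_units = (abs(turnover_rate * relative) + residual_turnover) * masked_units
    prune_factor = 0.5 + 0.5 * relative            # >0.5 when too dense
    prune_count = int(delta_units * prune_factor)
    sprout_count = int(delta_units * (1 - prune_factor))
    return sprout_count, prune_count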
class CFSPRF_Plugin(param.Parameterized):
"""
Generic large-scale response function based on a simple single-CF function.
Applies the single_cf_fn to each CF in turn. For the default single_cf_fn
of DotProduct(), does a basic dot product of each CF with the corresponding
slice of the input array. This function is likely to be slow to run, but
it is easy to extend with any arbitrary single-CF response function.
The single_cf_fn must be a function f(X,W) that takes two identically
shaped matrices X (the input) and W (the CF weights) and computes a scalar
activation value based on those weights.
"""
single_cf_fn = param.ClassSelector(ResponseFn,default=DotProduct(),doc="""
Accepts a ResponseFn that will be applied to each CF individually.""")
def __call__(self, projection, **params):
single_cf_fn = self.single_cf_fn
for i,cf in enumerate(projection.flatcfs):
X = cf.input_sheet_slice.submatrix(projection.src.activity)
projection.activity.flat[i] = single_cf_fn(X,cf.weights)
projection.activity *= projection.strength
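# A minimal standalone sketch (assumed example, not part of the original module) of a
# single_cf_fn satisfying the contract described in the CFSPRF_Plugin docstring above:
# a callable f(X, W) over two identically shaped arrays returning a scalar activation.
def _example_single_cf_fn(X, W):
    return float(np.dot(X.ravel(), W.ravel()))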
class compute_sparse_joint_norm_totals(param.ParameterizedFunction):
"""
Compute norm_total for each CF in each projection from a group to be
normalized jointly.
"""
def __call__(self, projlist,active_units_mask=True):
# Assumes that all Projections in the list have the same r,c size
assert len(projlist)>=1
joint_sum = np.zeros(projlist[0].dest.shape,dtype=np.float64)
for p in projlist:
if not p.has_norm_total:
p.norm_total *= 0.0
p.weights.CFWeightTotals(p.norm_total)
p.has_norm_total=True
joint_sum = np.add.reduce([proj.norm_total for proj in projlist],dtype=np.float64)
for p in projlist:
p.norm_total = joint_sum.copy()
def CFPOF_DivisiveNormalizeL1_Sparse(projection):
"""
Sparse CF Projection output function applying L1 divisive normalization
to individual CFs.
"""
if not projection.has_norm_total:
projection.norm_total *= 0.0
projection.weights.CFWeightTotals(projection.norm_total)
projection.weights.DivisiveNormalizeL1(projection.norm_total)
projection.has_norm_total = False
def CFPLF_Hebbian_Sparse(projection):
"""
Sparse CF Projection learning function applying Hebbian learning
to the weights in a projection.
"""
single_conn_lr = projection.learning_rate/projection.n_units
projection.norm_total *= 0.0
projection.weights.Hebbian(projection.src.activity,projection.dest.activity,
projection.norm_total,single_conn_lr)
projection.has_norm_total = True
def CFPLF_Hebbian_Sparse_opt(projection):
"""
Sparse CF Projection learning function, which calls an optimized Hebbian
learning function while skipping over inactive units.
"""
single_conn_lr = projection.learning_rate/projection.n_units
projection.norm_total *= 0.0
projection.weights.Hebbian_opt(projection.src.activity,projection.dest.activity,
projection.norm_total,single_conn_lr,projection.initialized)
projection.has_norm_total = True
def CFPRF_DotProduct_Sparse(projection):
"""
Sparse CF Projection response function calculating the dot-product
between incoming activities and CF weights.
"""
projection.weights.DotProduct(projection.strength, projection.input_buffer, projection.activity)
def CFPRF_DotProduct_Sparse_opt(projection):
"""
Sparse CF Projection response function calculating the dot-product
between incoming activities and CF weights. Optimization skips
inactive units if a certain percentage of neurons is inactive.
"""
    nnz_ratio = np.count_nonzero(projection.src.activity) / float(len(projection.src.activity.flatten()))
if nnz_ratio < 0.1:
projection.weights.DotProduct_opt(projection.strength, projection.src.activity, projection.activity)
else:
projection.weights.DotProduct(projection.strength, projection.src.activity, projection.activity)
class SparseConnectionField(param.Parameterized):
"""
A set of weights on one input Sheet.
Each ConnectionField contributes to the activity of one unit on
the output sheet, and is normally used as part of a Projection
including many other ConnectionFields.
"""
# ALERT: need bounds, more docs
x = param.Number(default=0.0,doc="Sheet X coordinate of CF")
y = param.Number(default=0.0,doc="Sheet Y coordinate of CF")
weights_generator = param.ClassSelector(PatternGenerator,
default=patterngenerator.Constant(),constant=True,doc="""
Generates initial weights values.""")
min_matrix_radius=param.Integer(default=1)
output_fns = param.HookList(default=[],class_=TransferFn,precedence=0.08,doc="""
Optional function(s) to apply to the pattern array after it has been created.
Can be used for normalization, thresholding, etc.""")
# Class attribute to switch to legacy weight generation if False
independent_weight_generation = True
def get_bounds(self,input_sheet=None):
        if input_sheet is not None:
return self.input_sheet_slice.compute_bounds(input_sheet)
else:
return self.input_sheet_slice.compute_bounds(self.input_sheet)
def __get_shape_mask(self):
cf_shape = self.projection.cf_shape
bounds = self.projection.bounds_template
xdensity = self.projection.src.xdensity
        ydensity = self.projection.src.ydensity
center_r,center_c = self.projection.src.sheet2matrixidx(0,0)
center_x,center_y = self.projection.src.matrixidx2sheet(center_r,center_c)
cf_mask = cf_shape(x=center_x,y=center_y,bounds=bounds,xdensity=xdensity,ydensity=ydensity)
return cf_mask
shape_mask = property(__get_shape_mask)
def __get_norm_total(self):
return self.projection.norm_total[self.matrix_idx[0],self.matrix_idx[1]]
def __set_norm_total(self,new_norm_total):
self.projection.norm_total[self.matrix_idx[0],self.matrix_idx[1]] = new_norm_total
def __del_norm_total(self):
self.projection.norm_total[self.matrix_idx[0],self.matrix_idx[1]] = 0.0
norm_total = property(__get_norm_total,__set_norm_total,__del_norm_total)
def __get_mask(self):
x1,x2,y1,y2 = self.input_sheet_slice.tolist()
mask = np.zeros((x2-x1,y2-y1),dtype=np.bool)
inds = np.ravel_multi_index(np.mgrid[x1:x2,y1:y2],self.projection.src.shape).flatten()
nz_flat = self.projection.weights[inds,self.oned_idx].toarray()
nz_inds = nz_flat.reshape(x2-x1,y2-y1).nonzero()
mask[nz_inds] = True
return mask
mask = property(__get_mask,
"""
The mask property returns an array of bools representing the
zero weights in the CF weights array.
It is useful when applying additive functions on the weights
array, to ensure zero values are not accidentally overwritten.
The mask cannot be changed via the property, only by changing
the weights directly.
""")
def __get_weights(self):
"""
get_weights accesses the sparse CF matrix and returns the CF
in dense form.
"""
x1,x2,y1,y2 = self.src_slice
inds = np.ravel_multi_index(np.mgrid[x1:x2,y1:y2],self.projection.src.shape).flatten()
return self.projection.weights[inds,self.oned_idx].toarray().reshape(x2-x1,y2-y1)
def __set_weights(self,arr):
"""
Takes an input array, which has to match the CF shape, and
creates an mgrid of the appropriate size, adds the proper
offsets and passes the values and indices to the sparse matrix
representation.
"""
x1,x2,y1,y2 = self.src_slice
(dim1,dim2) = arr.shape
assert (dim1,dim2) == (x2-x1,y2-y1), "Array does not match CF shape."
(x,y) = np.mgrid[0:dim1,0:dim2] # Create mgrid of CF size
x_ind = np.array(x)+x1; y_ind = np.array(y) + y1; # Add slice offsets
row_inds = np.ravel_multi_index((x_ind,y_ind),self.projection.src.shape).flatten().astype(np.int32)
col_inds = np.array([self.oned_idx]*len(row_inds),dtype=np.int32)
self.projection.weights.put(arr[x,y].flatten(),row_inds,col_inds)
weights = property(__get_weights,__set_weights)
def __init__(self,template,input_sheet,projection,label=None,**params):
"""
Initializes the CF object and stores meta information about the CF's
shape and position in the SparseCFProjection to allow for easier
initialization.
"""
super(SparseConnectionField,self).__init__(**params)
self.input_sheet = input_sheet
self.projection = projection
self.label = label
self.matrix_idx = self.projection.dest.sheet2matrixidx(self.x,self.y)
self.oned_idx = self.matrix_idx[0] * self.projection.dest.shape[1] + self.matrix_idx[1]
template = copy(template)
if not isinstance(template,Slice):
template = Slice(template,self.input_sheet,force_odd=True,
min_matrix_radius=self.min_matrix_radius)
self.weights_slice = self._create_input_sheet_slice(template)
self.src_slice = tuple(self.input_sheet_slice.tolist())
def _init_weights(self,mask_template):
mask = self.weights_slice.submatrix(mask_template)
mask = np.array(mask,copy=1)
pattern_params = dict(x=self.x,y=self.y,
bounds=self.get_bounds(self.input_sheet),
xdensity=self.input_sheet.xdensity,
ydensity=self.input_sheet.ydensity,
mask=mask)
controlled_weights = (param.Dynamic.time_dependent
and isinstance(param.Dynamic.time_fn,
param.Time)
and self.independent_weight_generation)
if controlled_weights:
with param.Dynamic.time_fn as t:
t(0) # Initialize at time zero.
# Controls random streams
label = '' if self.label is None else self.label
name = "%s_CF (%.5f, %.5f)" % (label, self.x, self.y)
w = self.weights_generator(**dict(pattern_params,
name=name))
else:
w = self.weights_generator(**pattern_params)
w = w.astype(sparse_type)
for of in self.output_fns:
of(w)
return w
def _create_input_sheet_slice(self,template):
"""
Create the input_sheet_slice, which provides the appropriate
Slice for this CF on the input_sheet (as well as providing
this CF's exact bounds).
Also creates the weights_slice, which provides the Slice for
this weights matrix (in case it must be cropped at an edge).
"""
# copy required because the template gets modified here but
# needs to be used again
input_sheet_slice = copy(template)
input_sheet_slice.positionedcrop(self.x,self.y,self.input_sheet)
input_sheet_slice.crop_to_sheet(self.input_sheet)
# weights matrix cannot have a zero-sized dimension (could
# happen at this stage because of cropping)
nrows,ncols = input_sheet_slice.shape_on_sheet()
if nrows<1 or ncols<1:
raise NullCFError(self.x,self.y,self.input_sheet,nrows,ncols)
self.input_sheet_slice = input_sheet_slice
# not copied because we don't use again
template.positionlesscrop(self.x,self.y,self.input_sheet)
return template
def get_input_matrix(self, activity):
return self.input_sheet_slice.submatrix(activity)
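# A minimal standalone sketch (assumed example, not part of the original module) of the
# index arithmetic used by __get_weights/__set_weights above: a CF's local (row, col)
# grid, offset by its slice origin, is flattened into indices of the source sheet.
# The shapes and slice below are made up for illustration.
def _example_cf_flat_indices(src_shape=(6, 6), src_slice=(2, 5, 1, 4)):
    x1, x2, y1, y2 = src_slice                     # CF covers rows 2..4, cols 1..3
    x, y = np.mgrid[0:x2 - x1, 0:y2 - y1]
    return np.ravel_multi_index((x + x1, y + y1), src_shape).flatten()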
class SparseCFProjection(CFProjection):
"""
A projection composed of SparseConnectionFields from a Sheet into
a ProjectionSheet.
SparseCFProjection computes its activity using a response_fn which
can either be an optimized function implemented as part of the
sparse matrix class or an unoptimized function, which requests the
weights in dense format. The initial contents of the
SparseConnectionFields mapping from the input Sheet into the
target ProjectionSheet are controlled by the weights_generator,
cf_shape, and weights_output_fn parameters, while the location of
the ConnectionField is controlled by the coord_mapper parameter.
Any subclass has to implement the interface activate(self) that
computes the response from the input and stores it in the activity
array.
"""
cf_type = param.Parameter(default=SparseConnectionField,doc="""
Type of ConnectionField to use when creating individual CFs.""")
learning_fn = param.Callable(default=CFPLF_Hebbian_Sparse,doc="""
Function for computing changes to the weights based on one activation step.""")
response_fn = param.Callable(default=CFPRF_DotProduct_Sparse,doc="""
Function for computing the Projection response to an input pattern.""")
weights_output_fns = param.HookList(default=[CFPOF_DivisiveNormalizeL1_Sparse],doc="""
Functions applied to each CF after learning.""")
initialized = param.Boolean(default=False)
def __init__(self,initialize_cfs=True,**params):
"""
Initialize the Projection with a set of cf_type objects
(typically SparseConnectionFields), each located at the
location in the source sheet corresponding to the unit in the
target sheet. The cf_type objects are stored in the 'cfs'
array.
The nominal_bounds_template specified may be altered: the
bounds must be fitted to the Sheet's matrix, and the weights
matrix must have odd dimensions. These altered bounds are
passed to the individual connection fields.
A mask for the weights matrix is constructed. The shape is
specified by cf_shape; the size defaults to the size
of the nominal_bounds_template.
"""
super(CFProjection,self).__init__(**params)
self.weights_generator.set_dynamic_time_fn(None,sublistattr='generators')
# get the actual bounds_template by adjusting a copy of the
# nominal_bounds_template to ensure an odd slice, and to be
# cropped to sheet if necessary
self._slice_template = Slice(copy(self.nominal_bounds_template),
self.src,force_odd=True,
min_matrix_radius=self.min_matrix_radius)
self.bounds_template = self._slice_template.compute_bounds(self.src)
self.mask_template = _create_mask(self.cf_shape,self.bounds_template,
self.src,self.autosize_mask,
self.mask_threshold)
self.n_units = self._calc_n_units()
self.activity = np.array(self.dest.activity)
self.norm_total = np.array(self.dest.activity,dtype=np.float64)
self.has_norm_total = False
if initialize_cfs:
self._create_cfs()
if self.apply_output_fns_init:
self.apply_learn_output_fns()
self.input_buffer = None
def __getstate__(self):
"""
Method to support pickling of sparse weights object.
"""
state_dict = self.__dict__.copy()
state_dict['triplets'] = state_dict['weights'].getTriplets()
state_dict['weight_shape'] = (self.src.activity.shape,self.dest.activity.shape)
del state_dict['weights']
return state_dict
def __setstate__(self,state_dict):
"""
Method to support unpickling of sparse weights object.
"""
self.__dict__.update(state_dict)
self.weights = sparse.csarray_float(self.weight_shape[0],self.weight_shape[1])
rowInds, colInds, values = self.triplets
self.weights.setTriplets(rowInds,colInds,values)
del self.triplets
del self.weight_shape
def _create_cfs(self):
"""
Creates the CF objects, initializing the weights one by one
and adding them to the sparse weights object in chunks.
"""
vectorized_create_cf = simple_vectorize(self._create_cf)
self.cfs = vectorized_create_cf(*self._generate_coords())
self.flatcfs = list(self.cfs.flat)
self.weights = sparse.csarray_float(self.src.activity.shape,self.dest.activity.shape)
cf_x,cf_y = self.dest.activity.shape
src_x,src_y = self.src.activity.shape
y_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
x_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
val_array = np.zeros((src_x*src_y*cf_y),dtype=np.float32)
# Iterate over the CFs
for x in range(cf_x):
temp_sparse = sparse.csarray_float(self.src.activity.shape,self.dest.activity.shape)
idx = 0
for y in range(cf_y):
cf = self.cfs[x][y]
label = cf.label + ('-%d' % self.seed if self.seed is not None else '')
name = "%s_CF (%.5f, %.5f)" % ('' if label is None else label, cf.x,cf.y)
x1,x2,y1,y2 = cf.input_sheet_slice.tolist()
if self.same_cf_shape_for_all_cfs:
mask_template = self.mask_template
else:
mask_template = _create_mask(self.cf_shape,self.bounds_template,
self.src,self.autosize_mask,
self.mask_threshold, name=name)
weights = self.cfs[x][y]._init_weights(mask_template)
cn_x,cn_y = weights.shape
y_val = x * cf_y + y
for cnx in range(cn_x):
val_array[idx:idx+cn_y] = weights[cnx,:]
x_val = (x1+cnx) * src_y + y1
x_array[idx:idx+cn_y] = range(x_val,x_val+cn_y)
y_array[idx:idx+cn_y] = y_val
idx += cn_y
nnz_idx = val_array.nonzero()
temp_sparse.setTriplets(x_array[nnz_idx],y_array[nnz_idx],val_array[nnz_idx])
self.weights += temp_sparse
x_array *= 0; y_array *= 0; val_array *= 0.0
del temp_sparse
self.weights.compress()
self.debug("Sparse projection %r loaded" % self.name)
def _create_cf(self,x,y):
"""
Create a ConnectionField at x,y in the src sheet.
"""
label = self.hash_format.format(name=self.name,
src=self.src.name,
dest=self.dest.name)
try:
CF = self.cf_type(template=self._slice_template,
projection=self,input_sheet=self.src,x=x,y=y,
weights_generator=self.weights_generator,
min_matrix_radius=self.min_matrix_radius,
label=label)
except NullCFError:
if self.allow_null_cfs:
CF = None
else:
raise
return CF
def get_sheet_mask(self):
return np.ones(self.activity.shape, dtype=self.activity.dtype)
def get_active_units_mask(self):
return np.ones(self.activity.shape, dtype=self.activity.dtype)
def activate(self,input_activity):
"""Activate using the specified response_fn and output_fn."""
if self.input_fns:
input_activity = input_activity.copy()
for iaf in self.input_fns:
iaf(input_activity)
self.input_buffer = input_activity
self.activity *=0.0
self.response_fn(self)
for of in self.output_fns:
of(self.activity)
def learn(self):
"""
For a SparseCFProjection, learn consists of calling the learning_fn.
"""
# Learning is performed if the input_buffer has already been set,
# i.e. there is an input to the Projection.
if self.input_buffer is not None:
self.learning_fn(self)
def apply_learn_output_fns(self,active_units_mask=True):
"""
Apply the weights_output_fns to each unit.
"""
for of in self.weights_output_fns: of(self)
def n_bytes(self):
"""
        Estimates the size on the basis of the number of non-zeros in the
        sparse matrix, assuming indices and values are stored using
32-bit integers and floats respectively.
"""
return self.n_conns() * (3 * 4)
def n_conns(self):
"""
Returns number of nonzero weights.
"""
return self.weights.getnnz()
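# A minimal standalone sketch (assumed example, not part of the original module) making
# the n_bytes() estimate above concrete: two 32-bit indices plus one 32-bit value per
# stored connection, i.e. 12 bytes per nonzero weight.
def _example_projection_size_mb(n_conns=1000000):
    return n_conns * (3 * 4) / (1024.0 * 1024.0)   # ~11.4 MB for one million nonzeros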
if not use_sparse:
print "WARNING: Sparse component could not be imported, replacing SparseCFProjection with regular CFProjection"
def SparseCFProjection(*args, **kwargs): # pyflakes:ignore (optimized version provided)
return CFProjection(*args,**kwargs)
register_submodel_decorators([SparseCFProjection])
sparse_components = [CFSPLF_Plugin,
CFSPOF_Plugin,
CFSPOF_Prune,
CFSPOF_SproutRetract,
CFSPRF_Plugin,
compute_sparse_joint_norm_totals,
CFPOF_DivisiveNormalizeL1_Sparse,
CFPLF_Hebbian_Sparse,
CFPLF_Hebbian_Sparse_opt,
CFPRF_DotProduct_Sparse,
CFPRF_DotProduct_Sparse_opt,
SparseConnectionField,
SparseCFProjection]
__all__ = sparse_components
|
ioam/topographica
|
topo/sparse/sparsecf.py
|
Python
|
bsd-3-clause
| 34,459
|
[
"Gaussian"
] |
c66ebad9cfaa1ce11c7c8a36ded2506e9b4798b7ce35cb80e7e0f0a2af0d1bdf
|
# DeepCrystal Technologies 2017 - Patrick Hop
# MIT License - have fun!!
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
np.random.seed(123)
from sklearn.ensemble import RandomForestRegressor
from sklearn import svm
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
from deepchem.models.tensorgraph.models.graph_models import GraphConvModel
BATCH_SIZE = 128
# Set to higher values to get better numbers
MAX_EPOCH = 1
LR = 1e-3
LMBDA = 1e-4
def retrieve_datasets():
os.system(
'wget -c %s' %
'https://s3-us-west-1.amazonaws.com/deep-crystal-california/az_logd.csv')
os.system(
'wget -c %s' %
'https://s3-us-west-1.amazonaws.com/deep-crystal-california/az_hppb.csv')
os.system(
'wget -c %s' %
'https://s3-us-west-1.amazonaws.com/deep-crystal-california/az_clearance.csv'
)
def load_dataset(dataset_file, featurizer='ECFP', split='index'):
tasks = ['exp']
if featurizer == 'ECFP':
featurizer = dc.feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer = dc.feat.ConvMolFeaturizer()
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file, shard_size=8192)
transformers = [
dc.trans.NormalizationTransformer(transform_y=True, dataset=dataset)
]
for transformer in transformers:
dataset = transformer.transform(dataset)
splitters = {
'index': dc.splits.IndexSplitter(),
'random': dc.splits.RandomSplitter(),
'scaffold': dc.splits.ScaffoldSplitter()
}
splitter = splitters[split]
train, valid, test = splitter.train_valid_test_split(dataset)
return tasks, (train, valid, test), transformers
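# A minimal hedged usage sketch (assumed example, not part of the original script) of
# load_dataset() above; it assumes az_logd.csv has already been fetched by
# retrieve_datasets() and picks GraphConv featurization with a scaffold split.
def _example_load_scaffold_split():
  tasks, (train, valid, test), transformers = load_dataset(
      'az_logd.csv', featurizer='GraphConv', split='scaffold')
  return tasks, train, valid, test, transformers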
def experiment(dataset_file, method='GraphConv', split='scaffold'):
featurizer = 'ECFP'
if method == 'GraphConv':
featurizer = 'GraphConv'
tasks, datasets, transformers = load_dataset(
dataset_file, featurizer=featurizer, split=split)
train, val, test = datasets
model = None
if method == 'GraphConv':
model = GraphConvModel(len(tasks), batch_size=BATCH_SIZE, mode="regression")
elif method == 'RF':
def model_builder_rf(model_dir):
sklearn_model = RandomForestRegressor(n_estimators=100)
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder_rf)
elif method == 'SVR':
def model_builder_svr(model_dir):
sklearn_model = svm.SVR(kernel='linear')
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder_svr)
return model, train, val, test, transformers
#======================================================================
# Run Benchmarks {GC-DNN, SVR, RF}
def main():
print("About to retrieve datasets")
retrieve_datasets()
MODEL = "GraphConv"
SPLIT = "scaffold"
DATASET = "az_hppb.csv"
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
print("About to build model")
model, train, val, test, transformers = experiment(
DATASET, method=MODEL, split=SPLIT)
if MODEL == 'GraphConv':
print("running GraphConv search")
best_val_score = 0.0
train_score = 0.0
for l in range(0, MAX_EPOCH):
print("epoch %d" % l)
model.fit(train, nb_epoch=1)
latest_train_score = model.evaluate(train, [metric],
transformers)['mean-pearson_r2_score']
latest_val_score = model.evaluate(val, [metric],
transformers)['mean-pearson_r2_score']
if latest_val_score > best_val_score:
best_val_score = latest_val_score
train_score = latest_train_score
print((MODEL, SPLIT, DATASET, train_score, best_val_score))
else:
model.fit(train)
train_score = model.evaluate(train, [metric],
transformers)['mean-pearson_r2_score']
val_score = model.evaluate(val, [metric],
transformers)['mean-pearson_r2_score']
print((MODEL, SPLIT, DATASET, train_score, val_score))
if __name__ == "__main__":
main()
|
Agent007/deepchem
|
examples/adme/run_benchmarks.py
|
Python
|
mit
| 4,255
|
[
"CRYSTAL"
] |
49f2d33e1cfc0555c0a56bc08821c24e36e048261a2d9992106dba4acb7473ae
|
from math import sqrt
from random import random
import itertools
from itertools import chain
import functools as fcn
def generate_graph(cities):
'''
Generates Travelling Salesman's Graph (undirected graph)
given cities salesman needs to visit
cities - [(x1, y1), (x2, y2), ..., (xn, yn)]
- locations of n cities
- n = len(cities)
'''
adjacency_matrix = \
[[sqrt((x2-x1) ** 2 + (y2 - y1) ** 2) for (x1, y1) in cities]
for (x2, y2) in cities]
return adjacency_matrix
def __evaluate_path(adjacency_matrix, tsp_path):
'''
function for evaluation of cycle given list
tsp_path = [city1, city2, ..., cityn]
'''
iter0, iter1 = itertools.tee(tsp_path)
next(iter1, None)
return sum(adjacency_matrix[city1][city2]
for city1, city2 in zip(iter0, iter1))
def get_evaluate_path(adjacency_matrix):
''' evaluate tsp path '''
return fcn.partial(__evaluate_path, adjacency_matrix)
def form_cycle(tsp_path, start_city=0):
'''
returns an iterator containing cycle
[start_city] -> tsp_path -> [start_city]
'''
start_list = [start_city]
return chain(start_list, tsp_path, start_list)
def __evaluate_cycle(adjacency_matrix, start_city, tsp_path):
''' evaluate path that is part of the cycle '''
assert start_city not in tsp_path
return __evaluate_path(adjacency_matrix, form_cycle(tsp_path, start_city))
def get_evaluate_cycle(adjacency_matrix, start_city=0):
''' evaluate path that is part of the cycle '''
return fcn.partial(__evaluate_cycle, adjacency_matrix, start_city)
def random_cities(num_cities=100):
'''
create graph for Travelling Salesman
Problem with n cities randomly distributed on
square [0, 1) x [0, 1)
'''
x = [random() for _ in range(num_cities)]
y = [random() for _ in range(num_cities)]
cities = list(zip(x, y))
return cities
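# A minimal end-to-end sketch (assumed example, not part of the original module) tying
# the helpers above together: generate random cities, build the distance graph and
# evaluate the cost of the cycle that visits the remaining cities in index order.
def _example_tsp_cycle_cost(num_cities=5):
    cities = random_cities(num_cities)
    graph = generate_graph(cities)
    evaluate_cycle = get_evaluate_cycle(graph, start_city=0)
    # city 0 is prepended and appended by form_cycle inside the evaluator
    return evaluate_cycle(range(1, num_cities))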
|
sglumac/pyislands
|
pyislands/permutation/tsp/graph.py
|
Python
|
mit
| 1,943
|
[
"VisIt"
] |
fd180ec16338965d7df084334873584375a5f605473fddeb1ac8eae3e534f7af
|
import os
import configparser
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
from pyspark.sql.types import *
import Utils
class Cohort:
#
# Filter out users by using values set in
# property files. If no value is set then the
# filter is not applied
#
def __init__(self, data, config, sqlContext):
self.utils = Utils.Utils(sqlContext)
self.sqlContext = sqlContext
# set instance variables based on properties
self.env = config.get('branch','env')
self.year_of_birth_min = config.get(self.env+'.cohort','year_of_birth_min')
self.year_of_birth_max = config.get(self.env+'.cohort','year_of_birth_max')
self.events_start_date = config.get(self.env+'.cohort','events_start_date')
self.events_end_date = config.get(self.env+'.cohort','events_end_date')
self.filter_dead = config.get(self.env+'.cohort','filter_dead')
self.filter_alive = config.get(self.env+'.cohort','filter_alive')
self.filter_male = config.get(self.env+'.cohort','filter_male')
self.filter_female = config.get(self.env+'.cohort','filter_female')
self.write_csv_output = config.get(self.env+'.cohort','write_csv_output')
self.csv_output_dir = config.get(self.env+'.cohort','csv_output_dir')
self.csv_output_codec = config.get(self.env+'.cohort','csv_output_codec')
self.filter_care_sites = config.get(self.env+'.cohort','filter_care_sites').split(",")
self.inpatient_only = config.get(self.env+'.cohort','inpatient_only')
self.inpatient_condition_primary_diagnosis = config.get(self.env+'.cohort','inpatient_condition_primary_diagnosis')
self.inpatient_procedure_primary_diagnosis = config.get(self.env+'.cohort','inpatient_procedure_primary_diagnosis')
if not self.filter_care_sites[0]:
self.filter_care_sites = []
self.include_care_sites = config.get(self.env+'.cohort','include_care_sites').split(",")
if not self.include_care_sites[0]:
self.include_care_sites = []
# apply filter functions
if self.year_of_birth_max is not None:
self.filterByMaxYearOfBirth(data)
if self.year_of_birth_min is not None:
self.filterByMinYearOfBirth(data)
if self.filter_male == "True":
self.filterMale(data)
if self.filter_female == "True":
self.filterFemale(data)
if len(self.filter_care_sites) > 0:
self.filterCareSites(data)
if len(self.include_care_sites) > 0:
self.includeCareSites(data)
if len(self.events_start_date) != 0 or len(self.events_end_date) != 0:
self.filterByEventDate(data, self.events_start_date, self.events_end_date)
# write the filtered data back out to files
if self.write_csv_output == "True":
self.utils.writeRawData(data,self.csv_output_codec,self.csv_output_dir)
# filter by users who have not had an inpatient stay
if self.inpatient_only == "True":
self.filterInpatientOnly(data)
# after filtering data, reset the caches
self.resetDataCache(data)
# reset data caches
def resetDataCache(self, data):
for key, value in data.items():
data[key].registerTempTable(key)
data[key].cache()
# filter by maximum year of birth
def filterByMaxYearOfBirth(self, data):
data['person'] = data['person'].filter(data['person'].YEAR_OF_BIRTH <= self.year_of_birth_max)
# filter by minimum year of birth
def filterByMinYearOfBirth(self, data):
data['person'] = data['person'].filter(data['person'].YEAR_OF_BIRTH >= self.year_of_birth_min)
#
    # filter events by start/end date range
# currently only filters data from condition_occurrence, procedure_occurrence, visit_occurrence, measurement,
# observation, and device_exposure
#
def filterByEventDate(self, data, start_date, end_date):
if start_date is not None and len(start_date) != 0:
data['condition_occurrence'] = data['condition_occurrence'].filter(data['condition_occurrence'].CONDITION_START_DATE >= start_date)
data['procedure_occurrence'] = data['procedure_occurrence'].filter(data['procedure_occurrence'].PROCEDURE_DATE >= start_date)
data['visit_occurrence'] = data['visit_occurrence'].filter(data['visit_occurrence'].VISIT_START_DATE >= start_date)
data['measurement'] = data['measurement'].filter(data['measurement'].MEASUREMENT_DATE >= start_date)
data['observation'] = data['observation'].filter(data['observation'].OBSERVATION_DATE >= start_date)
data['device_exposure'] = data['device_exposure'].filter(data['device_exposure'].DEVICE_EXPOSURE_START_DATE >= start_date)
if end_date is not None and len(end_date) != 0:
data['condition_occurrence'] = data['condition_occurrence'].filter(data['condition_occurrence'].CONDITION_END_DATE <= end_date)
data['procedure_occurrence'] = data['procedure_occurrence'].filter(data['procedure_occurrence'].PROCEDURE_DATE <= end_date)
data['visit_occurrence'] = data['visit_occurrence'].filter(data['visit_occurrence'].VISIT_END_DATE <= end_date)
data['measurement'] = data['measurement'].filter(data['measurement'].MEASUREMENT_DATE <= end_date)
data['observation'] = data['observation'].filter(data['observation'].OBSERVATION_DATE <= end_date)
data['device_exposure'] = data['device_exposure'].filter(data['device_exposure'].DEVICE_EXPOSURE_END_DATE <= end_date)
# filter out Male patients
def filterMale(self, data):
data['person'] = data['person'].filter(data['person'].GENDER_SOURCE_VALUE != 'Male')
# filter out Female patients
def filterFemale(self, data):
data['person'] = data['person'].filter(data['person'].GENDER_SOURCE_VALUE != 'Female')
# filter out dead patients
def filterDead(self, data):
pass
# filter out alive patients
def filterAlive(self, data):
pass
# filter out persons who have these primary care sites
def filterCareSites(self, data):
        # make sure null is not also filtered when filtering by the list, by adding an additional check
data['person'] = data['person'].filter(~data['person'].CARE_SITE_ID.isin(self.filter_care_sites) | data['person'].CARE_SITE_ID.isNull())
# include only persons that have primary care sites
def includeCareSites(self, data):
data['person'] = data['person'].where(data['person'].CARE_SITE_ID.isin(self.include_care_sites))
# filter out users that have not had a visit to the hospital
# first join person with visit_occurrence then drop the visit_occurrence column and get rid of duplicates
def filterNoHospitalVisit(self, data):
df = self.sqlContext.sql("select person.*, visit_occurrence.VISIT_OCCURRENCE_ID from person inner join visit_occurrence on person.PERSON_ID=visit_occurrence.PERSON_ID")
df = df.drop('VISIT_OCCURRENCE_ID')
df = df.dropDuplicates(['PERSON_ID'])
data['person'] = df
# filter out users that have not had an inpatient stay at the hospital
# first join person with visit_occurrence then drop the visit_occurrence column and get rid of duplicates
def filterInpatientOnly(self, data):
# get all patients that have an inpatient condition_occurrence
icd_co_temp = self.utils.filterDataframeByCodes(data['condition_occurrence'],
self.inpatient_condition_primary_diagnosis,
'CONDITION_TYPE_CONCEPT_ID')
icd_co_temp.registerTempTable('condition_occurrence_primary')
dfc = self.sqlContext.sql("select person.*, condition_occurrence_primary.CONDITION_TYPE_CONCEPT_ID from person inner join condition_occurrence_primary on person.PERSON_ID=condition_occurrence_primary.PERSON_ID")
dfc = dfc.drop('CONDITION_TYPE_CONCEPT_ID')
# get all patients that have an inpatient procedure_occurrence
icd_po_temp = self.utils.filterDataframeByCodes(data['procedure_occurrence'],
self.inpatient_procedure_primary_diagnosis,
'PROCEDURE_TYPE_CONCEPT_ID')
icd_po_temp.registerTempTable('procedure_occurrence_primary')
dfp = self.sqlContext.sql("select person.*, procedure_occurrence_primary.PROCEDURE_TYPE_CONCEPT_ID from person inner join procedure_occurrence_primary on person.PERSON_ID=procedure_occurrence_primary.PERSON_ID")
dfp = dfp.drop('PROCEDURE_TYPE_CONCEPT_ID')
# join the two patient dataframes
df = dfc.unionAll(dfp)
df = df.dropDuplicates(['PERSON_ID'])
data['person'] = df
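# A minimal hedged usage sketch (assumed example, not part of the original module) of
# how this class appears intended to be driven.  The property file name and the layout
# of the `data` dict of Spark DataFrames (OMOP table name -> DataFrame) are assumptions
# for illustration only; the config must provide the sections read in __init__.
def _example_build_cohort(data, sqlContext, properties_path='cohort.properties'):
    config = configparser.ConfigParser()
    config.read(properties_path)
    # Cohort filters the DataFrames in `data` in place according to the config
    return Cohort(data, config, sqlContext)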
|
opme/SurgeonScorecard
|
python/scorecard/Cohort.py
|
Python
|
apache-2.0
| 8,735
|
[
"VisIt"
] |
072d43448d379d6a268c9b26eb2e5ff1bc0198d4999ebb815f46fc3949a4a069
|
"""Adding icons and menu items using the freedesktop.org system.
(xdg = X Desktop Group)
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
import shutil, os, tempfile
from logging import info, warn
from zeroinstall import SafeException
from zeroinstall.support import basedir
from zeroinstall.injector import namespaces
_template = """[Desktop Entry]
# This file was generated by 0install.
# See the Zero Install project for details: http://0install.net
Type=Application
Version=1.0
Name=%(name)s
Comment=%(comment)s
Exec=%(0launch)s -- %(iface)s %%f
Categories=Application;%(category)s
"""
_icon_template = """Icon=%s
"""
def add_to_menu(iface, icon_path, category, zlaunch=None):
"""Write a .desktop file for this application.
@param iface: the program being added
@param icon_path: the path of the icon, or None
@param category: the freedesktop.org menu category"""
tmpdir = tempfile.mkdtemp(prefix = 'zero2desktop-')
try:
desktop_name = os.path.join(tmpdir, 'zeroinstall-%s.desktop' % iface.get_name().lower().replace(os.sep, '-').replace(' ', ''))
desktop = open(desktop_name, 'w')
desktop.write(_template % {'name': iface.get_name(),
'comment': iface.summary,
'0launch': zlaunch or '0launch',
'iface': iface.uri,
'category': category})
if icon_path:
desktop.write(_icon_template % icon_path)
if len(iface.get_metadata(namespaces.XMLNS_IFACE, 'needs-terminal')):
desktop.write('Terminal=true\n')
desktop.close()
status = os.spawnlp(os.P_WAIT, 'xdg-desktop-menu', 'xdg-desktop-menu', 'install', desktop_name)
finally:
shutil.rmtree(tmpdir)
if status:
raise SafeException(_('Failed to run xdg-desktop-menu (error code %d)') % status)
def discover_existing_apps():
"""Search through the configured XDG datadirs looking for .desktop files created by L{add_to_menu}.
@return: a map from application URIs to .desktop filenames"""
already_installed = {}
for d in basedir.load_data_paths('applications'):
for desktop_file in os.listdir(d):
if desktop_file.startswith('zeroinstall-') and desktop_file.endswith('.desktop'):
full = os.path.join(d, desktop_file)
try:
for line in open(full):
line = line.strip()
if line.startswith('Exec=0launch '):
bits = line.split(' -- ', 1)
if ' ' in bits[0]:
uri = bits[0].split(' ', 1)[1] # 0launch URI -- %u
else:
uri = bits[1].split(' ', 1)[0].strip() # 0launch -- URI %u
already_installed[uri] = full
break
else:
info(_("Failed to find Exec line in %s"), full)
except Exception as ex:
warn(_("Failed to load .desktop file %(filename)s: %(exceptions"), {'filename': full, 'exception': ex})
return already_installed
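# A minimal standalone sketch (assumed example, not part of the original module) of the
# Exec-line convention recognised by discover_existing_apps() above, useful when
# checking the format written by add_to_menu().
def _example_parse_exec_line(line):
	"""Return the interface URI from a '0launch' Exec= line, or None."""
	line = line.strip()
	if not line.startswith('Exec=0launch '):
		return None
	bits = line.split(' -- ', 1)
	if ' ' in bits[0]:
		return bits[0].split(' ', 1)[1]          # Exec=0launch URI -- %u
	return bits[1].split(' ', 1)[0].strip()      # Exec=0launch -- URI %u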
|
dabrahams/zeroinstall
|
zeroinstall/gtkui/xdgutils.py
|
Python
|
lgpl-2.1
| 2,917
|
[
"VisIt"
] |
62f20b69abfea3fc5792d79fa0f6c6f03c1db9024f5e30d380288ef8a19d3f20
|
# -*- coding: utf-8 -*-
##############################################################################
# 2014 E2OpenPlugins #
# #
# This file is open source software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
##############################################################################
# Simulate the oe-a boxbranding module (Only functions required by OWIF) #
##############################################################################
from Plugins.Extensions.OpenWebif.__init__ import _
from Components.About import about
from socket import has_ipv6
from Tools.Directories import fileExists, pathExists
import string
import os, hashlib
from os import urandom
from time import time
try:
from Components.About import about
except:
pass
tpmloaded = 1
try:
from enigma import eTPM
if not hasattr(eTPM, 'getData'):
tpmloaded = 0
except:
tpmloaded = 0
def validate_certificate(cert, key):
buf = decrypt_block(cert[8:], key)
if buf is None:
return None
return buf[36:107] + cert[139:196]
def get_random():
try:
xor = lambda a,b: ''.join(chr(ord(c)^ord(d)) for c,d in zip(a,b*100))
random = urandom(8)
x = str(time())[-8:]
result = xor(random, x)
return result
except:
return None
def bin2long(s):
return reduce( lambda x,y:(x<<8L)+y, map(ord, s))
def long2bin(l):
res = ""
for byte in range(128):
res += chr((l >> (1024 - (byte + 1) * 8)) & 0xff)
return res
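# A minimal self-check sketch (assumed example, not part of the original module, Python 2
# like the rest of this file) of the helpers above: bin2long/long2bin convert between
# 128-byte big-endian strings and the 1024-bit integers used by the RSA step below.
def _example_bin_long_roundtrip():
	block = '\x00' * 127 + '\x2a'          # 128-byte block encoding the value 42
	assert bin2long(block) == 42
	assert long2bin(bin2long(block)) == block
	return True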
def rsa_pub1024(src, mod):
return long2bin(pow(bin2long(src), 65537, bin2long(mod)))
def decrypt_block(src, mod):
if len(src) != 128 and len(src) != 202:
return None
dest = rsa_pub1024(src[:128], mod)
hash = hashlib.sha1(dest[1:107])
if len(src) == 202:
hash.update(src[131:192])
result = hash.digest()
if result == dest[107:127]:
return dest
return None
def tpm_check():
try:
tpm = eTPM()
rootkey = ['\x9f', '|', '\xe4', 'G', '\xc9', '\xb4', '\xf4', '#', '&', '\xce', '\xb3', '\xfe', '\xda', '\xc9', 'U', '`', '\xd8', '\x8c', 's', 'o', '\x90', '\x9b', '\\', 'b', '\xc0', '\x89', '\xd1', '\x8c', '\x9e', 'J', 'T', '\xc5', 'X', '\xa1', '\xb8', '\x13', '5', 'E', '\x02', '\xc9', '\xb2', '\xe6', 't', '\x89', '\xde', '\xcd', '\x9d', '\x11', '\xdd', '\xc7', '\xf4', '\xe4', '\xe4', '\xbc', '\xdb', '\x9c', '\xea', '}', '\xad', '\xda', 't', 'r', '\x9b', '\xdc', '\xbc', '\x18', '3', '\xe7', '\xaf', '|', '\xae', '\x0c', '\xe3', '\xb5', '\x84', '\x8d', '\r', '\x8d', '\x9d', '2', '\xd0', '\xce', '\xd5', 'q', '\t', '\x84', 'c', '\xa8', ')', '\x99', '\xdc', '<', '"', 'x', '\xe8', '\x87', '\x8f', '\x02', ';', 'S', 'm', '\xd5', '\xf0', '\xa3', '_', '\xb7', 'T', '\t', '\xde', '\xa7', '\xf1', '\xc9', '\xae', '\x8a', '\xd7', '\xd2', '\xcf', '\xb2', '.', '\x13', '\xfb', '\xac', 'j', '\xdf', '\xb1', '\x1d', ':', '?']
random = None
result = None
l2r = False
l2k = None
l3k = None
l2c = tpm.getData(eTPM.DT_LEVEL2_CERT)
if l2c is None:
return 0
l2k = validate_certificate(l2c, rootkey)
if l2k is None:
return 0
l3c = tpm.getData(eTPM.DT_LEVEL3_CERT)
if l3c is None:
return 0
l3k = validate_certificate(l3c, l2k)
if l3k is None:
return 0
random = get_random()
if random is None:
return 0
value = tpm.computeSignature(random)
result = decrypt_block(value, l3k)
if result is None:
return 0
if result [80:88] != random:
return 0
return 1
except:
return 0
def getAllInfo():
info = {}
brand = "unknown"
model = "unknown"
procmodel = "unknown"
orgdream = 0
if tpmloaded:
orgdream = tpm_check()
if fileExists("/proc/stb/info/hwmodel"):
brand = "DAGS"
f = open("/proc/stb/info/hwmodel",'r')
procmodel = f.readline().strip()
f.close()
if (procmodel.startswith("optimuss") or procmodel.startswith("pingulux")):
brand = "Edision"
model = procmodel.replace("optimmuss", "Optimuss ").replace("plus", " Plus").replace(" os", " OS")
elif (procmodel.startswith("fusion") or procmodel.startswith("purehd")):
brand = "Xsarius"
if procmodel == "fusionhd":
model = procmodel.replace("fusionhd", "Fusion HD")
elif procmodel == "fusionhdse":
model = procmodel.replace("fusionhdse", "Fusion HD SE")
elif procmodel == "purehd":
model = procmodel.replace("purehd", "PureHD")
elif fileExists("/proc/stb/info/azmodel"):
brand = "AZBox"
f = open("/proc/stb/info/model",'r') # To-Do: Check if "model" is really correct ...
procmodel = f.readline().strip()
f.close()
model = procmodel.lower()
elif fileExists("/proc/stb/info/gbmodel"):
brand = "GigaBlue"
f = open("/proc/stb/info/gbmodel",'r')
procmodel = f.readline().strip()
f.close()
model = procmodel.upper().replace("GBQUAD", "Quad").replace("PLUS", " Plus")
elif fileExists("/proc/stb/info/vumodel") and not fileExists("/proc/stb/info/boxtype"):
brand = "Vu+"
f = open("/proc/stb/info/vumodel",'r')
procmodel = f.readline().strip()
f.close()
model = procmodel.title().replace("olose", "olo SE").replace("olo2se", "olo2 SE").replace("2", "²")
elif fileExists("/proc/boxtype"):
f = open("/proc/boxtype",'r')
procmodel = f.readline().strip().lower()
f.close()
if procmodel in ("adb2850", "adb2849", "bska", "bsla", "bxzb", "bzzb"):
brand = "Advanced Digital Broadcast"
if procmodel in ("bska", "bxzb"):
model = "ADB 5800S"
elif procmodel in ("bsla", "bzzb"):
model = "ADB 5800SX"
elif procmodel == "adb2849":
model = "ADB 2849ST"
else:
model = "ADB 2850ST"
elif procmodel in ("esi88", "uhd88"):
brand = "Sagemcom"
if procmodel == "uhd88":
model = "UHD 88"
else:
model = "ESI 88"
elif fileExists("/proc/stb/info/boxtype"):
f = open("/proc/stb/info/boxtype",'r')
procmodel = f.readline().strip().lower()
f.close()
if procmodel.startswith("et"):
if procmodel == "et7000mini":
brand = "Galaxy Innovations"
model = "ET-7000 Mini"
else:
brand = "Xtrend"
model = procmodel.upper()
elif procmodel.startswith("xpeed"):
brand = "Golden Interstar"
model = procmodel
elif procmodel.startswith("xp"):
brand = "MaxDigital"
model = procmodel
elif procmodel.startswith("ixuss"):
brand = "Medialink"
model = procmodel.replace(" ", "")
elif procmodel.startswith("formuler"):
brand = "Formuler"
model = procmodel.replace("formuler","")
elif procmodel.startswith("g300"):
brand = "Miraclebox"
model = "Premiun twin+"
elif procmodel == "7000s":
brand = "Miraclebox"
model = "Premium micro"
elif procmodel.startswith("ini"):
if procmodel.endswith("9000ru"):
brand = "Sezam"
model = "Marvel"
elif procmodel.endswith("5000ru"):
brand = "Sezam"
model = "hdx"
elif procmodel.endswith("1000ru"):
brand = "Sezam"
model = "hde"
elif procmodel.endswith("5000sv"):
brand = "Miraclebox"
model = "mbtwin"
elif procmodel.endswith("1000sv"):
brand = "Miraclebox"
model = "mbmini"
elif procmodel.endswith("1000de"):
brand = "Golden Interstar"
model = "Xpeed LX"
elif procmodel.endswith("9000de"):
brand = "Golden Interstar"
model = "Xpeed LX3"
elif procmodel.endswith("1000lx"):
brand = "Golden Interstar"
model = "Xpeed LX"
elif procmodel.endswith("de"):
brand = "Golden Interstar"
elif procmodel.endswith("1000am"):
brand = "Atemio"
model = "5x00"
else:
brand = "Venton"
model = "HDx"
elif procmodel.startswith("unibox-"):
brand = "Venton"
model = "HDe"
elif procmodel == "hd1100":
brand = "Mut@nt"
model = "hd1100"
elif procmodel == "hd1200":
brand = "Mut@nt"
model = "hd1200"
elif procmodel == "hd1265":
brand = "Mut@nt"
model = "hd1265"
elif procmodel == "hd2400":
brand = "Mut@nt"
model = "hd2400"
elif procmodel == "hd51":
brand = "Mut@nt"
model = "hd51"
elif procmodel == "hd500c":
brand = "Mut@nt"
model = "hd500c"
elif procmodel == "arivalink200":
brand = "Ferguson"
model = "Ariva @Link 200"
elif procmodel.startswith("spark"):
brand = "Fulan"
if procmodel == "spark7162":
model = "Spark 7162"
else:
model = "Spark"
elif procmodel == "spycat":
brand = "Spycat"
model = "spycat"
elif procmodel == "spycatmini":
brand = "Spycat"
model = "spycatmini"
elif procmodel == "wetekplay":
brand = "WeTeK"
model = procmodel
elif procmodel.startswith("osm"):
brand = "Edision"
model = procmodel
elif procmodel == "h5":
brand = "Zgemma"
model = "H5"
elif procmodel == "lc":
brand = "Zgemma"
model = "LC"
elif fileExists("/proc/stb/info/model"):
f = open("/proc/stb/info/model",'r')
procmodel = f.readline().strip().lower()
f.close()
if procmodel == "tf7700hdpvr":
brand = "Topfield"
model = "TF7700 HDPVR"
elif procmodel == "dsi87":
brand = "Sagemcom"
model = "DSI 87"
elif procmodel.startswith("spark"):
brand = "Fulan"
if procmodel == "spark7162":
model = "Spark 7162"
else:
model = "Spark"
elif (procmodel.startswith("dm") and not procmodel == "dm8000"):
brand = "Dream Multimedia"
model = procmodel.replace("dm", "DM", 1)
# A "dm8000" is only a Dreambox if it passes the tpm verification:
elif procmodel == "dm8000" and orgdream:
brand = "Dream Multimedia"
model = "DM8000"
else:
model = procmodel
if fileExists("/etc/.box"):
distro = "HDMU"
f = open("/etc/.box",'r')
tempmodel = f.readline().strip().lower()
f.close()
if tempmodel.startswith("ufs") or model.startswith("ufc"):
brand = "Kathrein"
model = tempmodel.title()
procmodel = tempmodel
elif tempmodel.startswith("spark"):
brand = "Fulan"
model = tempmodel.title()
procmodel = tempmodel
elif tempmodel.startswith("xcombo"):
brand = "EVO"
model = "enfinityX combo plus"
procmodel = "vg2000"
elif tempmodel.startswith("sf"):
brand = "Octagon"
model = tempmodel
if tempmodel == "sf3038":
procmodel = "g300"
elif tempmodel == "sf108":
procmodel = "vg5000"
elif tempmodel == "sf98":
procmodel = "yh7362"
elif tempmodel == "sf228":
procmodel = tempmodel
elif tempmodel == "sf208":
procmodel = tempmodel
elif tempmodel.startswith("atemio"):
brand = "Atemio"
if tempmodel == "atemio6000":
model = "6000"
elif tempmodel == "atemio6100":
model = "6100"
else:
model = "Nemesis"
procmodel = tempmodel
elif tempmodel.startswith("wetek"):
brand = "Wetek"
model = "Play"
procmodel = tempmodel
elif tempmodel in ("xpeedlx", "xpeedlx3"):
brand = "Golden Media"
model = tempmodel
procmodel = "xpeedlx"
elif tempmodel.startswith("xpeedlxc"):
brand = "Golden Interstar"
model = tempmodel
procmodel = "xpeedlxc"
type = procmodel
if type in ("et9000", "et9100", "et9200", "et9500"):
type = "et9x00"
elif type in ("et5000", "et6000", "et6x00"):
type = "et5x00"
elif type == "et4000":
type = "et4x00"
elif type == "xp1000":
type = "xp1000"
elif type in ("bska", "bxzb"):
type = "nbox_white"
elif type in ("bsla", "bzzb"):
type = "nbox"
elif type == "sagemcom88":
type = "esi88"
elif type == "vg2000":
type = "xcombo"
elif type in ("vg5000", "g300"):
type = "sf3038"
elif type in ("tf7700hdpvr", "topf"):
type = "topf"
info['brand'] = brand
info['model'] = model
info['procmodel'] = procmodel
info['type'] = type
remote = "dmm"
if procmodel in ("solo", "duo", "uno", "solo2", "solose", "zero", "solo4k", "uno4k", "ultimo4k"):
remote = "vu_normal"
elif procmodel == "duo2":
remote = "vu_duo2"
elif procmodel == "ultimo":
remote = "vu_ultimo"
elif procmodel == "e3hd":
remote = "e3hd"
elif procmodel in ("et9x00", "et9000", "et9100", "et9200", "et9500"):
remote = "et9x00"
elif procmodel in ("et5x00", "et5000", "et6x00", "et6000"):
remote = "et5x00"
elif procmodel in ("et4x00", "et4000"):
remote = "et4x00"
elif procmodel == "et6500":
remote = "et6500"
elif procmodel in ("et8x00", "et8000", "et8500", "et8500s","et1x000", "et10000"):
remote = "et8000"
elif procmodel in ("et7x00", "et7000", "et7500"):
remote = "et7x00"
elif procmodel == "et7000mini":
remote = "et7000mini"
elif procmodel == "gbquad":
remote = "gigablue"
elif procmodel == "gbquadplus":
remote = "gbquadplus"
elif procmodel == "gbquad4k":
remote = "gbquad4k"
elif procmodel in ("formuler1", "formuler3", "formuler4"):
remote = "formuler1"
elif procmodel in ("azboxme", "azboxminime", "me", "minime"):
remote = "me"
elif procmodel in ("optimussos1", "optimussos1plus", "optimussos2", "optimussos2plus"):
remote = "optimuss"
elif procmodel in ("premium", "premium+"):
remote = "premium"
elif procmodel in ("elite", "ultra"):
remote = "elite"
elif procmodel in ("ini-1000", "ini-1000ru"):
remote = "ini-1000"
elif procmodel in ("ini-1000sv", "ini-5000sv", "ini-9000de"):
remote = "miraclebox"
elif procmodel in ("7000s"):
remote = "miraclebox2"
elif procmodel == "ini-3000":
remote = "ini-3000"
elif procmodel in ("ini-7012", "ini-7000", "ini-5000", "ini-5000ru"):
remote = "ini-7000"
elif procmodel.startswith("spark"):
remote = "spark"
elif procmodel == "xp1000":
remote = "xp1000"
elif procmodel in ("xpeedlx", "xpeedlx3"):
remote = "xpeedlx"
elif procmodel.startswith("xpeedlxc"):
remote = "xpeedlxc"
elif procmodel in ("adb2850", "adb2849", "bska", "bsla", "bxzb", "bzzb", "esi88", "uhd88", "dsi87", "arivalink200"):
remote = "nbox"
elif procmodel in ("hd1100", "hd1200", "hd1265", "hd51", "hd500c"):
remote = "hd1x00"
elif procmodel == "hd2400":
remote = "hd2400"
elif procmodel in ("spycat", "spycatmini"):
remote = "spycat"
elif procmodel.startswith("ixuss"):
remote = procmodel.replace(" ", "")
elif procmodel == "vg2000":
remote = "xcombo"
elif procmodel == "vg5000":
remote = "sf3038"
elif procmodel in ("sf208", "sf228"):
remote = "sf2x8"
elif procmodel == "yh7362":
remote = "sf98"
elif procmodel.startswith("atemio"):
remote = "atemio"
elif procmodel == "dm8000" and orgdream:
remote = "dmm1"
elif procmodel in ("dm7080", "dm7020hd", "dm7020hdv2", "dm800sev2", "dm500hdv2", "dm820"):
remote = "dmm2"
elif procmodel == "wetekplay":
remote = procmodel
elif procmodel.startswith("osm"):
remote = "osmini"
elif procmodel in ("fusionhd"):
remote = procmodel
elif procmodel in ("fusionhdse"):
remote = procmodel
elif procmodel in ("purehd"):
remote = procmodel
elif procmodel in ("h5", "lc"):
remote = "h5"
info['remote'] = remote
kernel = int(about.getKernelVersionString()[0])
distro = "unknown"
imagever = "unknown"
imagebuild = ""
driverdate = "unknown"
# Assume OE 1.6
oever = "OE 1.6"
if kernel>2:
oever = "OE 2.0"
if fileExists("/etc/.box"):
distro = "HDMU"
oever = "Up to date"
elif fileExists("/etc/bhversion"):
distro = "Black Hole"
f = open("/etc/bhversion",'r')
imagever = f.readline().strip()
f.close()
if kernel>2:
oever = "OpenVuplus 2.1"
elif fileExists("/etc/vtiversion.info"):
distro = "VTi-Team Image"
f = open("/etc/vtiversion.info",'r')
imagever = f.readline().strip().replace("VTi-Team Image ", "").replace("Release ", "").replace("v.", "")
f.close()
oever = "OE 1.6"
imagelist = imagever.split('.')
imagebuild = imagelist.pop()
imagever = ".".join(imagelist)
if kernel>2:
oever = "OpenVuplus 2.1"
if ((imagever == "5.1") or (imagever[0] > 5)):
oever = "OpenVuplus 2.1"
elif fileExists("/var/grun/grcstype"):
distro = "Graterlia OS"
try:
imagever = about.getImageVersionString()
except:
pass
# ToDo: If your distro gets detected as OpenPLi, feel free to add a detection for your distro here ...
else:
# OE 2.2 uses apt, not opkg
if not fileExists("/etc/opkg/all-feed.conf"):
oever = "OE 2.2"
else:
try:
f = open("/etc/opkg/all-feed.conf",'r')
oeline = f.readline().strip().lower()
f.close()
distro = oeline.split( )[1].replace("-all","")
except:
pass
if distro == "openpli":
imagever = "2.1"
# Todo: Detect OpenPLi 3.0
if has_ipv6:
# Python IPv6 support first appears in the OpenPLi 4.0 images, so use it as a version heuristic
imagever = "4.0"
oever = "PLi-OE"
imagelist = imagever.split('.')
imagebuild = imagelist.pop()
imagever = ".".join(imagelist)
elif distro == "openrsi":
oever = "PLi-OE"
else:
try:
imagever = about.getImageVersionString()
except:
pass
if (distro == "unknown" and brand == "Vu+" and fileExists("/etc/version")):
# Since OE-A uses boxbranding and bh or vti can be detected, there isn't much else left for Vu+ boxes
distro = "Vu+ original"
f = open("/etc/version",'r')
imagever = f.readline().strip()
f.close()
if kernel>2:
oever = "OpenVuplus 2.1"
# reporting the installed dvb-module version is as close as we get without too much hassle
driverdate = 'unknown'
try:
driverdate = os.popen('/usr/bin/opkg -V0 list_installed *dvb-modules*').readline().split( )[2]
except:
try:
driverdate = os.popen('/usr/bin/opkg -V0 list_installed *dvb-proxy*').readline().split( )[2]
except:
try:
driverdate = os.popen('/usr/bin/opkg -V0 list_installed *kernel-core-default-gos*').readline().split( )[2]
except:
pass
info['oever'] = oever
info['distro'] = distro
info['imagever'] = imagever
info['imagebuild'] = imagebuild
info['driverdate'] = driverdate
return info
STATIC_INFO_DIC = getAllInfo()
def getMachineBuild():
return STATIC_INFO_DIC['procmodel']
def getMachineBrand():
return STATIC_INFO_DIC['brand']
def getMachineName():
return STATIC_INFO_DIC['model']
def getMachineProcModel():
return STATIC_INFO_DIC['procmodel']
def getBoxType():
return STATIC_INFO_DIC['type']
def getOEVersion():
return STATIC_INFO_DIC['oever']
def getDriverDate():
return STATIC_INFO_DIC['driverdate']
def getImageVersion():
return STATIC_INFO_DIC['imagever']
def getImageBuild():
return STATIC_INFO_DIC['imagebuild']
def getImageDistro():
return STATIC_INFO_DIC['distro']
class rc_model:
def getRcFolder(self):
return STATIC_INFO_DIC['remote']
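# Added usage sketch (assumption about how other OpenWebif code consumes this module;
# the import path is inferred from the plugin layout):
#   from Plugins.Extensions.OpenWebif.controllers.models.owibranding import getBoxType, getImageDistro
#   box = getBoxType()        # e.g. "et9x00"; resolved once at import time via STATIC_INFO_DIC
#   distro = getImageDistro() # e.g. "openpli"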
|
HDMU/e2openplugin-OpenWebif
|
plugin/controllers/models/owibranding.py
|
Python
|
gpl-2.0
| 18,232
|
[
"Galaxy"
] |
b755aad72826f48db65b3f68acde2e801fab1ca4519793ac08154da1598efa20
|
__author__ = 'kpaskov'
import re
from urllib.parse import urlparse
from behave import step
from selenium.common.exceptions import NoSuchElementException
@step('I visit "{url}" for "{obj}"')
def visit_page_for(context, url, obj):
context.browser.get(context.base_url + url.replace('?', obj))
@step('I click the button with id "{button_id}"')
def click_button_with_id(context, button_id):
try:
button = context.browser.find_element_by_id(button_id)
button.click()
except NoSuchElementException:
assert 0, 'No element with id ' + button_id
@step('I should see an element with id "{element_id}"')
def should_see_element_with_id(context, element_id):
try:
context.browser.find_element_by_id(element_id)
except NoSuchElementException:
assert 0, 'No element with id ' + element_id
@step('I should not see an element with id "{element_id}"')
def should_not_see_element_with_id(context, element_id):
try:
context.browser.find_element_by_id(element_id)
assert 0, 'Element with id ' + element_id + ' is present.'
except NoSuchElementException:
pass
@step('I should see an element with class_name "{element_class}"')
def should_see_element_with_class_name(context, element_class):
try:
context.browser.find_element_by_class_name(element_class)
except NoSuchElementException:
assert 0, 'No element with class ' + element_class
@step('I should see an element with css_selector "{css_selector}"')
def should_see_element_with_css_selector(context, css_selector):
try:
context.browser.find_element_by_css_selector(css_selector)
except:
assert 0, 'No element with CSS selector ' + css_selector
@step('I should see an element "{element_id}" with text "{text}"')
def should_see_element_with_id_with_text(context, element_id, text):
try:
element = context.browser.find_element_by_id(element_id)
assert element.text == text, 'Text does not match: ' + element.text
except NoSuchElementException:
assert 0, 'No element with id ' + element_id
@step('the title should be "{title}"')
def title_should_be(context, title):
assert context.browser.title == title, 'Wrong title'
@step('the table with id "{table_id}" should have rows in it')
def table_should_have_rows(context, table_id):
try:
num_rows = len(context.browser.find_elements_by_xpath("//table[@id='" + table_id + "']/tbody/tr"))
assert num_rows > 0, 'Only ' + str(num_rows) + ' entries in table.'
except NoSuchElementException:
assert 0, 'No element with id.'
@step('the reference list with id "{reference_list_id}" should have rows in it')
def reference_list_should_have_rows(context, reference_list_id):
try:
num_rows = len(context.browser.find_elements_by_xpath("//ul[@id='" + reference_list_id + "']/li"))
assert num_rows > 1, 'Only ' + str(num_rows) + ' entries in reference list.'
except NoSuchElementException:
assert 0, 'No element with id.'
@step('the resource list with id "{resource_list_id}" should have rows in it')
def resource_list_should_have_rows(context, resource_list_id):
try:
num_rows = len(context.browser.find_elements_by_xpath("//p[@id='" + resource_list_id + "']/a"))
assert num_rows > 0, 'Only ' + str(num_rows) + ' entries in resource list.'
except NoSuchElementException:
assert 0, 'No element with id.'
@step('the network with id "{network_id}" should appear')
def network_should_appear(context, network_id):
try:
assert len(context.browser.find_elements_by_xpath("//div[@id='" + network_id + "']/div/canvas")) == 5, 'Network not drawn.'
except NoSuchElementException:
assert 0, 'No element with id.'
@step('the button with id "{button_id}" should be disabled')
def button_with_id_should_be_disabled(context, button_id):
try:
button = context.browser.find_element_by_id(button_id)
assert button.get_attribute('disabled'), 'Button is not disabled.'
old_title = context.browser.title
button.click()
assert context.browser.title == old_title, 'Disabled button took us to a different page.'
except NoSuchElementException:
assert 0, 'No element with id ' + button_id
@step('I should download a file named "{filename}"')
def download_a_file_named(context, filename):
pass
@step('I wait {num_sec} seconds')
def wait(context, num_sec):
from time import sleep
sleep(float(num_sec))
assert True
@step('I should not see a loader')
def should_not_see_a_loader(context):
try:
context.browser.find_element_by_class_name('loader')
assert 0, 'Loader is still visible.'
except NoSuchElementException:
pass
@step('I should see the text "{text}"')
def should_see_text(context, text):
src = context.browser.page_source
text_found = re.search(text, src)
if text_found:
pass
else:
assert 0, 'Text not present.'
@step('I search {query}')
def type_text(context, query):
search_container = context.browser.find_element_by_id('searchform')
input_el = search_container.find_element_by_id('txt_search_container').find_element_by_id('txt_search')
search_button = search_container.find_element_by_id('search-submit-btn')
input_el.click()
input_el.send_keys(query.strip('"'))
search_button.click()
pass
@step('I should be at {desired_url}')
def test_url(context, desired_url):
desired_url = desired_url.strip('"')
absolute_url = context.browser.current_url
url_obj = urlparse(absolute_url)
query = url_obj.query
if (query != ''):
query = '?' + query
current_url = url_obj.path + query
if current_url == desired_url:
pass
else:
assert 0, "Current URL doesn't match desired URL."
|
yeastgenome/SGDFrontend
|
src/sgd/frontend/yeastgenome/tests/features/steps/__init__.py
|
Python
|
mit
| 5,859
|
[
"VisIt"
] |
601b324aab92bcf34a50f35f665551c411eaf113e2f439841edd46737cfa47ee
|
# coding=utf8
#
# Copyright 2013 Dreamlab Onet.pl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 3.0.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, visit
#
# http://www.gnu.org/licenses/lgpl.txt
#
import logging
_conf = None
def get_config():
global _conf
if _conf is None:
_conf = dict(
logging_level=logging.NOTSET,
trim_len=80,
random_port_tries=3,
test_method_prefix='test'
)
return _conf
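# Added usage note: get_config() returns the shared module-level dict, so callers can
# tweak settings in place before running tests, e.g. (illustrative):
#   import logging
#   from rmock.config import get_config
#   get_config()['logging_level'] = logging.DEBUG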
|
tikan/rmock
|
src/rmock/config.py
|
Python
|
lgpl-3.0
| 946
|
[
"VisIt"
] |
7b40acc48edfba784abea4a62c48290452489c39c99dda73cb7cf18aaabac3eb
|
#Raspberry Pi, Minecraft Bombs - Turn any block into a bomb!
#import the minecraft.py module from the minecraft directory
import minecraft.minecraft as minecraft
#import minecraft block module
import minecraft.block as block
#import time, so delays can be used
import time
#import threading, so threads can be used
import threading
class ExplodingBlock(threading.Thread):
def __init__(self, pos, fuseInSecs, blastRadius):
#Setup object
threading.Thread.__init__(self)
self.pos = pos
self.fuseInSecs = fuseInSecs
self.blastRadius = blastRadius
def run(self):
#Open connect to minecraft
mc = minecraft.Minecraft.create()
#Get values
pos = self.pos
blastRadius = self.blastRadius
#Explode the block!
# get block type
blockType = mc.getBlock(pos.x, pos.y, pos.z)
# flash the block
for fuse in range(0, self.fuseInSecs):
mc.setBlock(pos.x, pos.y, pos.z, block.AIR)
time.sleep(0.5)
mc.setBlock(pos.x, pos.y, pos.z, blockType)
time.sleep(0.5)
# create sphere of air
for x in range(blastRadius*-1,blastRadius):
for y in range(blastRadius*-1, blastRadius):
for z in range(blastRadius*-1,blastRadius):
if x**2 + y**2 + z**2 < blastRadius**2:
mc.setBlock(pos.x + x, pos.y + y, pos.z + z, block.AIR)
if __name__ == "__main__":
time.sleep(5)
#Connect to minecraft by creating the minecraft object
# - minecraft needs to be running and in a game
mc = minecraft.Minecraft.create()
#Post a message to the minecraft chat window
mc.postToChat("Minecraft Bombs, Hit (Right Click) a Block, www.stuffaboutcode.com")
#loop until Ctrl C
try:
while True:
#Get the block hit events
blockHits = mc.events.pollBlockHits()
# if a block has been hit
if blockHits:
# for each block that has been hit
for blockHit in blockHits:
#Create and run the exploding block class in its own thread
# pass the position of the block, fuse time in seconds and blast radius
# threads are used so multiple exploding blocks can be created
explodingBlock = ExplodingBlock(blockHit.pos, 3, 3)
explodingBlock.daemon = True
explodingBlock.start()
time.sleep(0.1)
except KeyboardInterrupt:
print("stopped")import minecraft as minecraft
mc = minecraft.Minecraft.create()
while True:
blockHits = mc.events.pollBlockHits()
if blockHits:
for blockHit in blockHits:
print blockHit.pos.x
print blockHit.pos.y
print blockHit.pos.z
print blockHit.face
print blockHit.type
print blockHit.entityId
|
mohsraspi/mhscs14
|
tristan/bomb.py
|
Python
|
gpl-2.0
| 3,239
|
[
"BLAST"
] |
b709fc48e0fcc3360b17fcea135736ab06da154c8449e7339d851ab4dfd313ad
|
# gpdm.py: Implementation of GPDM for single sequence
# This can be used as a starting point for further implementations
# Author: Nishanth
# Date: 2017/01/17
# Source: GPflow source code
import numpy as np
import tensorflow as tf
from GPflow.tf_wraps import eye
from GPflow.model import GPModel
from GPflow._settings import settings
from GPflow.mean_functions import Zero
from GPflow.param import Param, DataHolder
from GPflow.densities import multivariate_normal
from GPflow import kernels, transforms, likelihoods
float_type = settings.dtypes.float_type
def PCA_reduce(X, Q):
"""
A helpful function for linearly reducing the dimensionality of the data X
to Q.
:param X: data array of size N (number of points) x D (dimensions)
:param Q: Number of latent dimensions, Q < D
:return: PCA projection array of size N x Q.
"""
assert Q <= X.shape[1], 'Cannot have more latent dimensions than observed'
evals, evecs = np.linalg.eigh(np.cov(X.T))
i = np.argsort(evals)[::-1]
W = evecs[:, i]
W = W[:, :Q]
return (X - X.mean(0)).dot(W)
class GPDM(GPModel):
"""
Gaussian Process Dynamical Model.
This is a vanilla implementation of GPDM for a single, uniformly sampled
sequence, with mean prediction only.
"""
def __init__(self, Y, latent_dim, X_mean=None, map_kern=None, dyn_kern=None):
"""
Initialise GPDM object. This method only works with a Gaussian likelihood.
:param Y: data matrix (N x D)
:param latent_dim: number of latent dimensions Q
:param X_mean: latent positions (N x Q), by default initialized using PCA.
:param map_kern: kernel for the latent-to-observation mapping, by default RBF.
:param dyn_kern: kernel for the latent dynamics, by default RBF + Linear.
"""
# initialize latent_positions
if X_mean is None:
X_mean = PCA_reduce(Y, latent_dim)
# define kernel functions
if map_kern is None:
map_kern = kernels.RBF(latent_dim)
if dyn_kern is None:
dyn_kern = kernels.RBF(latent_dim) + kernels.Linear(latent_dim)
# initialize variables
self.num_latent = X_mean.shape[1]
# initialize parent GPModel
mean_function = Zero()
likelihood = likelihoods.Gaussian()
Y = DataHolder(Y, on_shape_change='pass')
X = DataHolder(X_mean, on_shape_change='pass')
GPModel.__init__(self, X, Y, map_kern, likelihood, mean_function)
# initialize dynamics parameters
self.dyn_kern = dyn_kern
self.dyn_mean_function = Zero()
self.dyn_likelihood = likelihoods.Gaussian()
# set latent positions as model param
del self.X
self.X = Param(X_mean)
def build_likelihood(self):
"""
Construct a tensorflow function to compute the likelihood.
\log p(Y | theta).
"""
# dynamics log likelihood
K_dyn = self.dyn_kern.K(self.X[:-1,:]) + eye(tf.shape(self.X[:-1,:])[0])*self.dyn_likelihood.variance
L_dyn = tf.cholesky(K_dyn)
# log likelihood is defined using multivariate_normal function
diff_dyn = self.X[1:,:] - self.dyn_mean_function(self.X[:-1,:])
alpha_dyn = tf.matrix_triangular_solve(L_dyn, diff_dyn, lower=True)
# initialize model parameters
num_dims_dyn = 1 if tf.rank(self.X[1:,:]) == 1 else tf.shape(self.X[1:,:])[1]
num_dims_dyn = tf.cast(num_dims_dyn, float_type)
num_points_dyn = tf.cast(tf.shape(self.X[1:,:])[0], float_type)
# compute log likelihood
llh_dyn = - 0.5 * num_dims_dyn * num_points_dyn * np.log(2 * np.pi)
llh_dyn += - num_dims_dyn * tf.reduce_sum(tf.log(tf.diag_part(L_dyn)))
llh_dyn += - 0.5 * tf.reduce_sum(tf.square(alpha_dyn))
# mapping log likelihood
K_map = self.kern.K(self.X) + eye(tf.shape(self.X)[0])*self.likelihood.variance
L_map = tf.cholesky(K_map)
# log likelihood is defined using multivariate_normal function
diff_map = self.Y - self.mean_function(self.X)
alpha_map = tf.matrix_triangular_solve(L_map, diff_map, lower=True)
# initialize model parameters
num_dims_map = 1 if tf.rank(self.Y) == 1 else tf.shape(self.Y)[1]
num_dims_map = tf.cast(num_dims_map, float_type)
num_points_map = tf.cast(tf.shape(self.Y)[0], float_type)
# compute log likelihood
llh_map = - 0.5 * num_dims_map * num_points_map * np.log(2 * np.pi)
llh_map += - num_dims_map * tf.reduce_sum(tf.log(tf.diag_part(L_map)))
llh_map += - 0.5 * tf.reduce_sum(tf.square(alpha_map))
return llh_dyn+llh_map
def build_predict(self, Xnew, full_cov=False):
"""
Xnew is a data matrix, point at which we want to predict.
This method computes, p(F* | Y ), where F* are points on the GP at Xnew.
This will be similar to GP Regression.
"""
# compute kernel for test points
Kx = self.kern.K(self.X, Xnew)
# compute kernel matrix and cholesky decomp.
K = self.kern.K(self.X) + eye(tf.shape(self.X)[0]) * self.likelihood.variance
L = tf.cholesky(K)
# compute L^-1kx
A = tf.matrix_triangular_solve(L, Kx, lower=True)
# compute L^-1(y-mu(x))
V = tf.matrix_triangular_solve(L, self.Y - self.mean_function(self.X))
# compute fmean = kx^TK^-1(y-mu(x))
fmean = tf.matmul(tf.transpose(A), V) + self.mean_function(Xnew)
# diag var or full variance
if full_cov:
# compute kxx - kxTK^-1kx
fvar = self.kern.K(Xnew) - tf.matmul(tf.transpose(A), A)
shape = tf.stack([1, 1, tf.shape(self.Y)[1]])
fvar = tf.tile(tf.expand_dims(fvar, 2), shape)
else:
# compute single value for variance
fvar = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(A), 0)
fvar = tf.tile(tf.reshape(fvar, (-1, 1)), [1, tf.shape(self.Y)[1]])
return fmean, fvar
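# Added usage sketch (assumptions: GPflow 0.x with GPModel.optimize() and Param.value,
# plus a working TensorFlow session; the data, latent_dim and maxiter are illustrative).
if __name__ == "__main__":
    # Toy single sequence: 50 frames of a noisy 3-D sine/cosine trajectory.
    t = np.linspace(0, 4 * np.pi, 50)[:, None]
    Y = np.hstack([np.sin(t), np.cos(t), np.sin(2 * t)]) + 0.05 * np.random.randn(50, 3)
    m = GPDM(Y, latent_dim=2)      # latent positions initialised via PCA_reduce
    m.optimize(maxiter=100)        # joint MAP over latent X and kernel hyperparameters
    print(m.X.value[:5])           # first few optimised latent coordinates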
|
ShibataLabPrivate/GPyWorkshop
|
Experiments/gpdm.py
|
Python
|
mit
| 5,965
|
[
"Gaussian"
] |
57eac4f3eec3b4d32d0733e2f23d9fe87bc21103c4d2abbcc0c48bdc83616542
|
# Copyright (C) 2008 CAMd
# Please see the accompanying LICENSE file for further information.
from __future__ import division
import numpy as np
from gpaw.utilities.blas import rk, r2k, gemm
from gpaw.matrix_descriptor import BandMatrixDescriptor, \
BlacsBandMatrixDescriptor
class MatrixOperator:
"""Base class for overlap and hamiltonian operators.
Due to optimized BLAS usage, matrices are considered
transposed both upon input and output.
As both the overlap and Hamiltonian matrices are Hermitian, they
can be considered as transposed *or* conjugated as compared to
standard definitions.
"""
# This class has 100% parallel unittest coverage by parallel/ut_hsops.py!
# If you add to or change any aspect of the code, please update the test.
nblocks = 1
async = True
hermitian = True
def __init__(self, ksl, nblocks=None, async=None, hermitian=None):
"""The constructor now calculates the work array sizes, but does not
allocate them. Here is a summary of the relevant variables and the
cases handled.
Given::
J = nblocks The number of blocks to divide bands and
grid points into.
N = mynbands The number of bands on this MPI task
M = np.ceil(N/float(J)) The number of bands in each block.
G = gd.n_c.prod() The number of grid points on this MPI task.
g = np.ceil(G/float(J)) The number of grid points in each block.
X and Q The workspaces to be calculated.
Note that different values of J can lead to the same values of M
and G. Q is relatively simple to calculate, symmetric case needs
*roughly* half as much storage space as the non-symmetric case.
X is much more difficult. Read below.
X is the band index of the workspace array. It is allocated in units
of the wavefunctions. Here is the condition on X and some intermediate
variables::
M > 0 At least one band in a block
X >= M Blocking over band index must have enough space.
X * G >= N * g Blocking over grid index must have enough space.
There are two different parallel matrix multiples here:
1. calculate_matrix_elements contracts on grid index
2. matrix_multiply contracts on the band index
We simply needed to make sure that we have enough workspace for
both of these multiples since we re-use the workspace arrays.
Cases::
Simplest case is G % J = M % J = 0: X = M.
If g * N > M * G, then we need to increase the buffer size by one
wavefunction unit greater than the simple case, thus X = M + 1.
"""
self.bd = ksl.bd
self.gd = ksl.gd
self.blockcomm = ksl.blockcomm
self.bmd = ksl.new_descriptor() #XXX take hermitian as argument?
self.dtype = ksl.dtype
self.buffer_size = ksl.buffer_size
if nblocks is not None:
self.nblocks = nblocks
if async is not None:
self.async = async
if hermitian is not None:
self.hermitian = hermitian
# default for work spaces
self.work1_xG = None
self.work2_xG = None
self.A_qnn = None
self.A_nn = None
mynbands = self.bd.mynbands
ngroups = self.bd.comm.size
G = self.gd.n_c.prod()
# If buffer_size keyword exist, use it to calculate closest
# corresponding value of nblocks. An *attempt* is made
# such that actual buffer size used does not exceed the
# value specified by buffer_size.
# Maximum allowable buffer_size corresponds to nblock = 1
# which is all the wavefunctions.
# Give error if the buffer_size is so small that it cannot
# contain a single wavefunction
if self.buffer_size is not None: # buffersize is in KiB
sizeof_single_wfs = float(self.gd.bytecount(self.dtype))
numberof_wfs = self.buffer_size*1024/sizeof_single_wfs
assert numberof_wfs > 0 # buffer_size is too small
self.nblocks = max(int(mynbands//numberof_wfs),1)
# Calculate Q and X for allocating arrays later
self.X = 1 # not used for ngroups == 1 and J == 1
self.Q = 1
J = self.nblocks
M = int(np.ceil(mynbands / float(J)))
g = int(np.ceil(G / float(J)))
assert M > 0 # must have at least one wave function in a block
if ngroups == 1 and J == 1:
pass
else:
if g*mynbands > M*G: # then more space is needed
self.X = M + 1
assert self.X*G >= g*mynbands
else:
self.X = M
if ngroups > 1:
if self.hermitian:
self.Q = ngroups // 2 + 1
else:
self.Q = ngroups
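# Added worked example (illustrative numbers): with J = 2 blocks, mynbands = 10
# bands and G = 1000 grid points, M = ceil(10/2) = 5 and g = ceil(1000/2) = 500;
# g*mynbands (5000) is not greater than M*G (5000), so X = M = 5. With
# ngroups = 4 (band communicator size) and hermitian = True, Q = 4//2 + 1 = 3.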
def allocate_work_arrays(self):
J = self.nblocks
ngroups = self.bd.comm.size
mynbands = self.bd.mynbands
dtype = self.dtype
if ngroups == 1 and J == 1:
self.work1_xG = self.gd.zeros(mynbands, dtype)
else:
self.work1_xG = self.gd.zeros(self.X, dtype)
self.work2_xG = self.gd.zeros(self.X, dtype)
if ngroups > 1:
self.A_qnn = np.zeros((self.Q, mynbands, mynbands), dtype)
self.A_nn = self.bmd.zeros(dtype=dtype)
def estimate_memory(self, mem, dtype):
J = self.nblocks
ngroups = self.bd.comm.size
mynbands = self.bd.mynbands
nbands = self.bd.nbands
gdbytes = self.gd.bytecount(dtype)
count = self.Q * mynbands**2
# Code semipasted from allocate_work_arrays
if ngroups == 1 and J == 1:
mem.subnode('work1_xG', mynbands * gdbytes)
else:
mem.subnode('work1_xG', self.X * gdbytes)
mem.subnode('work2_xG', self.X * gdbytes)
mem.subnode('A_qnn', count * mem.itemsize[dtype])
self.bmd.estimate_memory(mem.subnode('Band Matrices'), dtype)
def _pseudo_braket(self, bra_xG, ket_yG, A_yx, square=None):
"""Calculate matrix elements of braket pairs of pseudo wave functions.
Low-level helper function. Results will be put in the *A_yx* array::
/ ~ * ~
A = | dG bra (G) ket (G)
nn' / n n'
Parameters:
bra_xG: ndarray
Set of bra-like vectors in which the matrix elements are evaluated.
ket_yG: ndarray
Set of ket-like vectors in which the matrix elements are evaluated.
A_yx: ndarray
Matrix in which to put calculated elements. Take care: Due to the
difference in Fortran/C array order and the inherent BLAS nature,
the matrix has to be filled in transposed (conjugated in future?).
"""
assert bra_xG.shape[1:] == ket_yG.shape[1:]
assert (ket_yG.shape[0], bra_xG.shape[0]) == A_yx.shape
if square is None:
square = (bra_xG.shape[0]==ket_yG.shape[0])
dv = self.gd.dv
if ket_yG is bra_xG:
rk(dv, bra_xG, 0.0, A_yx)
elif self.hermitian and square:
r2k(0.5 * dv, bra_xG, ket_yG, 0.0, A_yx)
else:
gemm(dv, bra_xG, ket_yG, 0.0, A_yx, 'c')
def _initialize_cycle(self, sbuf_mG, rbuf_mG, sbuf_In, rbuf_In, auxiliary):
"""Initializes send/receive cycle of pseudo wave functions, as well as
an optional auxiliary send/receive cycle of corresponding projections.
Low-level helper function. Results in the following communications::
Rank below This rank Rank above
Asynchronous: ... o/i <-- sbuf_mG -- o/i <-- rbuf_mG -- o/i ...
Synchronous: blank blank blank
Auxiliary: ... o/i <-- sbuf_In -- o/i <-- rbuf_In -- o/i ...
A letter 'o' signifies a non-blocking send and 'i' a matching receive.
Parameters:
sbuf_mG: ndarray
Send buffer for the outgoing set of pseudo wave functions.
rbuf_mG: ndarray
Receive buffer for the incoming set of pseudo wave functions.
sbuf_In: ndarray, ignored if not auxiliary
Send buffer for the outgoing set of atomic projector overlaps.
rbuf_In: ndarray, ignored if not auxiliary
Receive buffer for the incoming set of atomic projector overlaps.
auxiliary: bool
Determines whether to initiate the auxiliary send/receive cycle.
"""
band_comm = self.bd.comm
rankm = (band_comm.rank - 1) % band_comm.size
rankp = (band_comm.rank + 1) % band_comm.size
self.req, self.req2 = [], []
# If asynchronous, non-blocking send/receives of psit_nG's start here.
if self.async:
self.req.append(band_comm.send(sbuf_mG, rankm, 11, False))
self.req.append(band_comm.receive(rbuf_mG, rankp, 11, False))
# Auxiliary asynchronous cycle, also send/receive of P_ani's.
if auxiliary:
self.req2.append(band_comm.send(sbuf_In, rankm, 31, False))
self.req2.append(band_comm.receive(rbuf_In, rankp, 31, False))
def _finish_cycle(self, sbuf_mG, rbuf_mG, sbuf_In, rbuf_In, auxiliary):
"""Completes a send/receive cycle of pseudo wave functions, as well as
an optional auxiliary send/receive cycle of corresponding projections.
Low-level helper function. Results in the following communications::
Rank below This rank Rank above
Asynchronous: ... w/w <-- sbuf_mG -- w/w <-- rbuf_mG -- w/w ...
Synchronous: ... O/I <-- sbuf_mG -- O/I <-- rbuf_mG -- O/I ...
Auxiliary: ... w/w <-- sbuf_In -- w/w <-- rbuf_In -- w/w ...
A letter 'w' signifies wait for initialized non-blocking communication.
The letter 'O' signifies a blocking send and 'I' a matching receive.
Parameters:
Same as _initialize_cycle.
Returns:
sbuf_mG: ndarray
New send buffer with the received set of pseudo wave functions.
rbuf_mG: ndarray
New receive buffer (has the sent set of pseudo wave functions).
sbuf_In: ndarray, same as input if not auxiliary
New send buffer with the received set of atomic projector overlaps.
rbuf_In: ndarray, same as input if not auxiliary
New receive buffer (has the sent set of atomic projector overlaps).
"""
band_comm = self.bd.comm
rankm = (band_comm.rank - 1) % band_comm.size
rankp = (band_comm.rank + 1) % band_comm.size
# If synchronous, blocking send/receives of psit_nG's are carried out here.
if self.async:
assert len(self.req) == 2, 'Expected asynchronous request pairs.'
band_comm.waitall(self.req)
else:
assert len(self.req) == 0, 'Got unexpected asynchronous requests.'
band_comm.sendreceive(sbuf_mG, rankm, rbuf_mG, rankp, 11, 11)
sbuf_mG, rbuf_mG = rbuf_mG, sbuf_mG
# Auxiliary asynchronous cycle, also wait for P_ani's.
if auxiliary:
assert len(self.req2) == 2, 'Expected asynchronous request pairs.'
band_comm.waitall(self.req2)
sbuf_In, rbuf_In = rbuf_In, sbuf_In
return sbuf_mG, rbuf_mG, sbuf_In, rbuf_In
def suggest_temporary_buffer(self):
"""Return a *suggested* buffer for calculating A(psit_nG) during
a call to calculate_matrix_elements. Work arrays will be allocated
if they are not already available.
Note that the temporary buffer is merely a reference to (part of) a
work array, hence data race conditions occur if you're not careful.
"""
dtype = self.dtype
if self.work1_xG is None:
self.allocate_work_arrays()
else:
assert self.work1_xG.dtype == dtype
J = self.nblocks
N = self.bd.mynbands
B = self.bd.comm.size
if B == 1 and J == 1:
return self.work1_xG
else:
M = int(np.ceil(N / float(J)))
assert M > 0 # must have at least one wave function in group
return self.work1_xG[:M]
def calculate_matrix_elements(self, psit_nG, P_ani, A, dA):
"""Calculate matrix elements for A-operator.
Results will be put in the *A_nn* array::
___
~ ^ ~ \ ~ ~a a ~a ~
A = <psi |A|psi > + ) <psi |p > dA <p |psi >
nn' n n' /___ n i ii' i' n'
aii'
Fills in the lower part of *A_nn*, but only on domain and band masters.
Parameters:
psit_nG: ndarray
Set of vectors in which the matrix elements are evaluated.
P_ani: dict
Dictionary of projector overlap integrals P_ni = <p_i | psit_nG>.
A: function
Functional form of the operator A which works on psit_nG.
Must accept and return an ndarray of the same shape as psit_nG.
dA: function
Operator which works on | phi_i >. Must accept atomic
index a and P_ni and return an ndarray with the same shape
as P_ni, thus representing P_ni multiplied by dA_ii.
"""
band_comm = self.bd.comm
domain_comm = self.gd.comm
block_comm = self.blockcomm
B = band_comm.size
J = self.nblocks
N = self.bd.mynbands
M = int(np.ceil(N / float(J)))
if self.work1_xG is None:
self.allocate_work_arrays()
else:
assert self.work1_xG.dtype == psit_nG.dtype
A_NN = self.A_nn
if B == 1 and J == 1:
# Simple case:
Apsit_nG = A(psit_nG)
self._pseudo_braket(psit_nG, Apsit_nG, A_NN)
for a, P_ni in P_ani.items():
gemm(1.0, P_ni, dA(a, P_ni), 1.0, A_NN, 'c')
domain_comm.sum(A_NN, 0)
return self.bmd.redistribute_output(A_NN)
# Now it gets nasty! We parallelize over B groups of bands and
# each band group is blocked in J smaller slices (less memory).
Q = self.Q
# Buffer for storage of blocks of calculated matrix elements.
if B == 1:
A_qnn = A_NN.reshape((1, N, N))
else:
A_qnn = self.A_qnn
# Buffers for send/receive of operated-on versions of P_ani's.
sbuf_In = rbuf_In = None
if P_ani:
sbuf_In = np.concatenate([dA(a, P_ni).T
for a, P_ni in P_ani.items()])
if B > 1:
rbuf_In = np.empty_like(sbuf_In)
# Because of the amount of communication involved, we need to
# be synchronized up to this point but only on the 1D band_comm
# communication ring
band_comm.barrier()
if J*M == N + M: # remove extra slice
J -= 1
for j in range(J):
n1 = j * M
n2 = n1 + M
if n2 > N:
n2 = N
M = n2 - n1
psit_mG = psit_nG[n1:n2]
temp_mG = A(psit_mG)
sbuf_mG = temp_mG[:M] # necessary only for last slice
rbuf_mG = self.work2_xG[:M]
cycle_P_ani = (j == J - 1 and P_ani)
for q in range(Q):
A_nn = A_qnn[q]
A_mn = A_nn[n1:n2]
# Start sending currently buffered kets to rank below
# and receiving next set of kets from rank above us.
# If we're at the last slice, start cycling P_ani too.
if q < Q - 1:
self._initialize_cycle(sbuf_mG, rbuf_mG,
sbuf_In, rbuf_In, cycle_P_ani)
# Calculate pseudo-braket contributions for the current slice
# of bands in the current mynbands x mynbands matrix block.
# The special case below may no longer be valid; better to be
# conservative than to risk it. Moreover, this special case is an
# accident waiting to happen, so always doing the more general
# case is safer.
# if q == 0 and self.hermitian and not self.bd.strided:
# # Special case, we only need the lower part:
# self._pseudo_braket(psit_nG[:n2], sbuf_mG, A_mn[:, :n2])
# else:
self._pseudo_braket(psit_nG, sbuf_mG, A_mn, square=False)
# If we're at the last slice, add contributions from P_ani's.
if cycle_P_ani:
I1 = 0
for P_ni in P_ani.values():
I2 = I1 + P_ni.shape[1]
gemm(1.0, P_ni, sbuf_In[I1:I2].T.copy(),
1.0, A_nn, 'c')
I1 = I2
# Wait for all send/receives to finish before next iteration.
# Swap send and receive buffer such that next becomes current.
# If we're at the last slice, also finishes the P_ani cycle.
if q < Q - 1:
sbuf_mG, rbuf_mG, sbuf_In, rbuf_In = self._finish_cycle(
sbuf_mG, rbuf_mG, sbuf_In, rbuf_In, cycle_P_ani)
# First iteration was special because we had the ket to ourself
if q == 0:
rbuf_mG = self.work1_xG[:M]
domain_comm.sum(A_qnn, 0)
if B == 1:
return self.bmd.redistribute_output(A_NN)
if domain_comm.rank == 0:
self.bmd.assemble_blocks(A_qnn, A_NN, self.hermitian)
# Because of the amount of communication involved, we need to
# be synchronized up to this point.
block_comm.barrier()
return self.bmd.redistribute_output(A_NN)
def matrix_multiply(self, C_NN, psit_nG, P_ani=None):
"""Calculate new linear combinations of wave functions.
Results will be put in the *P_ani* dict and a new psit_nG returned::
__ __
~ \ ~ ~a ~ \ ~a ~
psi <-- ) C psi and <p |psi > <-- ) C <p |psi >
n /__ nn' n' i n /__ nn' i n'
n' n'
Parameters:
C_NN: ndarray
Matrix representation of the requested linear combinations. Even
with a hermitian operator, this matrix need not be self-adjoint.
However, unlike the results from calculate_matrix_elements, it is
assumed that all matrix elements are filled in (use e.g. tri2full).
psit_nG: ndarray
Set of vectors in which the matrix elements are evaluated.
P_ani: dict
Dictionary of projector overlap integrals P_ni = <p_i | psit_nG>.
"""
band_comm = self.bd.comm
domain_comm = self.gd.comm
B = band_comm.size
J = self.nblocks
N = self.bd.mynbands
if self.work1_xG is None:
self.allocate_work_arrays()
else:
assert self.work1_xG.dtype == psit_nG.dtype
C_NN = self.bmd.redistribute_input(C_NN)
if B == 1 and J == 1:
# Simple case:
newpsit_nG = self.work1_xG
gemm(1.0, psit_nG, C_NN, 0.0, newpsit_nG)
self.work1_xG = psit_nG
if P_ani:
for P_ni in P_ani.values():
gemm(1.0, P_ni.copy(), C_NN, 0.0, P_ni)
return newpsit_nG
# Now it gets nasty! We parallelize over B groups of bands and
# each grid chunk is divided in J smaller slices (less memory).
Q = B # always non-hermitian XXX
rank = band_comm.rank
shape = psit_nG.shape
psit_nG = psit_nG.reshape(N, -1)
G = psit_nG.shape[1] # number of grid-points
g = int(np.ceil(G / float(J)))
# Buffers for send/receive of pre-multiplication versions of P_ani's.
sbuf_In = rbuf_In = None
if P_ani:
sbuf_In = np.concatenate([P_ni.T for P_ni in P_ani.values()])
if B > 1:
rbuf_In = np.empty_like(sbuf_In)
# Because of the amount of communication involved, we need to
# be synchronized up to this point but only on the 1D band_comm
# communication ring
band_comm.barrier()
if g*J == G + g: # remove extra slice
J -= 1
for j in range(J):
G1 = j * g
G2 = G1 + g
if G2 > G:
G2 = G
g = G2 - G1
sbuf_ng = self.work1_xG.reshape(-1)[:N * g].reshape(N, g)
rbuf_ng = self.work2_xG.reshape(-1)[:N * g].reshape(N, g)
sbuf_ng[:] = psit_nG[:, G1:G2]
beta = 0.0
cycle_P_ani = (j == J - 1 and P_ani)
for q in range(Q):
# Start sending currently buffered kets to rank below
# and receiving next set of kets from rank above us.
# If we're at the last slice, start cycling P_ani too.
if q < Q - 1:
self._initialize_cycle(sbuf_ng, rbuf_ng,
sbuf_In, rbuf_In, cycle_P_ani)
# Calculate wave-function contributions from the current slice
# of grid data by the current mynbands x mynbands matrix block.
C_nn = self.bmd.extract_block(C_NN, (rank + q) % B, rank)
gemm(1.0, sbuf_ng, C_nn, beta, psit_nG[:, G1:G2])
# If we're at the last slice, add contributions to P_ani's.
if cycle_P_ani:
I1 = 0
for P_ni in P_ani.values():
I2 = I1 + P_ni.shape[1]
gemm(1.0, sbuf_In[I1:I2].T.copy(), C_nn, beta, P_ni)
I1 = I2
# Wait for all send/receives to finish before next iteration.
# Swap send and receive buffer such that next becomes current.
# If we're at the last slice, also finishes the P_ani cycle.
if q < Q - 1:
sbuf_ng, rbuf_ng, sbuf_In, rbuf_In = self._finish_cycle(
sbuf_ng, rbuf_ng, sbuf_In, rbuf_In, cycle_P_ani)
# First iteration was special because we initialized the kets
if q == 0:
beta = 1.0
psit_nG.shape = shape
return psit_nG
|
qsnake/gpaw
|
gpaw/hs_operators.py
|
Python
|
gpl-3.0
| 23,175
|
[
"GPAW"
] |
bcae24b4486369b21cfa592b802d71539324b7834d62a1640bca8871b63c0422
|
# -*- coding: utf-8 -*-
### THIS IS THE FILE THAT DEALS WITH ASSET.PROGRESS FILES
# Its core idea is to be able to manage the checklist of the
# project asset by asset.
# In the previous version, the checklist was constructed
# with a limitation of one indentation level of sub-tasks;
# in this one I want to make sure people can make as many
# layers of subtasks as they need.
# I will also remove the not-needed X button and change it
# to simple deletion.
# Importing stuff to make everything work
# system
import os
import socket
# graphics interface
import gtk
import pango
import cairo
import glib
import datetime
import itemselector
# self made modules
import dialogs
import history
### READ FILE ####
def openckecklist(filepath, rang=[9,-1], minus=0):
# open file
File = open(filepath, "r")
File = File.read()
# blank placeholder for the checklist LIST
checklist = ["[ ]"]
if rang[1] == -1:
rang[1] = len(File.split("\n"))+1
for index, line in enumerate(File.split("\n")):
line = line[minus:]
if line.startswith("[") and index in range(rang[0],rang[1]) and line[line.find("]")+2] != "#":
#every indentation is a list
part = [line]
indent = minus/4
#recurcive method... running the function with in itself.
def checkindent(part, indexb, indent, minus):
indentb = indent + 1
for index, line in enumerate(File.split("\n")):
if line.startswith(" "*indentb+"[") and index > indexb:# and index in range(rang[0],rang[1]):
#line = line[minus:]
partb = [line[line.find("["):]]
partb = checkindent(partb, index, indentb, minus) #here
if partb[0][partb[0].find("]")+2] != "#":
part.append(partb)
if line.startswith(" "*(indent)+"[") and index > indexb:
break
return part
part = checkindent(part, index, indent, minus) # and here
checklist.append(part)
return checklist # returning checklist
### GET THE FINAL FRACTION ###
def partcalculate(part):
fraction = 0.0
if part[0].startswith("[V]"):
fraction = 1.0
else:
for i in part[1:]:
fraction = fraction + (partcalculate(i) / len(part[1:]))
# int() in Python doesn't hold up well enough
# when dealing with a 1.0 calculated by the algorithm;
# it's something to do with how float numbers are stored
# in memory, I guess.
# On some checklists it was returning 99% instead of 100%,
# so the quick fix:
if fraction > 0.9999:
fraction = 1.0
return fraction
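# Added worked example (illustrative): a part is [task_line, child_part, ...], so a
# parent with one finished and one open child reports 50%:
#   partcalculate(["[ ] parent", ["[V] child a"], ["[ ] child b"]])  ->  0.5
# while a task checked at its own level short-circuits to 1.0 regardless of children.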
### CHECKLIST MANAGER WINDOW ###
# and finally we need a window editor for those graphs
class checkwindow:
def __init__(self, w=False, pf="/", title="Checklist", FILE=None, highlight=None):
print "\033[1;31m ⬥ CHECKLIST EDITOR : \033[1;m"
print "\033[1;32m ⬦ File "+FILE+" \033[1;m"
print "\033[1;32m ⬦ Title "+title+" \033[1;m"
print "\033[1;32m ⬦ Highlight "+str(highlight)+" \033[1;m"
#saving all the input to SELF
self.widget = w
self.title = title
self.FILE = FILE
self.FILENAME = FILE
self.highlight = highlight
self.LIST = openckecklist(self.FILE)
self.mainpercent = partcalculate(self.LIST)
self.open()
self.pf = pf
self.win = gtk.Window()
self.win.set_title(self.title+" "+FILE.replace(self.pf, ""))
self.win.set_default_size(800,800)
self.win.set_position(gtk.WIN_POS_CENTER)
self.mainbox = gtk.VBox(False)
self.win.add(self.mainbox)
self.allowed = True #allowed to refresh frame
# HELPERS
self.dW = 0
self.DH = 0
self.mpx = 0
self.mpy = 0
self.mpf = 0
self.offset = 0
## ICONS
self.ok = gtk.gdk.pixbuf_new_from_file(self.pf+"/py_data/icons/ok.png")
self.plus = gtk.gdk.pixbuf_new_from_file(self.pf+"/py_data/icons/plus.png")
self.delete = gtk.gdk.pixbuf_new_from_file(self.pf+"/py_data/icons/delete.png")
self.move = gtk.gdk.pixbuf_new_from_file(self.pf+"/py_data/icons/move.png")
self.edit = gtk.gdk.pixbuf_new_from_file(self.pf+"/py_data/icons/edit.png")
self.schedule = gtk.gdk.pixbuf_new_from_file(self.pf+"/py_data/icons/schedule.png")
self.closed = gtk.gdk.pixbuf_new_from_file(self.pf+"/py_data/icons/closed.png")
self.openicon = gtk.gdk.pixbuf_new_from_file(self.pf+"/py_data/icons/open.png")
self.pasteicon = gtk.gdk.pixbuf_new_from_file(self.pf+"/py_data/icons/paste.png")
# FOR GRAB FEATURE
self.tool = "select"
self.grab = [-1]
self.grab_text = ""
self.grabbed = False
self.initframe = True
graph = gtk.DrawingArea()
graph.set_size_request(500,700)
self.mainbox.pack_start(graph)
graph.connect("expose-event", self.framegraph)
self.win.show_all()
def open(self):
self.FILE = open(self.FILENAME, "r")
self.FILE = self.FILE.read().split("\n")
print "\033[1;32m ⬦ Content (only index tasks) : \033[1;m"
for i in self.FILE:
if i.startswith("[ ]") or i.startswith("[V]"):
print "\033[1;34m "+str(i.replace("[ ]", "☐").replace("[V]", "☑"))+" \033[1;m"
self.colapsed = []
for n, i in enumerate(self.FILE):
self.colapsed.append(False)
self.FILE.append("[ ] !!!LASTLINE!!!")
def save(self):
n = []
for i in self.FILE:
if i != "":
n.append(i)
self.FILE = n
save = open(self.FILENAME, "w")
if self.FILE[-1] == "":
self.FILE = self.FILE[:-1]
for i in self.FILE:
if i != "[ ] !!!LASTLINE!!!":
save.write(i+"\n")
save.close()
def get_line_path(self, ind, line):
line = line.replace("].", "] ")
sep = "=:>"
p = ""
if line.startswith("["):
p = line[line.find("]")+1:]
else:
parts = []
now = ind+9
nowline = line
curindent = len(nowline[:nowline.find("[")])
for i in range(len(self.FILE)):
try:
len(self.FILE[now-1][:self.FILE[now-1].find("[")])
except:
break
if len(self.FILE[now-1][:self.FILE[now-1].find("[")]) < curindent:
nowline = self.FILE[now-1][self.FILE[now-1].find("]")+1:]
parts.append(nowline)
curindent = curindent - 4
now = now - 1
for i in parts[::-1]:
p = p+i+sep
p = p + line[line.find("]")+1:]
return p
#### THIS FUNCTION DRAWS THE PIXELS IN THE WINDOW ####
def framegraph(self, widget, event):
w, h = widget.window.get_size()
xgc = widget.window.new_gc()
mx, my, fx = widget.window.get_pointer()
# GETTING WHETHER THE WINDOW IS ACTIVE
self.winactive = self.win.is_active()
ctx = widget.window.cairo_create()
ctx.select_font_face("Monospace", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
xgc.line_width = 2
# BACKGROUND COLOR
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#2b2b2b")) ## CHOSE COLOR
widget.window.draw_rectangle(xgc, True, 0, 0, w, h) ## FILL FRAME
# BANNER IMAGE FOR INSPIRATION
# updating the image if let's say we changed it
if self.dW == 0 and self.DH == 0:
self.banner = self.pf+"/py_data/banner.png"
self.pixbuf = gtk.gdk.pixbuf_new_from_file(self.banner)
#lets get how much to scale H
scaleimageH = int( float(self.pixbuf.get_height()) / self.pixbuf.get_width() * w)
#scaling image to the frame
drawpix = self.pixbuf.scale_simple(w, scaleimageH, gtk.gdk.INTERP_NEAREST)
#drawing image
widget.window.draw_pixbuf(None, drawpix, 0, 0, 0, (h - drawpix.get_height()) / 2, -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
#UI Backdrop
ctx3 = widget.window.cairo_create()
ctx3.set_source_rgba(0.1,0.1,0.1,0.95)
ctx3.rectangle(0, 0, w, h)
ctx3.fill()
#############################################################################
############################# DRAW HERE #####################################
#############################################################################
removestring = []
if self.tool == "select":
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
elif self.tool == "grab":
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
if self.grabbed:
self.tool = "select"
self.grabbed = False
#self.open()
if "GDK_BUTTON3" in str(fx) and "GDK_BUTTON3" not in str(self.mpf) and self.win.is_active():
self.tool = "select"
prevline = "[ ]"
sofar = [False, 0]
foundhightlight = False
yline = -40
for ind, line in enumerate(self.FILE[9:]):
line = line.decode("utf-8")
notlastline = True
#if it's the LASTLINE BUGFIXER THINGY
if line == "[ ] !!!LASTLINE!!!":
notlastline = False
#reloadfile = False
if "[ ]" in line or "[V]" in line or "[v]" in line:
notcomment = True
#COMMENTS
if line[line.find("]")+2] == "#":
notcomment = False
try:
if self.colapsed[ind+9] and not self.grabbed:
continue
except:
pass
if ind not in self.grab:
yline = yline + 40
ymove = yline+self.offset
#try:
# if ymove not in range(0-40,h): ############# THIS IS THE ATTEMPT AT OPTIMIZATION ################
# continue # tho it breaks the scroll sometimes #
#except: # I have an idea to disable scroll limits and see what's gonna happen
# pass
xmove = line.find("[")*20 + 50
put = " "
gpos = ((len(self.grab_text[:self.grab_text.find("\n")])*12)+35+35)-35
checkedhigher = False # THIS WILL BE TRUE IF IT'S CHECKED HIGHER IN THE HIERARCHY
checked = False # ONLY FOR VISUAL CONFIRMATION
#every even darker
if (yline/40 % 2) == 0 and self.tool != "grab":
ctx3 = widget.window.cairo_create()
ctx3.set_source_rgba(0,0,0,0.4)
ctx3.rectangle(0, ymove, w, 39)
ctx3.fill()
if not notcomment:
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#2c2c2c")) ## CHOSE COLOR
widget.window.draw_rectangle(xgc, True, xmove-50, ymove, w, 40)
if my in range(ymove, ymove+35) and self.tool == "select" and notlastline and notcomment:
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#414141")) ## CHOSE COLOR
widget.window.draw_rectangle(xgc, True, xmove-50, ymove, w, 39)
### LET'S TRY TO FIND THE % OF EACH PART IN THE CHECKLIST
if "[V]" in line:
checkpercent = 1.0
checkedhigher = True
checked = True
else:
checkpercent = 0.0
s_ind = ind
try:
if line.find("[") < self.FILE[9+ind+1].find("[") :#and ymove in range(0, h):
nextline = ""
fn = -1
then = -1
for n, l in enumerate(self.FILE[ind+9:]):
if line.find("[") == l.find("["):
fn = fn + 1
if fn == 1:
then = n
if line.find("[") > l.find("["):
break
fn = fn + 1
if fn == 1:
then = n
s_ind = then
if "[ ]" in line and "[V]" not in line:
checkpercent = partcalculate(openckecklist(self.FILENAME, [ind+9, then+ind+9], line.find("[")))
except:
pass
# CHECKING COLAPSED
def checkcolapsed():
colapsed = False
try:
if self.FILE[ind+10].find("[") > line.find("["):
if my in range(ymove, ymove+35) and mx in range(xmove-30, xmove-10) and self.tool == "select" :
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#1c1c1c")) ## CHOSE COLOR
widget.window.draw_rectangle(xgc, True, xmove-30, ymove+5, 20, 20)
# IF CLICKED
if "GDK_BUTTON1" in str(fx) and "GDK_BUTTON1" not in str(self.mpf) and self.win.is_active():
put = "."
if line[line.find("]")+1:].startswith("."):
put = " "
self.FILE[ind+9] = line[:line.find("]")+1]+put+line[line.find("]")+2:]
self.save()
self.open()
try:
if not line[line.find("]")+1:].startswith("."):
if self.tool == "select":
widget.window.draw_pixbuf(None, self.openicon, 0, 0, xmove-30, ymove+5 , -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
else:
if self.tool == "select":
widget.window.draw_pixbuf(None, self.closed, 0, 0, xmove-30, ymove+5 , -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
colapsed = True
except:
pass
except:
pass
if colapsed:
try:
for i in range(ind+10, s_ind+ind+9):
self.colapsed[i] = True
except:
pass
else:
for i in range(ind+10, s_ind+ind+9):
try:
self.colapsed[i] = False
except:
pass
checkcolapsed()
## HIGHLIGHT
if self.highlight and self.tool == "select":
if self.highlight.endswith(self.get_line_path(ind, line)):
foundhightlight = True
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#395384")) ## CHOSE COLOR
widget.window.draw_rectangle(xgc, True, xmove, ymove, w, 39)
if self.initframe:
self.offset = 0-ymove+100
# IF GRABBING IS ABOVE THIS TASK
if my in range(ymove, ymove+35) and self.tool == "grab" :
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#1c1c1c")) ## CHOSE COLOR
widget.window.draw_rectangle(xgc, True, int(float(mx - gpos)/80)*80+50, ymove, w, 40)
gl = self.grab_text.split("\n")[0]
ctx.set_source_rgb(1,1,1)
if checkedhigher or "[V]" in gl:
ctx.set_source_rgb(0.7,0.4,0.2) #395384
ctx.set_font_size(20)
ctx.move_to( int(float(mx - gpos)/80)*80+50+40, ymove+25)
ctx.show_text(gl[gl.find("]")+2:])
yline = yline + 40
ymove = yline+self.offset
widget.window.draw_line(xgc, int(float(mx - gpos)/80)*80+50, 0, int(float(mx - gpos)/80)*80+50, h )
# IF RELEASED
if "GDK_BUTTON1" not in str(fx) and "GDK_BUTTON1" in str(self.mpf) and self.win.is_active() and not self.grabbed:
for i in self.grab:
self.FILE[i+9] = "!!!DELETE!!!"
for n, i in enumerate(self.grab_text.split("\n")[::-1]):
#if n == -1:
#i = i[:i.find("]")+1]+" "+i[i.find("]")+2:]
self.FILE.insert(ind+9, " "*((int(float(mx - gpos)/80)*80)/20)+i)
for i in self.grab:
self.FILE.remove("!!!DELETE!!!")
# refreshing the file
self.grab_text = ""
self.grab = [-1]
#reloadfile = True
self.save()
self.open()
self.grabbed = True
self.colapsed = []
for n, i in enumerate(self.FILE):
self.colapsed.append(False)
checkcolapsed()
# TEXT OF THE TASK
if notlastline:
ctx.set_source_rgb(1,1,1)
if checkedhigher or "[V]" in line or checkpercent == 1.0:
ctx.set_source_rgb(0.7,0.4,0.2) #395384
if not notcomment:
ctx.set_source_rgb(0.4,0.5,0.5)
ctx.set_font_size(20)
ctx.move_to( xmove+40, ymove+25)
if ind not in self.grab and notcomment:
ctx.show_text(line[line.find("]")+2:])
if not notcomment:
ctx.show_text(line[line.find("]")+3:])
ctx.move_to( xmove+10, ymove+25)
ctx.show_text("#")
if "[ ]" in line and not "[V]" in line and checkpercent > 0.0 and checkpercent < 1.0:
ctx.set_source_rgb(0.7,0.7,0.7)
ctx.set_font_size(10)
ctx.move_to( xmove+2+40, ymove+37)
ctx.show_text(str(int(checkpercent*100))+"%")
#d0d0d0
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#d0d0d0")) ## CHOSE COLOR
widget.window.draw_rectangle(xgc, True, xmove+75, ymove+31, w-30-(xmove+75)-40, 5)
#cb9165
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#cb9165")) ## CHOSE COLOR
widget.window.draw_rectangle(xgc, True, xmove+75, ymove+31, int(round((w-30-(xmove+75)-40)*checkpercent)), 5)
# CHECK BUTTON
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#5c5c5c")) ## CHOSE COLOR
# IF MOUSE OVER
if my in range(ymove+5, ymove+5+20) and mx in range(xmove+5, xmove+5+20) and self.tool == "select" and notlastline and notcomment:
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#cb9165"))
# IF CLICKED
if "GDK_BUTTON1" in str(fx) and "GDK_BUTTON1" not in str(self.mpf) and self.win.is_active():
put = "V"
if line[line.find("[")+1:].startswith("V") or line[line.find("[")+1:].startswith("v") or checkpercent == 1.0:
put = " "
self.FILE[ind+9] = line[:line.find("[")+1]+put+line[line.find("]"):]
if self.FILE[ind+10].find("[") > line.find("["):
allcomment = True
for n, i in enumerate(self.FILE):
if n in range(ind+10, ind+s_ind+9):
self.FILE[n] = i[:i.find("[")+1]+put+i[i.find("]"):]
if self.FILE[n][line.find("]")+2+4] != "#" and line.find("[")+4 == line.find("["):
allcomment = False
if not allcomment:
self.FILE[ind+9] = line[:line.find("[")+1]+" "+line[line.find("]"):]
#WRITING TO HISTORY
history.write(self.pf ,self.FILENAME, self.get_line_path(ind, line)+" ["+put+"]")
# refreshing the file
#reloadfile = True
self.save()
self.open()
if notlastline and notcomment:
widget.window.draw_rectangle(xgc, True, xmove+5, ymove+5, 20, 20)
if line[line.find("[")+1:].startswith("V") or line[line.find("[")+1:].startswith("v") or checkpercent == 1.0 : # IF THE LINE IS CHECKED
if notcomment:
widget.window.draw_pixbuf(None, self.ok, 0, 0, xmove+7, ymove , -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
#WHEN A TASK IS CHECKED, REMOVE ITS SCHEDULE ENTRY SO NO NEW SCHEDULES GET ADDED FOR IT
removestring.append(self.get_line_path(ind, line))
#foundhightlight = False
checked = True
# ADD SUBTASK
if my in range(ymove+5, ymove+5+20) and mx in range(xmove+(len(line[line.find("]")+1:])*12)+35, xmove+(len(line[line.find("]")+1:])*12)+35+20) and self.tool == "select" and notlastline:# and not checkedhigher:
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#cb9165"))
widget.window.draw_rectangle(xgc, True, xmove+(len(line[line.find("]")+1:])*12)+35, ymove+5-2, 22, 22)
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#1c1c1c"))
widget.window.draw_rectangle(xgc, True, xmove+(len(line[line.find("]")+1:])*12)+35+30+35+35, ymove+5-2, 160, 27)
ctx.set_source_rgb(1,1,1)
ctx.set_font_size(20)
ctx.move_to( xmove+(len(line[line.find("]")+1:])*12)+35+35+35+35, ymove+5+20)
ctx.show_text("Add Subtask")
# IF CLICKED
if "GDK_BUTTON1" in str(fx) and "GDK_BUTTON1" not in str(self.mpf) and self.win.is_active():
def ee(theline, p_line, line):
Pname = ""
comm = False
if line[line.find("]")+2] != "#":
Pname = dialogs.PickName("New Subtask")
else:
comm = True
Pname = dialogs.PickName("#New Sub-Comment")
if comm and not Pname.startswith("#"):
Pname = "#"+Pname
if Pname != "":
self.FILE[theline+9] = line[:line.find("[")+1]+" "+line[line.find("]"):]
if self.FILE[theline+10].find("[") > line.find("["):
self.FILE.insert(theline+p_line+9, line[:line.find("[")]+" [ ] "+Pname)
else:
self.FILE.insert(theline+10, line[:line.find("[")]+" [ ] "+Pname)
# refreshing the file
#reloadfile = True
self.save()
self.open()
glib.timeout_add(10, ee, ind, s_ind , line)
if self.tool == "select" and notlastline and notcomment:
widget.window.draw_pixbuf(None, self.plus, 0, 0, xmove+(len(line[line.find("]")+1:])*12)+35, ymove+5 , -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
if my in range(ymove, ymove+35) and not notcomment and notlastline:
widget.window.draw_pixbuf(None, self.plus, 0, 0, xmove+(len(line[line.find("]")+1:])*12)+35, ymove+5 , -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
# ACTIVATE GRAB BUTTON
if my in range(ymove+5, ymove+5+20) and mx in range(xmove+(len(line[line.find("]")+1:])*12)+35+35, xmove+(len(line[line.find("]")+1:])*12)+35+20+35) and self.tool == "select" and notlastline:
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#cb9165"))
widget.window.draw_rectangle(xgc, True, xmove+(len(line[line.find("]")+1:])*12)+35+35, ymove+5-2, 22, 22)
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#1c1c1c"))
widget.window.draw_rectangle(xgc, True, xmove+(len(line[line.find("]")+1:])*12)+35+30+35+35, ymove+5, 160, 27)
ctx.set_source_rgb(1,1,1)
ctx.set_font_size(20)
ctx.move_to( xmove+(len(line[line.find("]")+1:])*12)+35+35+35+35, ymove+5+20)
ctx.show_text("Move Task")
# IF CLICKED
if "GDK_BUTTON1" in str(fx) and "GDK_BUTTON1" not in str(self.mpf) and self.win.is_active():
self.tool = "grab"
self.grab = [ind]
self.grab_text = line[line.find("["):]
if self.FILE[ind+10].find("[") > line.find("["):
for n, i in enumerate(self.FILE):
if n in range(ind+10, ind+s_ind+9):
self.grab_text = self.grab_text + "\n" + i[line.find("["):]
self.grab.append(n-9)
if self.tool == "select" and notlastline and notcomment:# and not checkedhigher:
widget.window.draw_pixbuf(None, self.move, 0, 0, xmove+(len(line[line.find("]")+1:])*12)+35+35, ymove+5 , -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
if my in range(ymove, ymove+35) and not notcomment and notlastline:
widget.window.draw_pixbuf(None, self.move, 0, 0, xmove+(len(line[line.find("]")+1:])*12)+35+35, ymove+5 , -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
## ADD TO SCHEDULE
# EDIT TASK'S STRING
# removestring
if not checked:
#checking if the task already has a schedule entry
o = open(self.pf+"/schedule.data","r")
o = o.read().split("\n")
alreadyexist = False
for task in o:
if task.endswith(self.get_line_path(ind, line)) and self.FILENAME.replace(self.pf, "") in task:
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#1c1c1c"))
widget.window.draw_rectangle(xgc, True, xmove+(len(line[line.find("]")+1:])*12)+35+30+35+35, ymove+5-2, 130, 27)
ctx.set_source_rgb(1,1,1)
ctx.set_font_size(20)
ctx.move_to( xmove+(len(line[line.find("]")+1:])*12)+35+35+35+35, ymove+5+20)
ctx.show_text(task[:task.find(" ")])
alreadyexist = True
if my in range(ymove+5, ymove+5+20) and mx in range(xmove+(len(line[line.find("]")+1:])*12)+35+35+35, xmove+(len(line[line.find("]")+1:])*12)+35+20+35+35) and self.tool == "select" and notlastline and notcomment:
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#cb9165"))
widget.window.draw_rectangle(xgc, True, xmove+(len(line[line.find("]")+1:])*12)+35+35+35, ymove+5, 22, 22)
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#1c1c1c"))
widget.window.draw_rectangle(xgc, True, xmove+(len(line[line.find("]")+1:])*12)+35+30+35+35, ymove+5, 200, 27)
ctx.set_source_rgb(1,1,1)
ctx.set_font_size(20)
ctx.move_to( xmove+(len(line[line.find("]")+1:])*12)+35+35+35+35, ymove+5+20)
ctx.show_text("Remove Schedule")
# IF CLICKED
if "GDK_BUTTON1" in str(fx) and "GDK_BUTTON1" not in str(self.mpf) and self.win.is_active():
removestring.append(self.get_line_path(ind, line))
if my in range(ymove+5, ymove+5+20) and mx in range(xmove+(len(line[line.find("]")+1:])*12)+35+35+35, xmove+(len(line[line.find("]")+1:])*12)+35+20+35+35) and self.tool == "select" and not alreadyexist and notlastline and notcomment:
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#cb9165"))
widget.window.draw_rectangle(xgc, True, xmove+(len(line[line.find("]")+1:])*12)+35+35+35, ymove+5, 22, 22)
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#1c1c1c"))
widget.window.draw_rectangle(xgc, True, xmove+(len(line[line.find("]")+1:])*12)+35+30+35+35, ymove+5, 200, 27)
ctx.set_source_rgb(1,1,1)
ctx.set_font_size(20)
ctx.move_to( xmove+(len(line[line.find("]")+1:])*12)+35+35+35+35, ymove+5+20)
ctx.show_text("Add To Schedule")
# IF CLICKED
if "GDK_BUTTON1" in str(fx) and "GDK_BUTTON1" not in str(self.mpf) and self.win.is_active():
def ee(ind, line):
#MAKING A STRING TO WRITE TO THE SCHEDULE.DATA FILE
#IT CONTAINS 3 PARTS SEPARATED BY SPACES:
# date
# path to the .progress (checklist) file
# path to the task within the file
# EXAMPLE: 2018/12/31 /dev/chr/character/asset.progress Modeling=:>BaseModeling
# GETTING DATE
y, m, d = int(datetime.datetime.now().year), int(datetime.datetime.now().month)-1, int(datetime.datetime.now().day)
y, m, d = dialogs.GetDate(y, m, d)
y, m, d = str(y), str(m+1), str(d)
if len(m) < 2:
m = "0"+m
if len(d) < 2:
d = "0"+d
newdate = y+"/"+m+"/"+d
### ADDING THE FILENAME TO THE STRING
schstr = newdate+" "+self.FILENAME.replace(self.pf, "")
#WRITING TO HISTORY
history.write(self.pf ,self.FILENAME, self.get_line_path(ind, line)+" [Scheduled] "+newdate)
# GETTING THE PATH WITHIN THE FILE
p = self.get_line_path(ind, line)
schstr = schstr+" "+p
# OPENING EXISTING FILE
o = open(self.pf+"/schedule.data","r")
o = o.read().split("\n")
if o [-1] == "":
o = o[:-1]
o.append(schstr)
# SORTING WHEN ADDING
try:
dl = []
d = o[0][:o[0].find(" ")]
tdl = []
for i in o:
if i[:i.find(" ")] == d:
tdl.append(i)
else:
#if schstr[:schstr.find(" ")] == d:
# tdl.append(schstr)
dl.append(tdl)
tdl = []
tdl.append(i)
d = i[:i.find(" ")]
dl.append(tdl)
tdl = []
tdl.append(i)
d = i[:i.find(" ")]
dl = sorted(dl)
o = []
for i in dl:
for b in i:
if b != "":
o.append(b)
except Exception as c:
o = sorted(o)
s = open(self.pf+"/schedule.data","w")
for i in o:
s.write(i+"\n")
s.close()
glib.timeout_add(10, ee, ind, line)
if self.tool == "select" and notlastline and notcomment:
widget.window.draw_pixbuf(None, self.schedule, 0, 0, xmove+(len(line[line.find("]")+1:])*12)+35+35+35, ymove+5 , -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
# EDITING TASK'S NAME
if mx in range(xmove+25, xmove+(len(line[line.find("]")+1:])*12)+35) and my in range(ymove+5, ymove+5+25) and notlastline:
#widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.display_get_default(), self.edit, 1,20))
widget.window.draw_pixbuf(None, self.edit, 0, 0, mx+2, my-24 , -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
# IF CLICKED
if "GDK_BUTTON1" in str(fx) and "GDK_BUTTON1" not in str(self.mpf) and self.win.is_active():
def ee(ind, line):
newtext = dialogs.PickName(line[line.find("]")+2:])
if newtext != "": # if returned something (if pressed ok and has text)
self.FILE[ind+9] = line[:line.find("]")+2]+newtext
# refreshing the file
#reloadfile = True
self.save()
self.open()
glib.timeout_add(10, ee, ind, line)
# DELETE TASK
# IF MOUSE OVER
if my in range(ymove+5, ymove+5+20) and mx in range(w-40, w-40+20) and self.tool == "select" and notlastline:
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#cb9165"))
widget.window.draw_rectangle(xgc, True, w-42, ymove+5-2, 22, 22)
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#1c1c1c"))
widget.window.draw_rectangle(xgc, True, w-210, ymove+5-2, 160, 27)
ctx.set_source_rgb(1,1,1)
ctx.set_font_size(20)
ctx.move_to( w-200, ymove+20+5)
ctx.show_text("Delete Task")
# IF CLICKED
if "GDK_BUTTON1" in str(fx) and "GDK_BUTTON1" not in str(self.mpf) and self.win.is_active():
if self.FILE[ind+10].find("[") > line.find("["):
for i in range(ind, ind+s_ind):
removestring.append(self.get_line_path(i, self.FILE[i+9]))
self.FILE[i+9] = "!!!DELETE!!!"
for i in range(ind, ind+s_ind):
self.FILE.remove("!!!DELETE!!!")
else:
removestring.append(self.get_line_path(ind, line))
self.FILE[ind+9] = "!!!DELETE!!!"
self.FILE.remove("!!!DELETE!!!")
# refreshing the file
#reloadfile = True
self.save()
self.open()
if self.tool == "select" and notlastline and notcomment:
widget.window.draw_pixbuf(None, self.delete, 0, 0, w-40, ymove+5 , -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
if my in range(ymove, ymove+35) and not notcomment and notlastline:
widget.window.draw_pixbuf(None, self.delete, 0, 0, w-40, ymove+5 , -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
prevline = line
yline = yline + 40
ymove = yline+self.offset
# ADD TASK
if my in range(ymove+5, ymove+5+20) and mx in range(50, 50+20) and self.tool == "select":
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#cb9165"))
widget.window.draw_rectangle(xgc, True, 50, ymove+5+self.offset-2, 22, 22)
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#1c1c1c"))
widget.window.draw_rectangle(xgc, True, 50+30+32, ymove+5, 160, 30)
ctx.set_source_rgb(1,1,1)
ctx.set_font_size(20)
ctx.move_to( 50+32+32, ymove+25)
ctx.show_text("Add Task")
# IF CLICKED
if "GDK_BUTTON1" in str(fx) and "GDK_BUTTON1" not in str(self.mpf) and self.win.is_active():
def ee(theline, line):
Pname = ""
Pname = dialogs.PickName("New Task")
if Pname != "":
self.FILE.append("[ ] "+Pname)
# refreshing the file
#reloadfile = True
self.save()
self.open()
glib.timeout_add(10, ee, ind, line)
widget.window.draw_pixbuf(None, self.plus, 0, 0, 50, ymove+5 , -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
# IF THIS IS AN ASSET, ALLOW IMPORTING A CHECKLIST FROM ANOTHER ASSET
if self.FILENAME.replace(self.pf, "").startswith("/dev/"):
if my in range(ymove+5, ymove+5+20) and mx in range(50+32, 50+32+20) and self.tool == "select":
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#cb9165"))
widget.window.draw_rectangle(xgc, True, 50+32, ymove+5+self.offset-2, 22, 22)
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#1c1c1c"))
widget.window.draw_rectangle(xgc, True, 50+30+32, ymove+5, 160+50, 30)
ctx.set_source_rgb(1,1,1)
ctx.set_font_size(20)
ctx.move_to( 50+32+32, ymove+25)
ctx.show_text("Import Checklist")
# IF CLICKED
if "GDK_BUTTON1" in str(fx) and "GDK_BUTTON1" not in str(self.mpf) and self.win.is_active():
def ee(ind, line):
importing = self.pf+itemselector.select(self.pf)+"/asset.progress"
if os.path.exists(importing):
importing = open(importing, "r")
importing = importing.read().split("\n")
for n, i in enumerate(importing[9:]):
if "[ ]" in i or "[V]" in i:
self.FILE.append(i.replace("[V]", "[ ]"))
self.save()
self.open()
glib.timeout_add(10, ee, ind, line)
widget.window.draw_pixbuf(None, self.pasteicon, 0, 0, 50+32, ymove+5 , -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
#if not foundhightlight and self.highlight:
# #
# removestring.append(self.highlight)
if removestring:
for removing in removestring:
o = open(self.pf+"/schedule.data","r")
o = o.read().split("\n")
if o[-1] == "":
o = o[:-1]
try:
for i in o:
if i.endswith(removing) and self.FILENAME.replace(self.pf, "") in i:
o.remove(i)
s = open(self.pf+"/schedule.data","w")
for i in o:
s.write(i+"\n")
s.close()
#self.highlight = None
except Exception as e:
pass
#SCROLL
if self.mpy > my and "GDK_BUTTON2" in str(fx) and "GDK_BUTTON2" in str(self.mpf) and self.win.is_active():
self.offset = self.offset + (my-self.mpy)
if self.mpy < my and "GDK_BUTTON2" in str(fx) and "GDK_BUTTON2" in str(self.mpf) and self.win.is_active():
self.offset = self.offset - (self.mpy-my)
#if self.offset < 0-(yline+40-h): # SCROLL LIMITS #
# self.offset = 0-(yline+40-h) # for now I disabled them so that when optimization happens
# # there will not be nasty artifacts, like scrolling getting
#if self.offset > 0: # somewhere unexpected. I need to look at this more, but not now.
# self.offset = 0 # At the top of the frame you can find the code that causes this issue.
# I think it's something to do with the yline value.
#if reloadfile:
#
# self.save()
# self.open()
#############################################################################
############################# UNTIL HERE ####################################
#############################################################################
# TESTING SOMETHING
ctx.set_font_size(20)
ctx.move_to( mx, my)
#ctx.show_text(str(mx)+":"+str(my)+" "+str(self.mainscroll))
self.dW = w
self.DH = h
self.mpx = mx
self.mpy = my
self.mpf = fx
self.initframe = False
def callback():
if self.allowed == True:
widget.queue_draw()
glib.timeout_add(1, callback)
|
JYamihud/blender-organizer
|
py_data/modules/checklist.py
|
Python
|
gpl-2.0
| 54,210
|
[
"FLEUR"
] |
f6b8296ca1950030ba0b8ccf1ba53d14b875f76678bac18bfdaa686f1b14dae8
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from .proc_table import procedures, hooks, energy_only_methods
from .proc import scf_helper, scf_wavefunction_factory
from .empirical_dispersion import EmpericalDispersion
from . import dft_functional
|
kratman/psi4public
|
psi4/driver/procrouting/__init__.py
|
Python
|
gpl-2.0
| 1,150
|
[
"Psi4"
] |
714663801f6fdae80c5893a3b8acae7ad81ee261a4f0a7b6898080f56c83217f
|
#!/usr/bin/env python2.7
# This version is created at Mon Mar 17 12:54:44 CET 2014
# Author: Asli I. Ozen (asli@cbs.dtu.dk)
# License: GPL 3.0 (http://www.gnu.org/licenses/gpl-3.0.txt)
import sys, gzip
import re, string
import argparse
import os
from Bio.Blast import NCBIStandalone
from operator import itemgetter, attrgetter
from datetime import datetime as dt
import time
from os.path import basename
sys.path.append('/home/projects5/pr_53035/people/asli/bin/lib/python2.7/site-packages')
prog= sys.argv[0]
example = "----------------------------------------------------------------------------- \
example usage: \n" + prog + " -it test.blasttab -l test.lengths -v \n" + \
prog + " -id test.blastparse -s 30 -q 30 -e 0.0001 -sn nosave \
-----------------------------------------------------------------------------"
helpstr = '''
description: This script parses blast/ublast results and filters them based on the given cut-offs.
Blast results should be in -m 0 format or tab separated -m 6 format. With ublast, the results should be
obtained with -blast6out option.
'''
epi="Author: Asli I. Ozen (asli@cbs.dtu.dk)"
class BlastParse:
def __init__(self):
self.start = time.time()
d_ = dt.today()
self.timestarted = d_.strftime("%d-%m-%Y %H:%M:%S")
self.parseArgs()
def parseArgs(self):
self.parser = argparse.ArgumentParser(description=example + helpstr, epilog = epi, conflict_handler='resolve')
self.parser.add_argument("-id", metavar="blastparsein", help="pre-made blastparse result FILE as an input back again",nargs=1)
self.parser.add_argument("-it", metavar="blasttabin", help="blast tabular result FILE as an input")
self.parser.add_argument("-ib", metavar="blastin", help="blast/psi-blast -m 0 result FILE as an input", nargs=1)
self.parser.add_argument("-o", metavar="output", help="Output FILE name (default=inputfile.blastparse)", nargs=1)
self.parser.add_argument("-bf", metavar="[blast|psiblast]",type=str,default="blast", help="blast -m 0 output file format (default=blast)", nargs=1)
self.parser.add_argument("-l", metavar="lengths", help="Query lengths FILE (required if tabular blast result input(-it) is given)")
self.parser.add_argument("-n", metavar="[savenew|nosave]", type=str, default="nosave", help="save new blastparse or not (default=savenew) ",nargs=1)
self.parser.add_argument("-s", metavar="INT", default= "50", help="minimum similarity cutoff")
self.parser.add_argument("-q", metavar="INT",default= "50", help="minimum query coverage cutoff")
#self.parser.add_argument("-tc", metavar="targetcoverage", help="minimum target coverage cutoff")
self.parser.add_argument("-e", metavar="FLOAT", default= "1e-10" , help="evalue cutoff i.e. 1e-5 (default=1e-10), decimals allowed i.e. 0.0001")
self.parser.add_argument("-v","--verbose", action="store_true" , help="increase output verbosity")
def read_lengths(self):
fl= open(self.lenfile,"rU")
self.lendict={}
for line in fl:
#print line
query = line.split("\t")[0]
query_name = query.split(" ")[0].strip(">")
length= int(line.split("\t")[1].strip("\n"))
self.lendict[query_name]=length
fl.close()
def ReadBlast(self, file, OUT, iszipped = 0, is_psiblast=None):
output= open(OUT, "w")
self.selfhits=[]
if is_psiblast:
print >> sys.stderr, 'Parsing PSI-Blast'
self.parser = NCBIStandalone.PSIBlastParser()
else:
self.parser = NCBIStandalone.BlastParser()
if file[-3:] == '.gz' or iszipped:
handle = gzip.open(file)
else:
handle = open(file)
self.iter = NCBIStandalone.Iterator(handle = handle, parser = self.parser)
self.blastDict = {}
while 1:
try:
rec = self.iter.next()
if not rec: break
except:
sys.stderr.write('Can\'t iterate on blast records anymore. Abort.\n')
import traceback
traceback.print_exc()
return 'Error parsing %s' % file
self.query = rec.query.split(" ")[0] ## blast_record.query.split(" ")[0]
self.length = rec.query_letters
if self.length < self.min_size:
self.printer("Does not meet the minimum length " + str(self.min_size))
break
if is_psiblast: rec = rec.rounds[-1]
# each alignment is one potential hit
for n, alignment in enumerate(rec.alignments):
hsp = alignment.hsps[0] #no multiple hsps
alnlength=hsp.align_length
hit = alignment.title
#targetlength = alignment.length
#m = re.search("sp\|([A-Z0-9]+)\|([A-Z0-9_]+) ?(.+)?", alignment.title)
m = re.search("sp\|(.+?)\|(.+?) (.+)?", alignment.title)
if m: # pyphynr blast result
hit_sp_ac = m.group(1)
hit_sp_id = m.group(2)
hit_sp_note = m.group(3)
elif alignment.title[0] == '>': # result from additional blast databases
hit_sp_ac = None
hit_sp_id = alignment.title[1:].split()[0]
hit_sp_note = None
else:
hit_sp_ac = None
hit_sp_id = None
hit_sp_note = None
self.printer(hit_sp_id)
similarity = hsp.positives[0]/float(hsp.positives[1])*100
if float(hsp.expect) <= float(self.HSP_max_evalue):
if float(similarity) >= int(self.HSP_minimal_positives):
coverage = hsp.positives[1]/float(self.length)*100
if float(coverage) >= int(self.HSP_minimal_coverage):
#targetcoverage = hsp.positives[1]/float(targetlength)*100
#if float(targetcoverage) > int(self.HSP_minimal_targetcov):
#self.compatibles.append((hit_sp_ac, hit))
#hitlist = [hit_sp_id, n+1 , hsp.positives[0]/float(hsp.positives[1])*100, hsp.positives[1]/float(self.length)*100, hsp.positives[1]/float(targetlength)*100, hsp.score, hsp.expect]
hitlist = [hit_sp_id, hsp.positives[0]/float(hsp.positives[1])*100, hsp.positives[1]/float(self.length)*100, hsp.score, hsp.expect]
if self.cB: self.createblastDict(self.query, hitlist) # the query name is only stored on self in this method
output.write("%s\t" % (self.query)),
for element in hitlist:
output.write("%s\t" % element),
output.write("\n")
output.close()
handle.close()
return None
def ReadBlastresultsTab(self, filename, OUT):
if filename[-3:] == '.gz':
fh = gzip.open(filename)
else:
fh= open(filename,"rU")
#hitsdict={}
#hitlist = [hit_sp_id, n+1 , hsp.positives[0]/float(hsp.positives[1])*100, hsp.positives[1]/float(self.length)*100, hsp.score, hsp.expect]
self.blastDict={}
self.selfhits=[]
self.read_lengths()
output= open(OUT, "w")
self.printer(basename(OUT) + " file initiated")
#lines=fh.readlines()
for line in fh:
line = line.strip("\n")
if len(line.split("\t")) > 2:
query = line.split("\t")[0]
#print query
query_name = query.split(" ")[0]
hit_sp_id = line.split("\t")[1]
percent_id = float(line.split("\t")[2])
aln_len=float(line.split("\t")[3])
query_length=self.lendict[query_name]
coverage = 100*int(aln_len)/float(query_length)
bitscore = float(line.split("\t")[11])
evalue = float(line.split("\t")[10])
if float(coverage) > 100 : coverage = 100
if str(query_name) == str(hit_sp_id):
#print "SameSameSame"
self.selfhits.append(query)
else:
if float(evalue) <= float(self.HSP_max_evalue):
if float(percent_id) >= int(self.HSP_minimal_positives):
if float(coverage) >= int(self.HSP_minimal_coverage):
hitlist=[hit_sp_id, percent_id, coverage, bitscore, evalue]
if self.cB: self.createblastDict(query,hitlist)
output.write("%s\t" % (query_name)),
for element in hitlist:
output.write("%s\t" % element),
output.write("\n")
self.printer(basename(OUT) + " file DONE!")
output.close()
fh.close()
def ReadBlastparse(self, OUT):
#hitsdict={}
#hitlist = [hit_sp_id, n+1 , hsp.positives[0]/float(hsp.positives[1])*100, hsp.positives[1]/float(self.length)*100, hsp.score, hsp.expect]
if self.blastparse[-3:] == '.gz':
fh = gzip.open(self.blastparse)
else:
fh= open(self.blastparse,"rU")
lines=fh.readlines()
output= open(OUT, "w")
self.blastDict={}
for line in lines:
line = line.strip("\n")
if len(line.split("\t")) > 2:
query = line.split("\t")[0]
hit_sp_id = line.split("\t")[1]
#n=float(line.split("\t")[2])
percent_id = float(line.split("\t")[2])
coverage = float(line.split("\t")[3])
#targetcoverage = float(line.split("\t")[5])
bitscore = float(line.split("\t")[4])
evalue = float(line.split("\t")[5])
if str(query) == str(hit_sp_id):
#print "SameSameSame"
self.selfhits.append(query)
else:
if float(evalue) <= float(self.HSP_max_evalue):
if float(percent_id) >= int(self.HSP_minimal_positives):
if float(coverage) >= int(self.HSP_minimal_coverage):
hitlist=[hit_sp_id, percent_id, coverage, bitscore, evalue]
if self.cB == 'savenew':
self.createblastDict(query,hitlist)
self.writeoutput(output,query,hitlist)
else:
self.createblastDict(query,hitlist)
output.close()
if self.cB != 'savenew' and os.path.getsize(OUT) == 0:
os.system("rm " + OUT)
fh.close()
def writeoutput(self, oh, query, hitlist):
oh.write("%s\t" % (query))
for element in hitlist:
oh.write("%s\t" % element),
oh.write("\n")
def createblastDict(self, query, hitlist):
self.selfhits=[]
hit_sp_id=hitlist[0]
if str(query) != str(hit_sp_id): # compare string values, not object identity
#hitlist=[hit_sp_id, n, percent_id, coverage,targetcoverage, bitscore,evalue]
#hitlist=[hit_sp_id, percent_id, coverage, bitscore,evalue]
if query in self.blastDict:
self.blastDict[query].append(hitlist)
else:
self.blastDict[query] = [hitlist]
def mainthing(self):
self.HSP_minimal_positives = self.opts.s
self.HSP_minimal_coverage = self.opts.q
#self.HSP_minimal_targetcov = self.opts.tc
self.HSP_minimal_coverage_length = 20
self.lenfile= self.opts.l
self.HSP_max_evalue = self.opts.e
self.v = self.opts.verbose
self.min_size = 0
self.cB = self.opts.n[0]
if self.opts.id:
self.blastparse=self.opts.id[0]
if self.opts.o:
output = self.opts.o[0]
else:
newname= str(self.blastparse).split(".")[0:-1]
output = ".".join(newname) + ".new.blastparse"
self.ReadBlastparse(output)
elif self.opts.it:
blasttab = self.opts.it
if self.opts.o:
output = self.opts.o[0]
else:
output = blasttab + ".blastparse"
self.ReadBlastresultsTab(blasttab,output)
else:
try:
blastfile = self.opts.ib[0]
# -bf is a 1-element list when passed on the command line, but a plain string by default
typ = self.opts.bf[0] if isinstance(self.opts.bf, list) else self.opts.bf
if self.opts.o:
output = self.opts.o[0]
else:
output = blastfile + ".blastparse"
if typ == "psiblast":
self.ReadBlast(blastfile, output, is_psiblast=True)
else:
self.ReadBlast(blastfile, output)
except:
raise IOError('If you do not have a pre-made blastparse or ublast-tab result, you should provide a normal blast output (-m 0)')
#timeused = (time.time() - self.start) / 60
self.timing= (time.time() - self.start) /60
self.printer("### Time used for running: "+str(round(self.timing*60)) + " seconds ("+str(round(self.timing)) + " min)")
timeended= dt.today().strftime("%d-%m-%Y %H:%M:%S")
def printer(self,string):
if self.opts.verbose:
print string
if __name__ == '__main__':
try:
obj = BlastParse()
obj.opts=obj.parser.parse_args(sys.argv[1:])
obj.printer("\n### " + sys.argv[0] + " initialized at " + obj.timestarted)
obj.printer("### OPTIONS : " + str(obj.opts))
obj.mainthing()
# obj.parser.print_help()
except Exception,e:
print str(e)
import traceback
traceback.print_exc()
#
###############
# INPUT LIST
# blast output in tab format & query lengths file : genecatalogue_vs_uniprot.blasttab OR genecatalogue_vs_genecatalogue.blasttab & genecatalogue.lengths
# blast output in -m 0 format : genecatalogue_vs_uniprot.blastout OR genecatalogue_vs_genecatalogue.blastout
# pre-made blastparse file : genecatalogue_vs_uniprot.blastparse
#
# OUTPUT LIST
# new blastparse file based on given parameters : genecatalogue_vs_uniprot.blastparse
# if premade blastparse is given, the blastDict is generated : obj.blastDict
#
# OPTIONS LIST
# '-id', '--blastparsein', help="pre-made blastparse output file"
# '-it', '--blasttabin', help="blast tabular output file"
# '-ib', '--blastm0in', help="blast -m 0 output file
# '-bf', type of blast ('blast' or 'psiblast')"
# '-l', '--lengths', help="Query lengths file"
# '-s', '--similarity', default= "50", help="minimum similarity cutoff"
# '-qc', '--querycoverage',default= "50", help="minimum query coverage cutoff"
# '-tc', '--targetcoverage', help="minimum target coverage cutoff"
# '-e', '--maxevalue', default= "1e-10" , help="evalue cutoff"
#
#
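# EXAMPLE USAGE (illustrative sketch only; the file names below follow the INPUT LIST
# above and are hypothetical):
#   python blastparse.py -it genecatalogue_vs_uniprot.blasttab -l genecatalogue.lengths -s 30 -q 30 -e 1e-5 -v
#   python blastparse.py -id genecatalogue_vs_uniprot.blastparse -s 50 -q 50 -n savenew
#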
|
MG-group-tools/MGFunc
|
mgfunc_v2/blastparse.py
|
Python
|
gpl-3.0
| 14,248
|
[
"BLAST"
] |
d148cc467fe7723362d48adcf3ae456c84006079033eb11c8966cd3ac70ae1ba
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
import math
import matplotlib.pyplot as plt
import numpy as np
import dask.array as da
import scipy.interpolate
import scipy as sp
from scipy.signal import savgol_filter
from scipy.ndimage.filters import gaussian_filter1d
try:
from statsmodels.nonparametric.smoothers_lowess import lowess
statsmodels_installed = True
except BaseException:
statsmodels_installed = False
from hyperspy.signal import BaseSignal
from hyperspy._signals.common_signal1d import CommonSignal1D
from hyperspy.signal_tools import SpikesRemoval
from hyperspy.models.model1d import Model1D
from hyperspy.defaults_parser import preferences
from hyperspy.signal_tools import (
Signal1DCalibration,
SmoothingSavitzkyGolay,
SmoothingLowess,
SmoothingTV,
ButterworthFilter)
from hyperspy.ui_registry import DISPLAY_DT, TOOLKIT_DT
from hyperspy.misc.tv_denoise import _tv_denoise_1d
from hyperspy.signal_tools import BackgroundRemoval
from hyperspy.decorators import interactive_range_selector
from hyperspy.signal_tools import IntegrateArea, _get_background_estimator
from hyperspy._signals.lazy import LazySignal
from hyperspy.docstrings.signal1d import CROP_PARAMETER_DOC
from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG
_logger = logging.getLogger(__name__)
def find_peaks_ohaver(y, x=None, slope_thresh=0., amp_thresh=None,
medfilt_radius=5, maxpeakn=30000, peakgroup=10,
subchannel=True,):
"""Find peaks along a 1D line.
Function to locate the positive peaks in a noisy x-y data set.
Detects peaks by looking for downward zero-crossings in the first
derivative that exceed 'slope_thresh'.
Returns an array containing position, height, and width of each peak.
Sorted by position.
'slope_thresh' and 'amp_thresh', control sensitivity: higher values
will neglect wider peaks (slope) and smaller features (amp),
respectively.
Parameters
----------
y : array
1D input array, e.g. a spectrum
x : array (optional)
1D array describing the calibration of y (must have same shape as y)
slope_thresh : float (optional)
1st derivative threshold to count the peak;
higher values will neglect broader features;
default is set to 0.
amp_thresh : float (optional)
intensity threshold below which peaks are ignored;
higher values will neglect smaller features;
default is set to 10% of max(y).
medfilt_radius : int (optional)
median filter window to apply to smooth the data
(see scipy.signal.medfilt);
if 0, no filter will be applied;
default is set to 5.
peakgroup : int (optional)
number of points around the "top part" of the peak that
are taken to estimate the peak height; for spikes or
very narrow peaks, keep PeakGroup=1 or 2; for broad or
noisy peaks, make PeakGroup larger to reduce the effect
of noise;
default is set to 10.
maxpeakn : int (optional)
number of maximum detectable peaks;
default is set to 30000.
subchannel : bool (optional)
default is set to True.
Returns
-------
P : structured array of shape (npeaks)
contains fields: 'position', 'width', and 'height' for each peak.
Examples
--------
>>> x = np.arange(0,50,0.01)
>>> y = np.cos(x)
>>> peaks = find_peaks_ohaver(y, x, 0, 0)
Notes
-----
Original code from T. C. O'Haver, 1995.
Version 2 Last revised Oct 27, 2006 Converted to Python by
Michael Sarahan, Feb 2011.
Revised to handle edges better. MCS, Mar 2011
"""
if x is None:
x = np.arange(len(y), dtype=np.int64)
if not amp_thresh:
amp_thresh = 0.1 * y.max()
peakgroup = np.round(peakgroup)
if medfilt_radius:
d = np.gradient(scipy.signal.medfilt(y, medfilt_radius))
else:
d = np.gradient(y)
n = np.round(peakgroup / 2 + 1)
peak_dt = np.dtype([('position', np.float),
('height', np.float),
('width', np.float)])
P = np.array([], dtype=peak_dt)
peak = 0
for j in range(len(y) - 4):
if np.sign(d[j]) > np.sign(d[j + 1]): # Detects zero-crossing
if np.sign(d[j + 1]) == 0:
continue
# if slope of derivative is larger than slope_thresh
if d[j] - d[j + 1] > slope_thresh:
# if height of peak is larger than amp_thresh
if y[j] > amp_thresh:
# the next section is very slow, and actually messes
# things up for images (discrete pixels),
# so by default, don't do subchannel precision in the
# 1D peakfind step.
if subchannel:
xx = np.zeros(peakgroup)
yy = np.zeros(peakgroup)
s = 0
for k in range(peakgroup):
groupindex = int(j + k - n + 1)
if groupindex < 1:
xx = xx[1:]
yy = yy[1:]
s += 1
continue
elif groupindex > y.shape[0] - 1:
xx = xx[:groupindex - 1]
yy = yy[:groupindex - 1]
break
xx[k - s] = x[groupindex]
yy[k - s] = y[groupindex]
avg = np.average(xx)
stdev = np.std(xx)
xxf = (xx - avg) / stdev
# Fit parabola to log10 of sub-group with
# centering and scaling
yynz = yy != 0
coef = np.polyfit(
xxf[yynz], np.log10(np.abs(yy[yynz])), 2)
c1 = coef[2]
c2 = coef[1]
c3 = coef[0]
with np.errstate(invalid='ignore'):
width = np.linalg.norm(stdev * 2.35703 /
(np.sqrt(2) * np.sqrt(-1 *
c3)))
# if the peak is too narrow for least-squares
# technique to work well, just use the max value
# of y in the sub-group of points near peak.
if peakgroup < 7:
height = np.max(yy)
position = xx[np.argmin(np.abs(yy - height))]
else:
position = - ((stdev * c2 / (2 * c3)) - avg)
height = np.exp(c1 - c3 * (c2 / (2 * c3)) ** 2)
# Fill results array P. One row for each peak
# detected, containing the
# peak position (x-value) and peak height (y-value).
else:
position = x[j]
height = y[j]
# no way to know peak width without
# the above measurements.
width = 0
if (not np.isnan(position) and 0 < position < x[-1]):
P = np.hstack((P,
np.array([(position, height, width)],
dtype=peak_dt)))
peak += 1
# return only the part of the array that contains peaks
# (not the whole maxpeakn x 3 array)
if len(P) > maxpeakn:
minh = np.sort(P['height'])[-maxpeakn]
P = P[P['height'] >= minh]
# Sorts the values as a function of position
P.sort(0)
return P
def interpolate1D(number_of_interpolation_points, data):
ip = number_of_interpolation_points
ch = len(data)
old_ax = np.linspace(0, 100, ch)
new_ax = np.linspace(0, 100, ch * ip - (ip - 1))
interpolator = scipy.interpolate.interp1d(old_ax, data)
return interpolator(new_ax)
def _estimate_shift1D(data, **kwargs):
mask = kwargs.get('mask', None)
ref = kwargs.get('ref', None)
interpolate = kwargs.get('interpolate', True)
ip = kwargs.get('ip', 5)
data_slice = kwargs.get('data_slice', slice(None))
if bool(mask):
# asarray is required for consistency as argmax
# returns a numpy scalar array
return np.asarray(np.nan)
data = data[data_slice]
if interpolate is True:
data = interpolate1D(ip, data)
return np.argmax(np.correlate(ref, data, 'full')) - len(ref) + 1
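# _shift1D shifts a single spectrum by `shift` axis units: it rebuilds the original
# axis from `offset`, `scale` and `size`, fits an interpolator to the data on
# `original_axis`, and re-evaluates it on an axis displaced by the shift, filling
# points that fall outside the original range with `fill_value`.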
def _shift1D(data, **kwargs):
shift = kwargs.get('shift', 0.)
original_axis = kwargs.get('original_axis', None)
fill_value = kwargs.get('fill_value', np.nan)
kind = kwargs.get('kind', 'linear')
offset = kwargs.get('offset', 0.)
scale = kwargs.get('scale', 1.)
size = kwargs.get('size', 2)
if np.isnan(shift) or shift == 0:
return data
axis = np.linspace(offset, offset + scale * (size - 1), size)
si = sp.interpolate.interp1d(original_axis,
data,
bounds_error=False,
fill_value=fill_value,
kind=kind)
offset = float(offset - shift)
axis = np.linspace(offset, offset + scale * (size - 1), size)
return si(axis)
class Signal1D(BaseSignal, CommonSignal1D):
"""
"""
_signal_dimension = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.axes_manager.signal_dimension != 1:
self.axes_manager.set_signal_dimension(1)
def _spikes_diagnosis(self, signal_mask=None,
navigation_mask=None):
"""Plots a histogram to help in choosing the threshold for
spikes removal.
Parameters
----------
signal_mask : boolean array
Restricts the operation to the signal locations not marked
as True (masked)
navigation_mask : boolean array
Restricts the operation to the navigation locations not
marked as True (masked).
See also
--------
spikes_removal_tool
"""
self._check_signal_dimension_equals_one()
dc = self.data
if signal_mask is not None:
dc = dc[..., ~signal_mask]
if navigation_mask is not None:
dc = dc[~navigation_mask, :]
der = np.abs(np.diff(dc, 1, -1))
n = ((~navigation_mask).sum() if navigation_mask is not None else
self.axes_manager.navigation_size)
# arbitrary cutoff for number of spectra necessary before histogram
# data is compressed by finding maxima of each spectrum
tmp = BaseSignal(der) if n < 2000 else BaseSignal(
np.ravel(der.max(-1)))
# get histogram signal using smart binning and plot
tmph = tmp.get_histogram()
tmph.plot()
# Customize plot appearance
plt.gca().set_title('')
plt.gca().fill_between(tmph.axes_manager[0].axis,
tmph.data,
facecolor='#fddbc7',
interpolate=True,
color='none')
ax = tmph._plot.signal_plot.ax
axl = tmph._plot.signal_plot.ax_lines[0]
axl.set_line_properties(color='#b2182b')
plt.xlabel('Derivative magnitude')
plt.ylabel('Log(Counts)')
ax.set_yscale('log')
ax.set_ylim(10 ** -1, plt.ylim()[1])
ax.set_xlim(plt.xlim()[0], 1.1 * plt.xlim()[1])
plt.draw()
def spikes_removal_tool(self, signal_mask=None,
navigation_mask=None, display=True, toolkit=None):
"""Graphical interface to remove spikes from EELS spectra.
Parameters
----------
signal_mask : boolean array
Restricts the operation to the signal locations not marked
as True (masked)
navigation_mask : boolean array
Restricts the operation to the navigation locations not
marked as True (masked)
%s
%s
See also
--------
_spikes_diagnosis
"""
self._check_signal_dimension_equals_one()
sr = SpikesRemoval(self,
navigation_mask=navigation_mask,
signal_mask=signal_mask)
return sr.gui(display=display, toolkit=toolkit)
spikes_removal_tool.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
def create_model(self, dictionary=None):
"""Create a model for the current data.
Returns
-------
model : `Model1D` instance.
"""
model = Model1D(self, dictionary=dictionary)
return model
def shift1D(
self,
shift_array,
interpolation_method='linear',
crop=True,
expand=False,
fill_value=np.nan,
parallel=None,
show_progressbar=None,
max_workers=None,
):
"""Shift the data in place over the signal axis by the amount specified
by an array.
Parameters
----------
shift_array : numpy array
An array containing the shifting amount. It must have
`axes_manager._navigation_shape_in_array` shape.
interpolation_method : str or int
Specifies the kind of interpolation as a string ('linear',
'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or as an
integer specifying the order of the spline interpolator to
use.
%s
expand : bool
If True, the data will be expanded to fit all data after alignment.
Overrides `crop`.
fill_value : float
If crop is False fill the data outside of the original
interval with the given value where needed.
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if not np.any(shift_array):
# Nothing to do, the shift array is filled with zeros
return
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0]
# Figure out min/max shifts, and translate to shifts in index as well
minimum, maximum = np.nanmin(shift_array), np.nanmax(shift_array)
if minimum < 0:
ihigh = 1 + axis.value2index(
axis.high_value + minimum,
rounding=math.floor)
else:
ihigh = axis.high_index + 1
if maximum > 0:
ilow = axis.value2index(axis.offset + maximum,
rounding=math.ceil)
else:
ilow = axis.low_index
if expand:
if self._lazy:
ind = axis.index_in_array
pre_shape = list(self.data.shape)
post_shape = list(self.data.shape)
pre_chunks = list(self.data.chunks)
post_chunks = list(self.data.chunks)
pre_shape[ind] = axis.high_index - ihigh + 1
post_shape[ind] = ilow - axis.low_index
for chunks, shape in zip((pre_chunks, post_chunks),
(pre_shape, post_shape)):
maxsize = min(np.max(chunks[ind]), shape[ind])
num = np.ceil(shape[ind] / maxsize)
chunks[ind] = tuple(len(ar) for ar in
np.array_split(np.arange(shape[ind]),
num))
pre_array = da.full(tuple(pre_shape),
fill_value,
chunks=tuple(pre_chunks))
post_array = da.full(tuple(post_shape),
fill_value,
chunks=tuple(post_chunks))
self.data = da.concatenate((pre_array, self.data, post_array),
axis=ind)
else:
padding = []
for i in range(self.data.ndim):
if i == axis.index_in_array:
padding.append((axis.high_index - ihigh + 1,
ilow - axis.low_index))
else:
padding.append((0, 0))
self.data = np.pad(self.data, padding, mode='constant',
constant_values=(fill_value,))
axis.offset += minimum
axis.size += axis.high_index - ihigh + 1 + ilow - axis.low_index
self._map_iterate(_shift1D, (('shift', shift_array.ravel()),),
original_axis=axis.axis,
fill_value=fill_value,
kind=interpolation_method,
offset=axis.offset,
scale=axis.scale,
size=axis.size,
show_progressbar=show_progressbar,
parallel=parallel,
max_workers=max_workers,
ragged=False)
if crop and not expand:
_logger.debug("Cropping %s from index %i to %i"
% (self, ilow, ihigh))
self.crop(axis.index_in_axes_manager,
ilow,
ihigh)
self.events.data_changed.trigger(obj=self)
shift1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
def interpolate_in_between(
self,
start,
end,
delta=3,
show_progressbar=None,
parallel=None,
max_workers=None,
**kwargs,
):
"""Replace the data in a given range by interpolation.
The operation is performed in place.
Parameters
----------
start, end : int or float
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
delta : int or float
The windows around the (start, end) to use for interpolation
%s
%s
%s
**kwargs :
All extra keyword arguments are passed to
:py:func:`scipy.interpolate.interp1d`. See the function documentation
for details.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0]
i1 = axis._get_index(start)
i2 = axis._get_index(end)
if isinstance(delta, float):
delta = int(delta / axis.scale)
i0 = int(np.clip(i1 - delta, 0, np.inf))
i3 = int(np.clip(i2 + delta, 0, axis.size))
def interpolating_function(dat):
dat_int = sp.interpolate.interp1d(
list(range(i0, i1)) + list(range(i2, i3)),
dat[i0:i1].tolist() + dat[i2:i3].tolist(),
**kwargs)
dat[i1:i2] = dat_int(list(range(i1, i2)))
return dat
self._map_iterate(interpolating_function,
ragged=False,
parallel=parallel,
show_progressbar=show_progressbar,
max_workers=max_workers)
self.events.data_changed.trigger(obj=self)
interpolate_in_between.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
def _check_navigation_mask(self, mask):
if mask is not None:
if not isinstance(mask, BaseSignal):
raise ValueError("mask must be a BaseSignal instance.")
elif mask.axes_manager.signal_dimension not in (0, 1):
raise ValueError("mask must be a BaseSignal "
"with signal_dimension equal to 1")
elif (mask.axes_manager.navigation_dimension !=
self.axes_manager.navigation_dimension):
raise ValueError("mask must be a BaseSignal with the same "
"navigation_dimension as the current signal.")
def estimate_shift1D(
self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
mask=None,
show_progressbar=None,
parallel=None,
max_workers=None,
):
"""Estimate the shifts in the current signal axis using
cross-correlation.
This method can only estimate the shift by comparing
unidimensional features that should not change the position in
the signal axis. To decrease the memory usage, the time of
computation and the accuracy of the results it is convenient to
select the feature of interest providing sensible values for
`start` and `end`. By default interpolation is used to obtain
subpixel precision.
Parameters
----------
start, end : int, float or None
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
as reference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
mask : `BaseSignal` of bool.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
%s
%s
%s
Returns
-------
An array with the result of the estimation in the axis units.
Although the computation is performed in batches if the signal is
lazy, the result is computed in memory because it depends on the
current state of the axes that could change later on in the workflow.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
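Examples
--------
Illustrative sketch; the signal `s` and the axis window used below are
hypothetical:

>>> shifts = s.estimate_shift1D(start=400., end=500.) #doctest: +SKIP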
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
ip = number_of_interpolation_points + 1
axis = self.axes_manager.signal_axes[0]
self._check_navigation_mask(mask)
# we compute for now
if isinstance(start, da.Array):
start = start.compute()
if isinstance(end, da.Array):
end = end.compute()
i1, i2 = axis._get_index(start), axis._get_index(end)
if reference_indices is None:
reference_indices = self.axes_manager.indices
ref = self.inav[reference_indices].data[i1:i2]
if interpolate is True:
ref = interpolate1D(ip, ref)
iterating_kwargs = ()
if mask is not None:
iterating_kwargs += (('mask', mask),)
shift_signal = self._map_iterate(
_estimate_shift1D,
iterating_kwargs=iterating_kwargs,
data_slice=slice(i1, i2),
ref=ref,
ip=ip,
interpolate=interpolate,
ragged=False,
parallel=parallel,
inplace=False,
show_progressbar=show_progressbar,
max_workers=max_workers,
)
shift_array = shift_signal.data
if max_shift is not None:
if interpolate is True:
max_shift *= ip
shift_array = shift_array.clip(-max_shift, max_shift) # clip returns a new array
if interpolate is True:
shift_array = shift_array / ip
shift_array *= axis.scale
if self._lazy:
# We must compute right now because otherwise any changes to the
# axes_manager of the signal later in the workflow may result in
# a wrong shift_array
shift_array = shift_array.compute()
return shift_array
estimate_shift1D.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
def align1D(self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
interpolation_method='linear',
crop=True,
expand=False,
fill_value=np.nan,
also_align=None,
mask=None,
show_progressbar=None):
"""Estimate the shifts in the signal axis using
cross-correlation and use the estimation to align the data in place.
This method can only estimate the shift by comparing
unidimensional
features that should not change the position.
To decrease memory usage, time of computation and improve
accuracy it is convenient to select the feature of interest
setting the `start` and `end` keywords. By default interpolation is
used to obtain subpixel precision.
Parameters
----------
start, end : int, float or None
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
as reference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
interpolation_method : str or int
Specifies the kind of interpolation as a string ('linear',
'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or as an
integer specifying the order of the spline interpolator to
use.
%s
expand : bool
If True, the data will be expanded to fit all data after alignment.
Overrides `crop`.
fill_value : float
If crop is False fill the data outside of the original
interval with the given value where needed.
also_align : list of signals, None
A list of BaseSignal instances that have exactly the same
dimensions as this one and that will be aligned using the shift map
estimated using this signal.
mask : `BaseSignal` of bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
%s
Returns
-------
An array with the result of the estimation.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
See also
--------
estimate_shift1D
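Examples
--------
Illustrative sketch; the signal `s`, the axis window and `other_signal` below
are hypothetical:

>>> s.align1D(start=400., end=500., also_align=[other_signal]) #doctest: +SKIP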
"""
if also_align is None:
also_align = []
self._check_signal_dimension_equals_one()
if self._lazy:
_logger.warning('In order to properly expand, the lazy '
'reference signal will be read twice (once to '
'estimate shifts, and a second time to shift '
'appropriately), which might take a long time. '
'Use expand=False to only pass through the data '
'once.')
shift_array = self.estimate_shift1D(
start=start,
end=end,
reference_indices=reference_indices,
max_shift=max_shift,
interpolate=interpolate,
number_of_interpolation_points=number_of_interpolation_points,
mask=mask,
show_progressbar=show_progressbar)
signals_to_shift = [self] + also_align
for signal in signals_to_shift:
signal.shift1D(shift_array=shift_array,
interpolation_method=interpolation_method,
crop=crop,
fill_value=fill_value,
expand=expand,
show_progressbar=show_progressbar)
align1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG)
def integrate_in_range(self, signal_range='interactive',
display=True, toolkit=None):
"""Sums the spectrum over an energy range, giving the integrated
area.
The energy range can either be selected through a GUI or the command
line.
Parameters
----------
signal_range : a tuple of this form (l, r) or "interactive"
l and r are the left and right limits of the range. They can be
numbers or None, where None indicates the extremes of the interval.
If l and r are floats the `signal_range` will be in axis units (for
example eV). If l and r are integers the `signal_range` will be in
index units. When `signal_range` is "interactive" (default) the
range is selected using a GUI. Note that ROIs can be used
in place of a tuple.
Returns
--------
integrated_spectrum : `BaseSignal` subclass
See Also
--------
integrate_simpson
Examples
--------
Using the GUI
>>> s = hs.signals.Signal1D(range(1000))
>>> s.integrate_in_range() #doctest: +SKIP
Using the CLI
>>> s_int = s.integrate_in_range(signal_range=(560,None))
Selecting a range in the axis units, by specifying the
signal range with floats.
>>> s_int = s.integrate_in_range(signal_range=(560.,590.))
Selecting a range using the index, by specifying the
signal range with integers.
>>> s_int = s.integrate_in_range(signal_range=(100,120))
"""
from hyperspy.misc.utils import deprecation_warning
msg = (
"The `Signal1D.integrate_in_range` method is deprecated and will "
"be removed in v2.0. Use a `roi.SpanRoi` followed by `integrate1D` "
"instead.")
deprecation_warning(msg)
if signal_range == 'interactive':
self_copy = self.deepcopy()
ia = IntegrateArea(self_copy, signal_range)
ia.gui(display=display, toolkit=toolkit)
integrated_signal1D = self_copy
else:
integrated_signal1D = self._integrate_in_range_commandline(
signal_range)
return integrated_signal1D
def _integrate_in_range_commandline(self, signal_range):
e1 = signal_range[0]
e2 = signal_range[1]
integrated_signal1D = self.isig[e1:e2].integrate1D(-1)
return integrated_signal1D
def calibrate(self, display=True, toolkit=None):
"""
Calibrate the spectral dimension using a gui.
It displays a window where the new calibration can be set by:
* setting the values of offset, units and scale directly
* or selecting a range by dragging the mouse on the spectrum figure
and setting the new values for the given range limits
Parameters
----------
%s
%s
Notes
-----
For this method to work the output_dimension must be 1.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
calibration = Signal1DCalibration(self)
return calibration.gui(display=display, toolkit=toolkit)
calibrate.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
def smooth_savitzky_golay(
self,
polynomial_order=None,
window_length=None,
differential_order=0,
parallel=None,
max_workers=None,
display=True,
toolkit=None,
):
"""
Apply a Savitzky-Golay filter to the data in place.
If `polynomial_order` or `window_length` or `differential_order` are
None the method is run in interactive mode.
Parameters
----------
polynomial_order : int, optional
The order of the polynomial used to fit the samples.
`polyorder` must be less than `window_length`.
window_length : int, optional
The length of the filter window (i.e. the number of coefficients).
`window_length` must be a positive odd integer.
differential_order: int, optional
The order of the derivative to compute. This must be a
nonnegative integer. The default is 0, which means to filter
the data without differentiating.
%s
%s
%s
%s
Notes
-----
More information about the filter in `scipy.signal.savgol_filter`.
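Examples
--------
Illustrative sketch; the signal `s` below is hypothetical:

>>> s.smooth_savitzky_golay(polynomial_order=3, window_length=7) #doctest: +SKIP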
"""
self._check_signal_dimension_equals_one()
if (polynomial_order is not None and
window_length is not None):
axis = self.axes_manager.signal_axes[0]
self.map(savgol_filter, window_length=window_length,
polyorder=polynomial_order, deriv=differential_order,
delta=axis.scale, ragged=False, parallel=parallel, max_workers=max_workers)
else:
# Interactive mode
smoother = SmoothingSavitzkyGolay(self)
smoother.differential_order = differential_order
if polynomial_order is not None:
smoother.polynomial_order = polynomial_order
if window_length is not None:
smoother.window_length = window_length
return smoother.gui(display=display, toolkit=toolkit)
smooth_savitzky_golay.__doc__ %= (PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
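# Editor's example (hedged sketch, not part of the original source): a minimal
# non-interactive call of smooth_savitzky_golay on a hypothetical Signal1D `s`;
# the parameter values are illustrative and must satisfy
# polynomial_order < window_length with an odd window_length.
# >>> import numpy as np
# >>> import hyperspy.api as hs
# >>> s = hs.signals.Signal1D(np.random.random(200))
# >>> s.smooth_savitzky_golay(polynomial_order=3, window_length=11)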
def smooth_lowess(
self,
smoothing_parameter=None,
number_of_iterations=None,
show_progressbar=None,
parallel=None,
max_workers=None,
display=True,
toolkit=None,
):
"""
Lowess data smoothing in place.
If `smoothing_parameter` or `number_of_iterations` are None the method
is run in interactive mode.
Parameters
----------
smoothing_parameter: float or None
Between 0 and 1. The fraction of the data used
when estimating each y-value.
number_of_iterations: int or None
The number of residual-based reweightings
to perform.
%s
%s
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
ImportError
If statsmodels is not installed.
Notes
-----
This method uses the lowess algorithm from the `statsmodels` library,
which needs to be installed to use this method.
"""
if not statsmodels_installed:
raise ImportError("statsmodels is not installed. This package is "
"required for this feature.")
self._check_signal_dimension_equals_one()
if smoothing_parameter is None or number_of_iterations is None:
smoother = SmoothingLowess(self)
if smoothing_parameter is not None:
smoother.smoothing_parameter = smoothing_parameter
if number_of_iterations is not None:
smoother.number_of_iterations = number_of_iterations
return smoother.gui(display=display, toolkit=toolkit)
else:
self.map(lowess,
exog=self.axes_manager[-1].axis,
frac=smoothing_parameter,
it=number_of_iterations,
is_sorted=True,
return_sorted=False,
show_progressbar=show_progressbar,
ragged=False,
parallel=parallel,
max_workers=max_workers)
smooth_lowess.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
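# Editor's example (hedged sketch): non-interactive lowess smoothing in place.
# It requires statsmodels to be installed; the parameter values below are
# illustrative only.
# >>> import numpy as np
# >>> import hyperspy.api as hs
# >>> s = hs.signals.Signal1D(np.random.random(200))
# >>> s.smooth_lowess(smoothing_parameter=0.3, number_of_iterations=2)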
def smooth_tv(
self,
smoothing_parameter=None,
show_progressbar=None,
parallel=None,
max_workers=None,
display=True,
toolkit=None,
):
"""
Total variation data smoothing in place.
Parameters
----------
smoothing_parameter: float or None
Denoising weight relative to L2 minimization. If None the method
is run in interactive mode.
%s
%s
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
if smoothing_parameter is None:
smoother = SmoothingTV(self)
return smoother.gui(display=display, toolkit=toolkit)
else:
self.map(_tv_denoise_1d, weight=smoothing_parameter,
ragged=False,
show_progressbar=show_progressbar,
parallel=parallel,
max_workers=max_workers)
smooth_tv.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
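# Editor's example (hedged sketch): total variation smoothing in place with an
# explicit denoising weight, which skips the interactive GUI; the weight is
# illustrative only.
# >>> import numpy as np
# >>> import hyperspy.api as hs
# >>> s = hs.signals.Signal1D(np.random.random(200))
# >>> s.smooth_tv(smoothing_parameter=0.3)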
def filter_butterworth(self,
cutoff_frequency_ratio=None,
type='low',
order=2, display=True, toolkit=None):
"""
Butterworth filter in place.
Parameters
----------
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
smoother = ButterworthFilter(self)
if cutoff_frequency_ratio is not None:
smoother.cutoff_frequency_ratio = cutoff_frequency_ratio
smoother.type = type
smoother.order = order
smoother.apply()
else:
return smoother.gui(display=display, toolkit=toolkit)
filter_butterworth.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
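# Editor's example (hedged sketch): applying the Butterworth filter without the
# GUI by passing an explicit cutoff; cutoff, type and order are illustrative.
# >>> import numpy as np
# >>> import hyperspy.api as hs
# >>> s = hs.signals.Signal1D(np.random.random(200))
# >>> s.filter_butterworth(cutoff_frequency_ratio=0.2, type='low', order=2)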
def _remove_background_cli(
self, signal_range, background_estimator, fast=True,
zero_fill=False, show_progressbar=None, model=None,
return_model=False):
""" See :py:meth:`~hyperspy._signal1d.signal1D.remove_background`. """
if model is None:
from hyperspy.models.model1d import Model1D
model = Model1D(self)
if background_estimator not in model:
model.append(background_estimator)
background_estimator.estimate_parameters(
self,
signal_range[0],
signal_range[1],
only_current=False)
if not fast:
model.set_signal_range(signal_range[0], signal_range[1])
model.multifit(show_progressbar=show_progressbar,
iterpath='serpentine')
model.reset_signal_range()
if self._lazy:
result = self - model.as_signal(show_progressbar=show_progressbar)
else:
try:
axis = self.axes_manager.signal_axes[0]
scale_factor = axis.scale if self.metadata.Signal.binned else 1
bkg = background_estimator.function_nd(axis.axis) * scale_factor
result = self - bkg
except MemoryError:
result = self - model.as_signal(
show_progressbar=show_progressbar)
if zero_fill:
if self._lazy:
low_idx = result.axes_manager[-1].value2index(signal_range[0])
z = da.zeros(low_idx, chunks=(low_idx,))
cropped_da = result.data[low_idx:]
result.data = da.concatenate([z, cropped_da])
else:
result.isig[:signal_range[0]] = 0
if return_model:
if fast:
# Calculate the variance for each navigation position only when
# using fast, otherwise the chisq is already calculated when
# doing the multifit
d = result.data[..., np.where(model.channel_switches)[0]]
variance = model._get_variance(only_current=False)
d *= d / (1. * variance) # d = difference^2 / variance.
model.chisq.data = d.sum(-1)
result = (result, model)
return result
def remove_background(
self,
signal_range='interactive',
background_type='Power Law',
polynomial_order=2,
fast=True,
zero_fill=False,
plot_remainder=True,
show_progressbar=None,
return_model=False,
display=True,
toolkit=None):
"""
Remove the background, either in place using a GUI or returned as a new
spectrum using the command line. The fast option is not accurate for
most background types, except Gaussian, Offset and Power law, but it
is useful to estimate the initial fitting parameters before performing
a full fit.
Parameters
----------
signal_range : "interactive", tuple of ints or floats, optional
If this argument is not specified, the signal range has to be
selected using a GUI, and the original spectrum will be replaced.
If a tuple is given, a new spectrum will be returned.
background_type : str
The type of component which should be used to fit the background.
Possible components: Doniach, Gaussian, Lorentzian, Offset, Polynomial,
PowerLaw, Exponential, SkewNormal, SplitVoigt, Voigt.
If Polynomial is used, the polynomial order can be specified
polynomial_order : int, default 2
Specify the polynomial order if a Polynomial background is used.
fast : bool
If True, perform an approximate estimation of the parameters.
If False, the signal is fitted using non-linear least squares
afterwards. This is slower compared to the estimation but
possibly more accurate.
zero_fill : bool
If True, all spectral channels lower than the lower bound of the
fitting range will be set to zero (this is the default behavior
of Gatan's DigitalMicrograph). Setting this value to False
allows for inspection of the quality of background fit throughout
the pre-fitting region.
plot_remainder : bool
If True, add a (green) line previewing the remainder signal after
background removal. This preview is obtained from a fast calculation,
so the result may differ if an NLLS calculation is finally
performed.
return_model : bool
If True, the background model is returned. The chi² can be obtained
from this model using
:py:attr:`~hyperspy.models.model1d.Model1D.chisq`.
%s
%s
%s
Returns
-------
{None, signal, background_model or (signal, background_model)}
If signal_range is not 'interactive', the background-subtracted
signal is returned. If return_model is True, the background model
is returned as well.
Examples
--------
Using gui, replaces spectrum s
>>> s = hs.signals.Signal1D(range(1000))
>>> s.remove_background() #doctest: +SKIP
Using command line, returns a Signal1D:
>>> s.remove_background(signal_range=(400,450),
background_type='PowerLaw')
<Signal1D, title: , dimensions: (|1000)>
Using a full model to fit the background:
>>> s.remove_background(signal_range=(400,450), fast=False)
<Signal1D, title: , dimensions: (|1000)>
Returns the background-subtracted signal and the model:
>>> s.remove_background(signal_range=(400,450),
fast=False,
return_model=True)
(<Signal1D, title: , dimensions: (|1000)>, <Model1D>)
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
# Create model here, so that we can return it
from hyperspy.models.model1d import Model1D
model = Model1D(self)
if signal_range == 'interactive':
br = BackgroundRemoval(self, background_type=background_type,
polynomial_order=polynomial_order,
fast=fast,
plot_remainder=plot_remainder,
show_progressbar=show_progressbar,
zero_fill=zero_fill,
model=model)
br.gui(display=display, toolkit=toolkit)
if return_model:
return model
else:
background_estimator = _get_background_estimator(
background_type, polynomial_order)[0]
result = self._remove_background_cli(
signal_range=signal_range,
background_estimator=background_estimator,
fast=fast,
zero_fill=zero_fill,
show_progressbar=show_progressbar,
model=model,
return_model=return_model)
return result
remove_background.__doc__ %= (SHOW_PROGRESSBAR_ARG, DISPLAY_DT, TOOLKIT_DT)
@interactive_range_selector
def crop_signal1D(self, left_value=None, right_value=None,):
"""Crop in place the spectral dimension.
Parameters
----------
left_value, right_value : int, float or None
If int, the values are taken as indices. If float, they are
converted to indices using the spectral axis calibration.
If left_value is None, crops from the beginning of the axis.
If right_value is None, crops up to the end of the axis. If
both are None, the interactive cropping interface is activated,
enabling cropping the spectrum using a span selector in the
signal plot.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
try:
left_value, right_value = left_value
except TypeError:
# It was not a ROI, we carry on
pass
self.crop(axis=self.axes_manager.signal_axes[0].index_in_axes_manager,
start=left_value, end=right_value)
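# Editor's example (hedged sketch): cropping the spectral dimension in place,
# first by index (ints) and then by axis value (floats); the limits are
# illustrative only.
# >>> import numpy as np
# >>> import hyperspy.api as hs
# >>> s = hs.signals.Signal1D(np.arange(1000))
# >>> s.crop_signal1D(100, 400)      # indices
# >>> s.crop_signal1D(150.0, 300.0)  # axis units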
def gaussian_filter(self, FWHM):
"""Applies a Gaussian filter in the spectral dimension in place.
Parameters
----------
FWHM : float
The Full Width at Half Maximum of the Gaussian, in the
spectral axis units
Raises
------
ValueError
If FWHM is equal to or less than zero.
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
if FWHM <= 0:
raise ValueError(
"FWHM must be greater than zero")
axis = self.axes_manager.signal_axes[0]
FWHM *= 1 / axis.scale
self.map(gaussian_filter1d, sigma=FWHM / 2.35482, ragged=False)
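# Editor's example (hedged sketch): Gaussian smoothing of the spectral
# dimension in place; FWHM is given in the signal axis units and the value is
# illustrative only.
# >>> import numpy as np
# >>> import hyperspy.api as hs
# >>> s = hs.signals.Signal1D(np.random.random(200))
# >>> s.gaussian_filter(FWHM=2.5)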
def hanning_taper(self, side='both', channels=None, offset=0):
"""Apply a hanning taper to the data in place.
Parameters
----------
side : 'left', 'right' or 'both'
Specify which side to use.
channels : None or int
The number of channels to taper. If None, 2% of the total
number of channels (with a minimum of 20) are tapered.
offset : int
Number of channels at the edge that are set to zero before
the taper is applied. Default is 0.
Returns
-------
channels : int
The number of channels tapered.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if not np.issubdtype(self.data.dtype, np.floating):
raise TypeError("The data dtype should be `float`. It can be "
"changed by using the `change_dtype('float')` "
"method of the signal.")
# TODO: generalize it
self._check_signal_dimension_equals_one()
if channels is None:
channels = int(round(len(self()) * 0.02))
if channels < 20:
channels = 20
dc = self._data_aligned_with_axes
if self._lazy and offset != 0:
shp = dc.shape
if len(shp) == 1:
nav_shape = ()
nav_chunks = ()
else:
nav_shape = shp[:-1]
nav_chunks = dc.chunks[:-1]
zeros = da.zeros(nav_shape + (offset,),
chunks=nav_chunks + ((offset,),))
if side == 'left' or side == 'both':
if self._lazy:
tapered = dc[..., offset:channels + offset]
tapered *= np.hanning(2 * channels)[:channels]
therest = dc[..., channels + offset:]
thelist = [] if offset == 0 else [zeros]
thelist.extend([tapered, therest])
dc = da.concatenate(thelist, axis=-1)
else:
dc[..., offset:channels + offset] *= (
np.hanning(2 * channels)[:channels])
dc[..., :offset] *= 0.
if side == 'right' or side == 'both':
rl = None if offset == 0 else -offset
if self._lazy:
therest = dc[..., :-channels - offset]
tapered = dc[..., -channels - offset:rl]
tapered *= np.hanning(2 * channels)[-channels:]
thelist = [therest, tapered]
if offset != 0:
thelist.append(zeros)
dc = da.concatenate(thelist, axis=-1)
else:
dc[..., -channels - offset:rl] *= (
np.hanning(2 * channels)[-channels:])
if offset != 0:
dc[..., -offset:] *= 0.
if self._lazy:
self.data = dc
self.events.data_changed.trigger(obj=self)
return channels
def find_peaks1D_ohaver(self, xdim=None,
slope_thresh=0,
amp_thresh=None,
subchannel=True,
medfilt_radius=5,
maxpeakn=30000,
peakgroup=10,
parallel=None,
max_workers=None):
"""Find positive peaks along a 1D Signal. It detects peaks by looking
for downward zero-crossings in the first derivative that exceed
'slope_thresh'.
'slope_thresh' and 'amp_thresh' control sensitivity: higher
values will neglect broad peaks (slope) and smaller features (amp),
respectively.
`peakgroup` is the number of points around the top of the peak
that are taken to estimate the peak height. For spikes or very
narrow peaks, set `peakgroup` to 1 or 2; for broad or noisy peaks,
make `peakgroup` larger to reduce the effect of noise.
Parameters
----------
slope_thresh : float, optional
1st derivative threshold to count the peak;
higher values will neglect broader features;
default is set to 0.
amp_thresh : float, optional
intensity threshold below which peaks are ignored;
higher values will neglect smaller features;
default is set to 10%% of max(y).
medfilt_radius : int, optional
median filter window to apply to smooth the data
(see :py:func:`scipy.signal.medfilt`);
if 0, no filter will be applied;
default is set to 5.
peakgroup : int, optional
number of points around the "top part" of the peak
that are taken to estimate the peak height;
default is set to 10
maxpeakn : int, optional
maximum number of detectable peaks;
default is set to 30000.
subchannel : bool, optional
default is set to True.
%s
%s
Returns
-------
structured array of shape (npeaks) containing fields: 'position',
'width', and 'height' for each peak.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
# TODO: add scipy.signal.find_peaks_cwt
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0].axis
peaks = self.map(find_peaks_ohaver,
x=axis,
slope_thresh=slope_thresh,
amp_thresh=amp_thresh,
medfilt_radius=medfilt_radius,
maxpeakn=maxpeakn,
peakgroup=peakgroup,
subchannel=subchannel,
ragged=True,
parallel=parallel,
max_workers=max_workers,
inplace=False)
return peaks.data
find_peaks1D_ohaver.__doc__ %= (PARALLEL_ARG, MAX_WORKERS_ARG)
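# Editor's example (hedged sketch): peak finding on a synthetic two-spectrum
# signal; the returned array holds one structured array per navigation
# position with 'position', 'width' and 'height' fields. Thresholds are
# illustrative only.
# >>> import numpy as np
# >>> import hyperspy.api as hs
# >>> x = np.arange(200)
# >>> s = hs.signals.Signal1D(np.tile(np.exp(-((x - 100.) / 5.) ** 2), (2, 1)))
# >>> peaks = s.find_peaks1D_ohaver(amp_thresh=0.1)
# >>> peaks[0]['position']  # peak position(s) found in the first spectrum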
def estimate_peak_width(
self,
factor=0.5,
window=None,
return_interval=False,
parallel=None,
show_progressbar=None,
max_workers=None,
):
"""Estimate the width of the highest intensity of peak
of the spectra at a given fraction of its maximum.
It can be used with asymmetric peaks. For accurate results any
background must be previously substracted.
The estimation is performed by interpolation using cubic splines.
Parameters
----------
factor : 0 < float < 1
The default, 0.5, estimates the FWHM.
window : None or float
The size of the window centred at the peak maximum
used to perform the estimation.
The window size must be chosen with care: if it is narrower
than the width of the peak at some positions or if it is
so wide that it includes other more intense peaks this
method cannot compute the width and a NaN is stored instead.
return_interval: bool
If True, returns 2 extra signals with the positions of the
desired height fraction at the left and right of the
peak.
%s
%s
%s
Returns
-------
width or [width, left, right], depending on the value of
`return_interval`.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
if not 0 < factor < 1:
raise ValueError("factor must be between 0 and 1.")
axis = self.axes_manager.signal_axes[0]
# x = axis.axis
maxval = self.axes_manager.navigation_size
show_progressbar = show_progressbar and maxval > 0
def estimating_function(spectrum,
window=None,
factor=0.5,
axis=None):
x = axis.axis
if window is not None:
vmax = axis.index2value(spectrum.argmax())
slices = axis._get_array_slices(
slice(vmax - window * 0.5, vmax + window * 0.5))
spectrum = spectrum[slices]
x = x[slices]
spline = scipy.interpolate.UnivariateSpline(
x,
spectrum - factor * spectrum.max(),
s=0)
roots = spline.roots()
if len(roots) == 2:
return np.array(roots)
else:
return np.full((2,), np.nan)
both = self._map_iterate(estimating_function,
window=window,
factor=factor,
axis=axis,
ragged=False,
inplace=False,
parallel=parallel,
show_progressbar=show_progressbar,
max_workers=max_workers)
left, right = both.T.split()
width = right - left
if factor == 0.5:
width.metadata.General.title = (
self.metadata.General.title + " FWHM")
left.metadata.General.title = (
self.metadata.General.title + " FWHM left position")
right.metadata.General.title = (
self.metadata.General.title + " FWHM right position")
else:
width.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum" % factor)
left.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum left position" % factor)
right.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum right position" % factor)
for signal in (left, width, right):
signal.axes_manager.set_signal_dimension(0)
signal.set_signal_type("")
if return_interval is True:
return [width, left, right]
else:
return width
estimate_peak_width.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
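# Editor's example (hedged sketch): estimating the FWHM of the dominant peak
# and the interval at half maximum for a synthetic Gaussian; the data are
# illustrative only.
# >>> import numpy as np
# >>> import hyperspy.api as hs
# >>> x = np.arange(200)
# >>> s = hs.signals.Signal1D(np.exp(-((x - 100.) / 10.) ** 2))
# >>> width, left, right = s.estimate_peak_width(factor=0.5, return_interval=True)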
class LazySignal1D(LazySignal, Signal1D):
"""
"""
_lazy = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.axes_manager.set_signal_dimension(1)
|
dnjohnstone/hyperspy
|
hyperspy/_signals/signal1d.py
|
Python
|
gpl-3.0
| 60,556
|
[
"Gaussian"
] |
706b899a8923a9c9368c852bf5a232e4760620273e9ed302d181cf70564e8d9c
|
"""Unit test for roman1.py
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.3 $"
__date__ = "$Date: 2004/05/05 21:57:20 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
import roman1
import unittest
class KnownValues(unittest.TestCase):
knownValues = ( (1, 'I'),
(2, 'II'),
(3, 'III'),
(4, 'IV'),
(5, 'V'),
(6, 'VI'),
(7, 'VII'),
(8, 'VIII'),
(9, 'IX'),
(10, 'X'),
(50, 'L'),
(100, 'C'),
(500, 'D'),
(1000, 'M'),
(31, 'XXXI'),
(148, 'CXLVIII'),
(294, 'CCXCIV'),
(312, 'CCCXII'),
(421, 'CDXXI'),
(528, 'DXXVIII'),
(621, 'DCXXI'),
(782, 'DCCLXXXII'),
(870, 'DCCCLXX'),
(941, 'CMXLI'),
(1043, 'MXLIII'),
(1110, 'MCX'),
(1226, 'MCCXXVI'),
(1301, 'MCCCI'),
(1485, 'MCDLXXXV'),
(1509, 'MDIX'),
(1607, 'MDCVII'),
(1754, 'MDCCLIV'),
(1832, 'MDCCCXXXII'),
(1993, 'MCMXCIII'),
(2074, 'MMLXXIV'),
(2152, 'MMCLII'),
(2212, 'MMCCXII'),
(2343, 'MMCCCXLIII'),
(2499, 'MMCDXCIX'),
(2574, 'MMDLXXIV'),
(2646, 'MMDCXLVI'),
(2723, 'MMDCCXXIII'),
(2892, 'MMDCCCXCII'),
(2975, 'MMCMLXXV'),
(3051, 'MMMLI'),
(3185, 'MMMCLXXXV'),
(3250, 'MMMCCL'),
(3313, 'MMMCCCXIII'),
(3408, 'MMMCDVIII'),
(3501, 'MMMDI'),
(3610, 'MMMDCX'),
(3743, 'MMMDCCXLIII'),
(3844, 'MMMDCCCXLIV'),
(3888, 'MMMDCCCLXXXVIII'),
(3940, 'MMMCMXL'),
(3999, 'MMMCMXCIX'))
def testToRomanKnownValues(self):
"""toRoman should give known result with known input"""
for integer, numeral in self.knownValues:
result = roman1.toRoman(integer)
self.assertEqual(numeral, result)
def testFromRomanKnownValues(self):
"""fromRoman should give known result with known input"""
for integer, numeral in self.knownValues:
result = roman1.fromRoman(numeral)
self.assertEqual(integer, result)
class ToRomanBadInput(unittest.TestCase):
def testTooLarge(self):
"""toRoman should fail with large input"""
self.assertRaises(roman1.OutOfRangeError, roman1.toRoman, 4000)
def testZero(self):
"""toRoman should fail with 0 input"""
self.assertRaises(roman1.OutOfRangeError, roman1.toRoman, 0)
def testNegative(self):
"""toRoman should fail with negative input"""
self.assertRaises(roman1.OutOfRangeError, roman1.toRoman, -1)
def testNonInteger(self):
"""toRoman should fail with non-integer input"""
self.assertRaises(roman1.NotIntegerError, roman1.toRoman, 0.5)
class FromRomanBadInput(unittest.TestCase):
def testTooManyRepeatedNumerals(self):
"""fromRoman should fail with too many repeated numerals"""
for s in ('MMMM', 'DD', 'CCCC', 'LL', 'XXXX', 'VV', 'IIII'):
self.assertRaises(roman1.InvalidRomanNumeralError, roman1.fromRoman, s)
def testRepeatedPairs(self):
"""fromRoman should fail with repeated pairs of numerals"""
for s in ('CMCM', 'CDCD', 'XCXC', 'XLXL', 'IXIX', 'IVIV'):
self.assertRaises(roman1.InvalidRomanNumeralError, roman1.fromRoman, s)
def testMalformedAntecedent(self):
"""fromRoman should fail with malformed antecedents"""
for s in ('IIMXCC', 'VX', 'DCM', 'CMM', 'IXIV',
'MCMC', 'XCX', 'IVI', 'LM', 'LD', 'LC'):
self.assertRaises(roman1.InvalidRomanNumeralError, roman1.fromRoman, s)
class SanityCheck(unittest.TestCase):
def testSanity(self):
"""fromRoman(toRoman(n))==n for all n"""
for integer in range(1, 4000):
numeral = roman1.toRoman(integer)
result = roman1.fromRoman(numeral)
self.assertEqual(integer, result)
class CaseCheck(unittest.TestCase):
def testToRomanCase(self):
"""toRoman should always return uppercase"""
for integer in range(1, 4000):
numeral = roman1.toRoman(integer)
self.assertEqual(numeral, numeral.upper())
def testFromRomanCase(self):
"""fromRoman should only accept uppercase input"""
for integer in range(1, 4000):
numeral = roman1.toRoman(integer)
roman1.fromRoman(numeral.upper())
self.assertRaises(roman1.InvalidRomanNumeralError,
roman1.fromRoman, numeral.lower())
if __name__ == "__main__":
unittest.main()
|
tapomayukh/projects_in_python
|
sandbox_tapo/src/refs/diveintopython-pdf-5.4/diveintopython-5.4/py/roman/stage1/romantest1.py
|
Python
|
mit
| 5,496
|
[
"VisIt"
] |
9fbb7d1e6a96a63edb0c0223efbce713a2e74feb42e863f9ec1ac10845a57aab
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import print_function
from six.moves import range
import MDAnalysis
import MDAnalysis.analysis.hole
from MDAnalysis.analysis.hole import HOLEtraj, HOLE
from numpy.testing import (TestCase, dec,
assert_equal, assert_almost_equal,
assert_array_equal,
assert_array_almost_equal, assert_)
import numpy as np
import nose
from nose.plugins.attrib import attr
import os
import errno
from MDAnalysisTests.datafiles import PDB_HOLE, MULTIPDB_HOLE
from MDAnalysisTests import (executable_not_found, module_not_found,
tempdir, in_dir)
def rlimits_missing():
# return True if the resource module is not accessible (i.e. rlimits cannot be set)
try:
# on Unix we can manipulate our limits: http://docs.python.org/2/library/resource.html
import resource
soft_max_open_files, hard_max_open_files = resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
return True
return False
class TestHOLE(TestCase):
filename = PDB_HOLE
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
def setUp(self):
# keep tempdir around for the whole lifetime of the class
self.tempdir = tempdir.TempDir()
with in_dir(self.tempdir.name):
H = HOLE(self.filename, raseed=31415)
H.run()
H.collect()
self.H = H
def tearDown(self):
del self.H
del self.tempdir
@attr('slow')
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
def test_HOLE(self):
profiles = self.H.profiles.values()
assert_equal(len(profiles), 1,
err_msg="HOLE.profile should contain exactly 1 profile")
p = profiles[0]
assert_equal(len(p), 425,
err_msg="wrong number of points in HOLE profile")
assert_almost_equal(p.rxncoord.mean(), -1.41225,
err_msg="wrong mean HOLE rxncoord")
assert_almost_equal(p.radius.min(), 1.19707,
err_msg="wrong min HOLE radius")
@attr('slow')
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
def test_vmd_surface(self):
with in_dir(self.tempdir.name):
filename = self.H.create_vmd_surface(filename="hole.vmd")
assert_equal(len(open(filename).readlines()), 6504,
err_msg="HOLE VMD surface file is incomplete")
class TestHOLEtraj(TestCase):
filename = MULTIPDB_HOLE
start = 5
stop = 7
# HOLE is so slow that we only run it once and keep it in
# the class; note that you may not change universe.trajectory
# (e.g. by iterating) because this is not safe in parallel
@classmethod
def setUpClass(cls):
cls.universe = MDAnalysis.Universe(cls.filename)
if not executable_not_found("hole"):
with tempdir.in_tempdir():
H = HOLEtraj(cls.universe, start=cls.start,
stop=cls.stop, raseed=31415)
H.run()
cls.H = H
else:
cls.H = None
cls.frames = [ts.frame
for ts in cls.universe.trajectory[cls.start:cls.stop]]
@classmethod
def tearDownClass(cls):
del cls.H
del cls.universe
# This is VERY slow on 11 frames so we just take 2
@attr('slow')
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
def test_HOLEtraj(self):
assert_array_equal(sorted(self.H.profiles.keys()), self.frames,
err_msg="H.profiles.keys() should contain the frame numbers")
data = np.transpose([(len(p), p.rxncoord.mean(), p.radius.min())
for p in self.H.profiles.values()])
assert_array_equal(data[0], [401, 399],
err_msg="incorrect profile lengths")
assert_array_almost_equal(data[1], [1.98767, 0.0878],
err_msg="wrong mean HOLE rxncoord")
assert_array_almost_equal(data[2], [1.19819, 1.29628],
err_msg="wrong minimum radius")
@attr('slow')
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
def test_min_radius(self):
assert_array_almost_equal(self.H.min_radius(),
np.array([[ 5. , 1.19819],
[ 6. , 1.29628]]),
err_msg="min_radius() array not correct")
@attr('slow')
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
@dec.skipif(module_not_found("matplotlib"))
def test_plot(self):
import matplotlib.axes
ax = self.H.plot(label=True)
assert_(isinstance(ax, matplotlib.axes.Axes),
msg="H.plot() did not produce an Axes instance")
@attr('slow')
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
@dec.skipif(module_not_found("matplotlib"))
def test_plot3D(self):
import mpl_toolkits.mplot3d
ax = self.H.plot3D()
assert_(isinstance(ax, mpl_toolkits.mplot3d.Axes3D),
msg="H.plot3D() did not produce an Axes3D instance")
@attr('slow')
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
@dec.skipif(module_not_found("matplotlib"))
def test_plot3D_rmax(self):
import mpl_toolkits.mplot3d
ax = self.H.plot3D(rmax=2.5)
assert_(isinstance(ax, mpl_toolkits.mplot3d.Axes3D),
msg="H.plot3D(rmax=float) did not produce an Axes3D instance")
class TestHoleModule(TestCase):
@dec.skipif(rlimits_missing, msg="Test skipped because platform does not allow setting rlimits")
def setUp(self):
self.universe = MDAnalysis.Universe(MULTIPDB_HOLE)
try:
# on Unix we can manipulate our limits: http://docs.python.org/2/library/resource.html
import resource
self.soft_max_open_files, self.hard_max_open_files = resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
pass
@attr('slow')
@attr('issue')
@dec.skipif(rlimits_missing, msg="Test skipped because platform does not allow setting rlimits")
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
def test_hole_module_fd_closure(self):
"""test open file descriptors are closed (MDAnalysisTests.analysis.test_hole.TestHoleModule): Issue 129"""
# If Issue 129 isn't resolved, this function will produce an OSError on
# the system, and cause many other tests to fail as well.
#
# Successful test takes ~10 s, failure ~2 s.
# Hasten failure by setting "ulimit -n 64" (can't go too low because of open modules etc...)
import resource
# ----- temporary hack -----
# on Mac OS X (on Travis) we run out of open file descriptors
# before even starting this test (see
# https://github.com/MDAnalysis/mdanalysis/pull/901#issuecomment-231938093);
# if this issue is solved by #363 then revert the following
# hack:
#
import platform
if platform.system() == "Darwin":
max_open_files = 512
else:
max_open_files = 64
#
# --------------------------
resource.setrlimit(resource.RLIMIT_NOFILE,
(max_open_files, self.hard_max_open_files))
with tempdir.in_tempdir():
try:
H = HOLEtraj(self.universe, cvect=[0, 1, 0], sample=20.0)
finally:
self._restore_rlimits()
# pretty unlikely that the code will get through 2 rounds if the MDA
# issue 129 isn't fixed, although this depends on the file descriptor
# open limit for the machine in question
try:
for i in range(2):
# will typically get an OSError for too many files being open after
# about 2 seconds if issue 129 isn't resolved
H.run()
except OSError as err:
if err.errno == errno.EMFILE:
raise AssertionError("HOLEtraj does not close file descriptors (Issue 129)")
raise
finally:
# make sure to restore open file limit !!
self._restore_rlimits()
def _restore_rlimits(self):
try:
import resource
resource.setrlimit(resource.RLIMIT_NOFILE,
(self.soft_max_open_files, self.hard_max_open_files))
except ImportError:
pass
def tearDown(self):
self._restore_rlimits()
del self.universe
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/analysis/test_hole.py
|
Python
|
gpl-2.0
| 10,067
|
[
"MDAnalysis",
"VMD"
] |
acfca22b188d142a6dfaabab4b084891531b235d0c2bcc064ade0cff6624d5f5
|
##
# @package RAMS.NXT
# @file NXTMotor.py
# @author Brian Kim
# @date 7/24/14
# @brief a wrapper around a MotorProfileAssembly that defines an interface with an NXTMotor
#
from NXTPort import NXTPort
from Rover import MotorAssembly
class NXTMotor( NXTPort ):
def __init__( self, asm=None ):
NXTPort.__init__( self, asm )
# self.resetTacho()
def tacho( self ):
if self.isValid():
asm = self.asm()
name = asm.name()
y = asm.signal( name + '_angle' ).specNode()
return y()[0]
def resetTacho( self ):
if self.isValid():
asm = self.asm()
name = asm.name()
y = asm.signal( name + '_angle' ).specNode()
y(0)
def setProfile( self, accel, vel, disp ):
if self.isValid():
asm = self.asm()
name = asm.name()
profile = asm.assembly( name+'Motor_ProfileMotor',0,False )
if profile is not None:
profile.motorProfileCmd( accel, vel, disp )
else:
raise Exception( 'Couldn\'t get ProfileMotor for %s' % name )
|
briansan/rams
|
RAMS/nxt/NXTMotor.py
|
Python
|
bsd-3-clause
| 1,029
|
[
"Brian"
] |
37cad6c5320a5f4face183903b69c766ae21e0df1dbac89c6207553820ef2a16
|
# (C) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import datetime
import functools
import hashlib
import json
import os
import stat
import tarfile
import time
import threading
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils.api import retry_with_delays_and_condition
from ansible.module_utils.api import generate_jittered_backoff
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.six.moves.urllib.parse import quote as urlquote, urlencode, urlparse, parse_qs, urljoin
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.urls import open_url, prepare_multipart
from ansible.utils.display import Display
from ansible.utils.hashing import secure_hash_s
from ansible.utils.path import makedirs_safe
try:
from urllib.parse import urlparse
except ImportError:
# Python 2
from urlparse import urlparse
display = Display()
_CACHE_LOCK = threading.Lock()
COLLECTION_PAGE_SIZE = 100
RETRY_HTTP_ERROR_CODES = [ # TODO: Allow user-configuration
429, # Too Many Requests
520, # Galaxy rate limit error code (Cloudflare unknown error)
]
def cache_lock(func):
def wrapped(*args, **kwargs):
with _CACHE_LOCK:
return func(*args, **kwargs)
return wrapped
def is_rate_limit_exception(exception):
# Note: cloud.redhat.com masks rate limit errors with 403 (Forbidden) error codes.
# Since 403 could reflect the actual problem (such as an expired token), we should
# not retry by default.
return isinstance(exception, GalaxyError) and exception.http_code in RETRY_HTTP_ERROR_CODES
def g_connect(versions):
"""
Wrapper to lazily initialize connection info to Galaxy and verify the API versions required are available on the
endpoint.
:param versions: A list of API versions that the function supports.
"""
def decorator(method):
def wrapped(self, *args, **kwargs):
if not self._available_api_versions:
display.vvvv("Initial connection to galaxy_server: %s" % self.api_server)
# Determine the type of Galaxy server we are talking to. First try it unauthenticated then with Bearer
# auth for Automation Hub.
n_url = self.api_server
error_context_msg = 'Error when finding available api versions from %s (%s)' % (self.name, n_url)
if self.api_server == 'https://galaxy.ansible.com' or self.api_server == 'https://galaxy.ansible.com/':
n_url = 'https://galaxy.ansible.com/api/'
try:
data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg, cache=True)
except (AnsibleError, GalaxyError, ValueError, KeyError) as err:
# Either the URL doesn't exist or another error occurred. Or the URL exists, but isn't a galaxy API
# root (not JSON, no 'available_versions') so try appending '/api/'
if n_url.endswith('/api') or n_url.endswith('/api/'):
raise
# Let exceptions here bubble up but raise the original if this returns a 404 (/api/ wasn't found).
n_url = _urljoin(n_url, '/api/')
try:
data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg, cache=True)
except GalaxyError as new_err:
if new_err.http_code == 404:
raise err
raise
if 'available_versions' not in data:
raise AnsibleError("Tried to find galaxy API root at %s but no 'available_versions' are available "
"on %s" % (n_url, self.api_server))
# Update api_server to point to the "real" API root, which in this case could have been the configured
# url + '/api/' appended.
self.api_server = n_url
# Default to only supporting v1; if only v1 is returned we also assume that v2 is available even though
# it isn't returned in the available_versions dict.
available_versions = data.get('available_versions', {u'v1': u'v1/'})
if list(available_versions.keys()) == [u'v1']:
available_versions[u'v2'] = u'v2/'
self._available_api_versions = available_versions
display.vvvv("Found API version '%s' with Galaxy server %s (%s)"
% (', '.join(available_versions.keys()), self.name, self.api_server))
# Verify that the API versions the function works with are available on the server specified.
available_versions = set(self._available_api_versions.keys())
common_versions = set(versions).intersection(available_versions)
if not common_versions:
raise AnsibleError("Galaxy action %s requires API versions '%s' but only '%s' are available on %s %s"
% (method.__name__, ", ".join(versions), ", ".join(available_versions),
self.name, self.api_server))
return method(self, *args, **kwargs)
return wrapped
return decorator
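# Editor's note (derived from the usage further below): the decorator declares
# which Galaxy API versions a GalaxyAPI method supports, e.g.:
# >>> @g_connect(['v2', 'v3'])
# ... def publish_collection(self, collection_path):
# ...     ...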
def get_cache_id(url):
""" Gets the cache ID for the URL specified. """
url_info = urlparse(url)
port = None
try:
port = url_info.port
except ValueError:
pass # While the URL is probably invalid, let the caller figure that out when using it
# Cannot use netloc because it could contain credentials if the server specified had them in there.
return '%s:%s' % (url_info.hostname, port or '')
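# Editor's note (derived from the implementation above): the cache id is just
# "hostname:port", with an empty port when none is given; example.com below is
# a placeholder host.
# >>> get_cache_id('https://galaxy.ansible.com/api/')
# 'galaxy.ansible.com:'
# >>> get_cache_id('https://hub.example.com:8443/api/galaxy/')
# 'hub.example.com:8443'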
@cache_lock
def _load_cache(b_cache_path):
""" Loads the cache file requested if possible. The file must not be world writable. """
cache_version = 1
if not os.path.isfile(b_cache_path):
display.vvvv("Creating Galaxy API response cache file at '%s'" % to_text(b_cache_path))
with open(b_cache_path, 'w'):
os.chmod(b_cache_path, 0o600)
cache_mode = os.stat(b_cache_path).st_mode
if cache_mode & stat.S_IWOTH:
display.warning("Galaxy cache has world writable access (%s), ignoring it as a cache source."
% to_text(b_cache_path))
return
with open(b_cache_path, mode='rb') as fd:
json_val = to_text(fd.read(), errors='surrogate_or_strict')
try:
cache = json.loads(json_val)
except ValueError:
cache = None
if not isinstance(cache, dict) or cache.get('version', None) != cache_version:
display.vvvv("Galaxy cache file at '%s' has an invalid version, clearing" % to_text(b_cache_path))
cache = {'version': cache_version}
# Set the cache after we've cleared the existing entries
with open(b_cache_path, mode='wb') as fd:
fd.write(to_bytes(json.dumps(cache), errors='surrogate_or_strict'))
return cache
def _urljoin(*args):
return '/'.join(to_native(a, errors='surrogate_or_strict').strip('/') for a in args + ('',) if a)
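# Editor's note (derived from the implementation above): _urljoin strips
# leading/trailing slashes from each component and drops empty ones, so callers
# append a trailing '/' themselves where the API requires it.
# >>> _urljoin('https://galaxy.ansible.com/', '/api/', 'v2', 'collections')
# 'https://galaxy.ansible.com/api/v2/collections'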
class GalaxyError(AnsibleError):
""" Error for bad Galaxy server responses. """
def __init__(self, http_error, message):
super(GalaxyError, self).__init__(message)
self.http_code = http_error.code
self.url = http_error.geturl()
try:
http_msg = to_text(http_error.read())
err_info = json.loads(http_msg)
except (AttributeError, ValueError):
err_info = {}
url_split = self.url.split('/')
if 'v2' in url_split:
galaxy_msg = err_info.get('message', http_error.reason)
code = err_info.get('code', 'Unknown')
full_error_msg = u"%s (HTTP Code: %d, Message: %s Code: %s)" % (message, self.http_code, galaxy_msg, code)
elif 'v3' in url_split:
errors = err_info.get('errors', [])
if not errors:
errors = [{}] # Defaults are set below, we just need to make sure 1 error is present.
message_lines = []
for error in errors:
error_msg = error.get('detail') or error.get('title') or http_error.reason
error_code = error.get('code') or 'Unknown'
message_line = u"(HTTP Code: %d, Message: %s Code: %s)" % (self.http_code, error_msg, error_code)
message_lines.append(message_line)
full_error_msg = "%s %s" % (message, ', '.join(message_lines))
else:
# v1 and unknown API endpoints
galaxy_msg = err_info.get('default', http_error.reason)
full_error_msg = u"%s (HTTP Code: %d, Message: %s)" % (message, self.http_code, galaxy_msg)
self.message = to_native(full_error_msg)
# Keep the raw string results for the date. It's too complex to parse as a datetime object and the various APIs return
# them in different formats.
CollectionMetadata = collections.namedtuple('CollectionMetadata', ['namespace', 'name', 'created_str', 'modified_str'])
class CollectionVersionMetadata:
def __init__(self, namespace, name, version, download_url, artifact_sha256, dependencies):
"""
Contains common information about a collection on a Galaxy server to smooth through API differences for
Collection and define a standard meta info for a collection.
:param namespace: The namespace name.
:param name: The collection name.
:param version: The version that the metadata refers to.
:param download_url: The URL to download the collection.
:param artifact_sha256: The SHA256 of the collection artifact for later verification.
:param dependencies: A dict of dependencies of the collection.
"""
self.namespace = namespace
self.name = name
self.version = version
self.download_url = download_url
self.artifact_sha256 = artifact_sha256
self.dependencies = dependencies
@functools.total_ordering
class GalaxyAPI:
""" This class is meant to be used as a API client for an Ansible Galaxy server """
def __init__(
self, galaxy, name, url,
username=None, password=None, token=None, validate_certs=True,
available_api_versions=None,
clear_response_cache=False, no_cache=True,
priority=float('inf'),
):
self.galaxy = galaxy
self.name = name
self.username = username
self.password = password
self.token = token
self.api_server = url
self.validate_certs = validate_certs
self._available_api_versions = available_api_versions or {}
self._priority = priority
b_cache_dir = to_bytes(C.config.get_config_value('GALAXY_CACHE_DIR'), errors='surrogate_or_strict')
makedirs_safe(b_cache_dir, mode=0o700)
self._b_cache_path = os.path.join(b_cache_dir, b'api.json')
if clear_response_cache:
with _CACHE_LOCK:
if os.path.exists(self._b_cache_path):
display.vvvv("Clearing cache file (%s)" % to_text(self._b_cache_path))
os.remove(self._b_cache_path)
self._cache = None
if not no_cache:
self._cache = _load_cache(self._b_cache_path)
display.debug('Validate TLS certificates for %s: %s' % (self.api_server, self.validate_certs))
def __str__(self):
# type: (GalaxyAPI) -> str
"""Render GalaxyAPI as a native string representation."""
return to_native(self.name)
def __unicode__(self):
# type: (GalaxyAPI) -> unicode
"""Render GalaxyAPI as a unicode/text string representation."""
return to_text(self.name)
def __repr__(self):
# type: (GalaxyAPI) -> str
"""Render GalaxyAPI as an inspectable string representation."""
return (
'<{instance!s} "{name!s}" @ {url!s} with priority {priority!s}>'.
format(
instance=self, name=self.name,
priority=self._priority, url=self.api_server,
)
)
def __lt__(self, other_galaxy_api):
# type: (GalaxyAPI, GalaxyAPI) -> Union[bool, 'NotImplemented']
"""Return whether the instance priority is higher than other."""
if not isinstance(other_galaxy_api, self.__class__):
return NotImplemented
return (
self._priority > other_galaxy_api._priority or
self.name < other_galaxy_api.name
)
@property
@g_connect(['v1', 'v2', 'v3'])
def available_api_versions(self):
# Calling g_connect will populate self._available_api_versions
return self._available_api_versions
@retry_with_delays_and_condition(
backoff_iterator=generate_jittered_backoff(retries=6, delay_base=2, delay_threshold=40),
should_retry_error=is_rate_limit_exception
)
def _call_galaxy(self, url, args=None, headers=None, method=None, auth_required=False, error_context_msg=None,
cache=False):
url_info = urlparse(url)
cache_id = get_cache_id(url)
query = parse_qs(url_info.query)
if cache and self._cache:
server_cache = self._cache.setdefault(cache_id, {})
iso_datetime_format = '%Y-%m-%dT%H:%M:%SZ'
valid = False
if url_info.path in server_cache:
expires = datetime.datetime.strptime(server_cache[url_info.path]['expires'], iso_datetime_format)
valid = datetime.datetime.utcnow() < expires
is_paginated_url = 'page' in query or 'offset' in query
if valid and not is_paginated_url:
# Got a hit on the cache and we aren't getting a paginated response
path_cache = server_cache[url_info.path]
if path_cache.get('paginated'):
if '/v3/' in url_info.path:
res = {'links': {'next': None}}
else:
res = {'next': None}
# Technically some v3 paginated APIs return in 'data' but the caller checks the keys for this so
# always returning the cache under results is fine.
res['results'] = []
for result in path_cache['results']:
res['results'].append(result)
else:
res = path_cache['results']
return res
elif not is_paginated_url:
# The cache entry had expired or does not exist, start a new blank entry to be filled later.
expires = datetime.datetime.utcnow()
expires += datetime.timedelta(days=1)
server_cache[url_info.path] = {
'expires': expires.strftime(iso_datetime_format),
'paginated': False,
}
headers = headers or {}
self._add_auth_token(headers, url, required=auth_required)
try:
display.vvvv("Calling Galaxy at %s" % url)
resp = open_url(to_native(url), data=args, validate_certs=self.validate_certs, headers=headers,
method=method, timeout=20, http_agent=user_agent(), follow_redirects='safe')
except HTTPError as e:
raise GalaxyError(e, error_context_msg)
except Exception as e:
raise AnsibleError("Unknown error when attempting to call Galaxy at '%s': %s" % (url, to_native(e)))
resp_data = to_text(resp.read(), errors='surrogate_or_strict')
try:
data = json.loads(resp_data)
except ValueError:
raise AnsibleError("Failed to parse Galaxy response from '%s' as JSON:\n%s"
% (resp.url, to_native(resp_data)))
if cache and self._cache:
path_cache = self._cache[cache_id][url_info.path]
# v3 can return data or results for paginated results. Scan the result so we can determine what to cache.
paginated_key = None
for key in ['data', 'results']:
if key in data:
paginated_key = key
break
if paginated_key:
path_cache['paginated'] = True
results = path_cache.setdefault('results', [])
for result in data[paginated_key]:
results.append(result)
else:
path_cache['results'] = data
return data
def _add_auth_token(self, headers, url, token_type=None, required=False):
# Don't add the auth token if one is already present
if 'Authorization' in headers:
return
if not self.token and required:
raise AnsibleError("No access token or username set. A token can be set with --api-key "
"or at {0}.".format(to_native(C.GALAXY_TOKEN_PATH)))
if self.token:
headers.update(self.token.headers())
@cache_lock
def _set_cache(self):
with open(self._b_cache_path, mode='wb') as fd:
fd.write(to_bytes(json.dumps(self._cache), errors='surrogate_or_strict'))
@g_connect(['v1'])
def authenticate(self, github_token):
"""
Retrieve an authentication token
"""
url = _urljoin(self.api_server, self.available_api_versions['v1'], "tokens") + '/'
args = urlencode({"github_token": github_token})
resp = open_url(url, data=args, validate_certs=self.validate_certs, method="POST", http_agent=user_agent())
data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
return data
@g_connect(['v1'])
def create_import_task(self, github_user, github_repo, reference=None, role_name=None):
"""
Post an import request
"""
url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports") + '/'
args = {
"github_user": github_user,
"github_repo": github_repo,
"github_reference": reference if reference else ""
}
if role_name:
args['alternate_role_name'] = role_name
elif github_repo.startswith('ansible-role'):
args['alternate_role_name'] = github_repo[len('ansible-role') + 1:]
data = self._call_galaxy(url, args=urlencode(args), method="POST")
if data.get('results', None):
return data['results']
return data
@g_connect(['v1'])
def get_import_task(self, task_id=None, github_user=None, github_repo=None):
"""
Check the status of an import task.
"""
url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports")
if task_id is not None:
url = "%s?id=%d" % (url, task_id)
elif github_user is not None and github_repo is not None:
url = "%s?github_user=%s&github_repo=%s" % (url, github_user, github_repo)
else:
raise AnsibleError("Expected task_id or github_user and github_repo")
data = self._call_galaxy(url)
return data['results']
@g_connect(['v1'])
def lookup_role_by_name(self, role_name, notify=True):
"""
Find a role by name.
"""
role_name = to_text(urlquote(to_bytes(role_name)))
try:
parts = role_name.split(".")
user_name = ".".join(parts[0:-1])
role_name = parts[-1]
if notify:
display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
except Exception:
raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name)
url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles",
"?owner__username=%s&name=%s" % (user_name, role_name))
data = self._call_galaxy(url)
if len(data["results"]) != 0:
return data["results"][0]
return None
@g_connect(['v1'])
def fetch_role_related(self, related, role_id):
"""
Fetch the list of related items for the given role.
The url comes from the 'related' field of the role.
"""
results = []
try:
url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles", role_id, related,
"?page_size=50")
data = self._call_galaxy(url)
results = data['results']
done = (data.get('next_link', None) is None)
# https://github.com/ansible/ansible/issues/64355
# api_server contains part of the API path but next_link includes the /api part so strip it out.
url_info = urlparse(self.api_server)
base_url = "%s://%s/" % (url_info.scheme, url_info.netloc)
while not done:
url = _urljoin(base_url, data['next_link'])
data = self._call_galaxy(url)
results += data['results']
done = (data.get('next_link', None) is None)
except Exception as e:
display.warning("Unable to retrieve role (id=%s) data (%s), but this is not fatal so we continue: %s"
% (role_id, related, to_text(e)))
return results
@g_connect(['v1'])
def get_list(self, what):
"""
Fetch the list of items specified.
"""
try:
url = _urljoin(self.api_server, self.available_api_versions['v1'], what, "?page_size")
data = self._call_galaxy(url)
if "results" in data:
results = data['results']
else:
results = data
done = True
if "next" in data:
done = (data.get('next_link', None) is None)
while not done:
url = _urljoin(self.api_server, data['next_link'])
data = self._call_galaxy(url)
results += data['results']
done = (data.get('next_link', None) is None)
return results
except Exception as error:
raise AnsibleError("Failed to download the %s list: %s" % (what, to_native(error)))
@g_connect(['v1'])
def search_roles(self, search, **kwargs):
search_url = _urljoin(self.api_server, self.available_api_versions['v1'], "search", "roles", "?")
if search:
search_url += '&autocomplete=' + to_text(urlquote(to_bytes(search)))
tags = kwargs.get('tags', None)
platforms = kwargs.get('platforms', None)
page_size = kwargs.get('page_size', None)
author = kwargs.get('author', None)
if tags and isinstance(tags, string_types):
tags = tags.split(',')
search_url += '&tags_autocomplete=' + '+'.join(tags)
if platforms and isinstance(platforms, string_types):
platforms = platforms.split(',')
search_url += '&platforms_autocomplete=' + '+'.join(platforms)
if page_size:
search_url += '&page_size=%s' % page_size
if author:
search_url += '&username_autocomplete=%s' % author
data = self._call_galaxy(search_url)
return data
@g_connect(['v1'])
def add_secret(self, source, github_user, github_repo, secret):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets") + '/'
args = urlencode({
"source": source,
"github_user": github_user,
"github_repo": github_repo,
"secret": secret
})
data = self._call_galaxy(url, args=args, method="POST")
return data
@g_connect(['v1'])
def list_secrets(self):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets")
data = self._call_galaxy(url, auth_required=True)
return data
@g_connect(['v1'])
def remove_secret(self, secret_id):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets", secret_id) + '/'
data = self._call_galaxy(url, auth_required=True, method='DELETE')
return data
@g_connect(['v1'])
def delete_role(self, github_user, github_repo):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "removerole",
"?github_user=%s&github_repo=%s" % (github_user, github_repo))
data = self._call_galaxy(url, auth_required=True, method='DELETE')
return data
# Collection APIs #
@g_connect(['v2', 'v3'])
def publish_collection(self, collection_path):
"""
Publishes a collection to a Galaxy server and returns the import task URI.
:param collection_path: The path to the collection tarball to publish.
:return: The import task URI that contains the import results.
"""
display.display("Publishing collection artifact '%s' to %s %s" % (collection_path, self.name, self.api_server))
b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
if not os.path.exists(b_collection_path):
raise AnsibleError("The collection path specified '%s' does not exist." % to_native(collection_path))
elif not tarfile.is_tarfile(b_collection_path):
raise AnsibleError("The collection path specified '%s' is not a tarball, use 'ansible-galaxy collection "
"build' to create a proper release artifact." % to_native(collection_path))
with open(b_collection_path, 'rb') as collection_tar:
sha256 = secure_hash_s(collection_tar.read(), hash_func=hashlib.sha256)
content_type, b_form_data = prepare_multipart(
{
'sha256': sha256,
'file': {
'filename': b_collection_path,
'mime_type': 'application/octet-stream',
},
}
)
headers = {
'Content-type': content_type,
'Content-length': len(b_form_data),
}
if 'v3' in self.available_api_versions:
n_url = _urljoin(self.api_server, self.available_api_versions['v3'], 'artifacts', 'collections') + '/'
else:
n_url = _urljoin(self.api_server, self.available_api_versions['v2'], 'collections') + '/'
resp = self._call_galaxy(n_url, args=b_form_data, headers=headers, method='POST', auth_required=True,
error_context_msg='Error when publishing collection to %s (%s)'
% (self.name, self.api_server))
return resp['task']
@g_connect(['v2', 'v3'])
def wait_import_task(self, task_id, timeout=0):
"""
Waits until the import process on the Galaxy server has completed or the timeout is reached.
:param task_id: The id of the import task to wait for. This can be parsed out of the return
value for GalaxyAPI.publish_collection.
:param timeout: The timeout in seconds, 0 is no timeout.
"""
state = 'waiting'
data = None
# Construct the appropriate URL per version
if 'v3' in self.available_api_versions:
full_url = _urljoin(self.api_server, self.available_api_versions['v3'],
'imports/collections', task_id, '/')
else:
full_url = _urljoin(self.api_server, self.available_api_versions['v2'],
'collection-imports', task_id, '/')
display.display("Waiting until Galaxy import task %s has completed" % full_url)
start = time.time()
wait = 2
while timeout == 0 or (time.time() - start) < timeout:
try:
data = self._call_galaxy(full_url, method='GET', auth_required=True,
error_context_msg='Error when getting import task results at %s' % full_url)
except GalaxyError as e:
if e.http_code != 404:
raise
# The import job may not have started, and as such, the task url may not yet exist
display.vvv('Galaxy import process has not started, wait %s seconds before trying again' % wait)
time.sleep(wait)
continue
state = data.get('state', 'waiting')
if data.get('finished_at', None):
break
display.vvv('Galaxy import process has a status of %s, wait %d seconds before trying again'
% (state, wait))
time.sleep(wait)
# poor man's exponential backoff algo so we don't flood the Galaxy API, cap at 30 seconds.
wait = min(30, wait * 1.5)
if state == 'waiting':
raise AnsibleError("Timeout while waiting for the Galaxy import process to finish, check progress at '%s'"
% to_native(full_url))
for message in data.get('messages', []):
level = message['level']
if level == 'error':
display.error("Galaxy import error message: %s" % message['message'])
elif level == 'warning':
display.warning("Galaxy import warning message: %s" % message['message'])
else:
display.vvv("Galaxy import message: %s - %s" % (level, message['message']))
if state == 'failed':
code = to_native(data['error'].get('code', 'UNKNOWN'))
description = to_native(
data['error'].get('description', "Unknown error, see %s for more details" % full_url))
raise AnsibleError("Galaxy import process failed: %s (Code: %s)" % (description, code))
@g_connect(['v2', 'v3'])
def get_collection_metadata(self, namespace, name):
"""
Gets the collection information from the Galaxy server about a specific Collection.
:param namespace: The collection namespace.
:param name: The collection name.
return: CollectionMetadata about the collection.
"""
if 'v3' in self.available_api_versions:
api_path = self.available_api_versions['v3']
field_map = [
('created_str', 'created_at'),
('modified_str', 'updated_at'),
]
else:
api_path = self.available_api_versions['v2']
field_map = [
('created_str', 'created'),
('modified_str', 'modified'),
]
info_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, '/')
error_context_msg = 'Error when getting the collection info for %s.%s from %s (%s)' \
% (namespace, name, self.name, self.api_server)
data = self._call_galaxy(info_url, error_context_msg=error_context_msg)
metadata = {}
for name, api_field in field_map:
metadata[name] = data.get(api_field, None)
return CollectionMetadata(namespace, name, **metadata)
@g_connect(['v2', 'v3'])
def get_collection_version_metadata(self, namespace, name, version):
"""
Gets the collection information from the Galaxy server about a specific Collection version.
:param namespace: The collection namespace.
:param name: The collection name.
:param version: Version of the collection to get the information for.
:return: CollectionVersionMetadata about the collection at the version requested.
"""
api_path = self.available_api_versions.get('v3', self.available_api_versions.get('v2'))
url_paths = [self.api_server, api_path, 'collections', namespace, name, 'versions', version, '/']
n_collection_url = _urljoin(*url_paths)
error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \
% (namespace, name, version, self.name, self.api_server)
data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg, cache=True)
self._set_cache()
return CollectionVersionMetadata(data['namespace']['name'], data['collection']['name'], data['version'],
data['download_url'], data['artifact']['sha256'],
data['metadata']['dependencies'])
@g_connect(['v2', 'v3'])
def get_collection_versions(self, namespace, name):
"""
Gets a list of available versions for a collection on a Galaxy server.
:param namespace: The collection namespace.
:param name: The collection name.
:return: A list of versions that are available.
"""
relative_link = False
if 'v3' in self.available_api_versions:
api_path = self.available_api_versions['v3']
pagination_path = ['links', 'next']
            relative_link = True  # AH pagination results are relative and not an absolute URI.
else:
api_path = self.available_api_versions['v2']
pagination_path = ['next']
page_size_name = 'limit' if 'v3' in self.available_api_versions else 'page_size'
versions_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, 'versions', '/?%s=%d' % (page_size_name, COLLECTION_PAGE_SIZE))
versions_url_info = urlparse(versions_url)
# We should only rely on the cache if the collection has not changed. This may slow things down but it ensures
# we are not waiting a day before finding any new collections that have been published.
if self._cache:
server_cache = self._cache.setdefault(get_cache_id(versions_url), {})
modified_cache = server_cache.setdefault('modified', {})
try:
modified_date = self.get_collection_metadata(namespace, name).modified_str
except GalaxyError as err:
if err.http_code != 404:
raise
# No collection found, return an empty list to keep things consistent with the various APIs
return []
cached_modified_date = modified_cache.get('%s.%s' % (namespace, name), None)
if cached_modified_date != modified_date:
modified_cache['%s.%s' % (namespace, name)] = modified_date
if versions_url_info.path in server_cache:
del server_cache[versions_url_info.path]
self._set_cache()
error_context_msg = 'Error when getting available collection versions for %s.%s from %s (%s)' \
% (namespace, name, self.name, self.api_server)
try:
data = self._call_galaxy(versions_url, error_context_msg=error_context_msg, cache=True)
except GalaxyError as err:
if err.http_code != 404:
raise
            # v3 doesn't raise a 404 so we need to mimic the empty response from APIs that do.
return []
if 'data' in data:
# v3 automation-hub is the only known API that uses `data`
# since v3 pulp_ansible does not, we cannot rely on version
# to indicate which key to use
results_key = 'data'
else:
results_key = 'results'
versions = []
while True:
versions += [v['version'] for v in data[results_key]]
next_link = data
for path in pagination_path:
next_link = next_link.get(path, {})
if not next_link:
break
elif relative_link:
# TODO: This assumes the pagination result is relative to the root server. Will need to be verified
# with someone who knows the AH API.
# Remove the query string from the versions_url to use the next_link's query
versions_url = urljoin(versions_url, urlparse(versions_url).path)
next_link = versions_url.replace(versions_url_info.path, next_link)
data = self._call_galaxy(to_native(next_link, errors='surrogate_or_strict'),
error_context_msg=error_context_msg, cache=True)
self._set_cache()
return versions
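    # Shape of the result, with illustrative values only:
    #   api.get_collection_versions('community', 'general')  ->  ['1.0.0', '1.1.0', '2.0.0']
    # An empty list is returned when the collection does not exist on the server.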
| pmarques/ansible | lib/ansible/galaxy/api.py | Python | gpl-3.0 | 37,005 | ["Galaxy"] | bc3227e0a13dacaee7ddf56953a746c7d5dba812b926f433823758e315bba66d |
#!/usr/bin/env python
import vtk
def main():
pd_fn = get_program_parameters()
colors = vtk.vtkNamedColors()
polyData = ReadPolyData(pd_fn)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polyData)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetDiffuseColor(colors.GetColor3d("Crimson"))
actor.GetProperty().SetSpecular(.6)
actor.GetProperty().SetSpecularPower(30)
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderer.AddActor(actor)
renderer.SetBackground(colors.GetColor3d("Silver"))
# Interact to change camera.
renderWindow.Render()
renderWindowInteractor.Start()
# After the interaction is done, save the scene.
SaveSceneToFieldData(polyData, actor, renderer.GetActiveCamera())
renderWindow.Render()
renderWindowInteractor.Start()
    # After the interaction is done, restore the scene.
RestoreSceneFromFieldData(polyData, actor, renderer.GetActiveCamera())
renderWindow.Render()
renderWindowInteractor.Start()
def get_program_parameters():
import argparse
description = 'Saving a scene to field data.'
epilogue = '''
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('data_file', help='A polydata file e.g. Armadillo.ply.')
args = parser.parse_args()
return args.data_file
def ReadPolyData(file_name):
import os
path, extension = os.path.splitext(file_name)
extension = extension.lower()
if extension == ".ply":
reader = vtk.vtkPLYReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".vtp":
        reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".obj":
reader = vtk.vtkOBJReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".stl":
reader = vtk.vtkSTLReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".vtk":
        reader = vtk.vtkPolyDataReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".g":
reader = vtk.vtkBYUReader()
reader.SetGeometryFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
else:
        # Return None if the extension is unknown.
poly_data = None
return poly_data
def SaveSceneToFieldData(data, actor, camera):
# Actor
    # Position, orientation, origin, scale, user matrix, user transform
# Camera
# FocalPoint, Position, ViewUp, ViewAngle, ClippingRange
fp_format = '{0:.6f}'
res = dict()
res['Camera:FocalPoint'] = ', '.join(fp_format.format(n) for n in camera.GetFocalPoint())
res['Camera:Position'] = ', '.join(fp_format.format(n) for n in camera.GetPosition())
res['Camera:ViewUp'] = ', '.join(fp_format.format(n) for n in camera.GetViewUp())
res['Camera:ViewAngle'] = fp_format.format(camera.GetViewAngle())
res['Camera:ClippingRange'] = ', '.join(fp_format.format(n) for n in camera.GetClippingRange())
buffer = ''
for k, v in res.items():
buffer += k + ' ' + v + '\n'
cameraArray = vtk.vtkStringArray()
cameraArray.SetNumberOfValues(1)
cameraArray.SetValue(0, buffer)
cameraArray.SetName("Camera")
data.GetFieldData().AddArray(cameraArray)
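    # The single string stored in the "Camera" field data array holds one
    # "key value" pair per line, e.g. (values are illustrative only):
    #   Camera:FocalPoint 0.0, 0.0, 0.0
    #   Camera:Position 0.0, 0.0, 1.0
    #   Camera:ViewUp 0.0, 1.0, 0.0
    #   Camera:ViewAngle 30.0
    #   Camera:ClippingRange 0.01, 1000.0
    # RestoreSceneFromFieldData() below parses exactly this layout.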
def RestoreSceneFromFieldData(data, actor, camera):
import re
# Some regular expressions.
reCP = re.compile(r'^Camera:Position')
reCFP = re.compile(r'^Camera:FocalPoint')
reCVU = re.compile(r'^Camera:ViewUp')
reCVA = re.compile(r'^Camera:ViewAngle')
reCCR = re.compile(r'^Camera:ClippingRange')
keys = [reCP, reCFP, reCVU, reCVA, reCCR]
# float_number = re.compile(r'[^0-9.\-]*([0-9e.\-]*[^,])[^0-9.\-]*([0-9e.\-]*[^,])[^0-9.\-]*([0-9e.\-]*[^,])')
# float_scalar = re.compile(r'[^0-9.\-]*([0-9.\-e]*[^,])')
buffer = data.GetFieldData().GetAbstractArray("Camera").GetValue(0).split('\n')
res = dict()
for line in buffer:
if not line.strip():
continue
line = line.strip().replace(',', '').split()
for i in keys:
m = re.match(i, line[0])
            if m:
                k = m.group(0)
                # Convert the rest of the line to floats.
                v = [float(x) for x in line[1:]]
                if len(v) == 1:
                    res[k] = v[0]
                else:
                    res[k] = v
for k, v in res.items():
if re.match(reCP, k):
camera.SetPosition(v)
elif re.match(reCFP, k):
camera.SetFocalPoint(v)
elif re.match(reCVU, k):
camera.SetViewUp(v)
elif re.match(reCVA, k):
camera.SetViewAngle(v)
elif re.match(reCCR, k):
camera.SetClippingRange(v)
if __name__ == '__main__':
main()
| lorensen/VTKExamples | src/Python/Utilities/SaveSceneToFieldData.py | Python | apache-2.0 | 5,461 | ["VTK"] | 9e95c537982f596f39fd19d8d89107566ab5d7208c5aab02b8606d493cb45ba5 |
|
'''
Parses a Python source file into an AST in JSON format. The output can be
viewed online in a viewer like http://jsonviewer.stack.hu/
Usage:
python parse_python_to_json.py --pyfile=test.py # pass in code within a file
python parse_python_to_json.py 'print "Hello world"' # pass in code as a string
Try running on its own source code; whoa very META!
python parse_python_to_json.py --pyfile=parse_python_to_json.py
Output: prints JSON to stdout
Created on 2017-01-20 by Philip Guo
'''
import ast
import json
import optparse
#import pprint
import pythonparser # based on https://github.com/m-labs/pythonparser
import os
import sys
#pp = pprint.PrettyPrinter()
class Visitor:
def visit(self, obj, level=0):
"""Visit a node or a list of nodes. Other values are ignored"""
if isinstance(obj, list):
return [self.visit(elt, level) for elt in obj]
elif isinstance(obj, pythonparser.ast.AST):
typ = obj.__class__.__name__
#print >> sys.stderr, obj
loc = None
if hasattr(obj, 'loc'):
loc = {
'start': {'line': obj.loc.begin().line(), 'column': obj.loc.begin().column()},
'end': {'line': obj.loc.end().line(), 'column': obj.loc.end().column()}
}
# TODO: check out obj._locs for more details later if needed
d = {}
d['type'] = typ
d['loc'] = loc
d['_fields'] = obj._fields
for field_name in obj._fields:
val = self.visit(getattr(obj, field_name), level+1)
d[field_name] = val
return d
else:
# let's hope this is a primitive type that's JSON-encodable!
return obj
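    # Rough sketch of the JSON produced for a one-line program such as "x = 1"
    # (the exact field names depend on the pythonparser AST classes, so this is
    # only indicative):
    #   {"type": "Module", "loc": {...}, "_fields": [...],
    #    "body": [{"type": "Assign", "targets": [...], "value": {...}}]}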
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("--pyfile", action="store", dest="pyfile",
help="Take input from a Python source file")
parser.add_option("--pp", action="store_true",
help="Pretty-print JSON for human viewing")
(options, args) = parser.parse_args()
if options.pyfile:
code = open(options.pyfile).read()
else:
code = args[0]
# make sure it ends with a newline to get parse() to work:
if code[-1] != '\n':
code += '\n'
indent_level = None
if options.pp:
indent_level = 2
try:
p = pythonparser.parse(code)
v = Visitor()
res = v.visit(p)
print json.dumps(res, indent=indent_level)
except pythonparser.diagnostic.Error as e:
error_obj = {'type': 'parse_error'}
diag = e.diagnostic
loc = diag.location
error_obj['loc'] = {
'start': {'line': loc.begin().line(), 'column': loc.begin().column()},
'end': {'line': loc.end().line(), 'column': loc.end().column()}
}
error_obj['message'] = diag.message()
print json.dumps(error_obj, indent=indent_level)
sys.exit(1)
| pgbovine/python-parse-to-json | parse_python_to_json.py | Python | mit | 3,059 | ["VisIt"] | 7fa045e95b99cb6222fe1eaf6a0be45f8712940af16f6e24755f1de70821eb4c |
#__author__ = 'Jared Streich and Kevin Murray'
#__version__ = '2983474627822723646378273647280.9001.2 and a half'
#__date__ = 'August, 6 2013'
#Citation at bottom, mostly from Phidgets etc.
#Basic imports
from ctypes import *
import sys
from time import sleep
#Phidget specific imports
from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException
from Phidgets.Events.Events import AttachEventArgs, DetachEventArgs, ErrorEventArgs, CurrentChangeEventArgs, PositionChangeEventArgs, VelocityChangeEventArgs
from Phidgets.Devices.AdvancedServo import AdvancedServo
from Phidgets.Devices.Servo import ServoTypes
import os
#Create an advancedServo object
try:
advancedServo = AdvancedServo()
except RuntimeError as e:
print("Runtime Exception: %s" % e.details)
print("Exiting....")
exit(1)
#stack to keep current values in
currentList = [0,0,0,0,0,0,0,0]
velocityList = [0,0,0,0,0,0,0,0]
#Information Display Function
def DisplayDeviceInfo():
print("|------------|----------------------------------|--------------|------------|")
print("|- Attached -|- Type -|- Serial No. -|- Version -|")
print("|------------|----------------------------------|--------------|------------|")
print("|- %8s -|- %30s -|- %10d -|- %8d -|" % (advancedServo.isAttached(), advancedServo.getDeviceName(), advancedServo.getSerialNum(), advancedServo.getDeviceVersion()))
print("|------------|----------------------------------|--------------|------------|")
print("Number of motors: %i" % (advancedServo.getMotorCount()))
#Event Handler Callback Functions
def Attached(e):
attached = e.device
print("Servo %i Attached!" % (attached.getSerialNum()))
def Detached(e):
detached = e.device
print("Servo %i Detached!" % (detached.getSerialNum()))
def Error(e):
try:
source = e.device
print("Phidget Error %i: %s" % (source.getSerialNum(), e.eCode, e.description))
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
def CurrentChanged(e):
global currentList
currentList[e.index] = e.current
def PositionChanged(e):
source = e.device
print("AdvancedServo %i: Motor %i Position: %f - Velocity: %f - Current: %f" % (source.getSerialNum(), e.index, e.position, velocityList[e.index], currentList[e.index]))
if advancedServo.getStopped(e.index) == True:
print("Motor %i Stopped" % (e.index))
def VelocityChanged(e):
global velocityList
velocityList[e.index] = e.velocity
#Main Program Code
#set up our event handlers
try:
advancedServo.setOnAttachHandler(Attached)
advancedServo.setOnDetachHandler(Detached)
advancedServo.setOnErrorhandler(Error)
advancedServo.setOnCurrentChangeHandler(CurrentChanged)
advancedServo.setOnPositionChangeHandler(PositionChanged)
advancedServo.setOnVelocityChangeHandler(VelocityChanged)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Opening phidget object....")
try:
advancedServo.openPhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Waiting for attach....")
try:
advancedServo.waitForAttach(10000)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
try:
advancedServo.closePhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Exiting....")
exit(1)
else:
DisplayDeviceInfo()
try:
print("Setting the servo type for motor 0 to HITEC_HS322HD")
advancedServo.setServoType(0, ServoTypes.PHIDGET_SERVO_HITEC_HS322HD)
#Setting custom servo parameters example - 600us-2000us == 120 degrees, velocity max 1500
#advancedServo.setServoParameters(0, 600, 2000, 120, 1500)
print("Speed Ramping state: %s" % advancedServo.getSpeedRampingOn(0))
print("Stopped state: %s" % advancedServo.getStopped(0))
print("Engaged state: %s" % advancedServo.getEngaged(0))
print("Working with motor 0 only...")
print("Engage the motor...")
advancedServo.setEngaged(0, True)
sleep(2)
print("Engaged state: %s" % advancedServo.getEngaged(0))
print("Move to position PositionMax...")
advancedServo.setPosition(0, advancedServo.getPositionMax(0))
sleep(10)
fh = open("/Users/u5212257/trigger", "w")
fh.write("a")
fh.close()
sleep(0.5)
print("Move to position Check 1")
advancedServo.setServoParameters(0, 800, 1600, 330, 25)
sleep(10)
print("Move to position PositionMin...")
advancedServo.setPosition(0, advancedServo.getPositionMin(0))
sleep(10)
print("Disengage the motor...")
advancedServo.setEngaged(0, False)
sleep(2)
print("Engaged state: %s" % advancedServo.getEngaged(0))
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
try:
advancedServo.closePhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Exiting....")
exit(1)
print("Closing...")
try:
advancedServo.closePhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
os.unlink("/Users/u5212257/trigger")
print("Done.")
exit(0)
#Modified from:
#"""Copyright 2010 Phidgets Inc.
#This work is licensed under the Creative Commons Attribution 2.5 Canada License.
#To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
#"""
#__author__ = 'Adam Stelmack'
#__version__ = '2.1.8'
#__date__ = 'May 17 2010'
fh = open("/Users/u5212257/Desktop, "w")
fh.write("a")
fh.close()
sleep(0.5)
| borevitzlab/plantspin | sps.py | Python | gpl-3.0 | 6,155 | ["VisIt"] | c5f633d1fc0a112c4d96950d61b4b18ea33eedd48f4b331c9c183e71c593e3b6 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
from itertools import product
import numpy as np
from mdtraj.utils import ensure_type
from mdtraj.geometry import compute_distances, compute_angles
from mdtraj.geometry import _geometry
__all__ = ['wernet_nilsson', 'baker_hubbard', 'kabsch_sander']
##############################################################################
# Functions
##############################################################################
def wernet_nilsson(traj, exclude_water=True, periodic=True):
"""Identify hydrogen bonds based on cutoffs for the Donor-H...Acceptor
distance and angle according to the criterion outlined in [1].
As opposed to Baker-Hubbard, this is a "cone" criterion where the
distance cutoff depends on the angle.
The criterion employed is :math:`r_\\text{DA} < 3.3 A - 0.00044*\\delta_{HDA}*\\delta_{HDA}`,
where :math:`r_\\text{DA}` is the distance between donor and acceptor heavy atoms,
and :math:`\\delta_{HDA}` is the angle made by the hydrogen atom, donor, and acceptor atoms,
measured in degrees (zero in the case of a perfectly straight bond: D-H ... A).
    When the donor is 'O' and the acceptor is 'O', this corresponds to
the definition established in [1]_. The donors considered by this method
are NH and OH, and the acceptors considered are O and N. In the paper the only
donor considered is OH.
Parameters
----------
traj : md.Trajectory
An mdtraj trajectory. It must contain topology information.
exclude_water : bool, default=True
Exclude solvent molecules from consideration.
periodic : bool, default=True
Set to True to calculate displacements and angles across periodic box boundaries.
Returns
-------
hbonds : list, len=n_frames
A list containing the atom indices involved in each of the identified
hydrogen bonds at each frame. Each element in the list is an array
where each row contains three integer indices, `(d_i, h_i, a_i)`,
such that `d_i` is the index of the donor atom, `h_i` the index
of the hydrogen atom, and `a_i` the index of the acceptor atom involved
in a hydrogen bond which occurs in that frame.
Notes
-----
Each hydrogen bond is distinguished for the purpose of this function by the
indices of the donor, hydrogen, and acceptor atoms. This means that, for
example, when an ARG sidechain makes a hydrogen bond with its NH2 group,
you might see what appear like double counting of the h-bonds, since the
hydrogen bond formed via the H_1 and H_2 are counted separately, despite
their "chemical indistinguishably"
Examples
--------
>>> md.wernet_nilsson(t)
array([[ 0, 10, 8],
[ 0, 11, 7],
[ 69, 73, 54],
[ 76, 82, 65],
[119, 131, 89],
[140, 148, 265],
[166, 177, 122],
[181, 188, 231]])
>>> label = lambda hbond : '%s -- %s' % (t.topology.atom(hbond[0]), t.topology.atom(hbond[2]))
>>> for hbond in hbonds:
>>> print label(hbond)
GLU1-N -- GLU1-OE2
GLU1-N -- GLU1-OE1
GLY6-N -- SER4-O
CYS7-N -- GLY5-O
TYR11-N -- VAL8-O
MET12-N -- LYS20-O
See Also
--------
baker_hubbard, kabsch_sander
References
----------
.. [1] Wernet, Ph., L.G.M. Pettersson, and A. Nilsson, et al.
"The Structure of the First Coordination Shell in Liquid Water." (2004)
Science 304, 995-999.
"""
distance_cutoff = 0.33
angle_const = 0.000044
angle_cutoff = 45
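    # Note on units: the paper expresses the cutoff in Angstroms
    # (3.3 A - 0.00044 * delta^2); distance_cutoff and angle_const above are the
    # same constants converted to nanometers, the distance unit used by mdtraj.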
if traj.topology is None:
raise ValueError('wernet_nilsson requires that traj contain topology '
'information')
def get_donors(e0, e1):
elems = set((e0, e1))
bonditer = traj.topology.bonds
atoms = [(b[0], b[1]) for b in bonditer if set((b[0].element.symbol, b[1].element.symbol)) == elems]
indices = []
for a0, a1 in atoms:
if exclude_water and (a0.residue.name == 'HOH' or a1.residue.name == 'HOH'):
continue
pair = (a0.index, a1.index)
# make sure to get the pair in the right order, so that the index
# for e0 comes before e1
if a0.element.symbol == e1:
pair = pair[::-1]
indices.append(pair)
return indices
nh_donors = get_donors('N', 'H')
oh_donors = get_donors('O', 'H')
xh_donors = np.array(nh_donors + oh_donors)
if len(xh_donors) == 0:
# if there are no hydrogens or protein in the trajectory, we get
# no possible pairs and return nothing
return [np.zeros((0, 3), dtype=int) for _ in range(traj.n_frames)]
if not exclude_water:
acceptors = [a.index for a in traj.topology.atoms if a.element.symbol == 'O' or a.element.symbol == 'N']
else:
acceptors = [a.index for a in traj.topology.atoms if (a.element.symbol == 'O' and a.residue.name != 'HOH') or a.element.symbol == 'N']
# This is used to compute the angles
angle_triplets = np.array([(e[0][1], e[0][0], e[1]) for e in product(xh_donors, acceptors) if e[0][0] != e[1]])
distance_pairs = angle_triplets[:, [0, 2]] # possible O..acceptor pairs
angles = compute_angles(traj, angle_triplets, periodic=periodic) * 180.0 / np.pi # degrees
distances = compute_distances(traj, distance_pairs, periodic=periodic, opt=True)
cutoffs = distance_cutoff - angle_const * angles ** 2
mask = np.logical_and(distances < cutoffs, angles < angle_cutoff)
# The triplets that are returned are O-H ... O, different
# from what's used to compute the angles.
angle_triplets2 = angle_triplets[:, [1, 0, 2]]
return [angle_triplets2[i] for i in mask]
def baker_hubbard(traj, freq=0.1, exclude_water=True, periodic=True):
"""Identify hydrogen bonds based on cutoffs for the Donor-H...Acceptor
distance and angle.
The criterion employed is :math:`\\theta > 120` and
:math:`r_\\text{H...Acceptor} < 2.5 A`.
    When the donor is 'N' and the acceptor is 'O', this corresponds to
the definition established in [1]_. The donors considered by this method
are NH and OH, and the acceptors considered are O and N.
Parameters
----------
traj : md.Trajectory
An mdtraj trajectory. It must contain topology information.
freq : float, default=0.1
        Return only hydrogen bonds that occur in greater than this fraction of the
        frames in the trajectory.
exclude_water : bool, default=True
Exclude solvent molecules from consideration
periodic : bool, default=True
Set to True to calculate displacements and angles across periodic box boundaries.
Returns
-------
hbonds : np.array, shape=[n_hbonds, 3], dtype=int
An array containing the indices atoms involved in each of the identified
hydrogen bonds. Each row contains three integer indices, `(d_i, h_i,
a_i)`, such that `d_i` is the index of the donor atom, `h_i` the index
of the hydrogen atom, and `a_i` the index of the acceptor atom involved
in a hydrogen bond which occurs (according to the definition above) in
proportion greater than `freq` of the trajectory.
Notes
-----
Each hydrogen bond is distinguished for the purpose of this function by the
indices of the donor, hydrogen, and acceptor atoms. This means that, for
example, when an ARG sidechain makes a hydrogen bond with its NH2 group,
you might see what appear like double counting of the h-bonds, since the
hydrogen bond formed via the H_1 and H_2 are counted separately, despite
their "chemical indistinguishably"
Examples
--------
>>> md.baker_hubbard(t)
array([[ 0, 10, 8],
[ 0, 11, 7],
[ 69, 73, 54],
[ 76, 82, 65],
[119, 131, 89],
[140, 148, 265],
[166, 177, 122],
[181, 188, 231]])
>>> label = lambda hbond : '%s -- %s' % (t.topology.atom(hbond[0]), t.topology.atom(hbond[2]))
>>> for hbond in hbonds:
>>> print label(hbond)
GLU1-N -- GLU1-OE2
GLU1-N -- GLU1-OE1
GLY6-N -- SER4-O
CYS7-N -- GLY5-O
TYR11-N -- VAL8-O
MET12-N -- LYS20-O
See Also
--------
kabsch_sander
References
----------
.. [1] Baker, E. N., and R. E. Hubbard. "Hydrogen bonding in globular
proteins." Progress in Biophysics and Molecular Biology
44.2 (1984): 97-179.
"""
# Cutoff criteria: these could be exposed as function arguments, or
    # modified if there are better definitions than this one based only
# on distances and angles
distance_cutoff = 0.25 # nanometers
angle_cutoff = 2.0 * np.pi / 3.0 # radians
if traj.topology is None:
raise ValueError('baker_hubbard requires that traj contain topology '
'information')
def get_donors(e0, e1):
elems = set((e0, e1))
bonditer = traj.topology.bonds
atoms = [(b[0], b[1]) for b in bonditer if set((b[0].element.symbol, b[1].element.symbol)) == elems]
indices = []
for a0, a1 in atoms:
if exclude_water and (a0.residue.name == 'HOH' or a1.residue.name == 'HOH'):
continue
pair = (a0.index, a1.index)
# make sure to get the pair in the right order, so that the index
# for e0 comes before e1
if a0.element.symbol == e1:
pair = pair[::-1]
indices.append(pair)
return indices
nh_donors = get_donors('N', 'H')
oh_donors = get_donors('O', 'H')
xh_donors = np.concatenate((nh_donors, oh_donors))
if len(xh_donors) == 0:
# if there are no hydrogens or protein in the trajectory, we get
# no possible pairs and return nothing
return np.zeros((0, 3), dtype=int)
if not exclude_water:
acceptors = [a.index for a in traj.topology.atoms if a.element.symbol == 'O' or a.element.symbol == 'N']
else:
acceptors = [a.index for a in traj.topology.atoms if (a.element.symbol == 'O' and a.residue.name != 'HOH') or a.element.symbol == 'N']
angle_triplets = np.array([(e[0][0], e[0][1], e[1]) for e in product(xh_donors, acceptors)])
distance_pairs = angle_triplets[:, [1, 2]] # possible H..acceptor pairs
angles = compute_angles(traj, angle_triplets, periodic=periodic)
distances = compute_distances(traj, distance_pairs, periodic=periodic)
mask = np.logical_and(distances < distance_cutoff, angles > angle_cutoff)
    # frequency of occurrence of each hydrogen bond in the trajectory
    # occurrence = np.sum(mask, axis=0).astype(np.double) / traj.n_frames
    # The per-trajectory filter below is commented out so that the return value is
    # a list of hydrogen bonds at each frame instead.
    return [angle_triplets[i] for i in mask]
    # return angle_triplets[occurrence > freq]
def kabsch_sander(traj):
"""Compute the Kabsch-Sander hydrogen bond energy between each pair
of residues in every frame.
Hydrogen bonds are defined using an electrostatic definition, assuming
partial charges of -0.42 e and +0.20 e to the carbonyl oxygen and amide
hydrogen respectively, their opposites assigned to the carbonyl carbon
and amide nitrogen. A hydrogen bond is identified if E in the following
equation is less than -0.5 kcal/mol:
.. math::
E = 0.42 \cdot 0.2 \cdot 33.2 kcal/(mol \cdot nm) * \\
(1/r_{ON} + 1/r_{CH} - 1/r_{OH} - 1/r_{CN})
Parameters
----------
traj : md.Trajectory
An mdtraj trajectory. It must contain topology information.
Returns
-------
matrices : list of scipy.sparse.csr_matrix
The return value is a list of length equal to the number of frames
in the trajectory. Each element is an n_residues x n_residues sparse
matrix, where the existence of an entry at row `i`, column `j` with value
`x` means that there exists a hydrogen bond between a backbone CO
group at residue `i` with a backbone NH group at residue `j` whose
Kabsch-Sander energy is less than -0.5 kcal/mol (the threshold for
existence of the "bond"). The exact value of the energy is given by the
value `x`.
See Also
--------
wernet_nilsson, baker_hubbard
References
----------
    .. [1] Kabsch W, Sander C (1983). "Dictionary of protein secondary structure: pattern recognition of hydrogen-bonded and geometrical features". Biopolymers 22 (12): 2577-637. doi:10.1002/bip.360221211
"""
if traj.topology is None:
raise ValueError('kabsch_sander requires topology')
import scipy.sparse
xyz, nco_indices, ca_indices, proline_indices, _ = _prep_kabsch_sander_arrays(traj)
n_residues = len(ca_indices)
hbonds = np.empty((xyz.shape[0], n_residues, 2), np.int32)
henergies = np.empty((xyz.shape[0], n_residues, 2), np.float32)
hbonds.fill(-1)
henergies.fill(np.nan)
_geometry._kabsch_sander(xyz, nco_indices, ca_indices, proline_indices,
hbonds, henergies)
# The C code returns its info in a pretty inconvenient format.
# Let's change it to a list of scipy CSR matrices.
matrices = []
hbonds_mask = (hbonds != -1)
for i in range(xyz.shape[0]):
        # apologies for this cryptic code -- we need to deal with the low
        # level aspects of the csr matrix format.
hbonds_frame = hbonds[i]
mask = hbonds_mask[i]
henergies_frame = henergies[i]
indptr = np.zeros(n_residues + 1, np.int32)
indptr[1:] = np.cumsum(mask.sum(axis=1))
indices = hbonds_frame[mask].flatten()
data = henergies_frame[mask].flatten()
matrices.append(scipy.sparse.csr_matrix(
(data, indices, indptr), shape=(n_residues, n_residues)).T)
return matrices
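# Hypothetical usage sketch: the per-frame sparse matrices make it easy to count
# Kabsch-Sander hydrogen bonds in each frame, e.g.
#   matrices = kabsch_sander(traj)
#   n_hbonds_per_frame = [m.nnz for m in matrices]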
def _get_or_minus1(f):
try:
return f()
except IndexError:
return -1
def _prep_kabsch_sander_arrays(traj):
xyz = ensure_type(traj.xyz, dtype=np.float32, ndim=3, name='traj.xyz',
shape=(None, None, 3), warn_on_cast=False)
ca_indices, nco_indices, is_proline, is_protein = [], [], [], []
for residue in traj.topology.residues:
ca = _get_or_minus1(lambda: [a.index for a in residue.atoms if a.name == 'CA'][0])
n = _get_or_minus1(lambda: [a.index for a in residue.atoms if a.name == 'N'][0])
c = _get_or_minus1(lambda: [a.index for a in residue.atoms if a.name == 'C'][0])
o = _get_or_minus1(lambda: [a.index for a in residue.atoms if a.name == 'O'][0])
ca_indices.append(ca)
is_proline.append(residue.name == 'PRO')
nco_indices.append([n, c, o])
is_protein.append(ca != -1 and n != -1 and c != -1 and o != -1)
nco_indices = np.array(nco_indices, np.int32)
ca_indices = np.array(ca_indices, np.int32)
proline_indices = np.array(is_proline, np.int32)
is_protein = np.array(is_protein, np.int32)
return xyz, nco_indices, ca_indices, proline_indices, is_protein
| casawa/mdtraj | mdtraj/geometry/hbond.py | Python | lgpl-2.1 | 16,393 | ["MDTraj"] | 534837ca9096719ec61d7b31643fb94c2fd15b42a68cbb25034d42cc715a582e |
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, pprint, time
from cookielib import Cookie
from threading import current_thread
from PyQt4.Qt import (QObject, QNetworkAccessManager, QNetworkDiskCache,
QNetworkProxy, QNetworkProxyFactory, QEventLoop, QUrl, pyqtSignal,
QDialog, QVBoxLayout, QSize, QNetworkCookieJar, Qt, pyqtSlot)
from PyQt4.QtWebKit import QWebPage, QWebSettings, QWebView, QWebElement
from calibre import USER_AGENT, prints, get_proxies, get_proxy_info
from calibre.constants import ispy3, cache_dir
from calibre.utils.logging import ThreadSafeLog
from calibre.gui2 import must_use_qt
from calibre.web.jsbrowser.forms import FormsMixin
class Timeout(Exception): pass
class LoadError(Exception): pass
class WebPage(QWebPage): # {{{
def __init__(self, log,
confirm_callback=None,
prompt_callback=None,
user_agent=USER_AGENT,
enable_developer_tools=False,
parent=None):
QWebPage.__init__(self, parent)
self.log = log
self.user_agent = user_agent if user_agent else USER_AGENT
self.confirm_callback = confirm_callback
self.prompt_callback = prompt_callback
self.setForwardUnsupportedContent(True)
self.unsupportedContent.connect(self.on_unsupported_content)
settings = self.settings()
if enable_developer_tools:
settings.setAttribute(QWebSettings.DeveloperExtrasEnabled, True)
QWebSettings.enablePersistentStorage(os.path.join(cache_dir(),
'webkit-persistence'))
QWebSettings.setMaximumPagesInCache(0)
def userAgentForUrl(self, url):
return self.user_agent
def javaScriptAlert(self, frame, msg):
if self.view() is not None:
return QWebPage.javaScriptAlert(self, frame, msg)
prints('JSBrowser alert():', unicode(msg))
def javaScriptConfirm(self, frame, msg):
if self.view() is not None:
return QWebPage.javaScriptConfirm(self, frame, msg)
if self.confirm_callback is not None:
return self.confirm_callback(unicode(msg))
return True
def javaScriptConsoleMessage(self, msg, lineno, source_id):
prints('JSBrowser msg():%s:%s:'%(unicode(source_id), lineno), unicode(msg))
def javaScriptPrompt(self, frame, msg, default_value, *args):
if self.view() is not None:
return QWebPage.javaScriptPrompt(self, frame, msg, default_value,
*args)
if self.prompt_callback is None:
return (False, default_value) if ispy3 else False
value = self.prompt_callback(unicode(msg), unicode(default_value))
ok = value is not None
if ispy3:
return ok, value
if ok:
result = args[0]
result.clear()
result.append(value)
return ok
@pyqtSlot(result=bool)
def shouldInterruptJavaScript(self):
if self.view() is not None:
return QWebPage.shouldInterruptJavaScript(self)
return True
def on_unsupported_content(self, reply):
self.log.warn('Unsupported content, ignoring: %s'%reply.url())
@property
def ready_state(self):
return unicode(self.mainFrame().evaluateJavaScript('document.readyState').toString())
# }}}
class ProxyFactory(QNetworkProxyFactory): # {{{
def __init__(self, log):
QNetworkProxyFactory.__init__(self)
proxies = get_proxies()
self.proxies = {}
for scheme, proxy_string in proxies.iteritems():
scheme = scheme.lower()
info = get_proxy_info(scheme, proxy_string)
if info is None: continue
hn, port = info['hostname'], info['port']
if not hn or not port: continue
log.debug('JSBrowser using proxy:', pprint.pformat(info))
pt = {'socks5':QNetworkProxy.Socks5Proxy}.get(scheme,
QNetworkProxy.HttpProxy)
proxy = QNetworkProxy(pt, hn, port)
un, pw = info['username'], info['password']
if un:
proxy.setUser(un)
if pw:
proxy.setPassword(pw)
self.proxies[scheme] = proxy
self.default_proxy = QNetworkProxy(QNetworkProxy.DefaultProxy)
def queryProxy(self, query):
scheme = unicode(query.protocolTag()).lower()
return [self.proxies.get(scheme, self.default_proxy)]
# }}}
class NetworkAccessManager(QNetworkAccessManager): # {{{
OPERATION_NAMES = { getattr(QNetworkAccessManager, '%sOperation'%x) :
x.upper() for x in ('Head', 'Get', 'Put', 'Post', 'Delete',
'Custom')
}
report_reply_signal = pyqtSignal(object)
def __init__(self, log, use_disk_cache=True, parent=None):
QNetworkAccessManager.__init__(self, parent)
self.reply_count = 0
self.log = log
if use_disk_cache:
self.cache = QNetworkDiskCache(self)
self.cache.setCacheDirectory(os.path.join(cache_dir(), 'jsbrowser'))
self.setCache(self.cache)
self.sslErrors.connect(self.on_ssl_errors)
self.pf = ProxyFactory(log)
self.setProxyFactory(self.pf)
self.finished.connect(self.on_finished)
self.cookie_jar = QNetworkCookieJar()
self.setCookieJar(self.cookie_jar)
self.main_thread = current_thread()
self.report_reply_signal.connect(self.report_reply, type=Qt.QueuedConnection)
def on_ssl_errors(self, reply, errors):
reply.ignoreSslErrors()
def createRequest(self, operation, request, data):
url = unicode(request.url().toString())
operation_name = self.OPERATION_NAMES[operation]
debug = []
debug.append(('Request: %s %s' % (operation_name, url)))
for h in request.rawHeaderList():
try:
d = ' %s: %s' % (h, request.rawHeader(h))
except:
d = ' %r: %r' % (h, request.rawHeader(h))
debug.append(d)
if data is not None:
raw = data.peek(1024)
try:
raw = raw.decode('utf-8')
except:
raw = repr(raw)
debug.append(' Request data: %s'%raw)
self.log.debug('\n'.join(debug))
return QNetworkAccessManager.createRequest(self, operation, request,
data)
def on_finished(self, reply):
if current_thread() is not self.main_thread:
# This method was called in a thread created by Qt. The python
            # interpreter may not be in a safe state, so don't do anything
            # more. This signal is queued which means the reply won't be
# reported unless someone spins the event loop. So far, I have only
# seen this happen when doing Ctrl+C in the console.
self.report_reply_signal.emit(reply)
else:
self.report_reply(reply)
def report_reply(self, reply):
reply_url = unicode(reply.url().toString())
self.reply_count += 1
if reply.error():
self.log.warn("Reply error: %s - %d (%s)" %
(reply_url, reply.error(), reply.errorString()))
else:
debug = []
debug.append("Reply successful: %s" % reply_url)
for h in reply.rawHeaderList():
try:
d = ' %s: %s' % (h, reply.rawHeader(h))
except:
d = ' %r: %r' % (h, reply.rawHeader(h))
debug.append(d)
self.log.debug('\n'.join(debug))
def py_cookies(self):
for c in self.cookie_jar.allCookies():
name, value = map(bytes, (c.name(), c.value()))
domain = bytes(c.domain())
initial_dot = domain_specified = domain.startswith(b'.')
secure = bool(c.isSecure())
path = unicode(c.path()).strip().encode('utf-8')
expires = c.expirationDate()
is_session_cookie = False
if expires.isValid():
expires = expires.toTime_t()
else:
expires = None
is_session_cookie = True
path_specified = True
if not path:
path = b'/'
path_specified = False
c = Cookie(0, # version
name, value,
None, # port
False, # port specified
domain, domain_specified, initial_dot, path,
path_specified,
secure, expires, is_session_cookie,
None, # Comment
None, # Comment URL
{} # rest
)
yield c
# }}}
class LoadWatcher(QObject): # {{{
def __init__(self, page, parent=None):
QObject.__init__(self, parent)
self.is_loading = True
self.loaded_ok = None
page.loadFinished.connect(self)
self.page = page
def __call__(self, ok):
self.loaded_ok = ok
self.is_loading = False
self.page.loadFinished.disconnect(self)
self.page = None
# }}}
class BrowserView(QDialog): # {{{
def __init__(self, page, parent=None):
QDialog.__init__(self, parent)
self.l = l = QVBoxLayout(self)
self.setLayout(l)
self.webview = QWebView(self)
l.addWidget(self.webview)
self.resize(QSize(1024, 768))
self.webview.setPage(page)
# }}}
class Browser(QObject, FormsMixin):
'''
Browser (WebKit with no GUI).
This browser is NOT thread safe. Use it in a single thread only! If you
need to run downloads in parallel threads, use multiple browsers (after
copying the cookies).
'''
def __init__(self,
# Logging. If None, uses a default log, which does not output
# debugging info
log = None,
# Receives a string and returns True/False. By default, returns
# True for all strings
confirm_callback=None,
# Prompt callback. Receives a msg string and a default value
# string. Should return the user input value or None if the user
# canceled the prompt. By default returns None.
prompt_callback=None,
# User agent to be used
user_agent=USER_AGENT,
# If True a disk cache is used
use_disk_cache=True,
# Enable Inspect element functionality
enable_developer_tools=False,
# Verbosity
verbosity = 0
):
must_use_qt()
QObject.__init__(self)
FormsMixin.__init__(self)
if log is None:
log = ThreadSafeLog()
if verbosity:
log.filter_level = log.DEBUG
self.log = log
self.page = WebPage(log, confirm_callback=confirm_callback,
prompt_callback=prompt_callback, user_agent=user_agent,
enable_developer_tools=enable_developer_tools,
parent=self)
self.nam = NetworkAccessManager(log, use_disk_cache=use_disk_cache, parent=self)
self.page.setNetworkAccessManager(self.nam)
@property
def user_agent(self):
return self.page.user_agent
def _wait_for_load(self, timeout, url=None):
loop = QEventLoop(self)
start_time = time.time()
end_time = start_time + timeout
lw = LoadWatcher(self.page, parent=self)
while lw.is_loading and end_time > time.time():
if not loop.processEvents():
time.sleep(0.01)
if lw.is_loading:
raise Timeout('Loading of %r took longer than %d seconds'%(
url, timeout))
return lw.loaded_ok
def _wait_for_replies(self, reply_count, timeout):
final_time = time.time() + timeout
loop = QEventLoop(self)
while (time.time() < final_time and self.nam.reply_count <
reply_count):
loop.processEvents()
time.sleep(0.1)
if self.nam.reply_count < reply_count:
raise Timeout('Waiting for replies took longer than %d seconds' %
timeout)
def run_for_a_time(self, timeout):
final_time = time.time() + timeout
loop = QEventLoop(self)
while (time.time() < final_time):
if not loop.processEvents():
time.sleep(0.1)
def visit(self, url, timeout=30.0):
'''
Open the page specified in URL and wait for it to complete loading.
Note that when this method returns, there may still be javascript
that needs to execute (this method returns when the loadFinished()
signal is called on QWebPage). This method will raise a Timeout
exception if loading takes more than timeout seconds.
Returns True if loading was successful, False otherwise.
'''
self.current_form = None
self.page.mainFrame().load(QUrl(url))
return self._wait_for_load(timeout, url)
@property
def dom_ready(self):
return self.page.ready_state in {'complete', 'interactive'}
def wait_till_dom_ready(self, timeout=30.0, url=None):
start_time = time.time()
while not self.dom_ready:
if time.time() - start_time > timeout:
raise Timeout('Loading of %r took longer than %d seconds'%(
url, timeout))
self.run_for_a_time(0.1)
def start_load(self, url, timeout=30.0):
'''
Start the loading of the page at url and return once the DOM is ready,
sub-resources such as scripts/stylesheets/images/etc. may not have all
loaded.
'''
self.current_form = None
self.page.mainFrame().load(QUrl(url))
self.run_for_a_time(0.01)
self.wait_till_dom_ready(timeout=timeout, url=url)
def click(self, qwe_or_selector, wait_for_load=True, ajax_replies=0, timeout=30.0):
'''
Click the :class:`QWebElement` pointed to by qwe_or_selector.
:param wait_for_load: If you know that the click is going to cause a
new page to be loaded, set this to True to have
the method block until the new page is loaded
        :param ajax_replies: Number of replies to wait for after clicking a link
that triggers some AJAX interaction
'''
initial_count = self.nam.reply_count
qwe = qwe_or_selector
if not isinstance(qwe, QWebElement):
qwe = self.page.mainFrame().findFirstElement(qwe)
if qwe.isNull():
raise ValueError('Failed to find element with selector: %r'
% qwe_or_selector)
js = '''
var e = document.createEvent('MouseEvents');
e.initEvent( 'click', true, true );
this.dispatchEvent(e);
'''
qwe.evaluateJavaScript(js)
if ajax_replies > 0:
reply_count = initial_count + ajax_replies
self._wait_for_replies(reply_count, timeout)
elif wait_for_load and not self._wait_for_load(timeout):
raise LoadError('Clicking resulted in a failed load')
def click_text_link(self, text_or_regex, selector='a[href]',
wait_for_load=True, ajax_replies=0, timeout=30.0):
target = None
for qwe in self.page.mainFrame().findAllElements(selector):
src = unicode(qwe.toPlainText())
            if hasattr(text_or_regex, 'match'):
                if text_or_regex.search(src):
                    target = qwe
                    break
            elif src.lower() == text_or_regex.lower():
                target = qwe
                break
if target is None:
raise ValueError('No element matching %r with text %s found'%(
selector, text_or_regex))
return self.click(target, wait_for_load=wait_for_load,
ajax_replies=ajax_replies, timeout=timeout)
def show_browser(self):
'''
Show the currently loaded web page in a window. Useful for debugging.
'''
view = BrowserView(self.page)
view.exec_()
@property
def cookies(self):
'''
Return all the cookies set currently as :class:`Cookie` objects.
Returns expired cookies as well.
'''
return list(self.nam.py_cookies())
@property
def html(self):
return unicode(self.page.mainFrame().toHtml())
def close(self):
try:
self.visit('about:blank', timeout=0.01)
except Timeout:
pass
self.nam = self.page = None
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
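# A minimal usage sketch (hypothetical, for illustration only):
#   br = Browser(verbosity=1)
#   if br.visit('http://example.com'):
#       print br.html
#   br.close()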
| sss/calibre-at-bzr | src/calibre/web/jsbrowser/browser.py | Python | gpl-3.0 | 17,114 | ["VisIt"] | a6eaa0b342a3baf7266ad9bd5ca4f2b7c631f0f468049eba7050c76df46c938f |
#!/usr/bin/env python2
"""
NAME:
CoPAS_python2.py <https://github.com/daviddelene/CoPAS>
PURPOSE:
To facilitate the installation, setup, and integration of open source
    software and packages related to cloud physics and in-situ airborne data.
Download/Clone CoPAS Distribution
cd $HOME
git clone https://github.com/daviddelene/CoPAS.git
Update the CoPAS.py File
git commit CoPAS.py
git push origin master
EXECUTION EXAMPLE:
Get Help:
CoPAS_python2.py -h
Test for Support Packages:
CoPAS_python2.py -t
    Install only the ADPAA package, binary and source packages:
CoPAS_python2.py -s ADPAA
    Install or update all packages, only source versions:
CoPAS_python2.py -S
SYNTAX:
CoPAS_python2.py <-h|-s|-t> <ADPAA> <ADTAE> <DRILSDOWN> <EGADS> <SAMAC> <SIMDATA> <SODA> <UIOPS> <nobinary> <notesting>
<-h> - Print Syntax message.
<-S> - Install source package but no binary package.
<-s> - Install source package in addition to binary package.
<-t> - Test for necessary support packages.
ADPAA - Clone/pull the ADPAA SVN repository.
ADTAE - Clone/pull the ADTAE Git repository.
DRILSDOWN - Clone/pull the DRILSDOWN repository.
EGADS - install the EUFAR package.
SAMAC - Install the SAMAC package.
SIMDATA - Download NCAR probe simulation data sets.
SODA - Install the SODA package.
UIOPS - Install the UIOPS package.
<nobinary> - Do not install binary packages.
<notesting> - Do not test for support packages.
No parameter on command line then Clone/pull all repositories.
DEVELOPERS:
David Delene <delene@aero.und.edu>
Nick Gapp (njgapp) <nicholas.james.gapp@ndus.edu>
Joseph Finlon (joefinlon) <finlon2@illinois.edu>
NOTES:
If available, script installs a binary distribution of the package.
If no binary distribution is available, then a copy of the package repository
    is installed. If the -s option is used to install source, the binary version
    of packages like ADPAA is still needed so the code does not have to be compiled and built.
Program has three main parts:
1.) Tests to check for required python packages.
2.) Installing python packages.
3.) Cloning and update repositories.
MODIFICATIONS:
David Delene <delene@aero.und.edu> - 2016/12/24
Written.
David Delene <delene@aero.und.edu> - 2016/12/26
Added Cloning of ADTAE repository.
David Delene <delene@aero.und.edu> - 2016/12/27
Added Cloning of SODA repository.
David Delene <delene@aero.und.edu> - 2017/01/12
Added Cloning of EGADS, SAMAC, and UIOPS repository.
David Delene <delene@aero.und.edu> - 2017/02/10
Added nobinary and notesting options.
David Delene <delene@aero.und.edu> - 2017/07/09
Added information about Redhat install.
David Delene <delene@aero.und.edu> - 2017/10/30
        Added AOSPY package.
David Delene <delene@aero.und.edu> - 2018/07/07
Added SIMDATA probe simulation data set.
David Delene <delene@aero.und.edu> - 2018/07/08
Added pull (updating) of git repositories.
David Delene <delene@aero.und.edu> - 2018/07/08
Added cloning of DRILSDOWN.
David Delene <delene@aero.und.edu> - 2019/03/17
Added -S and -t options.
Added all_packages function.
Updated print statements for both python 2 and 3.
Added comment about Redhat install of unittest2.
REFERENCES:
Airborne Data Processing and Analysis (ADPAA)
ADMINISTRATORS
David Delene <delene@aero.und.edu> - Administrator
Andrea Neumann
CURRENT (2017/01/12) DEVELOPERS
Cocos, Noah
Ekness, Jamie
Gapp, Nicholas
Gupta, Siddhant
Hibert, Kurt
O'brien, Joseph
Starzec, Mariusz
Seyler, Scott
Sorenson, Blake
Wilson, Lance
Current (2017/01/12) MEMBERS
Bart, Nichole
Butland, Alex
Kruse, Christopher
Mitchell, Robert
Mulally, Daniel
Sever, Gokhan
Simelane, P.
Uhlmann, Timm
AVAILABILITY
Repository - svn://svn.code.sf.net/p/adpaa
COPYRIGHT
GNU/GPL Version 3
        PLATFORM (Operating Systems Tested On)
Redhat, Fedora, Ubuntu, Mint Linux (CPLOT/CPLOT2 - Windows)
LANGUAGES
IDL, Python 2, Perl, Bash, Csh, C, Fortran, Matlab, Scilab, Igor
STATUS (December 27, 2016)
3003 Commits, 12 Active Developers, 2 Administrator
SCOPE
Processes data from Science Engineering Associates (SEA) data
acquisition systems, many instruments supported but does not
process Optical Array Probe to produce size distributions.
Does visualization, analysis and file conversion.
Airborne Data Tesing and Evaluation (ADTAE)
ADMINISTRATORS
David Delene <delene@aero.und.edu> - Administrator
Andrea Neumann
CURRENT (2017/01/12) DEVELOPERS
Cocos, Noah
Ekness, Jamie
Gapp, Nicholas
Gupta, Siddhant
Hibert, Kurt
O'brien, Joseph
Starzec, Mariusz
Seyler, Scott
Sorenson, Blake
Wilson, Lance
AVAILABILITY
Repository - https://sourceforge.net/projects/adtae/
COPYRIGHT
GNU/GPL Version 3
        PLATFORM (Operating Systems Tested On)
Redhat, Fedora, Ubuntu, Mint Linux and Windows
LANGUAGES
Python 2, but mostly just data files.
STATUS (December 27, 2016)
3 Commits, 12 Active Developers, 2 Administrator
SCOPE
The Airborne Data Testing and Evaluation (ADTAE)
project develops open source resources to test and
evaluate software used to process and analyse
            measurements from scientific instruments deployed on
airborne platforms. Many of the resources are designed
to work with the Airborne Data Processing and Analysis
(ADPAA) software package (http://adpaa.sourceforge.net).
Automated Climate Data Analysis and Management (AOSPY)
AVAILABILITY
PIP - pip install aospy
LANGUAGES
Works on Python 2.7, 3.4, 3.5, and 3.6.
Drawing Rich Integrated Lat-lon-time Subsets from Dataservers Online into Working Notebooks (DRILSDOWN)
ADMINISTRATORS
Brian Mapes, mapes@miami.edu
AVAILABILITY
Repository - https://github.com/Unidata/drilsdown.git
COPYRIGHT
        PLATFORM (Operating Systems Tested On)
Fedora
LANGUAGES
Python
STATUS (December 27, 2016)
SCOPE
            The DRILSDOWN project facilitates access to detailed
            visualizations (in the Integrated Data Viewer, IDV) of
            Cases of Interest (user-defined) within a Python-based geo-space x time
            statistical data analysis -- if the data for such visualizations are
            available online in nice aggregated repositories.
EUFAR General Airborne Data-processing Software (EGADS)
DEVELOPERS
Freer, Matt
Henry, Olivier
AVAILABILITY
Repository - https://github.com/eufarn7sp/egads-eufar
COPYRIGHT
New BSD License
        PLATFORM (Operating Systems Tested On)
Linux, Mac and Windows
LANGUAGES
Python 2
STATUS
2 Active Developers
SCOPE
Toolbox and framework for processing Airborne Atmospheric Data.
Includes meta-data and units. All algorithms are thoroughly documented
in separate, referenceable PDF.
Software for Airborne Measurements of Aerosol and Clouds (SAMAC)
DEVELOPERS
Gagne, Stephanie
MacDonald, Landan
AVAILABILITY
Repository - https://github.com/StephGagne/SAMAC
COPYRIGHT
GNU/GPL Version 3
        PLATFORM (Operating Systems Tested On)
Linux, Mac, Windows
LANGUAGES
Python 2.7 (Matplotlib, Scipy, Numpy, Basemap, H5py, Xlrd)
STATUS (December 27, 2016)
+13,000+ Lines, 2 Developer
SCOPE
Analysis Package for Calculating, Displaying and Storing Segments from
Processed Data Sets
    NCAR Probe Simulation Data Sets (SIMDATA)
DEVELOPERS
Bansemer, Aaron
AVAILABILITY
FTP Site - ftp.ucar.edu/pub/mmm/bansemer/simulations
COPYRIGHT
None Provided
        PLATFORM (Operating Systems Tested On)
Linux
LANGUAGES
NONE
STATUS (July 9, 2018)
Setup for July 2018 workshop.
SCOPE
            Probe simulation data sets for DMT and SPEC probes.
    System for OAP Data Analysis (SODA)
DEVELOPERS
Bansemer, Aaron
AVAILABILITY
Repository - https://github.com/abansemer/soda
COPYRIGHT
BSD-3 License
Free use, UCAR/NCAR retain copyright notice.
        PLATFORM (Operating Systems Tested On)
Linux and Windows, likely Macs
LANGUAGES
IDL (Bash Scripts)
STATUS (December 27, 2016)
+90,000 Lines, +1 Developer
SCOPE
            GUI and script based analysis package for optical array
probe data that uses shattering correction and other options
to derive particle spectrum.
University of Illinois OAP Processing Software (UIOPS)
DEVELOPERS
Current Developer:
            Joseph A. Finlon (finlon2@illinois.edu)
Past Developer
Wei Wu
AVAILABILITY
            Current Version
Repository - https://github.com/joefinlon/UIOPS
Past Version
Repository - https://github.com/weiwu5/UIOPS
COPYRIGHT
GNU GPL V3
        PLATFORM (Operating Systems Tested On)
Linux Windows, Mac (CGAL modern image processing)
LANGUAGES
Matlab (C++ Image processing, Python, Bash/Csh)
STATUS (December 27, 2016)
1 Developer
SCOPE
Analysis package for optical array probe data.
COPYRIGHT:
2016, 2017, 2018, 2019 David Delene
This program is distributed under terms of the GNU General Public License
This file is part of Airborne Data Processing and Analysis (ADPAA).
ADPAA is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ADPAA is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ADPAA. If not, see <http://www.gnu.org/licenses/>.
"""
try:
import sys
except ImportError:
print (" Required python 'sys' module is not installed.")
quit()
# Define all default options values.
binary = 1
source = 0
testing = 1
testing_only = 0
# Turn off all packages by default.
adpaa = 0
adtae = 0
aospy = 0
drilsdown = 0
eufar = 0
samac = 0
simdata = 0
soda = 0
uiops = 0
# Routine to turn on/off all packages.
def all_packages(status):
if status == 'On':
adpaa = 1
adtae = 1
drilsdown = 1
eufar = 1
samac = 1
simdata = 1
soda = 1
uiops = 1
else:
adpaa = 0
adtae = 0
drilsdown = 0
eufar = 0
samac = 0
simdata = 0
soda = 0
uiops = 0
    return (adpaa,adtae,drilsdown,eufar,samac,simdata,soda,uiops)
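# Example: turn everything on, then selectively disable a single package, e.g.
#   adpaa,adtae,drilsdown,eufar,samac,simdata,soda,uiops = all_packages('On')
#   soda = 0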
# Define the help/syntax message.
def help_message():
print ("Syntax: CoPAS -h -s <ADPAA> <ADTAE> <EUFAR> <SAMAC> <SODA> <UIOPS> <nobinary> <notesting>")
print (" OPTIONS:")
print (" -h Print help message.")
print (" -S Include source code but no binary installation.")
print (" -s Include source code in addition to binary installation.")
print (" -t Only test for necessary support packages.")
print (" PACKAGES INCLUDED (Default - All Packages):")
print (" ADPAA Process Airborne Data Processing and Analysis (ADPAA) package.")
print (" ADTAE Process Airborne Data Testing and Evaluation (ADTAE) package.")
print (" EUFAR Process EUFAR General Airborne Data-processing Software (EUFAR) package.")
print (" DRILSDOWN Process Drawing Rich Integrated Lat-lon-time Subsets from Dataservers Online into Working Notebooks (DRILSDOWN).")
print (" SAMAC Software for Airborne Measurements of Aerosol and Clouds (SAMAC) package.")
print (" SIMDATA Simulation probe data set.")
print (" SODA System for OAP Data Analysis (SODA) package.")
print (" UIOPS Process University of Illinois OAP Processing Software (UOIPS) package.")
print (" PREFERENCES:")
print (" nobinary Do not install binary packages.")
print (" notesting Do not test for support packages.")
print (" ENIVIRONMENTAL VARIABLES:")
print (" SVN_USERNAME - Checks out repositories using the defiend username.")
# Turn off all packages by default.
adpaa,adtae,drilsdown,eufar,samac,simdata,soda,uiops = all_packages('Off')
# Check for - command line options, for example -h.
for param in sys.argv:
if param.startswith('-h'):
help_message()
exit()
if param.startswith('-S'):
source = 1
binary = 0
# If no parameter options, install all packages.
if (len(sys.argv) < 3):
            adpaa,adtae,drilsdown,eufar,samac,simdata,soda,uiops = all_packages('On')
if param.startswith('-s'):
source = 1
# If no parameter options, install all packages.
if (len(sys.argv) < 3):
            adpaa,adtae,drilsdown,eufar,samac,simdata,soda,uiops = all_packages('On')
else:
# If no parameter options, install all packages.
if (len(sys.argv) < 2):
            adpaa,adtae,drilsdown,eufar,samac,simdata,soda,uiops = all_packages('On')
if param.startswith('-t'):
testing_only = 1
# Check for list of packages to install.
for param in sys.argv:
if (param == 'ADPAA'):
adpaa = 1
if (param == 'adpaa'):
adpaa = 1
if (param == 'ADTAE'):
adtae = 1
if (param == 'adtae'):
adtae = 1
if (param == 'AOSPY'):
aospy = 1
if (param == 'aospy'):
aospy = 1
if (param == 'DRILSDOWN'):
drilsdown = 1
if (param == 'drilsdown'):
drilsdown = 1
if (param == 'EUFAR'):
eufar = 1
if (param == 'eufar'):
eufar = 1
if (param == 'SAMAC'):
samac = 1
if (param == 'samac'):
samac = 1
if (param == 'SIMDATA'):
simdata = 1
if (param == 'simdata'):
simdata = 1
if (param == 'SODA'):
soda = 1
if (param == 'soda'):
soda = 1
if (param == 'UIOPS'):
uiops = 1
if (param == 'uiops'):
uiops = 1
# Check for list of long name options.
for param in sys.argv:
if (param == 'nobinary'):
binary = 0
if (param == 'notesting'):
testing = 0
# Import packages with existence checking.
print ("Importing Modules:")
import imp
print (" The imp module imported.")
try:
imp.find_module('git')
except ImportError:
print ("** WARNING: The python 'git' module does not exist.")
print ("** Please install (see suggestion below) and execute again.")
print ("** Redhat - sudo yum install GitPython")
print ("** Fedora - sudo dnf install python3-GitPython")
print ("** Ubuntu - sudo apt install python-git")
pass
else:
import git
print (" The git module imported.")
try:
imp.find_module('os')
import os
except ImportError:
print ("** WARNING: The python 'os' module does not exist.")
print ("** Please install (see suggestion below) and execute again.")
print ("**          Redhat - sudo yum install python-libs")
print ("** Fedora - sudo dnf install python-libs")
pass
else:
import os
print (" The os module imported.")
### PIP required for AOSPY. ###
try:
import pip
except ImportError:
print ("    The python 'pip' module does not exist.")
print (" pip only required for AOSPY.")
pass
else:
import pip
print (" The pip module imported.")
try:
imp.find_module('pysvn')
import pysvn
except ImportError:
print ("** WARNING: The python 'pysvn' module does not exist.")
print ("** Please install (see suggestion below) and execute again.")
print ("** Redhat - sudo yum install pysvn")
print ("** Fedora - sudo dnf install pysvn")
print ("** Ubuntu - sudo apt install python-svn")
pass
else:
import pysvn
print (" The pysvn module imported.")
try:
imp.find_module('shutil')
except ImportError:
print ("** WARNING: The python 'shutil' module does not exist.")
pass
else:
import shutil
print (" The shutil module imported.")
try:
imp.find_module('sys')
except ImportError:
print ("** WARNING: The python 'sys' module does not exist.")
pass
else:
import sys
print (" The sys module imported.")
try:
import tarfile
except ImportError:
print ("** WARNING: The python 'tarfile' module does not exist.")
pass
else:
import tarfile
print (" The tarfile module imported.")
try:
import urllib2
except ImportError:
print ("** WARNING: The python 'urllib2' module does not exist.")
print ("** Please install (see suggestion below) and execute again.")
print ("** Redhat - sudo yum install python-urllibs2")
print ("** Fedora - sudo dnf install python-urllibs2")
pass
else:
import urllib2
print (" The urllib2 module imported.")
try:
import unittest2
except ImportError:
print ("** WARNING: The python 'unittest2' module does not exist.")
print ("** Please install (see suggestion below) and execute again.")
print ("** Redhat - sudo yum install python-unittest2")
print ("** Fedora - sudo dnf install python-unittest2")
print ("** Ubuntu - sudo apt install python-unittest2")
pass
else:
import unittest2
print (" The unittest2 module imported.")
# Exit if only want testing for support programs.
if testing_only:
exit()
class Progress(git.remote.RemoteProgress):
def update(self, op_code, cur_count, max_count=None, message=''):
print ('{0}\r'.format(self._cur_line))
print ("Cloning and Updating Repositories:")
### Airborne Data Processing and Analysis (ADPAA) software package. ###
if (adpaa):
# Create directories.
print (" Working on Airborne Data Processing and Analysis (ADPAA) package.")
if (binary):
print (" Downloading binary version of ADPAA.")
if not os.path.isdir("ADPAA"):
os.mkdir('ADPAA')
os.chdir('ADPAA')
if not os.path.isdir("binary_distributions"):
os.mkdir('binary_distributions')
os.chdir('binary_distributions')
# Download tar file of binary package using progress bar.
url = "https://sourceforge.net/projects/adpaa/files/ADPAA.tar.gz"
file_name = url.split('/')[-1]
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print " Downloading ADPAA Binary Version: %s Bytes: %s" % (file_name, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
f.close()
# Extract distribution from compressed tar file.
print (" Extracting ADPAA distribution from compressed tar file.")
tar = tarfile.open('ADPAA.tar.gz', "r:gz")
tar.extractall("..")
tar.close()
# Go back to base directory.
os.chdir('../..')
if (source):
if not os.path.isdir("ADPAA/src"):
print (" Cloning ADPAA source code from repository.")
if not os.path.isdir("ADPAA"):
os.mkdir('ADPAA')
os.chdir('ADPAA')
client = pysvn.Client()
svn_username = os.environ.get('SVN_USERNAME')
if svn_username is None:
client.checkout('svn://svn.code.sf.net/p/adpaa/code/trunk/src','src')
else:
client.checkout('svn+ssh://'+svn_username+'@svn.code.sf.net/p/adpaa/code/trunk/src','src')
os.chdir('..')
print (" Finished cloning ADPAA source code from repository.")
else:
# Updating existing ADPAA repository.
print (" Updating existing ADPAA source code from repository.")
os.chdir('ADPAA')
client = pysvn.Client()
client.update('src')
os.chdir('..')
print (" Finished updating ADPAA source code from repository.")
if (testing):
print ("    Testing for non-installed ADPAA support packages.")
try:
import csv
except ImportError:
print (" Required python 'csv' module is not installed.")
quit()
try:
import numpy
except ImportError:
print (" Required python 'numpy' module is not installed.")
quit()
try:
import math
except ImportError:
print (" Required python 'math' module is not installed.")
quit()
try:
import sys
except ImportError:
print (" Required python 'sys' module is not installed.")
quit()
print ("    Finished testing for non-installed ADPAA support packages.")
### Airborne Data Testing and Evaluation (ADTAE) software package. ###
if (adtae):
# Create main ADTAE directory.
print (" Working on Airborne Data Testing and Evaluation (ADTAE) package.")
if not os.path.isdir("ADTAE"):
os.mkdir('ADTAE')
print (" Cloning ADTAE repository.")
repo = git.Repo.clone_from(
'git://git.code.sf.net/p/adtae/code',
'ADTAE',
progress=Progress())
print (" Finished cloning ADTAE repository.")
else:
# Update the existing repository.
print (" Updating ADTAE repository.")
repo = git.cmd.Git('ADTAE')
repo.pull()
print (" Finished with ADTAE.")
if (aospy):
print (" Installing AOSPY package.")
print ("    WARNING: AOSPY installation requires sudo execution of CoPAS, for example 'sudo ./CoPAS'.")
def install(package):
pip.main(['install', package])
if __name__ == '__main__':
install('aospy')
print ("    Finished installing AOSPY package.")
### Drawing Rich Integrated Lat-lon-time Subsets from Dataservers Online into Working Notebooks package. ###
if (drilsdown):
# Create main DRILSDOWN directory.
print (" Working on DRILSDOWN package.")
if not os.path.isdir("DRILSDOWN"):
os.mkdir('DRILSDOWN')
print (" Cloning DRILSDOWN repository.")
repo = git.Repo.clone_from(
'git://github.com/Unidata/drilsdown.git',
'DRILSDOWN',
progress=Progress())
print (" Finished cloning DRILSDOWN repository.")
else:
# Update the existing repository.
print ("    Updating DRILSDOWN repository.")
repo = git.cmd.Git('DRILSDOWN')
repo.pull()
print (" Finished with DRILSDOWN.")
### EUFAR General Airborne Data-processing Software (EUFAR). ###
if (eufar):
# Create main EUFAR directory.
print (" Working on EUFAR General Airborne Data-processing Software (EUFAR) package.")
if not os.path.isdir("EUFAR"):
os.mkdir('EUFAR')
print (" Cloning EUFAR repository.")
repo = git.Repo.clone_from(
'https://github.com/eufarn7sp/egads-eufar',
'EUFAR',
progress=Progress())
else:
# Update the existing repository.
print (" Updating EUFAR repository.")
repo = git.cmd.Git('EUFAR')
repo.pull()
print (" Finished with EUFAR.")
### Software for Airborne Measurements of Aerosol and Clouds (SAMAC) ###
if (samac):
# Create main SAMAC directory.
print (" Software for Airborne Measurements of Aerosol and Clouds (SAMAC).")
if not os.path.isdir("SAMAC"):
print (" Cloning SAMAC repository.")
# Write two spaces without a newline.
sys.stdout.write(' ')
sys.stdout.flush()
repo = git.Repo.clone_from(
'https://github.com/StephGagne/SAMAC',
'SAMAC',
progress=Progress())
print (" Finished cloning SAMAC.")
else:
# Update the existing repository.
print (" Updating SAMAC repository.")
repo = git.cmd.Git('SAMAC')
repo.pull()
print (" Finished updating SAMAC repository.")
### Simulation probe data (SIMDATA) ###
if (simdata):
# Get from ftp.ucar.edu/pub/mmm/bansemer/simulations/
print ("    Downloading simulation probe data set.")
print ("    Finished downloading simulation probe data set.")
### System for OAP Data Analysis (SODA) ###
if (soda):
# Create main SODA directory.
print (" System for OAP Data Analysis (SODA) package.")
if not os.path.isdir("SODA"):
print (" Cloning SODA repository.")
repo = git.Repo.clone_from(
'https://github.com/abansemer/soda2',
'SODA',
progress=Progress())
print (" Finished cloning SODA repository.")
else:
# Update the existing repository.
print (" Updating SODA repository.")
repo = git.cmd.Git('SODA')
repo.pull()
print (" Finished with SODA.")
### Process University of Illinois OAP Processing Software (UIOPS) package ###
if (uiops):
# Create main UIOPS directory.
print ("    Working on University of Illinois OAP Processing Software (UIOPS) package.")
if not os.path.isdir("UIOPS"):
print (" Cloning UIOPS repository.")
repo = git.Repo.clone_from(
'https://github.com/joefinlon/UIOPS',
'UIOPS',
progress=Progress())
print ("    Finished cloning UIOPS repository.")
else:
# Update the existing repository.
print (" Updating UIOPS repository.")
repo = git.cmd.Git('UIOPS')
repo.pull()
print (" Finished updating UIOPS.")
|
daviddelene/CoPAS
|
CoPAS_python2.py
|
Python
|
agpl-3.0
| 26,416
|
[
"Brian"
] |
731bc0d1d072e264e5794e7eba7da00190d11094207e503da3939367d8fe8e38
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import urllib
import numpy as np
from os import path
# Import Apache OCW dependencies
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
from ocw.dataset import Bounds as Bounds
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils
import ssl
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Two Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
FILE_2 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "pr_africa_bias_annual"
#variable that we are analyzing
varName = 'pr'
# Spatial and temporal configurations
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
START = datetime.datetime(2000, 1, 1)
END = datetime.datetime(2007, 12, 31)
EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
#regridding parameters
gridLonStep=0.5
gridLatStep=0.5
#list for all target_datasets
target_datasets =[]
#list for names for all the datasets
allNames =[]
# Download necessary NetCDF file if not present
if path.exists(FILE_1):
pass
else:
urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
if path.exists(FILE_2):
pass
else:
urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
target_datasets.append(local.load_file(FILE_2, varName, name="UCT"))
""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
print("Working with the rcmed interface to get CRU3.1 Monthly Mean Precipitation")
# the dataset_id and the parameter id were determined from
# https://rcmes.jpl.nasa.gov/content/data-rcmes-database
CRU31 = rcmed.parameter_dataset(10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
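# Note (added): per the RCMED listing referenced in the comment above, dataset_id=10 with
# parameter_id=37 selects the CRU3.1 monthly mean precipitation record, which serves as the
# observational reference for this evaluation.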
""" Step 3: Resample Datasets so they are the same shape """
print("Resampling datasets")
CRU31 = dsp.water_flux_unit_conversion(CRU31)
CRU31 = dsp.temporal_rebin(CRU31, datetime.timedelta(days=30))
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.subset(EVAL_BOUNDS, target_datasets[member])
target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[member])
target_datasets[member] = dsp.temporal_rebin(target_datasets[member], datetime.timedelta(days=30))
""" Spatially Regrid the Dataset Objects to a user defined grid """
# Using the bounds we will create a new set of lats and lons
print("Regridding datasets")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.spatial_regrid(target_datasets[member], new_lats, new_lons)
#make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name="ENS"
#append to the target_datasets for final analysis
target_datasets.append(target_datasets_ensemble)
#find the annual mean value (climatology)
#note: the helper function calc_climatology_year lives in ocw.utils
_, CRU31.values = utils.calc_climatology_year(CRU31)
for member, each_target_dataset in enumerate(target_datasets):
_,target_datasets[member].values = utils.calc_climatology_year(target_datasets[member])
for target in target_datasets:
allNames.append(target.name)
#determine the metrics
mean_bias = metrics.Bias()
#create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(CRU31, # Reference dataset for the evaluation
# list of target datasets for the evaluation
target_datasets,
# 1 or more metrics to use in the evaluation
[mean_bias])
RCMs_to_CRU_evaluation.run()
#extract the relevant data from RCMs_to_CRU_evaluation.results
#results returns a list (num_target_datasets, num_metrics); see the docs for further details
#remove the metric dimension
rcm_bias = RCMs_to_CRU_evaluation.results[0]
plotter.draw_contour_map(rcm_bias, new_lats, new_lons, gridshape=(2, 3),fname=OUTPUT_PLOT, subtitles=allNames, cmap='coolwarm_r')
|
MJJoyce/climate
|
examples/multi_model_evaluation.py
|
Python
|
apache-2.0
| 5,315
|
[
"NetCDF"
] |
4f60a55c74beb23b5f637fe5938d20411e7ff67b1fde0e84fda6506f498e20ac
|
# Copyright 2000-2010 Michael Hudson-Doyle <micahel@gmail.com>
# Antonio Cuni
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from pyrepl import commands, reader
from pyrepl.reader import Reader
def prefix(wordlist, j = 0):
d = {}
i = j
try:
while 1:
for word in wordlist:
d[word[i]] = 1
if len(d) > 1:
return wordlist[0][j:i]
i += 1
d = {}
except IndexError:
return wordlist[0][j:i]
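# Worked example (added, not in the original): prefix(['foobar', 'foobaz'], 3) compares the
# words character by character from index 3 and returns 'ba', the common continuation after
# position j; the IndexError path handles a word being exhausted before a mismatch is found.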
import re
def stripcolor(s):
return stripcolor.regexp.sub('', s)
stripcolor.regexp = re.compile(r"\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[m|K]")
def real_len(s):
return len(stripcolor(s))
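# Illustrative example (added): stripcolor('\x1b[31mwarning\x1b[0m') returns 'warning', so
# real_len() measures the visible width of a string containing ANSI colour escape codes.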
def left_align(s, maxlen):
stripped = stripcolor(s)
if len(stripped) > maxlen:
# too bad, we remove the color
return stripped[:maxlen]
padding = maxlen - len(stripped)
return s + ' '*padding
def build_menu(cons, wordlist, start, use_brackets, sort_in_column):
if use_brackets:
item = "[ %s ]"
padding = 4
else:
item = "%s "
padding = 2
maxlen = min(max(map(real_len, wordlist)), cons.width - padding)
cols = cons.width / (maxlen + padding)
rows = (len(wordlist) - 1)/cols + 1
if sort_in_column:
# sort_in_column=False (default) sort_in_column=True
# A B C A D G
# D E F B E
# G C F
#
# "fill" the table with empty words, so we always have the same amount
# of rows for each column
missing = cols*rows - len(wordlist)
wordlist = wordlist + ['']*missing
indexes = [(i%cols)*rows + i//cols for i in range(len(wordlist))]
wordlist = [wordlist[i] for i in indexes]
menu = []
i = start
for r in range(rows):
row = []
for col in range(cols):
row.append(item % left_align(wordlist[i], maxlen))
i += 1
if i >= len(wordlist):
break
menu.append( ''.join(row) )
if i >= len(wordlist):
i = 0
break
if r + 5 > cons.height:
menu.append(" %d more... "%(len(wordlist) - i))
break
return menu, i
# this gets somewhat user interface-y, and as a result the logic gets
# very convoluted.
#
# To summarise the summary of the summary:- people are a problem.
# -- The Hitch-Hikers Guide to the Galaxy, Episode 12
#### Desired behaviour of the completions commands.
# the considerations are:
# (1) how many completions are possible
# (2) whether the last command was a completion
# (3) if we can assume that the completer is going to return the same set of
# completions: this is controlled by the ``assume_immutable_completions``
# variable on the reader, which is True by default to match the historical
# behaviour of pyrepl, but e.g. False in the ReadlineAlikeReader to match
# more closely readline's semantics (this is needed e.g. by
# fancycompleter)
#
# if there's no possible completion, beep at the user and point this out.
# this is easy.
#
# if there's only one possible completion, stick it in. if the last thing
# user did was a completion, point out that he isn't getting anywhere, but
# only if the ``assume_immutable_completions`` is True.
#
# now it gets complicated.
#
# for the first press of a completion key:
# if there's a common prefix, stick it in.
# irrespective of whether anything got stuck in, if the word is now
# complete, show the "complete but not unique" message
# if there's no common prefix and if the word is not now complete,
# beep.
# common prefix -> yes no
# word complete \/
# yes "cbnu" "cbnu"
# no - beep
# for the second bang on the completion key
# there will necessarily be no common prefix
# show a menu of the choices.
# for subsequent bangs, rotate the menu around (if there are sufficient
# choices).
class complete(commands.Command):
def do(self):
r = self.reader
stem = r.get_stem()
if r.assume_immutable_completions and \
r.last_command_is(self.__class__):
completions = r.cmpltn_menu_choices
else:
r.cmpltn_menu_choices = completions = \
r.get_completions(stem)
if len(completions) == 0:
r.error("no matches")
elif len(completions) == 1:
if r.assume_immutable_completions and \
len(completions[0]) == len(stem) and \
r.last_command_is(self.__class__):
r.msg = "[ sole completion ]"
r.dirty = 1
r.insert(completions[0][len(stem):])
else:
p = prefix(completions, len(stem))
if p:
r.insert(p)
if r.last_command_is(self.__class__):
if not r.cmpltn_menu_vis:
r.cmpltn_menu_vis = 1
r.cmpltn_menu, r.cmpltn_menu_end = build_menu(
r.console, completions, r.cmpltn_menu_end,
r.use_brackets, r.sort_in_column)
r.dirty = 1
elif stem + p in completions:
r.msg = "[ complete but not unique ]"
r.dirty = 1
else:
r.msg = "[ not unique ]"
r.dirty = 1
class self_insert(commands.self_insert):
def do(self):
commands.self_insert.do(self)
r = self.reader
if r.cmpltn_menu_vis:
stem = r.get_stem()
if len(stem) < 1:
r.cmpltn_reset()
else:
completions = [w for w in r.cmpltn_menu_choices
if w.startswith(stem)]
if completions:
r.cmpltn_menu, r.cmpltn_menu_end = build_menu(
r.console, completions, 0,
r.use_brackets, r.sort_in_column)
else:
r.cmpltn_reset()
class CompletingReader(Reader):
"""Adds completion support
Adds instance variables:
* cmpltn_menu, cmpltn_menu_vis, cmpltn_menu_end, cmpltn_menu_choices:
the menu lines currently displayed, its visibility flag, the rotation index
into the candidate list, and the candidate completions themselves.
"""
# see the comment for the complete command
assume_immutable_completions = True
use_brackets = True # display completions inside []
sort_in_column = False
def collect_keymap(self):
return super(CompletingReader, self).collect_keymap() + (
(r'\t', 'complete'),)
def __init__(self, console):
super(CompletingReader, self).__init__(console)
self.cmpltn_menu = ["[ menu 1 ]", "[ menu 2 ]"]
self.cmpltn_menu_vis = 0
self.cmpltn_menu_end = 0
for c in [complete, self_insert]:
self.commands[c.__name__] = c
self.commands[c.__name__.replace('_', '-')] = c
def after_command(self, cmd):
super(CompletingReader, self).after_command(cmd)
if not isinstance(cmd, self.commands['complete']) \
and not isinstance(cmd, self.commands['self_insert']):
self.cmpltn_reset()
def calc_screen(self):
screen = super(CompletingReader, self).calc_screen()
if self.cmpltn_menu_vis:
ly = self.lxy[1]
screen[ly:ly] = self.cmpltn_menu
self.screeninfo[ly:ly] = [(0, [])]*len(self.cmpltn_menu)
self.cxy = self.cxy[0], self.cxy[1] + len(self.cmpltn_menu)
return screen
def finish(self):
super(CompletingReader, self).finish()
self.cmpltn_reset()
def cmpltn_reset(self):
self.cmpltn_menu = []
self.cmpltn_menu_vis = 0
self.cmpltn_menu_end = 0
self.cmpltn_menu_choices = []
def get_stem(self):
st = self.syntax_table
SW = reader.SYNTAX_WORD
b = self.buffer
p = self.pos - 1
while p >= 0 and st.get(b[p], SW) == SW:
p -= 1
return ''.join(b[p+1:self.pos])
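# Example (added; assumes the default syntax table, where whitespace is not a word character):
# with the buffer holding "print foo" and the cursor at the end, get_stem() returns 'foo'.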
def get_completions(self, stem):
return []
def test():
class TestReader(CompletingReader):
def get_completions(self, stem):
return [s for l in map(lambda x:x.split(),self.history)
for s in l if s and s.startswith(stem)]
reader = TestReader()
reader.ps1 = "c**> "
reader.ps2 = "c/*> "
reader.ps3 = "c|*> "
reader.ps4 = "c\*> "
while reader.readline():
pass
if __name__=='__main__':
test()
|
c0710204/mirrorsBistu
|
pypi/bandersnatch/lib/python2.7/site-packages/pyrepl/completing_reader.py
|
Python
|
mit
| 9,444
|
[
"Galaxy"
] |
f1769a86bf06ce508dad9a4a52b4f172c69839078925bede629493fdc0201ec0
|
import numpy as np
from enable.api import ColorTrait, Container
from pyface.action.api import Action
from traits.api import Callable, Either, Instance, Tuple, on_trait_change
from .color_function_component import ColorNode, ColorComponent
from .function_component import FunctionComponent
from .gaussian_function_component import (
GaussianComponent, GaussianColorNode, GaussianOpacityNode,
GAUSSIAN_MINIMUM_RADIUS
)
from .menu_tool import menu_tool_with_actions
from .opacity_function_component import OpacityNode, OpacityComponent
from .transfer_function import TransferFunction
from .utils import build_screen_to_function
class BaseCtfEditorAction(Action):
container = Instance(Container)
screen_to_function = Callable
def _screen_to_function_default(self):
return build_screen_to_function(self.container)
class BaseColorAction(BaseCtfEditorAction):
# A callable which prompts the user for a color
prompt_color = Callable
def perform(self, event):
color = self.prompt_color()
if color is None:
return
self.perform_with_color(event, color)
class AddColorAction(BaseColorAction):
name = 'Add Color...'
def perform_with_color(self, event, color):
screen_position = (event.enable_event.x, 0.0)
rel_x, _ = self.screen_to_function(screen_position)
node = ColorNode(center=rel_x, color=color)
component = ColorComponent(node=node)
self.container.add_function_component(component)
class AddGaussianAction(BaseColorAction):
name = 'Add Gaussian...'
def perform_with_color(self, event, color):
screen_position = (event.enable_event.x, event.enable_event.y)
rel_x, rel_y = self.screen_to_function(screen_position)
rad = GAUSSIAN_MINIMUM_RADIUS
color_node = GaussianColorNode(center=rel_x, color=color, radius=rad)
opacity_node = GaussianOpacityNode(center=rel_x, opacity=rel_y,
radius=rad)
component = GaussianComponent(node=color_node,
opacity_node=opacity_node)
self.container.add_function_component(component)
class AddOpacityAction(BaseCtfEditorAction):
name = 'Add Opacity'
def perform(self, event):
screen_position = (event.enable_event.x, event.enable_event.y)
rel_x, rel_y = self.screen_to_function(screen_position)
node = OpacityNode(center=rel_x, opacity=rel_y)
component = OpacityComponent(node=node)
self.container.add_function_component(component)
class CtfEditor(Container):
""" A widget for editing transfer functions.
"""
# The function which is being edited. Contains color and opacity.
function = Instance(TransferFunction)
# A callable which prompts the user for a color
# A single keyword argument 'starting_color' will be passed to the callable
# and its value will be None or an RGB tuple with values in the range
# [0, 1]. An RGB tuple should be returned.
prompt_color_selection = Callable
# Numpy histogram tuple, if any. (values, bin_edges)
histogram = Either(Tuple, None)
# The color to use when drawing the histogram
histogram_color = ColorTrait('gray')
# Add some padding to make mouse interaction near the edge more pleasant.
padding_left = 5
padding_bottom = 5
padding_top = 5
padding_right = 5
fill_padding = True
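# Illustrative setup sketch (added; assumes a colour-dialog callable and volume data,
# neither of which is part of this module):
#   editor = CtfEditor(prompt_color_selection=ask_user_for_color)
#   editor.histogram = np.histogram(volume_data, bins=256)   # (values, bin_edges) tuple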
# -----------------------------------------------------------------------
# Public interface
# -----------------------------------------------------------------------
def add_function_component(self, component):
self.add(component)
component.add_function_nodes(self.function)
component._transfer_function = self.function
self.function.updated = True
self.request_redraw()
def remove_function_component(self, component):
self.remove(component)
component.remove_function_nodes(self.function)
self.function.updated = True
self.request_redraw()
# -----------------------------------------------------------------------
# Traits initialization
# -----------------------------------------------------------------------
def _function_default(self):
function = TransferFunction()
self._add_components_for_new_function(function)
return function
def _tools_default(self):
prompt_color = self.prompt_color_selection
actions = [
AddColorAction(container=self, prompt_color=prompt_color),
AddGaussianAction(container=self, prompt_color=prompt_color),
AddOpacityAction(container=self),
]
return [menu_tool_with_actions(self, actions)]
# -----------------------------------------------------------------------
# Traits notifications
# -----------------------------------------------------------------------
def _bounds_changed(self, old, new):
super(CtfEditor, self)._bounds_changed(old, new)
for child in self.components:
if isinstance(child, FunctionComponent):
child.parent_changed(self)
def _function_changed(self, new):
for child in self.components[:]:
if isinstance(child, FunctionComponent):
self.remove(child)
if new is not None:
self._add_components_for_new_function(new)
self.request_redraw()
@on_trait_change('function:updated')
def _function_updated(self):
self.request_redraw()
def _histogram_changed(self):
self.request_redraw()
# -----------------------------------------------------------------------
# Drawing
# -----------------------------------------------------------------------
def _draw_container_mainlayer(self, gc, *args, **kwargs):
color_nodes = self.function.color.values()
alpha_nodes = self.function.opacity.values()
gc.clear()
with gc:
# Move the origin to the lower left padding.
gc.translate_ctm(self.padding_left, self.padding_bottom)
self._draw_colors(color_nodes, gc)
if self.histogram is not None:
self._draw_histogram(gc)
self._draw_alpha(alpha_nodes, gc)
def _draw_alpha(self, alpha_nodes, gc):
""" Draw the opacity curve.
"""
w, h = self.width, self.height
points = [(w * i, h * v) for (i, v) in alpha_nodes]
with gc:
gc.set_line_width(1.0)
gc.set_stroke_color((0.0, 0.0, 0.0, 1.0))
gc.lines(points)
gc.stroke_path()
def _draw_colors(self, color_nodes, gc):
""" Draw the colorbar.
"""
w, h = self.width, self.height
grad_stops = np.array([(x, r, g, b, 1.0)
for x, r, g, b in color_nodes])
with gc:
gc.rect(0, 0, w, h)
gc.linear_gradient(0, 0, w, 0, grad_stops, 'pad', 'userSpaceOnUse')
gc.fill_path()
def _draw_histogram(self, gc):
""" Draw the logarithm of the histogram.
"""
values, bin_edges = self.histogram
w, h = self.width, self.height
values = values.astype(float)
zeros = (values == 0)
min_nonzero = values[~zeros].min()
values[zeros] = min_nonzero / 2.0
log_values = np.log(values)
log_values -= log_values.min()
log_values /= log_values.max()
h_values = log_values * h
bin_edges = bin_edges - bin_edges.min()
bin_edges *= w / bin_edges.max()
x = np.concatenate([bin_edges[:1],
np.repeat(bin_edges[1:-1], 2),
bin_edges[-1:]])
y = np.repeat(h_values, 2)
points = np.column_stack([x, y])
with gc:
gc.set_line_width(1.0)
gc.set_stroke_color(self.histogram_color_)
gc.lines(points)
gc.stroke_path()
# -----------------------------------------------------------------------
# Private methods
# -----------------------------------------------------------------------
def _add_components_for_new_function(self, function):
linked_colors, linked_opacities = [], []
if len(function.links) > 0:
linked_colors, linked_opacities = zip(*function.links)
for func in (function.color, function.opacity):
last_index = func.size() - 1
for idx, node in enumerate(func.nodes):
if node in linked_colors or node in linked_opacities:
continue
component = FunctionComponent.from_function_nodes(node)
component._transfer_function = function
component.removable = (idx not in (0, last_index))
self.add(component)
for node_pair in function.links:
component = FunctionComponent.from_function_nodes(*node_pair)
component._transfer_function = function
self.add(component)
|
dmsurti/ensemble
|
ensemble/ctf/editor.py
|
Python
|
bsd-3-clause
| 9,124
|
[
"Gaussian"
] |
280451f3132859f1738b60f695b82f049486b02b5511486c900124bbf5c6f6e4
|
import numpy as np
heading = {}
heading['xyz'] = [0,1,2]
heading['xy' ] = [0,1]
heading['xz' ] = [0,2]
heading['yz' ] = [1,2]
heading['x' ] = [0]
heading['y' ] = [1]
heading['z' ] = [2]
# polarization is complementary to heading
polarization = {}
polarization['xyz'] = [0,1,2]
polarization['xy' ] = [1,0]
polarization['xz' ] = [2,0]
polarization['yz' ] = [2,1]
polarization['x' ] = [1]
polarization['y' ] = [0]
polarization['z' ] = [0]
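# Example of the convention above (added note): a wave heading along 'x' (component index 0)
# has its transverse field on index 1, i.e. heading['x'] == [0] and polarization['x'] == [1].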
def user_source():
pass
def user_transversal_source():
pass
class Sources:
def _unpack_options(self,options={}):
# first add the options passed to the function
for key in options:
setattr(self,key,options[key])
# unpack options from self.options{}
for key in self.options:
setattr(self,key,self.options[key])
def _set_f_w(self,material,dictin):
setattr(self,'c',material.co)
for key,value in dictin.items():
setattr(self,key,value)
if hasattr(self,'wavelength'):
setattr(self,'omega',2.0*np.pi*material.co/self.wavelength)
elif hasattr(self,'omega'):
setattr(self,'wavelength',2.0*np.pi*material.co/self.omega)
else:
msg = 'You must define either wavelength or omega'
print(msg)
def gaussian(self,x,dx,s=1.0,xo=0.0,v=0.0,t=0.0):
try:
from scipy.special import erf
except:
self.averaged = False
arg = xo + v*t - x
if self.averaged:
ddx = dx/2.0
erravg = (np.sqrt(np.pi)*s*(erf((ddx + arg)/s) + erf((ddx - arg)/s)))/(2.0*dx)
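# Note (added): the expression above is the exact cell average of exp(-(x' - xo - v*t)**2/s**2)
# over a cell of width dx centred at x, written with the error function; the else branch
# below simply evaluates the Gaussian at the cell centre instead.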
else:
erravg = np.exp(-arg**2/s**2)
if self.cut:
span = ((arg**2/s**2)<=(4.0*s))
erravg = erravg*span
return erravg
def harmonic(self,x,dx,xo=0.0,omega=0.0,k=1.0,t=0.0,f=None):
arg = x - xo -(omega/k)*t
if f is None:
f = self.harmonic_function
if self.averaged:
ddx = dx/2.0
if f.__name__=='sin':
erravg = (np.cos(k*(ddx - arg)) - np.cos(k*(ddx + arg)))/(k*dx)
elif f.__name__=='cos':
erravg = (np.sin(k*(ddx - arg)) + np.sin(k*(ddx + arg)))/(k*dx)
else:
erravg = f(k*arg)
return erravg
def init(self,state):
state.q.fill(0.0)
if self.shape=='off':
dimh = heading[self.heading]
dimp = polarization[self.heading]
grid = state.grid
if state.num_dim==1:
x = grid.x.centers
waveshape = self.gaussian(x,self._dx,xo=self.offset,s=self.pulse_width)
state.q[0,:] = self._material.zo*waveshape
state.q[1,:] = waveshape
if state.num_dim>=2:
waveshape = 1.0
for i in range(len(self.heading)):
h = dimh[i]
waveshape = waveshape*self.gaussian(grid.c_centers[h],self._delta[h],xo=self.offset[h],s=self.pulse_width[h])
if len(self.heading)==1:
if state.num_dim==2:
waveshape = self.transversal_function(grid.c_centers[dimp[0]])*waveshape
state.q[dimp[0],:,:] = ((-1.0)**dimh[0])*self._material.zo*waveshape
if state.num_dim==2: p=2
if state.num_dim==3: p=5
state.q[p,:,:] = waveshape
return state
def scattering_bc(self,state,dim,t,qbc,auxbc,num_ghost):
grid = state.grid
setattr(grid, '_c_centers_with_ghost', grid.c_centers_with_ghost(num_ghost))
t = state.t
if state.num_dim==1:
x = grid.x.centers_with_ghost[:num_ghost]
qbc[:,:num_ghost] = self.function(x,t)
if state.num_dim==2:
x = grid._c_centers_with_ghost[0]
y = grid._c_centers_with_ghost[1]
if dim.name==state.grid.dimensions[0].name:
x = x[:num_ghost,:]
y = y[:num_ghost,:]
qbc[:,:num_ghost,:] = self.function(x,y,t)
else:
x = x[:,:num_ghost]
y = y[:,:num_ghost]
qbc[:,:,:num_ghost] = self.function(x,y,t)
if state.num_dim==3:
x = grid._c_centers_with_ghost[0]
y = grid._c_centers_with_ghost[1]
z = grid._c_centers_with_ghost[2]
if dim.name==state.grid.dimensions[0].name:
x = x[:num_ghost,:,:]
y = y[:num_ghost,:,:]
z = z[:num_ghost,:,:]
qbc[:,:num_ghost,:,:] = self.function(x,y,z,t)
if dim.name==state.grid.dimensions[1].name:
x = x[:,:num_ghost,:]
y = y[:,:num_ghost,:]
z = z[:,:num_ghost,:]
qbc[:,:,:num_ghost,:] = self.function(x,y,z,t)
if dim.name==state.grid.dimensions[2].name:
x = x[:,:,:num_ghost]
y = y[:,:,:num_ghost]
z = z[:,:,:num_ghost]
qbc[:,:,:,:num_ghost] = self.function(x,y,z,t)
return qbc
def dump(self):
for attr in sorted(dir(self)):
if not attr.startswith('_'):
print("%s = %s" % (attr, getattr(self, attr)))
def _dump_to_latex(self):
from tabulate import tabulate
strt = r'\begin{table}[h!]' + '\n' + r'\centering' + '\n' + r'\begin{tabular}{cl}' + '\n' + r'\hline' + '\n'
strt = strt + r'variable & value(s) \\' + '\n' + r'\hline' +'\n'
for attr in sorted(dir(self)):
if not attr.startswith('_'):
s = getattr(self,attr)
if isinstance(s, str):
strt = strt + '\t' + r'\verb+' + attr + '+ \t' + r'&' + '\t' + s + r' \\' + '\n'
elif isinstance(s,float):
strt = strt + '\t' + r'\verb+' + attr + '+ \t' + r'&' + '\t' + str(s) + r' \\' + '\n'
elif isinstance(s,bool):
strt = strt + '\t' + r'\verb+' + attr + '+ \t' + r'&' + '\t' + str(s) + r' \\' + '\n'
else:
try:
len(s)
strt = strt + '\t' + r'\multicolumn{1}{c}\multirow{'+str(np.shape(s)[0])+r'}{*}{\verb+' + attr + r'+}' + '\t' + r'&' + '\t'
for k in range(np.shape(s)[0]):
strt = strt + str(s[k]) + r' \\'
strt = strt + '\n'
except:
if ('function' in str(s)): s=str(s).split('function ')[1].split('at')[0]
if ('method' in str(s)): s=str(s).split('method')[1].split('at')[0].split('.')[1].split('of')[0]
if ('ufunc' in str(s)): s=str(s).split('ufunc ')[1].split('>')[0]
strt = strt + '\t' + r'\verb+' + attr + '+ \t' + r'&' + '\t' + str(s) + r' \\' + '\n'
strt = strt + r'\end{tabular}' + '\n' + r'\end{table}' + '\n'
import uuid
import os
try:
os.makedirs(self._outdir)
except:
pass
f = open(os.path.join(self._outdir,'_source_'+str(uuid.uuid1())+'.tex'),'a')
f.write(strt)
f.close()
def __init__(self):
self.shape = None
self.custom = False
self.custom_func = user_source
self.heading = 'x'
self._outdir = './'
class Source1D(Sources):
def setup(self,options={}):
self._unpack_options(options=options)
self.pulse_width = self.wavelength
if self.shape=='plane':
self.harmonic_function = np.sin
self.function = self._plane
if self.shape=='pulse':
self.shape_function = np.exp
self.function = self._pulse
self.averaged = True
self._dx = 1.0
self._cp = np.sqrt(np.pi)
if self.shape=='harmonic pulse':
self.harmonic_function = np.sin
self.shape_function = np.exp
self.function = self._harmonic_pulse
if self.shape=='off':
self.shape_function = np.exp
return
def _plane(self,x,t):
wave = np.zeros( [2,len(x)], order='F')
harmonic = self.harmonic(x,self._dx,omega=self.omega,k=self.k,t=t)
wave[0,:] = self.Ey*harmonic
wave[1,:] = self.Hz*harmonic
return wave
def _pulse(self,x,t):
wave = np.zeros( [2,len(x)], order='F')
pulseshape = self.gaussian(x,self._dx,xo=self.offset,v=self.v,t=t,s=self.pulse_width)
wave[0,:] = self.Ey*pulseshape
wave[1,:] = self.Hz*pulseshape
return wave
def _harmonic_pulse(self,x,t):
wave = np.zeros( [2,len(x)], order='F')
harmonic = self.harmonic(x,self._dx,omega=self.omega,k=self.k,t=t)
pulseshape = self.gaussian(x,self._dx,xo=self.offset,v=self.v,t=t,s=self.pulse_width)
wave[0,:] = self.Ey*harmonic*pulseshape
wave[1,:] = self.Hz*harmonic*pulseshape
return wave
def _off(self,x,t=0):
wave = np.zeros( [2,len(x)], order='F')
return wave
def __init__(self,material,shape='plane',**kwargs):
self._set_f_w(material,kwargs)
self.options = {}
self.k = 2.0*np.pi/self.wavelength
self.v = material.co/material.bkg_n
self.Ey = material.zo
self.Hz = 1.0
self.offset = 0.0
self.shape = shape
self.custom = False
self.function = None
self.averaged = True
self.custom_func = user_source
self._material = material
self.dx = 1.0
self.heading = 'x'
self.cut = False
self.num_dim = 1
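# Illustrative usage sketch (added; assumes a material object exposing co, zo and bkg_n,
# as used in __init__ above):
#   src = Source1D(material, shape='pulse', wavelength=2.0)
#   src.offset = 10.0
#   src.setup()               # selects self._pulse as the source function
#   q = src.function(x, 0.0)  # array of shape (2, len(x)) holding [Ey, Hz]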
class Source2D(Sources):
def setup(self,options={}):
self._unpack_options(options=options)
self.pulse_width = self.wavelength*np.ones([2])
if self.shape=='custom':
self.custom = True
if self.custom:
self.shape = 'custom'
self.custom_function = user_source
if self.shape=='plane':
self.harmonic_function = np.sin
self.function = self._plane
if self.shape=='pulse':
self.shape_function = np.exp
self.function = self._pulse
self.t_off = (4.0*self.pulse_width[0])/self.v[0]
if self.shape=='harmonic pulse':
self.harmonic_function = np.sin
self.shape_function = np.exp
self.function = self._harmonic_pulse
if self.shape=='bessel pulse':
self.bessel_order = 0
self.function = self._bessel_pulse
self.kill_after_first_zero = True
if self.shape=='off':
self.shape_function = np.exp
if self.transversal_shape=='plane':
self.transversal_function = lambda y: 1.0
if self.transversal_shape=='gauss':
self.transversal_function = lambda y: self._transversal_gauss(y)
if self.transversal_shape=='cosine':
self.transversal_function = lambda y: self._transversal_cosine(y)
if self.transversal_shape=='bessel':
self.transversal_bessel_order = 0
self.transversal_kill_after_first_zero = True
self.transversal_function = self._transversal_bessel
return
def _transversal_plane(self,u):
shape = 1.0
return shape
def _transversal_cosine(self,u):
p = polarization[self.heading][0]
du = self._delta[p]
uo = self.transversal_offset
shape = self.harmonic(u,du,xo=uo,k=np.pi/self.transversal_width,f=np.cos)
r = (u-uo)/self.transversal_width
shape = shape*(np.abs(r)<=0.5)
return shape
def _transversal_gauss(self,u):
p = polarization[self.heading]
du = self._delta[p]
shape = self.gaussian(u,du,xo=self.transversal_offset,s=self.transversal_width)
return shape
def _transversal_bessel(self,y):
from scipy.special import jn, jn_zeros
first_zero = jn_zeros(self.transversal_bessel_order,1)
shape = jn(self.transversal_bessel_order,(y-self.transversal_offset)*(first_zero[0])/(self.transversal_width/2.0))
if self.transversal_kill_after_first_zero:
shape_kill = np.abs((y-self.transversal_offset)*(first_zero[0])/(self.transversal_width/2.0))<=(first_zero[0])
shape = shape_kill*shape
return shape
def _plane(self,x,y,t=0):
wave = np.zeros( [3,x.shape[0],y.shape[1]], order='F')
harmonic = self.transversal_function(y)*self.harmonic(x,self._delta[0],k=self.k[0],omega=self.omega,t=t)
wave[0,:,:] = self.amplitude[0]*harmonic
wave[1,:,:] = self.amplitude[1]*harmonic
wave[2,:,:] = self.amplitude[2]*harmonic
return wave
def _pulse(self,x,y,t=0):
wave = np.zeros( [3,x.shape[0],y.shape[1]], order='F')
dimh = heading[self.heading]
dimp = polarization[self.heading]
if t<=self.t_off:
shape = 1.0
shape = shape*self.gaussian(x,self._delta[0],xo=self.offset[0],s=self.pulse_width[0],v=self.v[0],t=t)
shape = self.transversal_function(y)*shape
else:
shape = 0.0
if len(self.heading)==1:
wave[dimp[0],:,:] = ((-1.0)**dimh[0])*self._material.zo*shape
wave[2,:,:] = self.amplitude[2]*shape
return wave
def _harmonic_pulse(self,x,y,t=0):
wave = np.zeros( [3,x.shape[0],y.shape[1]], order='F')
if t<=self.t_off:
harmonic = self._plane(x,y,t)
shape = self._pulse(x,y,t)
shape = shape*harmonic
else:
shape = 0.0
wave[0,:,:] = self.amplitude[0]*shape[0]
wave[1,:,:] = self.amplitude[1]*shape[1]
wave[2,:,:] = self.amplitude[2]*shape[2]
return wave
def _bessel_pulse(self,x,y,t=0):
from scipy.special import jn, jn_zeros
first_zero = jn_zeros(self.bessel_order,1)
wave = np.zeros( [3,x.shape[0],y.shape[1]], order='F')
shapex = jn(self.bessel_order,(x - (self.offset[1] + self.v[0]*t)*(first_zero[0])/(self.pulse_width[0]/2.0)))
if self.kill_after_first_zero:
shape_kill = np.abs((x - (self.offset[1] + self.v[0]*t)*(first_zero[0])/(self.pulse_width[0]/2.0)))<=(first_zero[0])
shapex = shape_kill*shapex
shapey = self.transversal_function(y)
shape = shapey*shapex
wave[0,:,:] = self.amplitude[0]*shape
wave[1,:,:] = self.amplitude[1]*shape
wave[2,:,:] = self.amplitude[2]*shape
return wave
def _off(self,x,y,t=0):
wave = np.zeros( [3,x.shape[0],y.shape[1]], order='F')
return wave
def __init__(self,material,shape='off',**kwargs):
self._set_f_w(material,kwargs)
self.options = {}
self.k = np.asarray([2.0*np.pi/self.wavelength,0.0])
self.v = material.co*np.asarray([1.0/material.bkg_n[0],1.0/material.bkg_n[1]])
self.amplitude = np.asarray([0.0,material.zo,1.0])
self.offset = np.zeros([2])
self.shape = shape
self.custom = False
self.function = None
self.heading = 'x'
self.averaged = True
self.cut = False
self._delta = np.ones([2])
self._material = material
self.transversal_shape = 'plane'
self.transversal_offset = 0.0
self.transversal_width = 0.0
self.transversal_function = lambda y: 1.0
self.transversal_delta = 1.0
self.num_dim = 2
class Source3D(Sources):
def setup(self,options={}):
self._unpack_options(options=options)
if self.shape=='custom':
self.custom = True
if self.custom:
self.shape = 'custom'
self.custom_function = user_source
if self.shape=='plane':
self.harmonic_function = np.sin
self.function = self._plane
if self.shape=='pulse':
self.pulse_width = self.wavelength
self.shape_function = np.exp
self.function = self._pulse
self.heading = 'x'
if self.shape=='harmonic pulse':
self.pulse_width = self.wavelength
self.harmonic_function = np.sin
self.shape_function = np.exp
self.function = self._harmonic_pulse
self.heading = 'x'
if self.shape=='bessel pulse':
self.pulse_width = self.wavelength
self.bessel_order = 0
self.function = self._bessel_pulse
self.kill_after_first_zero = True
if self.shape=='off':
self.pulse_width = [self.wavelength]*3
self.shape_function = np.exp
if self.transversal_shape=='plane':
self.transversal_function = lambda y,z: 1.0
if self.transversal_shape=='gauss':
self.transversal_function = lambda y,z: self._transversal_gauss(y,0)*self._transversal_gauss(z,1)
if self.transversal_shape=='cosine':
self.transversal_function = lambda y,z: self._transversal_cosine(y,z)
if self.transversal_shape=='bessel':
self.transversal_bessel_order = 0
self.transversal_kill_after_first_zero = True
self.transversal_function = lambda y,z: self._transversal_bessel(y,0)*self._transversal_bessel(z,1)
return
def _transversal_cosine(self,u,v):
sv = self.transversal_width[0]
su = self.transversal_width[1]
uo = self.transversal_offset[0]
vo = self.transversal_offset[1]
r1 = (u-uo)/sv
r2 = (v-vo)/su
if self.averaged:
du = self._delta[1]
dv = self._delta[2]
ku = np.pi/self.transversal_width[0]
kv = np.pi/self.transversal_width[1]
ddu = self._delta[1]
ddv = self._delta[2]
shape = (2*sv*su*np.cos((ku*(u-uo))/sv)*np.sin((ddu*ku)/sv)*(np.sin((kv*(ddv+v-vo))/su)+np.sin((kv*(ddv-v+vo))/su)))/(du*dv*ku*kv)
else:
# Point value (assumed intent, since r was undefined here): product of the
# cosine profiles along both transversal directions.
shape = np.cos(r1*np.pi)*np.cos(r2*np.pi)
shape = shape*(np.abs(r1)<=0.5)*(np.abs(r2)<=0.5)
return shape
def _transversal_gauss(self,u,p):
du = self._delta[p]
shape = self.gaussian(u,du,xo=self.transversal_offset[p],s=self.transversal_width[p])
return shape
def _transversal_bessel(self,u,p):
from scipy.special import jn, jn_zeros
first_zero = jn_zeros(self.transversal_bessel_order,1)
r = (u-self.transversal_offset[p])/(self.transversal_width[p]/2.0)
shape = jn(self.transversal_bessel_order,r*(first_zero[0]))
if self.transversal_kill_after_first_zero:
shape_kill = np.abs(r*(first_zero[0]))<=(first_zero[0])
shape = shape_kill*shape
return shape
def _plane(self,x,y,z,t=0):
wave = np.zeros( [6,x.shape[0],y.shape[1],z.shape[2]], order='F')
harmonic = self.transversal_function(y,z)*self.harmonic(x,self._delta[0],xo=self.offset[0],k=self.k[0],omega=self.omega,t=t)
wave[1,:,:,:] = self.amplitude[1]*harmonic
wave[5,:,:,:] = self.amplitude[5]*harmonic
return wave
def _pulse(self,x,y,z,t=0):
wave = np.zeros( [6,x.shape[0],y.shape[1],z.shape[2]], order='F')
dimh = heading[self.heading]
dimp = polarization[self.heading]
if t<=self.t_off:
grid = [x,y,z]
shape = 1.0
for i in range(len(self.heading)):
h = dimh[i]
shape = shape*self.gaussian(grid[h],self._delta[h],xo=self.offset[h],s=self.pulse_width[h],v=self.v[h],t=t)
shape = self.transversal_function(y,z)*shape
else:
shape = 0.0
wave[1,:,:,:] = self.amplitude[1]*shape
wave[5,:,:,:] = self.amplitude[5]*shape
return wave
def _harmonic_pulse(self,x,y,z,t=0):
wave = np.zeros( [6,x.shape[0],y.shape[1],z.shape[2]], order='F')
harmonic1 = self.harmonic_function(self.k[0]*(x-self.offset[1]) - self.omega*t)
harmonic2 = self.harmonic_function(self.k[0]*(x-self.offset[5]) - self.omega*t)
shapex1 = self.transversal_function(y,z)*self.shape_function(-(x - (self.offset[1] + self.v[0]*t))**2/self.pulse_width**2)
shapex2 = self.transversal_function(y,z)*self.shape_function(-(x - (self.offset[5] + self.v[0]*t))**2/self.pulse_width**2)
wave[1,:,:,:] = self.amplitude[1]*shapex1*harmonic1
wave[5,:,:,:] = self.amplitude[5]*shapex2*harmonic2
return wave
def _bessel_pulse(self,x,y,z,t=0):
from scipy.special import jn, jn_zeros
first_zero = jn_zeros(self.bessel_order,1)
wave = np.zeros( [6,x.shape[0],y.shape[1],z.shape[2]], order='F')
shapex1 = self.transversal_function(y,z)*jn(self.bessel_order,(x - (self.offset[1] + self.v[0]*t)*(first_zero[0])/(self.pulse_width/2.0)))
shapex2 = self.transversal_function(y,z)*jn(self.bessel_order,(x - (self.offset[5] + self.v[0]*t)*(first_zero[0])/(self.pulse_width/2.0)))
if self.kill_after_first_zero:
shape_kill1 = np.abs((x - (self.offset[1] + self.v[0]*t)*(first_zero[0])/(self.pulse_width/2.0)))<=(first_zero[0])
shape_kill2 = np.abs((x - (self.offset[5] + self.v[0]*t)*(first_zero[0])/(self.pulse_width/2.0)))<=(first_zero[0])
shapex1 = shape_kill1*shapex1
shapex2 = shape_kill2*shapex2
wave[1,:,:,:] = self.amplitude[1]*shapex1
wave[5,:,:,:] = self.amplitude[5]*shapex2
return wave
def _off(self,x,y,z,t=0):
wave = np.zeros( [6,x.shape[0],y.shape[1],z.shape[2]], order='F')
return wave
def __init__(self,material,shape='off',**kwargs):
self._set_f_w(material,kwargs)
self.options = {}
self.k = np.asarray([2.0*np.pi/self.wavelength,0.0,0.0])
self.v = material.co*np.asarray([1.0/material.bkg_n[0],1.0/material.bkg_n[1],1.0/material.bkg_n[2]])
self.amplitude = np.asarray([0.0,material.zo,0.0,0.0,0.0,1.0])
self.offset = np.zeros([6])
self.transversal_shape = 'plane'
self.transversal_offset = np.zeros([2])
self.transversal_width = np.zeros([2])
self.transversal_function = None
self.shape = shape
self.custom = False
self.function = None
self._material = material
self._delta = np.zeros([3])
self.averaged = False
self.cut = False
|
sanromd/emclaw
|
emclaw/utils/sources.py
|
Python
|
lgpl-3.0
| 23,123
|
[
"Gaussian"
] |
54b193a0a041088dd2164aeb542bc43beff628eb55730484ce8b376509010a5c
|
import os
import sys
import sysconfig
from pysam.libchtslib import *
from pysam.libcutils import *
import pysam.libcutils as libcutils
import pysam.libcfaidx as libcfaidx
from pysam.libcfaidx import *
import pysam.libctabix as libctabix
from pysam.libctabix import *
import pysam.libcsamfile as libcsamfile
from pysam.libcsamfile import *
import pysam.libcalignmentfile as libcalignmentfile
from pysam.libcalignmentfile import *
import pysam.libcalignedsegment as libcalignedsegment
from pysam.libcalignedsegment import *
import pysam.libcvcf as libcvcf
from pysam.libcvcf import *
import pysam.libcbcf as libcbcf
from pysam.libcbcf import *
import pysam.libcbgzf as libcbgzf
from pysam.libcbgzf import *
from pysam.utils import SamtoolsError
import pysam.Pileup as Pileup
from pysam.samtools import *
import pysam.config
# export all the symbols from separate modules
__all__ = \
libchtslib.__all__ +\
libcutils.__all__ +\
libctabix.__all__ +\
libcvcf.__all__ +\
libcbcf.__all__ +\
libcbgzf.__all__ +\
libcfaidx.__all__ +\
libcalignmentfile.__all__ +\
libcalignedsegment.__all__ +\
libcsamfile.__all__ +\
["SamtoolsError"] +\
["Pileup"]
from pysam.version import __version__, __samtools_version__
def get_include():
'''return a list of include directories.'''
dirname = os.path.abspath(os.path.join(os.path.dirname(__file__)))
#
# Header files may be stored in different relative locations
# depending on installation mode (e.g., `python setup.py install`,
# `python setup.py develop`. The first entry in each list is
# where develop-mode headers can be found.
#
htslib_possibilities = [os.path.join(dirname, '..', 'htslib'),
os.path.join(dirname, 'include', 'htslib')]
samtool_possibilities = [os.path.join(dirname, '..', 'samtools'),
os.path.join(dirname, 'include', 'samtools')]
includes = [dirname]
for header_locations in [htslib_possibilities, samtool_possibilities]:
for header_location in header_locations:
if os.path.exists(header_location):
includes.append(os.path.abspath(header_location))
break
return includes
def get_defines():
'''return a list of defined compilation parameters.'''
return [] #('_FILE_OFFSET_BITS', '64'),
# ('_USE_KNETFILE', '')]
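# Illustrative use from a dependent package's setup.py (a sketch, not part of pysam itself):
#   from setuptools import Extension
#   import pysam
#   ext = Extension('mymod', ['mymod.pyx'],
#                   include_dirs=pysam.get_include(),
#                   define_macros=pysam.get_defines())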
def get_libraries():
'''return a list of libraries to link against.'''
# Note that this list does not include libcsamtools.so as there are
# numerous name conflicts with libchtslib.so.
dirname = os.path.abspath(os.path.join(os.path.dirname(__file__)))
pysam_libs = ['libctabixproxies',
'libcfaidx',
'libcsamfile',
'libcvcf',
'libcbcf',
'libctabix']
if pysam.config.HTSLIB == "builtin":
pysam_libs.append('libchtslib')
so = sysconfig.get_config_var('SO')
return [os.path.join(dirname, x + so) for x in pysam_libs]
|
bioinformed/pysam
|
pysam/__init__.py
|
Python
|
mit
| 3,041
|
[
"pysam"
] |
22f92f654708cdde78ec3383eb61788d2811a61078f962b465df8c16f91891a9
|
#!/usr/bin/env python
import pprint
import re
import os, sys
import unittest
sys.path[0:0] = ['.', '..']
from pycparser import c_parser
from pycparser.c_ast import *
from pycparser.c_parser import CParser, Coord, ParseError
_c_parser = c_parser.CParser(
lex_optimize=False,
yacc_debug=True,
yacc_optimize=False,
yacctab='yacctab')
def expand_decl(decl):
""" Converts the declaration into a nested list.
"""
typ = type(decl)
if typ == TypeDecl:
return ['TypeDecl', expand_decl(decl.type)]
elif typ == IdentifierType:
return ['IdentifierType', decl.names]
elif typ == ID:
return ['ID', decl.name]
elif typ in [Struct, Union]:
decls = [expand_decl(d) for d in decl.decls or []]
return [typ.__name__, decl.name, decls]
else:
nested = expand_decl(decl.type)
if typ == Decl:
if decl.quals:
return ['Decl', decl.quals, decl.name, nested]
else:
return ['Decl', decl.name, nested]
elif typ == Typename: # for function parameters
if decl.quals:
return ['Typename', decl.quals, nested]
else:
return ['Typename', nested]
elif typ == ArrayDecl:
dimval = decl.dim.value if decl.dim else ''
return ['ArrayDecl', dimval, decl.dim_quals, nested]
elif typ == PtrDecl:
if decl.quals:
return ['PtrDecl', decl.quals, nested]
else:
return ['PtrDecl', nested]
elif typ == Typedef:
return ['Typedef', decl.name, nested]
elif typ == FuncDecl:
if decl.args:
params = [expand_decl(param) for param in decl.args.params]
else:
params = []
return ['FuncDecl', params, nested]
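# Worked example (added for clarity): for the declaration parsed from 'int a;', expand_decl()
# returns ['Decl', 'a', ['TypeDecl', ['IdentifierType', ['int']]]], the same nested list the
# assertions in test_simple_decls below compare against.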
def expand_init(init):
""" Converts an initialization into a nested list
"""
typ = type(init)
if typ == NamedInitializer:
des = [expand_init(dp) for dp in init.name]
return (des, expand_init(init.expr))
elif typ in (InitList, ExprList):
return [expand_init(expr) for expr in init.exprs]
elif typ == Constant:
return ['Constant', init.type, init.value]
elif typ == ID:
return ['ID', init.name]
class TestCParser_base(unittest.TestCase):
def parse(self, txt, filename=''):
return self.cparser.parse(txt, filename)
def setUp(self):
self.cparser = _c_parser
class TestCParser_fundamentals(TestCParser_base):
def get_decl(self, txt, index=0):
""" Given a source and an index returns the expanded
declaration at that index.
FileAST holds a list of 'external declarations'.
index is the offset of the desired declaration in that
list.
"""
t = self.parse(txt).ext[index]
return expand_decl(t)
def get_decl_init(self, txt, index=0):
""" Returns the expanded initializer of the declaration
at index.
"""
t = self.parse(txt).ext[index]
return expand_init(t.init)
def test_FileAST(self):
t = self.parse('int a; char c;')
self.assertTrue(isinstance(t, FileAST))
self.assertEqual(len(t.ext), 2)
# empty file
t2 = self.parse('')
self.assertTrue(isinstance(t2, FileAST))
self.assertEqual(len(t2.ext), 0)
def test_empty_toplevel_decl(self):
code = 'int foo;;'
t = self.parse(code)
self.assertTrue(isinstance(t, FileAST))
self.assertEqual(len(t.ext), 1)
self.assertEqual(self.get_decl(code),
['Decl', 'foo',
['TypeDecl', ['IdentifierType', ['int']]]])
def assert_coord(self, node, line, file=None):
self.assertEqual(node.coord.line, line)
if file:
self.assertEqual(node.coord.file, file)
def test_coords(self):
""" Tests the "coordinates" of parsed elements - file
name and line numbers, with modifications inserted by
#line directives.
"""
self.assert_coord(self.parse('int a;').ext[0], 1)
t1 = """
int a;
int b;\n\n
int c;
"""
f1 = self.parse(t1, filename='test.c')
self.assert_coord(f1.ext[0], 2, 'test.c')
self.assert_coord(f1.ext[1], 3, 'test.c')
self.assert_coord(f1.ext[2], 6, 'test.c')
t1_1 = '''
int main() {
k = p;
printf("%d", b);
return 0;
}'''
f1_1 = self.parse(t1_1, filename='test.c')
self.assert_coord(f1_1.ext[0].body.block_items[0], 3, 'test.c')
self.assert_coord(f1_1.ext[0].body.block_items[1], 4, 'test.c')
t1_2 = '''
int main () {
int p = (int) k;
}'''
f1_2 = self.parse(t1_2, filename='test.c')
# make sure that the Cast has a coord (issue 23)
self.assert_coord(f1_2.ext[0].body.block_items[0].init, 3, 'test.c')
t2 = """
#line 99
int c;
"""
self.assert_coord(self.parse(t2).ext[0], 99)
t3 = """
int dsf;
char p;
#line 3000 "in.h"
char d;
"""
f3 = self.parse(t3, filename='test.c')
self.assert_coord(f3.ext[0], 2, 'test.c')
self.assert_coord(f3.ext[1], 3, 'test.c')
self.assert_coord(f3.ext[2], 3000, 'in.h')
t4 = """
#line 20 "restore.h"
int maydler(char);
#line 30 "includes/daween.ph"
long j, k;
#line 50000
char* ro;
"""
f4 = self.parse(t4, filename='myb.c')
self.assert_coord(f4.ext[0], 20, 'restore.h')
self.assert_coord(f4.ext[1], 30, 'includes/daween.ph')
self.assert_coord(f4.ext[2], 30, 'includes/daween.ph')
self.assert_coord(f4.ext[3], 50000, 'includes/daween.ph')
t5 = """
int
#line 99
c;
"""
self.assert_coord(self.parse(t5).ext[0], 99)
# coord for ellipsis
t6 = """
int foo(int j,
...) {
}"""
f6 = self.parse(t6, filename='z.c')
self.assert_coord(self.parse(t6).ext[0].decl.type.args.params[1], 3)
def test_forloop_coord(self):
t = '''\
void foo() {
for(int z=0; z<4;
z++){}
}
'''
s = self.parse(t, filename='f.c')
forloop = s.ext[0].body.block_items[0]
self.assert_coord(forloop.init, 2, 'f.c')
self.assert_coord(forloop.cond, 2, 'f.c')
self.assert_coord(forloop.next, 3, 'f.c')
def test_simple_decls(self):
self.assertEqual(self.get_decl('int a;'),
['Decl', 'a', ['TypeDecl', ['IdentifierType', ['int']]]])
self.assertEqual(self.get_decl('unsigned int a;'),
['Decl', 'a', ['TypeDecl', ['IdentifierType', ['unsigned', 'int']]]])
self.assertEqual(self.get_decl('_Bool a;'),
['Decl', 'a', ['TypeDecl', ['IdentifierType', ['_Bool']]]])
self.assertEqual(self.get_decl('float _Complex fcc;'),
['Decl', 'fcc', ['TypeDecl', ['IdentifierType', ['float', '_Complex']]]])
self.assertEqual(self.get_decl('char* string;'),
['Decl', 'string',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]])
self.assertEqual(self.get_decl('long ar[15];'),
['Decl', 'ar',
['ArrayDecl', '15', [],
['TypeDecl', ['IdentifierType', ['long']]]]])
self.assertEqual(self.get_decl('long long ar[15];'),
['Decl', 'ar',
['ArrayDecl', '15', [],
['TypeDecl', ['IdentifierType', ['long', 'long']]]]])
self.assertEqual(self.get_decl('unsigned ar[];'),
['Decl', 'ar',
['ArrayDecl', '', [],
['TypeDecl', ['IdentifierType', ['unsigned']]]]])
self.assertEqual(self.get_decl('int strlen(char* s);'),
['Decl', 'strlen',
['FuncDecl',
[['Decl', 's',
['PtrDecl',
['TypeDecl', ['IdentifierType', ['char']]]]]],
['TypeDecl', ['IdentifierType', ['int']]]]])
self.assertEqual(self.get_decl('int strcmp(char* s1, char* s2);'),
['Decl', 'strcmp',
['FuncDecl',
[ ['Decl', 's1',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]],
['Decl', 's2',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]
],
['TypeDecl', ['IdentifierType', ['int']]]]])
# function return values and parameters may not have type information
self.assertEqual(self.get_decl('extern foobar(foo, bar);'),
['Decl', 'foobar',
['FuncDecl',
[ ['ID', 'foo'],
['ID', 'bar']
],
['TypeDecl', ['IdentifierType', ['int']]]]])
def test_nested_decls(self): # the fun begins
self.assertEqual(self.get_decl('char** ar2D;'),
['Decl', 'ar2D',
['PtrDecl', ['PtrDecl',
['TypeDecl', ['IdentifierType', ['char']]]]]])
self.assertEqual(self.get_decl('int (*a)[1][2];'),
['Decl', 'a',
['PtrDecl',
['ArrayDecl', '1', [],
['ArrayDecl', '2', [],
['TypeDecl', ['IdentifierType', ['int']]]]]]])
self.assertEqual(self.get_decl('int *a[1][2];'),
['Decl', 'a',
['ArrayDecl', '1', [],
['ArrayDecl', '2', [],
['PtrDecl', ['TypeDecl', ['IdentifierType', ['int']]]]]]])
self.assertEqual(self.get_decl('char* const* p;'),
['Decl', 'p',
['PtrDecl', ['PtrDecl', ['const'],
['TypeDecl', ['IdentifierType', ['char']]]]]])
self.assertEqual(self.get_decl('char* * const p;'),
['Decl', 'p',
['PtrDecl', ['const'], ['PtrDecl',
['TypeDecl', ['IdentifierType', ['char']]]]]])
self.assertEqual(self.get_decl('char ***ar3D[40];'),
['Decl', 'ar3D',
['ArrayDecl', '40', [],
['PtrDecl', ['PtrDecl', ['PtrDecl',
['TypeDecl', ['IdentifierType', ['char']]]]]]]])
self.assertEqual(self.get_decl('char (***ar3D)[40];'),
['Decl', 'ar3D',
['PtrDecl', ['PtrDecl', ['PtrDecl',
['ArrayDecl', '40', [], ['TypeDecl', ['IdentifierType', ['char']]]]]]]])
self.assertEqual(self.get_decl('int (*x[4])(char, int);'),
['Decl', 'x',
['ArrayDecl', '4', [],
['PtrDecl',
['FuncDecl',
[ ['Typename', ['TypeDecl', ['IdentifierType', ['char']]]],
['Typename', ['TypeDecl', ['IdentifierType', ['int']]]]],
['TypeDecl', ['IdentifierType', ['int']]]]]]])
self.assertEqual(self.get_decl('char *(*(**foo [][8])())[];'),
['Decl', 'foo',
['ArrayDecl', '', [],
['ArrayDecl', '8', [],
['PtrDecl', ['PtrDecl',
['FuncDecl',
[],
['PtrDecl',
['ArrayDecl', '', [],
['PtrDecl',
['TypeDecl',
['IdentifierType', ['char']]]]]]]]]]]])
# explore named and unnamed function pointer parameters,
# with and without qualifiers
# unnamed w/o quals
self.assertEqual(self.get_decl('int (*k)(int);'),
['Decl', 'k',
['PtrDecl',
['FuncDecl',
[['Typename', ['TypeDecl', ['IdentifierType', ['int']]]]],
['TypeDecl', ['IdentifierType', ['int']]]]]])
# unnamed w/ quals
self.assertEqual(self.get_decl('int (*k)(const int);'),
['Decl', 'k',
['PtrDecl',
['FuncDecl',
[['Typename', ['const'], ['TypeDecl', ['IdentifierType', ['int']]]]],
['TypeDecl', ['IdentifierType', ['int']]]]]])
# named w/o quals
self.assertEqual(self.get_decl('int (*k)(int q);'),
['Decl', 'k',
['PtrDecl',
['FuncDecl',
[['Decl', 'q', ['TypeDecl', ['IdentifierType', ['int']]]]],
['TypeDecl', ['IdentifierType', ['int']]]]]])
# named w/ quals
self.assertEqual(self.get_decl('int (*k)(const volatile int q);'),
['Decl', 'k',
['PtrDecl',
['FuncDecl',
[['Decl', ['const', 'volatile'], 'q',
['TypeDecl', ['IdentifierType', ['int']]]]],
['TypeDecl', ['IdentifierType', ['int']]]]]])
# restrict qualifier
self.assertEqual(self.get_decl('int (*k)(restrict int* q);'),
['Decl', 'k',
['PtrDecl',
['FuncDecl',
[['Decl', ['restrict'], 'q',
['PtrDecl',
['TypeDecl', ['IdentifierType', ['int']]]]]],
['TypeDecl', ['IdentifierType', ['int']]]]]])
def test_func_decls_with_array_dim_qualifiers(self):
self.assertEqual(self.get_decl('int zz(int p[static 10]);'),
['Decl', 'zz',
['FuncDecl',
[['Decl', 'p', ['ArrayDecl', '10', ['static'],
['TypeDecl', ['IdentifierType', ['int']]]]]],
['TypeDecl', ['IdentifierType', ['int']]]]])
self.assertEqual(self.get_decl('int zz(int p[const 10]);'),
['Decl', 'zz',
['FuncDecl',
[['Decl', 'p', ['ArrayDecl', '10', ['const'],
['TypeDecl', ['IdentifierType', ['int']]]]]],
['TypeDecl', ['IdentifierType', ['int']]]]])
self.assertEqual(self.get_decl('int zz(int p[restrict][5]);'),
['Decl', 'zz',
['FuncDecl',
[['Decl', 'p', ['ArrayDecl', '', ['restrict'],
['ArrayDecl', '5', [],
['TypeDecl', ['IdentifierType', ['int']]]]]]],
['TypeDecl', ['IdentifierType', ['int']]]]])
self.assertEqual(self.get_decl('int zz(int p[const restrict static 10][5]);'),
['Decl', 'zz',
['FuncDecl',
[['Decl', 'p', ['ArrayDecl', '10', ['const', 'restrict', 'static'],
['ArrayDecl', '5', [],
['TypeDecl', ['IdentifierType', ['int']]]]]]],
['TypeDecl', ['IdentifierType', ['int']]]]])
def test_qualifiers_storage_specifiers(self):
def assert_qs(txt, index, quals, storage):
d = self.parse(txt).ext[index]
self.assertEqual(d.quals, quals)
self.assertEqual(d.storage, storage)
assert_qs("extern int p;", 0, [], ['extern'])
assert_qs("const long p = 6;", 0, ['const'], [])
d1 = "static const int p, q, r;"
for i in range(3):
assert_qs(d1, i, ['const'], ['static'])
d2 = "static char * const p;"
assert_qs(d2, 0, [], ['static'])
pdecl = self.parse(d2).ext[0].type
self.assertTrue(isinstance(pdecl, PtrDecl))
self.assertEqual(pdecl.quals, ['const'])
def test_sizeof(self):
e = """
void foo()
{
int a = sizeof k;
int b = sizeof(int);
int c = sizeof(int**);;
char* p = "just to make sure this parses w/o error...";
int d = sizeof(int());
}
"""
compound = self.parse(e).ext[0].body
s1 = compound.block_items[0].init
self.assertTrue(isinstance(s1, UnaryOp))
self.assertEqual(s1.op, 'sizeof')
self.assertTrue(isinstance(s1.expr, ID))
self.assertEqual(s1.expr.name, 'k')
s2 = compound.block_items[1].init
self.assertEqual(expand_decl(s2.expr),
['Typename', ['TypeDecl', ['IdentifierType', ['int']]]])
s3 = compound.block_items[2].init
self.assertEqual(expand_decl(s3.expr),
['Typename',
['PtrDecl',
['PtrDecl',
['TypeDecl',
['IdentifierType', ['int']]]]]])
def test_offsetof(self):
e = """
void foo() {
int a = offsetof(struct S, p);
a.b = offsetof(struct sockaddr, sp) + strlen(bar);
}
"""
compound = self.parse(e).ext[0].body
s1 = compound.block_items[0].init
self.assertTrue(isinstance(s1, FuncCall))
self.assertTrue(isinstance(s1.name, ID))
self.assertEqual(s1.name.name, 'offsetof')
self.assertTrue(isinstance(s1.args.exprs[0], Typename))
self.assertTrue(isinstance(s1.args.exprs[1], ID))
# The C99 compound literal feature
#
def test_compound_literals(self):
ps1 = self.parse(r'''
void foo() {
p = (long long){k};
tc = (struct jk){.a = {1, 2}, .b[0] = t};
}''')
compound = ps1.ext[0].body.block_items[0].rvalue
self.assertEqual(expand_decl(compound.type),
['Typename', ['TypeDecl', ['IdentifierType', ['long', 'long']]]])
self.assertEqual(expand_init(compound.init),
[['ID', 'k']])
compound = ps1.ext[0].body.block_items[1].rvalue
self.assertEqual(expand_decl(compound.type),
['Typename', ['TypeDecl', ['Struct', 'jk', []]]])
self.assertEqual(expand_init(compound.init),
[
([['ID', 'a']], [['Constant', 'int', '1'], ['Constant', 'int', '2']]),
([['ID', 'b'], ['Constant', 'int', '0']], ['ID', 't'])])
def test_enums(self):
e1 = "enum mycolor op;"
e1_type = self.parse(e1).ext[0].type.type
self.assertTrue(isinstance(e1_type, Enum))
self.assertEqual(e1_type.name, 'mycolor')
self.assertEqual(e1_type.values, None)
e2 = "enum mysize {large=20, small, medium} shoes;"
e2_type = self.parse(e2).ext[0].type.type
self.assertTrue(isinstance(e2_type, Enum))
self.assertEqual(e2_type.name, 'mysize')
e2_elist = e2_type.values
self.assertTrue(isinstance(e2_elist, EnumeratorList))
for e2_eval in e2_elist.enumerators:
self.assertTrue(isinstance(e2_eval, Enumerator))
self.assertEqual(e2_elist.enumerators[0].name, 'large')
self.assertEqual(e2_elist.enumerators[0].value.value, '20')
self.assertEqual(e2_elist.enumerators[2].name, 'medium')
self.assertEqual(e2_elist.enumerators[2].value, None)
# enum with trailing comma (C99 feature)
e3 = """
enum
{
red,
blue,
green,
} color;
"""
e3_type = self.parse(e3).ext[0].type.type
self.assertTrue(isinstance(e3_type, Enum))
e3_elist = e3_type.values
self.assertTrue(isinstance(e3_elist, EnumeratorList))
for e3_eval in e3_elist.enumerators:
self.assertTrue(isinstance(e3_eval, Enumerator))
self.assertEqual(e3_elist.enumerators[0].name, 'red')
self.assertEqual(e3_elist.enumerators[0].value, None)
self.assertEqual(e3_elist.enumerators[1].name, 'blue')
self.assertEqual(e3_elist.enumerators[2].name, 'green')
def test_typedef(self):
# without typedef, error
s1 = """
node k;
"""
self.assertRaises(ParseError, self.parse, s1)
# now with typedef, works
s2 = """
typedef void* node;
node k;
"""
ps2 = self.parse(s2)
self.assertEqual(expand_decl(ps2.ext[0]),
['Typedef', 'node',
['PtrDecl',
['TypeDecl', ['IdentifierType', ['void']]]]])
self.assertEqual(expand_decl(ps2.ext[1]),
['Decl', 'k',
['TypeDecl', ['IdentifierType', ['node']]]])
s3 = """
typedef int T;
typedef T *pT;
pT aa, bb;
"""
ps3 = self.parse(s3)
self.assertEqual(expand_decl(ps3.ext[3]),
['Decl', 'bb',
['TypeDecl', ['IdentifierType', ['pT']]]])
s4 = '''
typedef char* __builtin_va_list;
typedef __builtin_va_list __gnuc_va_list;
'''
ps4 = self.parse(s4)
self.assertEqual(expand_decl(ps4.ext[1]),
['Typedef', '__gnuc_va_list',
['TypeDecl',
['IdentifierType', ['__builtin_va_list']]]])
s5 = '''typedef struct tagHash Hash;'''
ps5 = self.parse(s5)
self.assertEqual(expand_decl(ps5.ext[0]),
['Typedef', 'Hash', ['TypeDecl', ['Struct', 'tagHash', []]]])
def test_struct_union(self):
s1 = """
struct {
int id;
char* name;
} joe;
"""
self.assertEqual(expand_decl(self.parse(s1).ext[0]),
['Decl', 'joe',
['TypeDecl', ['Struct', None,
[ ['Decl', 'id',
['TypeDecl',
['IdentifierType', ['int']]]],
['Decl', 'name',
['PtrDecl',
['TypeDecl',
['IdentifierType', ['char']]]]]]]]])
s2 = """
struct node p;
"""
self.assertEqual(expand_decl(self.parse(s2).ext[0]),
['Decl', 'p',
['TypeDecl', ['Struct', 'node', []]]])
s21 = """
union pri ra;
"""
self.assertEqual(expand_decl(self.parse(s21).ext[0]),
['Decl', 'ra',
['TypeDecl', ['Union', 'pri', []]]])
s3 = """
struct node* p;
"""
self.assertEqual(expand_decl(self.parse(s3).ext[0]),
['Decl', 'p',
['PtrDecl',
['TypeDecl', ['Struct', 'node', []]]]])
s4 = """
struct node;
"""
self.assertEqual(expand_decl(self.parse(s4).ext[0]),
['Decl', None,
['Struct', 'node', []]])
s5 = """
union
{
struct
{
int type;
} n;
struct
{
int type;
int intnode;
} ni;
} u;
"""
self.assertEqual(expand_decl(self.parse(s5).ext[0]),
['Decl', 'u',
['TypeDecl',
['Union', None,
[['Decl', 'n',
['TypeDecl',
['Struct', None,
[['Decl', 'type',
['TypeDecl', ['IdentifierType', ['int']]]]]]]],
['Decl', 'ni',
['TypeDecl',
['Struct', None,
[['Decl', 'type',
['TypeDecl', ['IdentifierType', ['int']]]],
['Decl', 'intnode',
['TypeDecl', ['IdentifierType', ['int']]]]]]]]]]]])
s6 = """
typedef struct foo_tag
{
void* data;
} foo, *pfoo;
"""
s6_ast = self.parse(s6)
self.assertEqual(expand_decl(s6_ast.ext[0]),
['Typedef', 'foo',
['TypeDecl',
['Struct', 'foo_tag',
[['Decl', 'data',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['void']]]]]]]]])
self.assertEqual(expand_decl(s6_ast.ext[1]),
['Typedef', 'pfoo',
['PtrDecl',
['TypeDecl',
['Struct', 'foo_tag',
[['Decl', 'data',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['void']]]]]]]]]])
s7 = r"""
struct _on_exit_args {
void * _fnargs[32];
void * _dso_handle[32];
long _fntypes;
#line 77 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
long _is_cxa;
};
"""
s7_ast = self.parse(s7, filename='test.c')
self.assert_coord(s7_ast.ext[0].type.decls[2], 6, 'test.c')
self.assert_coord(s7_ast.ext[0].type.decls[3], 78,
r'D:\eli\cpp_stuff\libc_include/sys/reent.h')
s8 = """
typedef enum tagReturnCode {SUCCESS, FAIL} ReturnCode;
typedef struct tagEntry
{
char* key;
char* value;
} Entry;
typedef struct tagNode
{
Entry* entry;
struct tagNode* next;
} Node;
typedef struct tagHash
{
unsigned int table_size;
Node** heads;
} Hash;
"""
s8_ast = self.parse(s8)
self.assertEqual(expand_decl(s8_ast.ext[3]),
['Typedef', 'Hash',
['TypeDecl', ['Struct', 'tagHash',
[['Decl', 'table_size',
['TypeDecl', ['IdentifierType', ['unsigned', 'int']]]],
['Decl', 'heads',
['PtrDecl', ['PtrDecl', ['TypeDecl', ['IdentifierType', ['Node']]]]]]]]]])
def test_anonymous_struct_union(self):
s1 = """
union
{
union
{
int i;
long l;
};
struct
{
int type;
int intnode;
};
} u;
"""
self.assertEqual(expand_decl(self.parse(s1).ext[0]),
['Decl', 'u',
['TypeDecl',
['Union', None,
[['Decl', None,
['Union', None,
[['Decl', 'i',
['TypeDecl',
['IdentifierType', ['int']]]],
['Decl', 'l',
['TypeDecl',
['IdentifierType', ['long']]]]]]],
['Decl', None,
['Struct', None,
[['Decl', 'type',
['TypeDecl',
['IdentifierType', ['int']]]],
['Decl', 'intnode',
['TypeDecl',
['IdentifierType', ['int']]]]]]]]]]])
s2 = """
struct
{
int i;
union
{
int id;
char* name;
};
float f;
} joe;
"""
self.assertEqual(expand_decl(self.parse(s2).ext[0]),
['Decl', 'joe',
['TypeDecl',
['Struct', None,
[['Decl', 'i',
['TypeDecl',
['IdentifierType', ['int']]]],
['Decl', None,
['Union', None,
[['Decl', 'id',
['TypeDecl',
['IdentifierType', ['int']]]],
['Decl', 'name',
['PtrDecl',
['TypeDecl',
['IdentifierType', ['char']]]]]]]],
['Decl', 'f',
['TypeDecl',
['IdentifierType', ['float']]]]]]]])
# ISO/IEC 9899:201x Committee Draft 2010-11-16, N1539
# section 6.7.2.1, par. 19, example 1
s3 = """
struct v {
union {
struct { int i, j; };
struct { long k, l; } w;
};
int m;
} v1;
"""
self.assertEqual(expand_decl(self.parse(s3).ext[0]),
['Decl', 'v1',
['TypeDecl',
['Struct', 'v',
[['Decl', None,
['Union', None,
[['Decl', None,
['Struct', None,
[['Decl', 'i',
['TypeDecl',
['IdentifierType', ['int']]]],
['Decl', 'j',
['TypeDecl',
['IdentifierType', ['int']]]]]]],
['Decl', 'w',
['TypeDecl',
['Struct', None,
[['Decl', 'k',
['TypeDecl',
['IdentifierType', ['long']]]],
['Decl', 'l',
['TypeDecl',
['IdentifierType', ['long']]]]]]]]]]],
['Decl', 'm',
['TypeDecl',
['IdentifierType', ['int']]]]]]]])
s4 = """
struct v {
int i;
float;
} v2;"""
# just make sure this doesn't raise ParseError
self.parse(s4)
def test_struct_members_namespace(self):
""" Tests that structure/union member names reside in a separate
namespace and can be named after existing types.
"""
s1 = """
typedef int Name;
typedef Name NameArray[10];
struct {
Name Name;
Name NameArray[3];
} sye;
void main(void)
{
sye.Name = 1;
}
"""
s1_ast = self.parse(s1)
self.assertEqual(expand_decl(s1_ast.ext[2]),
['Decl', 'sye',
['TypeDecl', ['Struct', None,
[ ['Decl', 'Name',
['TypeDecl',
['IdentifierType', ['Name']]]],
['Decl', 'NameArray',
['ArrayDecl', '3', [],
['TypeDecl', ['IdentifierType', ['Name']]]]]]]]])
self.assertEqual(s1_ast.ext[3].body.block_items[0].lvalue.field.name, 'Name')
def test_struct_bitfields(self):
# a struct with two bitfields, one unnamed
s1 = """
struct {
int k:6;
int :2;
} joe;
"""
parsed_struct = self.parse(s1).ext[0]
# We can see here the name of the decl for the unnamed bitfield is
# None, but expand_decl doesn't show bitfield widths
# ...
self.assertEqual(expand_decl(parsed_struct),
['Decl', 'joe',
['TypeDecl', ['Struct', None,
[ ['Decl', 'k',
['TypeDecl',
['IdentifierType', ['int']]]],
['Decl', None,
['TypeDecl',
['IdentifierType', ['int']]]]]]]])
# ...
# so we test them manually
self.assertEqual(parsed_struct.type.type.decls[0].bitsize.value, '6')
self.assertEqual(parsed_struct.type.type.decls[1].bitsize.value, '2')
def test_tags_namespace(self):
""" Tests that the tags of structs/unions/enums reside in a separate namespace and
can be named after existing types.
"""
s1 = """
typedef int tagEntry;
struct tagEntry
{
char* key;
char* value;
} Entry;
"""
s1_ast = self.parse(s1)
self.assertEqual(expand_decl(s1_ast.ext[1]),
['Decl', 'Entry',
['TypeDecl', ['Struct', 'tagEntry',
[['Decl', 'key',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]],
['Decl', 'value',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]]]]])
s2 = """
struct tagEntry;
typedef struct tagEntry tagEntry;
struct tagEntry
{
char* key;
char* value;
} Entry;
"""
s2_ast = self.parse(s2)
self.assertEqual(expand_decl(s2_ast.ext[2]),
['Decl', 'Entry',
['TypeDecl', ['Struct', 'tagEntry',
[['Decl', 'key',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]],
['Decl', 'value',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]]]]])
s3 = """
typedef int mytag;
enum mytag {ABC, CDE};
enum mytag joe;
"""
s3_type = self.parse(s3).ext[1].type
self.assertTrue(isinstance(s3_type, Enum))
self.assertEqual(s3_type.name, 'mytag')
def test_multi_decls(self):
d1 = 'int a, b;'
self.assertEqual(self.get_decl(d1, 0),
['Decl', 'a', ['TypeDecl', ['IdentifierType', ['int']]]])
self.assertEqual(self.get_decl(d1, 1),
['Decl', 'b', ['TypeDecl', ['IdentifierType', ['int']]]])
d2 = 'char* p, notp, ar[4];'
self.assertEqual(self.get_decl(d2, 0),
['Decl', 'p',
['PtrDecl',
['TypeDecl', ['IdentifierType', ['char']]]]])
self.assertEqual(self.get_decl(d2, 1),
['Decl', 'notp', ['TypeDecl', ['IdentifierType', ['char']]]])
self.assertEqual(self.get_decl(d2, 2),
['Decl', 'ar',
['ArrayDecl', '4', [],
['TypeDecl', ['IdentifierType', ['char']]]]])
def test_invalid_multiple_types_error(self):
bad = [
'int enum {ab, cd} fubr;',
'enum kid char brbr;']
for b in bad:
self.assertRaises(ParseError, self.parse, b)
def test_duplicate_typedef(self):
""" Tests that redeclarations of existing types are parsed correctly.
This is non-standard, but allowed by many compilers.
"""
d1 = '''
typedef int numbertype;
typedef int numbertype;
'''
self.assertEqual(self.get_decl(d1, 0),
['Typedef', 'numbertype',
['TypeDecl', ['IdentifierType', ['int']]]])
self.assertEqual(self.get_decl(d1, 1),
['Typedef', 'numbertype',
['TypeDecl', ['IdentifierType', ['int']]]])
d2 = '''
typedef int (*funcptr)(int x);
typedef int (*funcptr)(int x);
'''
self.assertEqual(self.get_decl(d2, 0),
['Typedef', 'funcptr',
['PtrDecl', ['FuncDecl',
[['Decl', 'x', ['TypeDecl', ['IdentifierType', ['int']]]]],
['TypeDecl', ['IdentifierType', ['int']]]]]])
self.assertEqual(self.get_decl(d2, 1),
['Typedef', 'funcptr',
['PtrDecl', ['FuncDecl',
[['Decl', 'x', ['TypeDecl', ['IdentifierType', ['int']]]]],
['TypeDecl', ['IdentifierType', ['int']]]]]])
d3 = '''
typedef int numberarray[5];
typedef int numberarray[5];
'''
self.assertEqual(self.get_decl(d3, 0),
['Typedef', 'numberarray',
['ArrayDecl', '5', [],
['TypeDecl', ['IdentifierType', ['int']]]]])
self.assertEqual(self.get_decl(d3, 1),
['Typedef', 'numberarray',
['ArrayDecl', '5', [],
['TypeDecl', ['IdentifierType', ['int']]]]])
def test_decl_inits(self):
d1 = 'int a = 16;'
#~ self.parse(d1).show()
self.assertEqual(self.get_decl(d1),
['Decl', 'a', ['TypeDecl', ['IdentifierType', ['int']]]])
self.assertEqual(self.get_decl_init(d1),
['Constant', 'int', '16'])
d1_1 = 'float f = 0xEF.56p1;'
self.assertEqual(self.get_decl_init(d1_1),
['Constant', 'float', '0xEF.56p1'])
d1_2 = 'int bitmask = 0b1001010;'
self.assertEqual(self.get_decl_init(d1_2),
['Constant', 'int', '0b1001010'])
d2 = 'long ar[] = {7, 8, 9};'
self.assertEqual(self.get_decl(d2),
['Decl', 'ar',
['ArrayDecl', '', [],
['TypeDecl', ['IdentifierType', ['long']]]]])
self.assertEqual(self.get_decl_init(d2),
[ ['Constant', 'int', '7'],
['Constant', 'int', '8'],
['Constant', 'int', '9']])
d21 = 'long ar[4] = {};'
self.assertEqual(self.get_decl_init(d21), [])
d3 = 'char p = j;'
self.assertEqual(self.get_decl(d3),
['Decl', 'p', ['TypeDecl', ['IdentifierType', ['char']]]])
self.assertEqual(self.get_decl_init(d3),
['ID', 'j'])
d4 = "char x = 'c', *p = {0, 1, 2, {4, 5}, 6};"
self.assertEqual(self.get_decl(d4, 0),
['Decl', 'x', ['TypeDecl', ['IdentifierType', ['char']]]])
self.assertEqual(self.get_decl_init(d4, 0),
['Constant', 'char', "'c'"])
self.assertEqual(self.get_decl(d4, 1),
['Decl', 'p',
['PtrDecl',
['TypeDecl', ['IdentifierType', ['char']]]]])
self.assertEqual(self.get_decl_init(d4, 1),
[ ['Constant', 'int', '0'],
['Constant', 'int', '1'],
['Constant', 'int', '2'],
[['Constant', 'int', '4'],
['Constant', 'int', '5']],
['Constant', 'int', '6']])
def test_decl_named_inits(self):
d1 = 'int a = {.k = 16};'
self.assertEqual(self.get_decl_init(d1),
[( [['ID', 'k']],
['Constant', 'int', '16'])])
d2 = 'int a = { [0].a = {1}, [1].a[0] = 2 };'
self.assertEqual(self.get_decl_init(d2),
[
([['Constant', 'int', '0'], ['ID', 'a']],
[['Constant', 'int', '1']]),
([['Constant', 'int', '1'], ['ID', 'a'], ['Constant', 'int', '0']],
['Constant', 'int', '2'])])
d3 = 'int a = { .a = 1, .c = 3, 4, .b = 5};'
self.assertEqual(self.get_decl_init(d3),
[
([['ID', 'a']], ['Constant', 'int', '1']),
([['ID', 'c']], ['Constant', 'int', '3']),
['Constant', 'int', '4'],
([['ID', 'b']], ['Constant', 'int', '5'])])
def test_function_definitions(self):
def parse_fdef(str):
return self.parse(str).ext[0]
def fdef_decl(fdef):
return expand_decl(fdef.decl)
f1 = parse_fdef('''
int factorial(int p)
{
return 3;
}
''')
self.assertEqual(fdef_decl(f1),
['Decl', 'factorial',
['FuncDecl',
[['Decl', 'p', ['TypeDecl', ['IdentifierType', ['int']]]]],
['TypeDecl', ['IdentifierType', ['int']]]]])
self.assertEqual(type(f1.body.block_items[0]), Return)
f2 = parse_fdef('''
char* zzz(int p, char* c)
{
int a;
char b;
a = b + 2;
return 3;
}
''')
self.assertEqual(fdef_decl(f2),
['Decl', 'zzz',
['FuncDecl',
[ ['Decl', 'p', ['TypeDecl', ['IdentifierType', ['int']]]],
['Decl', 'c', ['PtrDecl',
['TypeDecl', ['IdentifierType', ['char']]]]]],
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]])
self.assertEqual(list(map(type, f2.body.block_items)),
[Decl, Decl, Assignment, Return])
f3 = parse_fdef('''
char* zzz(p, c)
long p, *c;
{
int a;
char b;
a = b + 2;
return 3;
}
''')
self.assertEqual(fdef_decl(f3),
['Decl', 'zzz',
['FuncDecl',
[ ['ID', 'p'],
['ID', 'c']],
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]])
self.assertEqual(list(map(type, f3.body.block_items)),
[Decl, Decl, Assignment, Return])
self.assertEqual(expand_decl(f3.param_decls[0]),
['Decl', 'p', ['TypeDecl', ['IdentifierType', ['long']]]])
self.assertEqual(expand_decl(f3.param_decls[1]),
['Decl', 'c', ['PtrDecl', ['TypeDecl', ['IdentifierType', ['long']]]]])
# function return values and parameters may not have type information
f4 = parse_fdef('''
que(p)
{
return 3;
}
''')
self.assertEqual(fdef_decl(f4),
['Decl', 'que',
['FuncDecl',
[['ID', 'p']],
['TypeDecl', ['IdentifierType', ['int']]]]])
def test_unified_string_literals(self):
# simple string, for reference
d1 = self.get_decl_init('char* s = "hello";')
self.assertEqual(d1, ['Constant', 'string', '"hello"'])
d2 = self.get_decl_init('char* s = "hello" " world";')
self.assertEqual(d2, ['Constant', 'string', '"hello world"'])
# the test case from issue 6
d3 = self.parse(r'''
int main() {
fprintf(stderr,
"Wrong Params?\n"
"Usage:\n"
"%s <binary_file_path>\n",
argv[0]
);
}
''')
self.assertEqual(
d3.ext[0].body.block_items[0].args.exprs[1].value,
r'"Wrong Params?\nUsage:\n%s <binary_file_path>\n"')
d4 = self.get_decl_init('char* s = "" "foobar";')
self.assertEqual(d4, ['Constant', 'string', '"foobar"'])
d5 = self.get_decl_init(r'char* s = "foo\"" "bar";')
self.assertEqual(d5, ['Constant', 'string', r'"foo\"bar"'])
def test_unified_wstring_literals(self):
d1 = self.get_decl_init('char* s = L"hello" L"world";')
self.assertEqual(d1, ['Constant', 'string', 'L"helloworld"'])
d2 = self.get_decl_init('char* s = L"hello " L"world" L" and I";')
self.assertEqual(d2, ['Constant', 'string', 'L"hello world and I"'])
def test_inline_specifier(self):
ps2 = self.parse('static inline void inlinefoo(void);')
self.assertEqual(ps2.ext[0].funcspec, ['inline'])
# variable length array
def test_vla(self):
ps2 = self.parse(r'''
int main() {
int size;
int var[size = 5];
int var2[*];
}
''')
self.assertTrue(isinstance(ps2.ext[0].body.block_items[1].type.dim, Assignment))
self.assertTrue(isinstance(ps2.ext[0].body.block_items[2].type.dim, ID))
class TestCParser_whole_code(TestCParser_base):
""" Testing of parsing whole chunks of code.
Since I don't want to rely on the structure of ASTs too
much, most of these tests are implemented with visitors.
"""
# A simple helper visitor that lists the values of all the
# Constant nodes it sees.
#
class ConstantVisitor(NodeVisitor):
def __init__(self):
self.values = []
def visit_Constant(self, node):
self.values.append(node.value)
# This visitor counts the number of references to the ID
# with the name provided to it in the constructor.
#
class IDNameCounter(NodeVisitor):
def __init__(self, name):
self.name = name
self.nrefs = 0
def visit_ID(self, node):
if node.name == self.name:
self.nrefs += 1
# Counts the number of nodes of a given class
#
class NodeKlassCounter(NodeVisitor):
def __init__(self, node_klass):
self.klass = node_klass
self.n = 0
def generic_visit(self, node):
if node.__class__ == self.klass:
self.n += 1
NodeVisitor.generic_visit(self, node)
def assert_all_Constants(self, code, constants):
""" Asserts that the list of all Constant values (by
'preorder' appearance) in the chunk of code is as
given.
"""
if isinstance(code, str):
parsed = self.parse(code)
else:
parsed = code
cv = self.ConstantVisitor()
cv.visit(parsed)
self.assertEqual(cv.values, constants)
def assert_num_ID_refs(self, code, name, num):
""" Asserts the number of references to the ID with
the given name.
"""
if isinstance(code, str):
parsed = self.parse(code)
else:
parsed = code
iv = self.IDNameCounter(name)
iv.visit(parsed)
self.assertEqual(iv.nrefs, num)
def assert_num_klass_nodes(self, code, klass, num):
""" Asserts the number of klass nodes in the code.
"""
if isinstance(code, str):
parsed = self.parse(code)
else:
parsed = code
cv = self.NodeKlassCounter(klass)
cv.visit(parsed)
self.assertEqual(cv.n, num)
def test_expressions(self):
e1 = '''int k = (r + 10.0) >> 6 + 8 << (3 & 0x14);'''
self.assert_all_Constants(e1, ['10.0', '6', '8', '3', '0x14'])
e2 = r'''char n = '\n', *prefix = "st_";'''
self.assert_all_Constants(e2, [r"'\n'", '"st_"'])
s1 = r'''int main() {
int i = 5, j = 6, k = 1;
if ((i=j && k == 1) || k > j)
printf("Hello, world\n");
return 0;
}'''
ps1 = self.parse(s1)
self.assert_all_Constants(ps1,
['5', '6', '1', '1', '"Hello, world\\n"', '0'])
self.assert_num_ID_refs(ps1, 'i', 1)
self.assert_num_ID_refs(ps1, 'j', 2)
def test_statements(self):
s1 = r'''
void foo(){
if (sp == 1)
if (optind >= argc ||
argv[optind][0] != '-' || argv[optind][1] == '\0')
return -1;
else if (strcmp(argv[optind], "--") == 0) {
optind++;
return -1;
}
}
'''
self.assert_all_Constants(s1,
['1', '0', r"'-'", '1', r"'\0'", '1', r'"--"', '0', '1'])
ps1 = self.parse(s1)
self.assert_num_ID_refs(ps1, 'argv', 3)
self.assert_num_ID_refs(ps1, 'optind', 5)
self.assert_num_klass_nodes(ps1, If, 3)
self.assert_num_klass_nodes(ps1, Return, 2)
self.assert_num_klass_nodes(ps1, FuncCall, 1) # strcmp
self.assert_num_klass_nodes(ps1, BinaryOp, 7)
# In the following code, Hash and Node were defined as
# int to pacify the parser, which sees them used as
# types
#
s2 = r'''
typedef int Hash, Node;
void HashDestroy(Hash* hash)
{
unsigned int i;
if (hash == NULL)
return;
for (i = 0; i < hash->table_size; ++i)
{
Node* temp = hash->heads[i];
while (temp != NULL)
{
Node* temp2 = temp;
free(temp->entry->key);
free(temp->entry->value);
free(temp->entry);
temp = temp->next;
free(temp2);
}
}
free(hash->heads);
hash->heads = NULL;
free(hash);
}
'''
ps2 = self.parse(s2)
self.assert_num_klass_nodes(ps2, FuncCall, 6)
self.assert_num_klass_nodes(ps2, FuncDef, 1)
self.assert_num_klass_nodes(ps2, For, 1)
self.assert_num_klass_nodes(ps2, While, 1)
self.assert_num_klass_nodes(ps2, StructRef, 10)
# declarations don't count
self.assert_num_ID_refs(ps2, 'hash', 6)
self.assert_num_ID_refs(ps2, 'i', 4)
s3 = r'''
void x(void) {
int a, b;
if (a < b)
do {
a = 0;
} while (0);
else if (a == b) {
a = 1;
}
}
'''
ps3 = self.parse(s3)
self.assert_num_klass_nodes(ps3, DoWhile, 1)
self.assert_num_ID_refs(ps3, 'a', 4)
self.assert_all_Constants(ps3, ['0', '0', '1'])
def test_empty_statement(self):
s1 = r'''
void foo(void){
;
return;
}
'''
ps1 = self.parse(s1)
self.assert_num_klass_nodes(ps1, EmptyStatement, 1)
self.assert_num_klass_nodes(ps1, Return, 1)
def test_switch_statement(self):
def assert_case_node(node, const_value):
self.assertTrue(isinstance(node, Case))
self.assertTrue(isinstance(node.expr, Constant))
self.assertEqual(node.expr.value, const_value)
def assert_default_node(node):
self.assertTrue(isinstance(node, Default))
s1 = r'''
int foo(void) {
switch (myvar) {
case 10:
k = 10;
p = k + 1;
return 10;
case 20:
case 30:
return 20;
default:
break;
}
return 0;
}
'''
ps1 = self.parse(s1)
switch = ps1.ext[0].body.block_items[0]
block = switch.stmt.block_items
assert_case_node(block[0], '10')
self.assertEqual(len(block[0].stmts), 3)
assert_case_node(block[1], '20')
self.assertEqual(len(block[1].stmts), 0)
assert_case_node(block[2], '30')
self.assertEqual(len(block[2].stmts), 1)
assert_default_node(block[3])
s2 = r'''
int foo(void) {
switch (myvar) {
default:
joe = moe;
return 10;
case 10:
case 20:
case 30:
case 40:
break;
}
return 0;
}
'''
ps2 = self.parse(s2)
switch = ps2.ext[0].body.block_items[0]
block = switch.stmt.block_items
assert_default_node(block[0])
self.assertEqual(len(block[0].stmts), 2)
assert_case_node(block[1], '10')
self.assertEqual(len(block[1].stmts), 0)
assert_case_node(block[2], '20')
self.assertEqual(len(block[2].stmts), 0)
assert_case_node(block[3], '30')
self.assertEqual(len(block[3].stmts), 0)
assert_case_node(block[4], '40')
self.assertEqual(len(block[4].stmts), 1)
def test_for_statement(self):
s2 = r'''
void x(void)
{
int i;
for (i = 0; i < 5; ++i) {
x = 50;
}
}
'''
ps2 = self.parse(s2)
self.assert_num_klass_nodes(ps2, For, 1)
# here there are 3 refs to 'i' since the declaration doesn't count as
# a ref in the visitor
#
self.assert_num_ID_refs(ps2, 'i', 3)
s3 = r'''
void x(void)
{
for (int i = 0; i < 5; ++i) {
x = 50;
}
}
'''
ps3 = self.parse(s3)
self.assert_num_klass_nodes(ps3, For, 1)
# here there are 2 refs to 'i' since the declaration doesn't count as
# a ref in the visitor
#
self.assert_num_ID_refs(ps3, 'i', 2)
s4 = r'''
void x(void) {
for (int i = 0;;)
i;
}
'''
ps4 = self.parse(s4)
self.assert_num_ID_refs(ps4, 'i', 1)
def _open_c_file(self, name):
""" Find a C file by name, taking into account that the current dir can be
in a couple of typical places
"""
testdir = os.path.dirname(__file__)
name = os.path.join(testdir, 'c_files', name)
assert os.path.exists(name)
return open(name, 'rU')
def test_whole_file(self):
# See how pycparser handles a whole, real C file.
#
with self._open_c_file('memmgr_with_h.c') as f:
code = f.read()
p = self.parse(code)
self.assert_num_klass_nodes(p, FuncDef, 5)
# each FuncDef also has a FuncDecl. 4 declarations
# + 5 definitions, overall 9
self.assert_num_klass_nodes(p, FuncDecl, 9)
self.assert_num_klass_nodes(p, Typedef, 4)
self.assertEqual(p.ext[4].coord.line, 88)
self.assertEqual(p.ext[4].coord.file, "./memmgr.h")
self.assertEqual(p.ext[6].coord.line, 10)
self.assertEqual(p.ext[6].coord.file, "memmgr.c")
def test_whole_file_with_stdio(self):
# Parse a whole file with stdio.h included by cpp
#
with self._open_c_file('cppd_with_stdio_h.c') as f:
code = f.read()
p = self.parse(code)
self.assertTrue(isinstance(p.ext[0], Typedef))
self.assertEqual(p.ext[0].coord.line, 213)
self.assertEqual(p.ext[0].coord.file, r"D:\eli\cpp_stuff\libc_include/stddef.h")
self.assertTrue(isinstance(p.ext[-1], FuncDef))
self.assertEqual(p.ext[-1].coord.line, 15)
self.assertEqual(p.ext[-1].coord.file, "example_c_file.c")
self.assertTrue(isinstance(p.ext[-8], Typedef))
self.assertTrue(isinstance(p.ext[-8].type, TypeDecl))
self.assertEqual(p.ext[-8].name, 'cookie_io_functions_t')
class TestCParser_typenames(TestCParser_base):
""" Test issues related to the typedef-name problem.
"""
def test_innerscope_typedef(self):
# should fail since TT is not a type in bar
s1 = r'''
void foo() {
typedef char TT;
TT x;
}
void bar() {
TT y;
}
'''
self.assertRaises(ParseError, self.parse, s1)
# should succeed since TT is only declared as a variable name in bar, not used as a type
s2 = r'''
void foo() {
typedef char TT;
TT x;
}
void bar() {
unsigned TT;
}
'''
self.assertTrue(isinstance(self.parse(s2), FileAST))
def test_innerscope_reuse_typedef_name(self):
# identifiers can be reused in inner scopes; the original should be
# restored at the end of the block
s1 = r'''
typedef char TT;
void foo(void) {
unsigned TT;
TT = 10;
}
TT x = 5;
'''
s1_ast = self.parse(s1)
self.assertEqual(expand_decl(s1_ast.ext[1].body.block_items[0]),
['Decl', 'TT', ['TypeDecl', ['IdentifierType', ['unsigned']]]])
self.assertEqual(expand_decl(s1_ast.ext[2]),
['Decl', 'x', ['TypeDecl', ['IdentifierType', ['TT']]]])
# this should be recognized even with an initializer
s2 = r'''
typedef char TT;
void foo(void) {
unsigned TT = 10;
}
'''
s2_ast = self.parse(s2)
self.assertEqual(expand_decl(s2_ast.ext[1].body.block_items[0]),
['Decl', 'TT', ['TypeDecl', ['IdentifierType', ['unsigned']]]])
# before the second local variable, TT is a type; after, it's a
# variable
s3 = r'''
typedef char TT;
void foo(void) {
TT tt = sizeof(TT);
unsigned TT = 10;
}
'''
s3_ast = self.parse(s3)
self.assertEqual(expand_decl(s3_ast.ext[1].body.block_items[0]),
['Decl', 'tt', ['TypeDecl', ['IdentifierType', ['TT']]]])
self.assertEqual(expand_decl(s3_ast.ext[1].body.block_items[1]),
['Decl', 'TT', ['TypeDecl', ['IdentifierType', ['unsigned']]]])
# a variable and its type can even share the same name
s4 = r'''
typedef char TT;
void foo(void) {
TT TT = sizeof(TT);
unsigned uu = TT * 2;
}
'''
s4_ast = self.parse(s4)
self.assertEqual(expand_decl(s4_ast.ext[1].body.block_items[0]),
['Decl', 'TT', ['TypeDecl', ['IdentifierType', ['TT']]]])
self.assertEqual(expand_decl(s4_ast.ext[1].body.block_items[1]),
['Decl', 'uu', ['TypeDecl', ['IdentifierType', ['unsigned']]]])
# ensure an error is raised if a type, redeclared as a variable, is
# used as a type
s5 = r'''
typedef char TT;
void foo(void) {
unsigned TT = 10;
TT erroneous = 20;
}
'''
self.assertRaises(ParseError, self.parse, s5)
def test_parameter_reuse_typedef_name(self):
# identifiers can be reused as parameter names; parameter name scope
# begins and ends with the function body; it's important that TT is
# used immediately before the LBRACE or after the RBRACE, to test
# a corner case
s1 = r'''
typedef char TT;
void foo(unsigned TT, TT bar) {
TT = 10;
}
TT x = 5;
'''
s1_ast = self.parse(s1)
self.assertEqual(expand_decl(s1_ast.ext[1].decl),
['Decl', 'foo',
['FuncDecl',
[ ['Decl', 'TT', ['TypeDecl', ['IdentifierType', ['unsigned']]]],
['Decl', 'bar', ['TypeDecl', ['IdentifierType', ['TT']]]]],
['TypeDecl', ['IdentifierType', ['void']]]]])
# the scope of a parameter name in a function declaration ends at the
# end of the declaration...so it is effectively never used; it's
# important that TT is used immediately after the declaration, to
# test a corner case
s2 = r'''
typedef char TT;
void foo(unsigned TT, TT bar);
TT x = 5;
'''
s2_ast = self.parse(s2)
self.assertEqual(expand_decl(s2_ast.ext[1]),
['Decl', 'foo',
['FuncDecl',
[ ['Decl', 'TT', ['TypeDecl', ['IdentifierType', ['unsigned']]]],
['Decl', 'bar', ['TypeDecl', ['IdentifierType', ['TT']]]]],
['TypeDecl', ['IdentifierType', ['void']]]]])
# ensure an error is raised if a type, redeclared as a parameter, is
# used as a type
s3 = r'''
typedef char TT;
void foo(unsigned TT, TT bar) {
TT erroneous = 20;
}
'''
self.assertRaises(ParseError, self.parse, s3)
def test_nested_function_decls(self):
# parameter names of nested function declarations must not escape into
# the top-level function _definition's_ scope; the following must
# succeed because TT is still a typedef inside foo's body
s1 = r'''
typedef char TT;
void foo(unsigned bar(int TT)) {
TT x = 10;
}
'''
self.assertTrue(isinstance(self.parse(s1), FileAST))
def test_samescope_reuse_name(self):
# a typedef name cannot be reused as an object name in the same scope
s1 = r'''
typedef char TT;
char TT = 5;
'''
self.assertRaises(ParseError, self.parse, s1)
# ...and vice-versa
s2 = r'''
char TT = 5;
typedef char TT;
'''
self.assertRaises(ParseError, self.parse, s2)
if __name__ == '__main__':
#~ suite = unittest.TestLoader().loadTestsFromNames(
#~ ['test_c_parser.TestCParser_fundamentals.test_typedef'])
#~ unittest.TextTestRunner(verbosity=2).run(suite)
unittest.main()
|
keulraesik/pycparser
|
tests/test_c_parser.py
|
Python
|
bsd-3-clause
| 62,280
|
[
"MOE",
"VisIt"
] |
5d5d0f0a2d90a0e53f7f049715df2abd0b5aec233686a5219377fdf4fa88e8df
|
#!/usr/bin/python
# vim: et sw=4 ts=4:
# -*- coding: utf-8 -*-
#
# Piwik - free/libre analytics platform
#
# @link http://piwik.org
# @license http://www.gnu.org/licenses/gpl-3.0.html GPL v3 or later
# @version $Id$
#
# For more info see: http://piwik.org/log-analytics/ and http://piwik.org/docs/log-analytics-tool-how-to/
#
# Requires Python 2.6 or greater.
#
import base64
import bz2
import ConfigParser
import datetime
import fnmatch
import gzip
import hashlib
import httplib
import inspect
import itertools
import logging
import optparse
import os
import os.path
import Queue
import re
import sys
import threading
import time
import urllib
import urllib2
import urlparse
import subprocess
import functools
import traceback
import socket
import textwrap
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
if sys.version_info < (2, 6):
print >> sys.stderr, 'simplejson (http://pypi.python.org/pypi/simplejson/) is required.'
sys.exit(1)
##
## Constants.
##
STATIC_EXTENSIONS = set((
'gif jpg jpeg png bmp ico svg svgz ttf otf eot woff class swf css js xml robots.txt webp'
).split())
DOWNLOAD_EXTENSIONS = set((
'7z aac arc arj asf asx avi bin csv deb dmg doc docx exe flv gz gzip hqx '
'ibooks jar mpg mp2 mp3 mp4 mpeg mov movie msi msp odb odf odg odp '
'ods odt ogg ogv pdf phps ppt pptx qt qtm ra ram rar rpm sea sit tar tbz '
'bz2 tbz tgz torrent txt wav wma wmv wpd xls xlsx xml xsd z zip '
'azw3 epub mobi apk'
).split())
# A good source is: http://phpbb-bots.blogspot.com/
EXCLUDED_USER_AGENTS = (
'adsbot-google',
'ask jeeves',
'baidubot',
'bot-',
'bot/',
'ccooter/',
'crawl',
'curl',
'echoping',
'exabot',
'feed',
'googlebot',
'ia_archiver',
'java/',
'libwww',
'mediapartners-google',
'msnbot',
'netcraftsurvey',
'panopta',
'robot',
'spider',
'surveybot',
'twiceler',
'voilabot',
'yahoo',
'yandex',
)
PIWIK_DEFAULT_MAX_ATTEMPTS = 3
PIWIK_DEFAULT_DELAY_AFTER_FAILURE = 10
DEFAULT_SOCKET_TIMEOUT = 300
PIWIK_EXPECTED_IMAGE = base64.b64decode(
'R0lGODlhAQABAIAAAAAAAAAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=='
)
##
## Formats.
##
class BaseFormatException(Exception): pass
class BaseFormat(object):
def __init__(self, name):
self.name = name
self.regex = None
self.date_format = '%d/%b/%Y:%H:%M:%S'
def check_format(self, file):
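# peek at the first line to probe the format, then rewind so the file can be re-read from the start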
line = file.readline()
file.seek(0)
return self.check_format_line(line)
def check_format_line(self, line):
return False
class JsonFormat(BaseFormat):
def __init__(self, name):
super(JsonFormat, self).__init__(name)
self.json = None
self.date_format = '%Y-%m-%dT%H:%M:%S'
def check_format_line(self, line):
try:
self.json = json.loads(line)
return True
except:
return False
def match(self, line):
try:
self.json = json.loads(line)
return self
except:
self.json = None
return None
def get(self, key):
# Some ugly patches ...
if key == 'generation_time_milli':
self.json[key] = int(self.json[key] * 1000)
# Patch date format ISO 8601
elif key == 'date':
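# keep the first 19 characters (YYYY-MM-DDTHH:MM:SS) and move the trailing offset into a separate 'timezone' value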
tz = self.json[key][19:]
self.json['timezone'] = tz.replace(':', '')
self.json[key] = self.json[key][:19]
try:
return self.json[key]
except KeyError:
raise BaseFormatException()
def get_all(self):
return self.json
def remove_ignored_groups(self, groups):
for group in groups:
del self.json[group]
class RegexFormat(BaseFormat):
def __init__(self, name, regex, date_format=None):
super(RegexFormat, self).__init__(name)
if regex is not None:
self.regex = re.compile(regex)
if date_format is not None:
self.date_format = date_format
self.matched = None
def check_format_line(self, line):
return self.match(line)
def match(self, line):
if not self.regex:
return None
match_result = self.regex.match(line)
if match_result:
self.matched = match_result.groupdict()
else:
self.matched = None
return match_result
def get(self, key):
try:
return self.matched[key]
except KeyError:
raise BaseFormatException("Cannot find group '%s'." % key)
def get_all(self):
return self.matched
def remove_ignored_groups(self, groups):
for group in groups:
del self.matched[group]
class W3cExtendedFormat(RegexFormat):
FIELDS_LINE_PREFIX = '#Fields: '
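# maps each W3C field name to a regex fragment whose named capture group is used for that log field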
fields = {
'date': '(?P<date>^\d+[-\d+]+',
'time': '[\d+:]+)[.\d]*?', # TODO: should not assume date & time will be together; not sure how to fix ATM.
'cs-uri-stem': '(?P<path>/\S*)',
'cs-uri-query': '(?P<query_string>\S*)',
'c-ip': '"?(?P<ip>[\d*.-]*)"?',
'cs(User-Agent)': '(?P<user_agent>".*?"|\S+)',
'cs(Referer)': '(?P<referrer>\S+)',
'sc-status': '(?P<status>\d+)',
'sc-bytes': '(?P<length>\S+)',
'cs-host': '(?P<host>\S+)',
'cs-username': '(?P<userid>\S+)',
'time-taken': '(?P<generation_time_secs>[.\d]+)'
}
def __init__(self):
super(W3cExtendedFormat, self).__init__('w3c_extended', None, '%Y-%m-%d %H:%M:%S')
def check_format(self, file):
self.create_regex(file)
# if we couldn't create a regex, this file does not follow the W3C extended log file format
if not self.regex:
file.seek(0)
return
first_line = file.readline()
file.seek(0)
return self.check_format_line(first_line)
def create_regex(self, file):
fields_line = None
if config.options.w3c_fields:
fields_line = config.options.w3c_fields
# collect all header lines up until the Fields: line
# if we're reading from stdin, we can't seek, so don't read any more than the Fields line
header_lines = []
while fields_line is None:
line = file.readline().strip()
if not line:
continue
if not line.startswith('#'):
break
if line.startswith(W3cExtendedFormat.FIELDS_LINE_PREFIX):
fields_line = line
else:
header_lines.append(line)
if not fields_line:
return
# store the header lines for a later check for IIS
self.header_lines = header_lines
# Parse the 'Fields: ' line to create the regex to use
full_regex = []
expected_fields = type(self).fields.copy() # turn custom field mapping into field => regex mapping
# if the --w3c-time-taken-millisecs option is used, make sure the time-taken field is interpreted as milliseconds
if config.options.w3c_time_taken_in_millisecs:
expected_fields['time-taken'] = '(?P<generation_time_milli>[\d.]+)'
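# apply --w3c-map-field renames: the mapped name takes over the default field's regex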
for mapped_field_name, field_name in config.options.custom_w3c_fields.iteritems():
expected_fields[mapped_field_name] = expected_fields[field_name]
del expected_fields[field_name]
# add custom field regexes supplied through --w3c-field-regex option
for field_name, field_regex in config.options.w3c_field_regexes.iteritems():
expected_fields[field_name] = field_regex
# Skip the 'Fields: ' prefix.
fields_line = fields_line[9:].strip()
for field in re.split('\s+', fields_line):
try:
regex = expected_fields[field]
except KeyError:
regex = '(?:".*?"|\S+)'
full_regex.append(regex)
full_regex = '\s+'.join(full_regex)
logging.debug("Based on 'Fields:' line, computed regex to be %s", full_regex)
self.regex = re.compile(full_regex)
def check_for_iis_option(self):
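# warn when the headers look like IIS output but time-taken is still being parsed as seconds (no milli group in the regex)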
if not config.options.w3c_time_taken_in_millisecs and self._is_time_taken_milli() and self._is_iis():
logging.info("WARNING: IIS log file being parsed without --w3c-time-taken-milli option. IIS"
" stores millisecond values in the time-taken field. If your logfile does this, the aforementioned"
" option must be used in order to get accurate generation times.")
def _is_iis(self):
return len([line for line in self.header_lines if 'internet information services' in line.lower() or 'iis' in line.lower()]) > 0
def _is_time_taken_milli(self):
return 'generation_time_milli' not in self.regex.pattern
class IisFormat(W3cExtendedFormat):
fields = W3cExtendedFormat.fields.copy()
fields.update({
'time-taken': '(?P<generation_time_milli>[.\d]+)',
'sc-win32-status': '(?P<__win32_status>\S+)' # this group is useless for log importing, but capturing it
# will ensure we always select IIS for the format instead of
# W3C logs when detecting the format. This way there will be
# less accidental importing of IIS logs w/o --w3c-time-taken-milli.
})
def __init__(self):
super(IisFormat, self).__init__()
self.name = 'iis'
class AmazonCloudFrontFormat(W3cExtendedFormat):
fields = W3cExtendedFormat.fields.copy()
fields.update({
'x-event': '(?P<event_action>\S+)',
'x-sname': '(?P<event_name>\S+)',
'cs-uri-stem': '(?:rtmp:/)?(?P<path>/\S*)',
'c-user-agent': '(?P<user_agent>".*?"|\S+)',
# following are present to match cloudfront instead of W3C when we know it's cloudfront
'x-edge-location': '(?P<x_edge_location>".*?"|\S+)',
'x-edge-result-type': '(?P<x_edge_result_type>".*?"|\S+)',
'x-edge-request-id': '(?P<x_edge_request_id>".*?"|\S+)',
'x-host-header': '(?P<x_host_header>".*?"|\S+)'
})
def __init__(self):
super(AmazonCloudFrontFormat, self).__init__()
self.name = 'amazon_cloudfront'
def get(self, key):
if key == 'event_category' and 'event_category' not in self.matched:
return 'cloudfront_rtmp'
elif key == 'status' and 'status' not in self.matched:
return '200'
elif key == 'user_agent':
user_agent = super(AmazonCloudFrontFormat, self).get(key)
return urllib2.unquote(user_agent)
else:
return super(AmazonCloudFrontFormat, self).get(key)
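# Regex building blocks for the plain-text log formats (common, ncsa_extended, s3, icecast2, ...) registered in FORMATS below.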
_HOST_PREFIX = '(?P<host>[\w\-\.]*)(?::\d+)?\s+'
_COMMON_LOG_FORMAT = (
'(?P<ip>\S+)\s+\S+\s+\S+\s+\[(?P<date>.*?)\s+(?P<timezone>.*?)\]\s+'
'"\S+\s+(?P<path>.*?)\s+\S+"\s+(?P<status>\S+)\s+(?P<length>\S+)'
)
_NCSA_EXTENDED_LOG_FORMAT = (_COMMON_LOG_FORMAT +
'\s+"(?P<referrer>.*?)"\s+"(?P<user_agent>.*?)"'
)
_S3_LOG_FORMAT = (
'\S+\s+(?P<host>\S+)\s+\[(?P<date>.*?)\s+(?P<timezone>.*?)\]\s+(?P<ip>\S+)\s+'
'\S+\s+\S+\s+\S+\s+\S+\s+"\S+\s+(?P<path>.*?)\s+\S+"\s+(?P<status>\S+)\s+\S+\s+(?P<length>\S+)\s+'
'\S+\s+\S+\s+\S+\s+"(?P<referrer>.*?)"\s+"(?P<user_agent>.*?)"'
)
_ICECAST2_LOG_FORMAT = ( _NCSA_EXTENDED_LOG_FORMAT +
'\s+(?P<session_time>\S+)'
)
FORMATS = {
'common': RegexFormat('common', _COMMON_LOG_FORMAT),
'common_vhost': RegexFormat('common_vhost', _HOST_PREFIX + _COMMON_LOG_FORMAT),
'ncsa_extended': RegexFormat('ncsa_extended', _NCSA_EXTENDED_LOG_FORMAT),
'common_complete': RegexFormat('common_complete', _HOST_PREFIX + _NCSA_EXTENDED_LOG_FORMAT),
'w3c_extended': W3cExtendedFormat(),
'amazon_cloudfront': AmazonCloudFrontFormat(),
'iis': IisFormat(),
's3': RegexFormat('s3', _S3_LOG_FORMAT),
'icecast2': RegexFormat('icecast2', _ICECAST2_LOG_FORMAT),
'nginx_json': JsonFormat('nginx_json'),
}
##
## Code.
##
class Configuration(object):
"""
Stores all the configuration options by reading sys.argv and parsing,
if needed, the config.inc.php.
It has 2 attributes: options and filenames.
"""
class Error(Exception):
pass
def _create_parser(self):
"""
Initialize and return the OptionParser instance.
"""
option_parser = optparse.OptionParser(
usage='Usage: %prog [options] log_file [ log_file [...] ]',
description="Import HTTP access logs to Piwik. "
"log_file is the path to a server access log file (uncompressed, .gz, .bz2, or specify - to read from stdin). "
" By default, the script will try to produce clean reports and will exclude bots and static files, and discard HTTP errors and redirects, etc. This is customizable, see below.",
epilog="About Piwik Server Log Analytics: http://piwik.org/log-analytics/ "
" Found a bug? Please create a ticket in http://dev.piwik.org/ "
" Please send your suggestions or successful user story to hello@piwik.org "
)
option_parser.add_option(
'--debug', '-d', dest='debug', action='count', default=0,
help="Enable debug output (specify multiple times for more verbose)",
)
option_parser.add_option(
'--debug-tracker', dest='debug_tracker', action='store_true', default=False,
help="Appends &debug=1 to tracker requests and prints out the result so the tracker can be debugged. If "
"using the log importer results in errors with the tracker or improperly recorded visits, this option can "
"be used to find out what the tracker is doing wrong. To see debug tracker output, you must also set the "
"[Tracker] debug_on_demand INI config to 1 in your Piwik's config.ini.php file."
)
option_parser.add_option(
'--debug-request-limit', dest='debug_request_limit', type='int', default=None,
help="Debug option that will exit after N requests are parsed. Can be used w/ --debug-tracker to limit the "
"output of a large log file."
)
option_parser.add_option(
'--url', dest='piwik_url',
help="REQUIRED Your Piwik server URL, eg. http://example.com/piwik/ or http://analytics.example.net",
)
option_parser.add_option(
'--dry-run', dest='dry_run',
action='store_true', default=False,
help="Perform a trial run with no tracking data being inserted into Piwik",
)
option_parser.add_option(
'--show-progress', dest='show_progress',
action='store_true', default=os.isatty(sys.stdout.fileno()),
help="Print a progress report every X seconds (default: 1, use --show-progress-delay to override)"
)
option_parser.add_option(
'--show-progress-delay', dest='show_progress_delay',
type='int', default=1,
help="Change the default progress delay"
)
option_parser.add_option(
'--add-sites-new-hosts', dest='add_sites_new_hosts',
action='store_true', default=False,
help="When a hostname is found in the log file, but not matched to any website "
"in Piwik, automatically create a new website in Piwik with this hostname to "
"import the logs"
)
option_parser.add_option(
'--idsite', dest='site_id',
help= ("When specified, "
"data in the specified log files will be tracked for this Piwik site ID."
" The script will not auto-detect the website based on the log line hostname (new websites will not be automatically created).")
)
option_parser.add_option(
'--idsite-fallback', dest='site_id_fallback',
help="Default Piwik site ID to use if the hostname doesn't match any "
"known Website's URL. New websites will not be automatically created. "
" Used only if --add-sites-new-hosts or --idsite are not set",
)
default_config = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'../../config/config.ini.php'),
)
option_parser.add_option(
'--config', dest='config_file', default=default_config,
help=(
"This is only used when --login and --password is not used. "
"Piwik will read the configuration file (default: %default) to "
"fetch the Super User token_auth from the config file. "
)
)
option_parser.add_option(
'--login', dest='login',
help="You can manually specify the Piwik Super User login"
)
option_parser.add_option(
'--password', dest='password',
help="You can manually specify the Piwik Super User password"
)
option_parser.add_option(
'--token-auth', dest='piwik_token_auth',
help="Piwik Super User token_auth, 32 characters hexadecimal string, found in Piwik > API",
)
option_parser.add_option(
'--hostname', dest='hostnames', action='append', default=[],
help="Accepted hostname (requests with other hostnames will be excluded). "
"Can be specified multiple times"
)
option_parser.add_option(
'--exclude-path', dest='excluded_paths', action='append', default=[],
help="Any URL path matching this exclude-path will not be imported in Piwik. Can be specified multiple times"
)
option_parser.add_option(
'--exclude-path-from', dest='exclude_path_from',
help="Each line from this file is a path to exclude (see: --exclude-path)"
)
option_parser.add_option(
'--include-path', dest='included_paths', action='append', default=[],
help="Paths to include. Can be specified multiple times. If not specified, all paths are included."
)
option_parser.add_option(
'--include-path-from', dest='include_path_from',
help="Each line from this file is a path to include"
)
option_parser.add_option(
'--useragent-exclude', dest='excluded_useragents',
action='append', default=[],
help="User agents to exclude (in addition to the standard excluded "
"user agents). Can be specified multiple times",
)
option_parser.add_option(
'--enable-static', dest='enable_static',
action='store_true', default=False,
help="Track static files (images, css, js, ico, ttf, etc.)"
)
option_parser.add_option(
'--enable-bots', dest='enable_bots',
action='store_true', default=False,
help="Track bots. All bot visits will have a Custom Variable set with name='Bot' and value='$Bot_user_agent_here$'"
)
option_parser.add_option(
'--enable-http-errors', dest='enable_http_errors',
action='store_true', default=False,
help="Track HTTP errors (status code 4xx or 5xx)"
)
option_parser.add_option(
'--enable-http-redirects', dest='enable_http_redirects',
action='store_true', default=False,
help="Track HTTP redirects (status code 3xx except 304)"
)
option_parser.add_option(
'--enable-reverse-dns', dest='reverse_dns',
action='store_true', default=False,
help="Enable reverse DNS, used to generate the 'Providers' report in Piwik. "
"Disabled by default, as it impacts performance"
)
option_parser.add_option(
'--strip-query-string', dest='strip_query_string',
action='store_true', default=False,
help="Strip the query string from the URL"
)
option_parser.add_option(
'--query-string-delimiter', dest='query_string_delimiter', default='?',
help="The query string delimiter (default: %default)"
)
option_parser.add_option(
'--log-format-name', dest='log_format_name', default=None,
help=("Access log format to detect (supported are: %s). "
"When not specified, the log format will be autodetected by trying all supported log formats."
% ', '.join(sorted(FORMATS.iterkeys())))
)
available_regex_groups = ['date', 'path', 'query_string', 'ip', 'user_agent', 'referrer', 'status',
'length', 'host', 'userid', 'generation_time_milli', 'event_action',
'event_name', 'timezone', 'session_time']
option_parser.add_option(
'--log-format-regex', dest='log_format_regex', default=None,
help="Regular expression used to parse log entries. Regexes must contain named groups for different log fields. "
"Recognized fields include: %s. For an example of a supported Regex, see the source code of this file. "
"Overrides --log-format-name." % (', '.join(available_regex_groups))
)
option_parser.add_option(
'--log-date-format', dest='log_date_format', default=None,
help="Format string used to parse dates. You can specify any format that can also be specified to "
"the strptime python function."
)
option_parser.add_option(
'--log-hostname', dest='log_hostname', default=None,
help="Force this hostname for a log format that doesn't include it. All hits "
"will seem to come to this host"
)
option_parser.add_option(
'--skip', dest='skip', default=0, type='int',
help="Skip the first n lines to start parsing/importing data at a given line for the specified log file",
)
option_parser.add_option(
'--recorders', dest='recorders', default=1, type='int',
help="Number of simultaneous recorders (default: %default). "
"It should be set to the number of CPU cores in your server. "
"You can also experiment with higher values which may increase performance until a certain point",
)
option_parser.add_option(
'--recorder-max-payload-size', dest='recorder_max_payload_size', default=200, type='int',
help="Maximum number of log entries to record in one tracking request (default: %default). "
)
option_parser.add_option(
'--replay-tracking', dest='replay_tracking',
action='store_true', default=False,
help="Replay piwik.php requests found in custom logs (only piwik.php requests expected). \nSee http://piwik.org/faq/how-to/faq_17033/"
)
option_parser.add_option(
'--replay-tracking-expected-tracker-file', dest='replay_tracking_expected_tracker_file', default='piwik.php',
help="The expected suffix for tracking request paths. Only logs whose paths end with this will be imported. Defaults "
"to 'piwik.php' so only requests to the piwik.php file will be imported."
)
option_parser.add_option(
'--output', dest='output',
help="Redirect output (stdout and stderr) to the specified file"
)
option_parser.add_option(
'--encoding', dest='encoding', default='utf8',
help="Log files encoding (default: %default)"
)
option_parser.add_option(
'--disable-bulk-tracking', dest='use_bulk_tracking',
default=True, action='store_false',
help="Disables use of bulk tracking so recorders record one hit at a time."
)
option_parser.add_option(
'--debug-force-one-hit-every-Ns', dest='force_one_action_interval', default=False, type='float',
help="Debug option that will force each recorder to record one hit every N secs."
)
option_parser.add_option(
'--force-lowercase-path', dest='force_lowercase_path', default=False, action='store_true',
help="Make URL path lowercase so paths with the same letters but different cases are "
"treated the same."
)
option_parser.add_option(
'--enable-testmode', dest='enable_testmode', default=False, action='store_true',
help="If set, it will try to get the token_auth from the piwik_tests directory"
)
option_parser.add_option(
'--download-extensions', dest='download_extensions', default=None,
help="By default Piwik tracks as Downloads the most popular file extensions. If you set this parameter (format: pdf,doc,...) then files with an extension found in the list will be imported as Downloads, other file extensions downloads will be skipped."
)
option_parser.add_option(
'--add-download-extensions', dest='extra_download_extensions', default=None,
help="Add extensions that should be treated as downloads. See --download-extensions for more info."
)
option_parser.add_option(
'--w3c-map-field', action='callback', callback=functools.partial(self._set_option_map, 'custom_w3c_fields'), type='string',
help="Map a custom log entry field in your W3C log to a default one. Use this option to load custom log "
"files that use the W3C extended log format such as those from the Advanced Logging W3C module. Used "
"as, eg, --w3c-map-field my-date=date. Recognized default fields include: %s\n\n"
"Formats that extend the W3C extended log format (like the cloudfront RTMP log format) may define more "
"fields that can be mapped."
% (', '.join(W3cExtendedFormat.fields.keys()))
)
option_parser.add_option(
'--w3c-time-taken-millisecs', action='store_true', default=False, dest='w3c_time_taken_in_millisecs',
help="If set, interprets the time-taken W3C log field as a number of milliseconds. This must be set for importing"
" IIS logs."
)
option_parser.add_option(
'--w3c-fields', dest='w3c_fields', default=None,
help="Specify the '#Fields:' line for a log file in the W3C Extended log file format. Use this option if "
"your log file doesn't contain the '#Fields:' line which is required for parsing. This option must be used "
"in conjuction with --log-format-name=w3c_extended.\n"
"Example: --w3c-fields='#Fields: date time c-ip ...'"
)
option_parser.add_option(
'--w3c-field-regex', action='callback', callback=functools.partial(self._set_option_map, 'w3c_field_regexes'), type='string',
help="Specify a regex for a field in your W3C extended log file. You can use this option to parse fields the "
"importer does not natively recognize and then use one of the --regex-group-to-XXX-cvar options to track "
"the field in a custom variable. For example, specifying --w3c-field-regex=sc-win32-status=(?P<win32_status>\\S+) "
"--regex-group-to-page-cvar=\"win32_status=Windows Status Code\" will track the sc-win32-status IIS field "
"in the 'Windows Status Code' custom variable. Regexes must contain a named group."
)
option_parser.add_option(
'--title-category-delimiter', dest='title_category_delimiter', default='/',
help="If --enable-http-errors is used, errors are shown in the page titles report. If you have "
"changed General.action_title_category_delimiter in your Piwik configuration, you need to set this "
"option to the same value in order to get a pretty page titles report."
)
option_parser.add_option(
'--dump-log-regex', dest='dump_log_regex', action='store_true', default=False,
help="Prints out the regex string used to parse log lines and exists. Can be useful for using formats "
"in newer versions of the script in older versions of the script. The output regex can be used with "
"the --log-format-regex option."
)
option_parser.add_option(
'--ignore-groups', dest='regex_groups_to_ignore', default=None,
help="Comma separated list of regex groups to ignore when parsing log lines. Can be used to, for example, "
"disable normal user id tracking. See documentation for --log-format-regex for list of available "
"regex groups."
)
option_parser.add_option(
'--regex-group-to-visit-cvar', action='callback', callback=functools.partial(self._set_option_map, 'regex_group_to_visit_cvars_map'), type='string',
help="Track an attribute through a custom variable with visit scope instead of through Piwik's normal "
"approach. For example, to track usernames as a custom variable instead of through the uid tracking "
"parameter, supply --regex-group-to-visit-cvar=\"userid=User Name\". This will track usernames in a "
"custom variable named 'User Name'. The list of available regex groups can be found in the documentation "
"for --log-format-regex (additional regex groups you may have defined "
"in --log-format-regex can also be used)."
)
option_parser.add_option(
'--regex-group-to-page-cvar', action='callback', callback=functools.partial(self._set_option_map, 'regex_group_to_page_cvars_map'), type='string',
help="Track an attribute through a custom variable with page scope instead of through Piwik's normal "
"approach. For example, to track usernames as a custom variable instead of through the uid tracking "
"parameter, supply --regex-group-to-page-cvar=\"userid=User Name\". This will track usernames in a "
"custom variable named 'User Name'. The list of available regex groups can be found in the documentation "
"for --log-format-regex (additional regex groups you may have defined "
"in --log-format-regex can also be used)."
)
option_parser.add_option(
'--retry-max-attempts', dest='max_attempts', default=PIWIK_DEFAULT_MAX_ATTEMPTS, type='int',
help="The maximum number of times to retry a failed tracking request."
)
option_parser.add_option(
'--retry-delay', dest='delay_after_failure', default=PIWIK_DEFAULT_DELAY_AFTER_FAILURE, type='int',
help="The number of seconds to wait before retrying a failed tracking request."
)
option_parser.add_option(
'--request-timeout', dest='request_timeout', default=DEFAULT_SOCKET_TIMEOUT, type='int',
help="The maximum number of seconds to wait before terminating an HTTP request to Piwik."
)
return option_parser
def _set_option_map(self, option_attr_name, option, opt_str, value, parser):
"""
Sets a key-value mapping in a dict that is built from command line options. Options that map
string keys to string values (like --w3c-map-field) can set the callback to a bound partial
of this method to handle the option.
"""
parts = value.split('=')
if len(parts) != 2:
fatal_error("Invalid %s option: '%s'" % (opt_str, value))
key, value = parts
if not hasattr(parser.values, option_attr_name):
setattr(parser.values, option_attr_name, {})
getattr(parser.values, option_attr_name)[key] = value
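# Illustrative sketch (not from the original script): with the callback bound via
# functools.partial(self._set_option_map, 'custom_w3c_fields'), a command line such as
#   --w3c-map-field my-date=date --w3c-map-field my-ip=c-ip
# leaves parser.values.custom_w3c_fields == {'my-date': 'date', 'my-ip': 'c-ip'},
# since each invocation splits its value on '=' and stores one key/value pair.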
def _parse_args(self, option_parser):
"""
Parse the command line args and create self.options and self.filenames.
"""
self.options, self.filenames = option_parser.parse_args(sys.argv[1:])
if self.options.output:
sys.stdout = sys.stderr = open(self.options.output, 'a+', 0)
if not self.filenames:
print(option_parser.format_help())
sys.exit(1)
# Configure logging before calling logging.{debug,info}.
logging.basicConfig(
format='%(asctime)s: [%(levelname)s] %(message)s',
level=logging.DEBUG if self.options.debug >= 1 else logging.INFO,
)
self.options.excluded_useragents = set([s.lower() for s in self.options.excluded_useragents])
if self.options.exclude_path_from:
paths = [path.strip() for path in open(self.options.exclude_path_from).readlines()]
self.options.excluded_paths.extend(path for path in paths if len(path) > 0)
if self.options.excluded_paths:
self.options.excluded_paths = set(self.options.excluded_paths)
logging.debug('Excluded paths: %s', ' '.join(self.options.excluded_paths))
if self.options.include_path_from:
paths = [path.strip() for path in open(self.options.include_path_from).readlines()]
self.options.included_paths.extend(path for path in paths if len(path) > 0)
if self.options.included_paths:
self.options.included_paths = set(self.options.included_paths)
logging.debug('Included paths: %s', ' '.join(self.options.included_paths))
if self.options.hostnames:
logging.debug('Accepted hostnames: %s', ', '.join(self.options.hostnames))
else:
logging.debug('Accepted hostnames: all')
if self.options.log_format_regex:
self.format = RegexFormat('custom', self.options.log_format_regex, self.options.log_date_format)
elif self.options.log_format_name:
try:
self.format = FORMATS[self.options.log_format_name]
except KeyError:
fatal_error('invalid log format: %s' % self.options.log_format_name)
else:
self.format = None
if not hasattr(self.options, 'custom_w3c_fields'):
self.options.custom_w3c_fields = {}
elif self.format is not None:
# validate custom field mappings
for custom_name, default_name in self.options.custom_w3c_fields.iteritems():
if default_name not in type(self.format).fields:
fatal_error("custom W3C field mapping error: don't know how to parse and use the '%s' field" % default_name)
return
if not hasattr(self.options, 'regex_group_to_visit_cvars_map'):
self.options.regex_group_to_visit_cvars_map = {}
if not hasattr(self.options, 'regex_group_to_page_cvars_map'):
self.options.regex_group_to_page_cvars_map = {}
if not hasattr(self.options, 'w3c_field_regexes'):
self.options.w3c_field_regexes = {}
else:
# make sure each custom w3c field regex has a named group
for field_name, field_regex in self.options.w3c_field_regexes.iteritems():
if '(?P<' not in field_regex:
fatal_error("cannot find named group in custom w3c field regex '%s' for field '%s'" % (field_regex, field_name))
return
if not self.options.piwik_url:
fatal_error('no URL given for Piwik')
if not (self.options.piwik_url.startswith('http://') or self.options.piwik_url.startswith('https://')):
self.options.piwik_url = 'http://' + self.options.piwik_url
logging.debug('Piwik URL is: %s', self.options.piwik_url)
if not self.options.piwik_token_auth:
try:
self.options.piwik_token_auth = self._get_token_auth()
except Piwik.Error, e:
fatal_error(e)
logging.debug('Authentication token token_auth is: %s', self.options.piwik_token_auth)
if self.options.recorders < 1:
self.options.recorders = 1
download_extensions = DOWNLOAD_EXTENSIONS
if self.options.download_extensions:
download_extensions = set(self.options.download_extensions.split(','))
if self.options.extra_download_extensions:
download_extensions.update(self.options.extra_download_extensions.split(','))
self.options.download_extensions = download_extensions
if self.options.regex_groups_to_ignore:
self.options.regex_groups_to_ignore = set(self.options.regex_groups_to_ignore.split(','))
def __init__(self):
self._parse_args(self._create_parser())
def _get_token_auth(self):
"""
If the token auth is not specified in the options, get it from Piwik.
"""
# Get superuser login/password from the options.
logging.debug('No token-auth specified')
if self.options.login and self.options.password:
piwik_login = self.options.login
piwik_password = hashlib.md5(self.options.password).hexdigest()
logging.debug('Using credentials: (login = %s, password = %s)', piwik_login, piwik_password)
try:
api_result = piwik.call_api('UsersManager.getTokenAuth',
userLogin=piwik_login,
md5Password=piwik_password,
_token_auth='',
_url=self.options.piwik_url,
)
except urllib2.URLError, e:
fatal_error('error when fetching token_auth from the API: %s' % e)
try:
return api_result['value']
except KeyError:
# Happens when the credentials are invalid.
message = api_result.get('message')
fatal_error(
'error fetching authentication token token_auth%s' % (
': %s' % message if message else '')
)
else:
# Fallback to the given (or default) configuration file, then
# get the token from the API.
logging.debug(
'No credentials specified, reading them from "%s"',
self.options.config_file,
)
config_file = ConfigParser.RawConfigParser()
success = len(config_file.read(self.options.config_file)) > 0
if not success:
fatal_error(
"the configuration file" + self.options.config_file + " could not be read. Please check permission. This file must be readable to get the authentication token"
)
updatetokenfile = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'../../misc/cron/updatetoken.php'),
)
phpBinary = 'php'
is_windows = sys.platform.startswith('win')
if is_windows:
try:
processWin = subprocess.Popen('where php.exe', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
[stdout, stderr] = processWin.communicate()
if processWin.returncode == 0:
phpBinary = stdout.strip()
else:
fatal_error("We couldn't detect PHP. It might help to add your php.exe to the path or alternatively run the importer using the --login and --password option")
except:
fatal_error("We couldn't detect PHP. You can run the importer using the --login and --password option to fix this issue")
command = [phpBinary, updatetokenfile]
if self.options.enable_testmode:
command.append('--testmode')
hostname = urlparse.urlparse( self.options.piwik_url ).hostname
command.append('--piwik-domain=' + hostname )
command = subprocess.list2cmdline(command)
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
[stdout, stderr] = process.communicate()
if process.returncode != 0:
fatal_error("`" + command + "` failed with error: " + stderr + ".\nResponse code was: " + str(process.returncode) + ". You can alternatively run the importer using the --login and --password option")
filename = stdout
credentials = open(filename, 'r').readline()
credentials = credentials.split('\t')
return credentials[1]
def get_resolver(self):
if self.options.site_id:
logging.debug('Resolver: static')
return StaticResolver(self.options.site_id)
else:
logging.debug('Resolver: dynamic')
return DynamicResolver()
class Statistics(object):
"""
Store statistics about parsed logs and recorded entries.
Can optionally print statistics on standard output every second.
"""
class Counter(object):
"""
Simple integer increments are not atomic, so plain ints cannot be used safely by multithreaded programs. See:
http://stackoverflow.com/questions/6320107/are-python-ints-thread-safe
"""
def __init__(self):
# itertools.count's implementation in C does not release the GIL and
# therefore is thread-safe.
self.counter = itertools.count(1)
self.value = 0
def increment(self):
self.value = self.counter.next()
def advance(self, n):
for i in range(n):
self.increment()
def __str__(self):
return str(int(self.value))
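# Rough illustration of why the wrapper exists (sketch, not part of the original file):
# a plain "value += 1" compiles to separate load/add/store steps, so two threads can read
# the same old value and lose an increment, whereas itertools.count hands out each integer
# exactly once from C code. Usage:
#   c = Statistics.Counter()
#   c.increment(); c.advance(2)
#   str(c)  # -> '3'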
def __init__(self):
self.time_start = None
self.time_stop = None
self.piwik_sites = set() # sites ID
self.piwik_sites_created = [] # (hostname, site ID)
self.piwik_sites_ignored = set() # hostname
self.count_lines_parsed = self.Counter()
self.count_lines_recorded = self.Counter()
# requests that the Piwik tracker considered invalid (or failed to track)
self.invalid_lines = []
# Do not match the regexp.
self.count_lines_invalid = self.Counter()
# No site ID found by the resolver.
self.count_lines_no_site = self.Counter()
# Hostname filtered by config.options.hostnames
self.count_lines_hostname_skipped = self.Counter()
# Static files.
self.count_lines_static = self.Counter()
# Ignored user-agents.
self.count_lines_skipped_user_agent = self.Counter()
# Ignored HTTP errors.
self.count_lines_skipped_http_errors = self.Counter()
# Ignored HTTP redirects.
self.count_lines_skipped_http_redirects = self.Counter()
# Downloads
self.count_lines_downloads = self.Counter()
# Ignored downloads when --download-extensions is used
self.count_lines_skipped_downloads = self.Counter()
# Misc
self.dates_recorded = set()
self.monitor_stop = False
def set_time_start(self):
self.time_start = time.time()
def set_time_stop(self):
self.time_stop = time.time()
def _compute_speed(self, value, start, end):
delta_time = end - start
if value == 0:
return 0
if delta_time == 0:
return 'very high!'
else:
return value / delta_time
def _round_value(self, value, base=100):
return round(value * base) / base
def _indent_text(self, lines, level=1):
"""
Return an indented text. 'lines' can be a list of lines or a single
line (as a string). One level of indentation is 4 spaces.
"""
prefix = ' ' * (4 * level)
if isinstance(lines, basestring):
return prefix + lines
else:
return '\n'.join(
prefix + line
for line in lines
)
def print_summary(self):
invalid_lines_summary = ''
if self.invalid_lines:
invalid_lines_summary = '''Invalid log lines
-----------------
The following lines were not tracked by Piwik, either due to a malformed tracker request or error in the tracker:
%s
''' % textwrap.fill(", ".join(self.invalid_lines), 80)
print '''
%(invalid_lines)sLogs import summary
-------------------
%(count_lines_recorded)d requests imported successfully
%(count_lines_downloads)d requests were downloads
%(total_lines_ignored)d requests ignored:
%(count_lines_skipped_http_errors)d HTTP errors
%(count_lines_skipped_http_redirects)d HTTP redirects
%(count_lines_invalid)d invalid log lines
%(count_lines_no_site)d requests did not match any known site
%(count_lines_hostname_skipped)d requests did not match any --hostname
%(count_lines_skipped_user_agent)d requests done by bots, search engines...
%(count_lines_static)d requests to static resources (css, js, images, ico, ttf...)
%(count_lines_skipped_downloads)d requests to file downloads did not match any --download-extensions
Website import summary
----------------------
%(count_lines_recorded)d requests imported to %(total_sites)d sites
%(total_sites_existing)d sites already existed
%(total_sites_created)d sites were created:
%(sites_created)s
%(total_sites_ignored)d distinct hostnames did not match any existing site:
%(sites_ignored)s
%(sites_ignored_tips)s
Performance summary
-------------------
Total time: %(total_time)d seconds
Requests imported per second: %(speed_recording)s requests per second
Processing your log data
------------------------
In order for your logs to be processed by Piwik, you may need to run the following command:
./console core:archive --force-all-websites --force-all-periods=315576000 --force-date-last-n=1000 --url='%(url)s'
''' % {
'count_lines_recorded': self.count_lines_recorded.value,
'count_lines_downloads': self.count_lines_downloads.value,
'total_lines_ignored': sum([
self.count_lines_invalid.value,
self.count_lines_skipped_user_agent.value,
self.count_lines_skipped_http_errors.value,
self.count_lines_skipped_http_redirects.value,
self.count_lines_static.value,
self.count_lines_skipped_downloads.value,
self.count_lines_no_site.value,
self.count_lines_hostname_skipped.value,
]),
'count_lines_invalid': self.count_lines_invalid.value,
'count_lines_skipped_user_agent': self.count_lines_skipped_user_agent.value,
'count_lines_skipped_http_errors': self.count_lines_skipped_http_errors.value,
'count_lines_skipped_http_redirects': self.count_lines_skipped_http_redirects.value,
'count_lines_static': self.count_lines_static.value,
'count_lines_skipped_downloads': self.count_lines_skipped_downloads.value,
'count_lines_no_site': self.count_lines_no_site.value,
'count_lines_hostname_skipped': self.count_lines_hostname_skipped.value,
'total_sites': len(self.piwik_sites),
'total_sites_existing': len(self.piwik_sites - set(site_id for hostname, site_id in self.piwik_sites_created)),
'total_sites_created': len(self.piwik_sites_created),
'sites_created': self._indent_text(
['%s (ID: %d)' % (hostname, site_id) for hostname, site_id in self.piwik_sites_created],
level=3,
),
'total_sites_ignored': len(self.piwik_sites_ignored),
'sites_ignored': self._indent_text(
self.piwik_sites_ignored, level=3,
),
'sites_ignored_tips': '''
TIPs:
- if one of these hosts is an alias host for one of the websites
in Piwik, you can add this host as an "Alias URL" in Settings > Websites.
- use --add-sites-new-hosts if you wish to automatically create
one website for each of these hosts in Piwik rather than discarding
these requests.
- use --idsite-fallback to force all these log lines with a new hostname
to be recorded in a specific idsite (for example for troubleshooting/visualizing the data)
- use --idsite to force all lines in the specified log files
to be all recorded in the specified idsite
- or you can also manually create a new Website in Piwik with the URL set to this hostname
''' if self.piwik_sites_ignored else '',
'total_time': self.time_stop - self.time_start,
'speed_recording': self._round_value(self._compute_speed(
self.count_lines_recorded.value,
self.time_start, self.time_stop,
)),
'url': config.options.piwik_url,
'invalid_lines': invalid_lines_summary
}
##
## The monitor is a thread that prints a short summary each second.
##
def _monitor(self):
latest_total_recorded = 0
while not self.monitor_stop:
current_total = stats.count_lines_recorded.value
time_elapsed = time.time() - self.time_start
print '%d lines parsed, %d lines recorded, %d records/sec (avg), %d records/sec (current)' % (
stats.count_lines_parsed.value,
current_total,
current_total / time_elapsed if time_elapsed != 0 else 0,
(current_total - latest_total_recorded) / config.options.show_progress_delay,
)
latest_total_recorded = current_total
time.sleep(config.options.show_progress_delay)
def start_monitor(self):
t = threading.Thread(target=self._monitor)
t.daemon = True
t.start()
def stop_monitor(self):
self.monitor_stop = True
class Piwik(object):
"""
Make requests to Piwik.
"""
class Error(Exception):
def __init__(self, message, code = None):
super(Exception, self).__init__(message)
self.code = code
class RedirectHandlerWithLogging(urllib2.HTTPRedirectHandler):
"""
Special implementation of HTTPRedirectHandler that logs redirects in debug mode
to help users debug system issues.
"""
def redirect_request(self, req, fp, code, msg, hdrs, newurl):
logging.debug("Request redirected (code: %s) to '%s'" % (code, newurl))
return urllib2.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl)
@staticmethod
def _call(path, args, headers=None, url=None, data=None):
"""
Make a request to the Piwik site. It is up to the caller to format
arguments, to embed authentication, etc.
"""
if url is None:
url = config.options.piwik_url
headers = headers or {}
if data is None:
# If Content-Type isn't defined, PHP does not parse the request's body.
headers['Content-type'] = 'application/x-www-form-urlencoded'
data = urllib.urlencode(args)
elif not isinstance(data, basestring) and headers['Content-type'] == 'application/json':
data = json.dumps(data)
if args:
path = path + '?' + urllib.urlencode(args)
headers['User-Agent'] = 'Piwik/LogImport'
try:
timeout = config.options.request_timeout
except:
timeout = None # the config global object may not be created at this point
request = urllib2.Request(url + path, data, headers)
opener = urllib2.build_opener(Piwik.RedirectHandlerWithLogging())
response = opener.open(request, timeout = timeout)
result = response.read()
response.close()
return result
@staticmethod
def _call_api(method, **kwargs):
"""
Make a request to the Piwik API taking care of authentication, body
formatting, etc.
"""
args = {
'module' : 'API',
'format' : 'json2',
'method' : method,
}
# token_auth, by default, is taken from config.
token_auth = kwargs.pop('_token_auth', None)
if token_auth is None:
token_auth = config.options.piwik_token_auth
if token_auth:
args['token_auth'] = token_auth
url = kwargs.pop('_url', None)
if kwargs:
args.update(kwargs)
# Convert lists into appropriate format.
# See: http://developer.piwik.org/api-reference/reporting-api#passing-an-array-of-data-as-a-parameter
# Warning: we have to pass the parameters in order: foo[0], foo[1], foo[2]
# and not foo[1], foo[0], foo[2] (it will break Piwik otherwise.)
final_args = []
for key, value in args.iteritems():
if isinstance(value, (list, tuple)):
for index, obj in enumerate(value):
final_args.append(('%s[%d]' % (key, index), obj))
else:
final_args.append((key, value))
res = Piwik._call('/', final_args, url=url)
try:
return json.loads(res)
except ValueError:
raise urllib2.URLError('Piwik returned an invalid response: ' + res)
@staticmethod
def _call_wrapper(func, expected_response, on_failure, *args, **kwargs):
"""
Try to make requests to Piwik at most PIWIK_FAILURE_MAX_RETRY times.
"""
errors = 0
while True:
try:
response = func(*args, **kwargs)
if expected_response is not None and response != expected_response:
if on_failure is not None:
error_message = on_failure(response, kwargs.get('data'))
else:
error_message = "didn't receive the expected response. Response was %s " % response
raise urllib2.URLError(error_message)
return response
except (urllib2.URLError, httplib.HTTPException, ValueError, socket.timeout), e:
logging.info('Error when connecting to Piwik: %s', e)
code = None
if isinstance(e, urllib2.HTTPError):
# See Python issue 13211.
message = 'HTTP Error %s %s' % (e.code, e.msg)
code = e.code
elif isinstance(e, urllib2.URLError):
message = e.reason
else:
message = str(e)
# decorate message w/ HTTP response, if it can be retrieved
if hasattr(e, 'read'):
message = message + ", response: " + e.read()
errors += 1
if errors == config.options.max_attempts:
logging.info("Max number of attempts reached, server is unreachable!")
raise Piwik.Error(message, code)
else:
logging.info("Retrying request, attempt number %d" % (errors + 1))
time.sleep(config.options.delay_after_failure)
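# Minimal sketch of the retry behaviour above (values follow the option defaults):
# a request that keeps failing is retried up to --retry-max-attempts times, sleeping
# --retry-delay seconds between attempts, and only then surfaces as Piwik.Error, e.g.
#   try:
#       piwik.call_api('SitesManager.getAllSites')
#   except Piwik.Error, e:
#       print e.code  # set when the last failure was an urllib2.HTTPError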
@classmethod
def call(cls, path, args, expected_content=None, headers=None, data=None, on_failure=None):
return cls._call_wrapper(cls._call, expected_content, on_failure, path, args, headers,
data=data)
@classmethod
def call_api(cls, method, **kwargs):
return cls._call_wrapper(cls._call_api, None, None, method, **kwargs)
##
## Resolvers.
##
## A resolver is a class that turns a hostname into a Piwik site ID.
##
class StaticResolver(object):
"""
Always return the same site ID, specified in the configuration.
"""
def __init__(self, site_id):
self.site_id = site_id
# Go get the main URL
site = piwik.call_api(
'SitesManager.getSiteFromId', idSite=self.site_id
)
if site.get('result') == 'error':
fatal_error(
"cannot get the main URL of this site: %s" % site.get('message')
)
self._main_url = site['main_url']
stats.piwik_sites.add(self.site_id)
def resolve(self, hit):
return (self.site_id, self._main_url)
def check_format(self, format):
pass
class DynamicResolver(object):
"""
Use Piwik API to determine the site ID.
"""
_add_site_lock = threading.Lock()
def __init__(self):
self._cache = {}
if config.options.replay_tracking:
# get existing sites
self._cache['sites'] = piwik.call_api('SitesManager.getAllSites')
def _get_site_id_from_hit_host(self, hit):
return piwik.call_api(
'SitesManager.getSitesIdFromSiteUrl',
url=hit.host,
)
def _add_site(self, hit):
main_url = 'http://' + hit.host
DynamicResolver._add_site_lock.acquire()
try:
# After we obtain the lock, make sure the site hasn't already been created.
res = self._get_site_id_from_hit_host(hit)
if res:
return res[0]['idsite']
# The site doesn't exist.
logging.debug('No Piwik site found for the hostname: %s', hit.host)
if config.options.site_id_fallback is not None:
logging.debug('Using default site for hostname: %s', hit.host)
return config.options.site_id_fallback
elif config.options.add_sites_new_hosts:
if config.options.dry_run:
# Let's just return a fake ID.
return 0
logging.debug('Creating a Piwik site for hostname %s', hit.host)
result = piwik.call_api(
'SitesManager.addSite',
siteName=hit.host,
urls=[main_url],
)
if result.get('result') == 'error':
logging.error("Couldn't create a Piwik site for host %s: %s",
hit.host, result.get('message'),
)
return None
else:
site_id = result['value']
stats.piwik_sites_created.append((hit.host, site_id))
return site_id
else:
# The site doesn't exist, we don't want to create new sites and
# there's no default site ID. We thus have to ignore this hit.
return None
finally:
DynamicResolver._add_site_lock.release()
def _resolve(self, hit):
res = self._get_site_id_from_hit_host(hit)
if res:
# The site already exists.
site_id = res[0]['idsite']
else:
site_id = self._add_site(hit)
if site_id is not None:
stats.piwik_sites.add(site_id)
return site_id
def _resolve_when_replay_tracking(self, hit):
"""
If the parsed site ID is found in _cache['sites'], return the site ID and main_url,
otherwise return (None, None) tuple.
"""
site_id = hit.args['idsite']
if site_id in self._cache['sites']:
stats.piwik_sites.add(site_id)
return (site_id, self._cache['sites'][site_id]['main_url'])
else:
return (None, None)
def _resolve_by_host(self, hit):
"""
Returns the site ID and site URL for a hit based on the hostname.
"""
try:
site_id = self._cache[hit.host]
except KeyError:
logging.debug(
'Site ID for hostname %s not in cache', hit.host
)
site_id = self._resolve(hit)
logging.debug('Site ID for hostname %s: %s', hit.host, site_id)
self._cache[hit.host] = site_id
return (site_id, 'http://' + hit.host)
def resolve(self, hit):
"""
Return the site ID from the cache if found, otherwise call _resolve.
If replay_tracking option is enabled, call _resolve_when_replay_tracking.
"""
if config.options.replay_tracking:
# We only consider requests with piwik.php which don't need host to be imported
return self._resolve_when_replay_tracking(hit)
else:
return self._resolve_by_host(hit)
def check_format(self, format):
if config.options.replay_tracking:
pass
elif format.regex is not None and 'host' not in format.regex.groupindex and not config.options.log_hostname:
fatal_error(
"the selected log format doesn't include the hostname: you must "
"specify the Piwik site ID with the --idsite argument"
)
class Recorder(object):
"""
A Recorder fetches hits from the Queue and inserts them into Piwik using
the API.
"""
recorders = []
def __init__(self):
self.queue = Queue.Queue(maxsize=2)
# if bulk tracking disabled, make sure we can store hits outside of the Queue
if not config.options.use_bulk_tracking:
self.unrecorded_hits = []
@classmethod
def launch(cls, recorder_count):
"""
Launch a bunch of Recorder objects in a separate thread.
"""
for i in xrange(recorder_count):
recorder = Recorder()
cls.recorders.append(recorder)
run = recorder._run_bulk if config.options.use_bulk_tracking else recorder._run_single
t = threading.Thread(target=run)
t.daemon = True
t.start()
logging.debug('Launched recorder')
@classmethod
def add_hits(cls, all_hits):
"""
Add a set of hits to the recorders queue.
"""
# Organize hits so that one client IP will always use the same queue.
# We have to do this so visits from the same IP will be added in the right order.
hits_by_client = [[] for r in cls.recorders]
for hit in all_hits:
hits_by_client[hit.get_visitor_id_hash() % len(cls.recorders)].append(hit)
for i, recorder in enumerate(cls.recorders):
recorder.queue.put(hits_by_client[i])
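# Example of the sharding above (sketch, assuming two recorders): hits whose
# get_visitor_id_hash() is even go to cls.recorders[0] and odd hashes to cls.recorders[1],
# so every hit from a given visitor is handled by the same recorder, preserving order.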
@classmethod
def wait_empty(cls):
"""
Wait until all recorders have an empty queue.
"""
for recorder in cls.recorders:
recorder._wait_empty()
def _run_bulk(self):
while True:
try:
hits = self.queue.get()
except:
# TODO: we should log something here, however when this happens, logging.etc will throw
return
if len(hits) > 0:
try:
self._record_hits(hits)
except Piwik.Error, e:
fatal_error(e, hits[0].filename, hits[0].lineno) # approximate location of error
self.queue.task_done()
def _run_single(self):
while True:
if config.options.force_one_action_interval != False:
time.sleep(config.options.force_one_action_interval)
if len(self.unrecorded_hits) > 0:
hit = self.unrecorded_hits.pop(0)
try:
self._record_hits([hit])
except Piwik.Error, e:
fatal_error(e, hit.filename, hit.lineno)
else:
self.unrecorded_hits = self.queue.get()
self.queue.task_done()
def _wait_empty(self):
"""
Wait until the queue is empty.
"""
while True:
if self.queue.empty():
# We still have to wait for the last queue item being processed
# (queue.empty() returns True before queue.task_done() is
# called).
self.queue.join()
return
time.sleep(1)
def date_to_piwik(self, date):
date, time = date.isoformat(sep=' ').split()
return '%s %s' % (date, time.replace('-', ':'))
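# Sketch of the conversion above (example values are made up):
#   self.date_to_piwik(datetime.datetime(2016, 5, 1, 13, 14, 15))  # -> '2016-05-01 13:14:15'
# isoformat(sep=' ') already yields that form, so replace('-', ':') is a no-op for
# standard times and only matters for unusual time strings using '-' as a separator.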
def _get_hit_args(self, hit):
"""
Returns the args used in tracking a hit, without the token_auth.
"""
site_id, main_url = resolver.resolve(hit)
if site_id is None:
# This hit doesn't match any known Piwik site.
if config.options.replay_tracking:
stats.piwik_sites_ignored.add('unrecognized site ID %s' % hit.args.get('idsite'))
else:
stats.piwik_sites_ignored.add(hit.host)
stats.count_lines_no_site.increment()
return
stats.dates_recorded.add(hit.date.date())
path = hit.path
if hit.query_string and not config.options.strip_query_string:
path += config.options.query_string_delimiter + hit.query_string
# only prepend main url / host if it's a path
url_prefix = self._get_host_with_protocol(hit.host, main_url) if hasattr(hit, 'host') else main_url
url = (url_prefix if path.startswith('/') else '') + path[:1024]
# handle custom variables before generating args dict
if config.options.enable_bots:
if hit.is_robot:
hit.add_visit_custom_var("Bot", hit.user_agent)
else:
hit.add_visit_custom_var("Not-Bot", hit.user_agent)
hit.add_page_custom_var("HTTP-code", hit.status)
args = {
'rec': '1',
'apiv': '1',
'url': url.encode('utf8'),
'urlref': hit.referrer[:1024].encode('utf8'),
'cip': hit.ip,
'cdt': self.date_to_piwik(hit.date),
'idsite': site_id,
'dp': '0' if config.options.reverse_dns else '1',
'ua': hit.user_agent.encode('utf8')
}
if config.options.replay_tracking:
# prevent the request from being force-recorded when the replay-tracking option is used
args['rec'] = '0'
# idsite is already determined by resolver
if 'idsite' in hit.args:
del hit.args['idsite']
args.update(hit.args)
if hit.is_download:
args['download'] = args['url']
if config.options.enable_bots:
args['bots'] = '1'
if hit.is_error or hit.is_redirect:
args['action_name'] = '%s%sURL = %s%s' % (
hit.status,
config.options.title_category_delimiter,
urllib.quote(args['url'], ''),
("%sFrom = %s" % (
config.options.title_category_delimiter,
urllib.quote(args['urlref'], '')
) if args['urlref'] != '' else '')
)
if hit.generation_time_milli > 0:
args['gt_ms'] = int(hit.generation_time_milli)
if hit.event_category and hit.event_action:
args['e_c'] = hit.event_category
args['e_a'] = hit.event_action
if hit.event_name:
args['e_n'] = hit.event_name
if hit.length:
args['bw_bytes'] = hit.length
# convert custom variable args to JSON
if 'cvar' in args and not isinstance(args['cvar'], basestring):
args['cvar'] = json.dumps(args['cvar'])
if '_cvar' in args and not isinstance(args['_cvar'], basestring):
args['_cvar'] = json.dumps(args['_cvar'])
return args
def _get_host_with_protocol(self, host, main_url):
if '://' not in host:
parts = urlparse.urlparse(main_url)
host = parts.scheme + '://' + host
return host
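# Illustration (hypothetical values):
#   self._get_host_with_protocol('example.com', 'https://mysite.org/')  # -> 'https://example.com'
# The scheme is borrowed from the site's main URL; a host already containing '://' is
# returned unchanged.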
def _record_hits(self, hits):
"""
Inserts several hits into Piwik.
"""
if not config.options.dry_run:
data = {
'token_auth': config.options.piwik_token_auth,
'requests': [self._get_hit_args(hit) for hit in hits]
}
try:
args = {}
if config.options.debug_tracker:
args['debug'] = '1'
response = piwik.call(
'/piwik.php', args=args,
expected_content=None,
headers={'Content-type': 'application/json'},
data=data,
on_failure=self._on_tracking_failure
)
if config.options.debug_tracker:
logging.debug('tracker response:\n%s' % response)
# check for invalid requests
try:
response = json.loads(response)
except:
logging.info("bulk tracking returned invalid JSON")
# don't display the tracker response if we're debugging the tracker.
# debug tracker output will always break the normal JSON output.
if not config.options.debug_tracker:
logging.info("tracker response:\n%s" % response)
response = {}
if ('invalid_indices' in response and isinstance(response['invalid_indices'], list) and
response['invalid_indices']):
invalid_count = len(response['invalid_indices'])
invalid_lines = [str(hits[index].lineno) for index in response['invalid_indices']]
invalid_lines_str = ", ".join(invalid_lines)
stats.invalid_lines.extend(invalid_lines)
logging.info("The Piwik tracker identified %s invalid requests on lines: %s" % (invalid_count, invalid_lines_str))
elif 'invalid' in response and response['invalid'] > 0:
logging.info("The Piwik tracker identified %s invalid requests." % response['invalid'])
except Piwik.Error, e:
# if the server returned 400 code, BulkTracking may not be enabled
if e.code == 400:
fatal_error("Server returned status 400 (Bad Request).\nIs the BulkTracking plugin disabled?", hits[0].filename, hits[0].lineno)
raise
stats.count_lines_recorded.advance(len(hits))
def _is_json(self, result):
try:
json.loads(result)
return True
except ValueError, e:
return False
def _on_tracking_failure(self, response, data):
"""
Removes the successfully tracked hits from the request payload so
they are not logged twice.
"""
try:
response = json.loads(response)
except:
# the response should be in JSON, but in case it can't be parsed just try another attempt
logging.debug("cannot parse tracker response, should be valid JSON")
return response
# remove the successfully tracked hits from payload
tracked = response['tracked']
data['requests'] = data['requests'][tracked:]
return response['message']
class Hit(object):
"""
It's a simple container.
"""
def __init__(self, **kwargs):
for key, value in kwargs.iteritems():
setattr(self, key, value)
super(Hit, self).__init__()
if config.options.force_lowercase_path:
self.full_path = self.full_path.lower()
def get_visitor_id_hash(self):
visitor_id = self.ip
if config.options.replay_tracking:
for param_name_to_use in ['uid', 'cid', '_id', 'cip']:
if param_name_to_use in self.args:
visitor_id = self.args[param_name_to_use]
break
return abs(hash(visitor_id))
def add_page_custom_var(self, key, value):
"""
Adds a page custom variable to this Hit.
"""
self._add_custom_var(key, value, 'cvar')
def add_visit_custom_var(self, key, value):
"""
Adds a visit custom variable to this Hit.
"""
self._add_custom_var(key, value, '_cvar')
def _add_custom_var(self, key, value, api_arg_name):
if api_arg_name not in self.args:
self.args[api_arg_name] = {}
if isinstance(self.args[api_arg_name], basestring):
logging.debug("Ignoring custom %s variable addition [ %s = %s ], custom var already set to string." % (api_arg_name, key, value))
return
index = len(self.args[api_arg_name]) + 1
self.args[api_arg_name][index] = [key, value]
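# Sketch of the structure built above (values are made up): after
#   hit.add_visit_custom_var("Bot", "curl/7.0")
# hit.args['_cvar'] == {1: ["Bot", "curl/7.0"]}; _get_hit_args() later JSON-encodes this
# dict before the hit is sent to the tracker.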
class Parser(object):
"""
The Parser parses the lines in a specified file and inserts them into
a Queue.
"""
def __init__(self):
self.check_methods = [method for name, method
in inspect.getmembers(self, predicate=inspect.ismethod)
if name.startswith('check_')]
## All check_* methods are called for each hit and must return True if the
## hit can be imported, False otherwise.
def check_hostname(self, hit):
# Check against config.hostnames.
if not hasattr(hit, 'host') or not config.options.hostnames:
return True
# Accept the hostname only if it matches one pattern in the list.
result = any(
fnmatch.fnmatch(hit.host, pattern)
for pattern in config.options.hostnames
)
if not result:
stats.count_lines_hostname_skipped.increment()
return result
def check_static(self, hit):
if hit.extension in STATIC_EXTENSIONS:
if config.options.enable_static:
hit.is_download = True
return True
else:
stats.count_lines_static.increment()
return False
return True
def check_download(self, hit):
if hit.extension in config.options.download_extensions:
stats.count_lines_downloads.increment()
hit.is_download = True
return True
# the file is not in the white-listed downloads
# if it's a known download file, we skip it
elif hit.extension in DOWNLOAD_EXTENSIONS:
stats.count_lines_skipped_downloads.increment()
return False
return True
def check_user_agent(self, hit):
user_agent = hit.user_agent.lower()
for s in itertools.chain(EXCLUDED_USER_AGENTS, config.options.excluded_useragents):
if s in user_agent:
if config.options.enable_bots:
hit.is_robot = True
return True
else:
stats.count_lines_skipped_user_agent.increment()
return False
return True
def check_http_error(self, hit):
if hit.status[0] in ('4', '5'):
if config.options.replay_tracking:
# process error logs for replay tracking, since we don't care if Piwik errored the first time
return True
elif config.options.enable_http_errors:
hit.is_error = True
return True
else:
stats.count_lines_skipped_http_errors.increment()
return False
return True
def check_http_redirect(self, hit):
if hit.status[0] == '3' and hit.status != '304':
if config.options.enable_http_redirects:
hit.is_redirect = True
return True
else:
stats.count_lines_skipped_http_redirects.increment()
return False
return True
def check_path(self, hit):
for excluded_path in config.options.excluded_paths:
if fnmatch.fnmatch(hit.path, excluded_path):
return False
# By default, all paths are included.
if config.options.included_paths:
for included_path in config.options.included_paths:
if fnmatch.fnmatch(hit.path, included_path):
return True
return False
return True
@staticmethod
def check_format(lineOrFile):
format = False
format_groups = 0
for name, candidate_format in FORMATS.iteritems():
logging.debug("Check format %s", name)
match = None
try:
if isinstance(lineOrFile, basestring):
match = candidate_format.check_format_line(lineOrFile)
else:
match = candidate_format.check_format(lineOrFile)
except Exception, e:
logging.debug('Error in format checking: %s', traceback.format_exc())
pass
if match:
logging.debug('Format %s matches', name)
# compare format groups if this *BaseFormat has groups() method
try:
# if there's more info in this match, use this format
match_groups = len(match.groups())
logging.debug('Format match contains %d groups' % match_groups)
if format_groups < match_groups:
format = candidate_format
format_groups = match_groups
except AttributeError:
format = candidate_format
else:
logging.debug('Format %s does not match', name)
# if the format is W3cExtendedFormat, check if the logs are from IIS and if so, issue a warning if the
# --w3c-time-taken-milli option isn't set
if isinstance(format, W3cExtendedFormat):
format.check_for_iis_option()
return format
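# Informal note on the selection rule above (summary only, no extra behaviour): when
# several formats match the sample, the candidate whose match exposes the most named
# regex groups wins, so a format that also captures e.g. referrer and user agent is
# preferred over one that captures only the basic fields.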
@staticmethod
def detect_format(file):
"""
Return the best matching format for this file, or None if none was found.
"""
logging.debug('Detecting the log format')
format = False
# check the format using the file (for formats like the W3cExtendedFormat one)
format = Parser.check_format(file)
# check the format using the first N lines (to avoid irregular ones)
lineno = 0
limit = 100000
while not format and lineno < limit:
line = file.readline()
if not line: # if at eof, don't keep looping
break
lineno = lineno + 1
logging.debug("Detecting format against line %i" % lineno)
format = Parser.check_format(line)
try:
file.seek(0)
except IOError:
pass
if not format:
fatal_error("cannot automatically determine the log format using the first %d lines of the log file. " % limit +
"\nMaybe try specifying the format with the --log-format-name command line argument." )
return
logging.debug('Format %s is the best match', format.name)
return format
def parse(self, filename):
"""
Parse the specified filename and insert hits in the queue.
"""
def invalid_line(line, reason):
stats.count_lines_invalid.increment()
if config.options.debug >= 2:
logging.debug('Invalid line detected (%s): %s' % (reason, line))
if filename == '-':
filename = '(stdin)'
file = sys.stdin
else:
if not os.path.exists(filename):
print >> sys.stderr, "\n=====> Warning: File %s does not exist <=====" % filename
return
else:
if filename.endswith('.bz2'):
open_func = bz2.BZ2File
elif filename.endswith('.gz'):
open_func = gzip.open
else:
open_func = open
file = open_func(filename, 'r')
if config.options.show_progress:
print 'Parsing log %s...' % filename
if config.format:
# The format was explicitly specified.
format = config.format
if isinstance(format, W3cExtendedFormat):
format.create_regex(file)
if format.regex is None:
return fatal_error(
"File is not in the correct format, is there a '#Fields:' line? "
"If not, use the --w3c-fields option."
)
else:
# If the file is empty, don't bother.
data = file.read(100)
if len(data.strip()) == 0:
return
try:
file.seek(0)
except IOError:
pass
format = self.detect_format(file)
if format is None:
return fatal_error(
'Cannot guess the logs format. Please give one using '
'either the --log-format-name or --log-format-regex option'
)
# Make sure the format is compatible with the resolver.
resolver.check_format(format)
if config.options.dump_log_regex:
logging.info("Using format '%s'." % format.name)
if format.regex:
logging.info("Regex being used: %s" % format.regex.pattern)
else:
logging.info("Format %s does not use a regex to parse log lines." % format.name)
logging.info("--dump-log-regex option used, aborting log import.")
os._exit(0)
valid_lines_count = 0
hits = []
for lineno, line in enumerate(file):
try:
line = line.decode(config.options.encoding)
except UnicodeDecodeError:
invalid_line(line, 'invalid encoding')
continue
stats.count_lines_parsed.increment()
if stats.count_lines_parsed.value <= config.options.skip:
continue
match = format.match(line)
if not match:
invalid_line(line, 'line did not match')
continue
valid_lines_count = valid_lines_count + 1
if config.options.debug_request_limit and valid_lines_count >= config.options.debug_request_limit:
if len(hits) > 0:
Recorder.add_hits(hits)
logging.info("Exceeded limit specified in --debug-request-limit, exiting.")
return
hit = Hit(
filename=filename,
lineno=lineno,
status=format.get('status'),
full_path=format.get('path'),
is_download=False,
is_robot=False,
is_error=False,
is_redirect=False,
args={},
)
if config.options.regex_group_to_page_cvars_map:
self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_page_cvars_map, True)
if config.options.regex_group_to_visit_cvars_map:
self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_visit_cvars_map, False)
if config.options.regex_groups_to_ignore:
format.remove_ignored_groups(config.options.regex_groups_to_ignore)
try:
hit.query_string = format.get('query_string')
hit.path = hit.full_path
except BaseFormatException:
hit.path, _, hit.query_string = hit.full_path.partition(config.options.query_string_delimiter)
# W3cExtendedFormat defaults to - when there is no query string, but we want an empty string
if hit.query_string == '-':
hit.query_string = ''
hit.extension = hit.path.rsplit('.')[-1].lower()
try:
hit.referrer = format.get('referrer')
if hit.referrer.startswith('"'):
hit.referrer = hit.referrer[1:-1]
except BaseFormatException:
hit.referrer = ''
if hit.referrer == '-':
hit.referrer = ''
try:
hit.user_agent = format.get('user_agent')
# in case a format parser included enclosing quotes, remove them so they are not
# sent to Piwik
if hit.user_agent.startswith('"'):
hit.user_agent = hit.user_agent[1:-1]
except BaseFormatException:
hit.user_agent = ''
hit.ip = format.get('ip')
try:
hit.length = int(format.get('length'))
except (ValueError, BaseFormatException):
# Some lines or formats don't have a length (e.g. 304 redirects, W3C logs)
hit.length = 0
try:
hit.generation_time_milli = float(format.get('generation_time_milli'))
except BaseFormatException:
try:
hit.generation_time_milli = float(format.get('generation_time_micro')) / 1000
except BaseFormatException:
try:
hit.generation_time_milli = float(format.get('generation_time_secs')) * 1000
except BaseFormatException:
hit.generation_time_milli = 0
if config.options.log_hostname:
hit.host = config.options.log_hostname
else:
try:
hit.host = format.get('host').lower().strip('.')
if hit.host.startswith('"'):
hit.host = hit.host[1:-1]
except BaseFormatException:
# Some formats have no host.
pass
# Add userid
try:
hit.userid = None
userid = format.get('userid')
if userid != '-':
hit.args['uid'] = hit.userid = userid
except:
pass
# add event info
try:
hit.event_category = hit.event_action = hit.event_name = None
hit.event_category = format.get('event_category')
hit.event_action = format.get('event_action')
hit.event_name = format.get('event_name')
if hit.event_name == '-':
hit.event_name = None
except:
pass
# Check if the hit must be excluded.
if not all((method(hit) for method in self.check_methods)):
continue
# Parse date.
# We parse it after calling check_methods as it's quite CPU hungry, and
# we want to avoid that cost for excluded hits.
date_string = format.get('date')
try:
hit.date = datetime.datetime.strptime(date_string, format.date_format)
except ValueError, e:
invalid_line(line, 'invalid date or invalid format: %s' % str(e))
continue
# Parse the timezone and subtract its value from the date
try:
timezone = float(format.get('timezone'))
except BaseFormatException:
timezone = 0
except ValueError:
invalid_line(line, 'invalid timezone')
continue
if timezone:
hit.date -= datetime.timedelta(hours=timezone/100)
if config.options.replay_tracking:
# we need a query string and we only consider requests with piwik.php
if not hit.query_string or not hit.path.lower().endswith(config.options.replay_tracking_expected_tracker_file):
invalid_line(line, 'no query string, or ' + hit.path.lower() + ' does not end with piwik.php')
continue
query_arguments = urlparse.parse_qs(hit.query_string)
if not "idsite" in query_arguments:
invalid_line(line, 'missing idsite')
continue
try:
hit.args.update((k, v.pop().encode('raw_unicode_escape').decode(config.options.encoding)) for k, v in query_arguments.iteritems())
except UnicodeDecodeError:
invalid_line(line, 'invalid encoding')
continue
hits.append(hit)
if len(hits) >= config.options.recorder_max_payload_size * len(Recorder.recorders):
Recorder.add_hits(hits)
hits = []
# add last chunk of hits
if len(hits) > 0:
Recorder.add_hits(hits)
def _add_custom_vars_from_regex_groups(self, hit, format, groups, is_page_var):
for group_name, custom_var_name in groups.iteritems():
if group_name in format.get_all():
value = format.get(group_name)
# don't track the '-' empty placeholder value
if value == '-':
continue
if is_page_var:
hit.add_page_custom_var(custom_var_name, value)
else:
hit.add_visit_custom_var(custom_var_name, value)
def main():
"""
Start the importing process.
"""
stats.set_time_start()
if config.options.show_progress:
stats.start_monitor()
recorders = Recorder.launch(config.options.recorders)
try:
for filename in config.filenames:
parser.parse(filename)
Recorder.wait_empty()
except KeyboardInterrupt:
pass
stats.set_time_stop()
if config.options.show_progress:
stats.stop_monitor()
stats.print_summary()
def fatal_error(error, filename=None, lineno=None):
print >> sys.stderr, 'Fatal error: %s' % error
if filename and lineno is not None:
print >> sys.stderr, (
'You can restart the import of "%s" from the point it failed by '
'specifying --skip=%d on the command line.\n' % (filename, lineno)
)
os._exit(1)
if __name__ == '__main__':
try:
piwik = Piwik()
config = Configuration()
stats = Statistics()
resolver = config.get_resolver()
parser = Parser()
main()
sys.exit(0)
except KeyboardInterrupt:
pass
| oluabbeys/drupzod | piwik/misc/log-analytics/import_logs.py | Python | gpl-2.0 | 90,649 | ["VisIt"] | 62ab684ad0661f742637b32f3a7a34a2588df7bb247ffaffc55efd82802fc915 |
#!../../../../virtualenv/bin/python3
# -*- coding: utf-8 -*-
# NB: The shebang line above assumes you've installed a python virtual environment alongside your working copy of the
# <4most-4gp-scripts> git repository. It also only works if you invoke this python script from the directory where it
# is located. If these two assumptions are incorrect (e.g. you're using Conda), you can still use this script by typing
# <python convolve_library.py>, but <./convolve_library.py> will not work.
"""
Take a library of spectra, and convolve each spectrum with some convolution kernel.
"""
import argparse
import logging
import os
import re
import time
from os import path as os_path
import numpy as np
from fourgp_speclib import SpectrumLibrarySqlite, Spectrum
from scipy.stats import norm
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s',
datefmt='%d/%m/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
# Read input parameters
our_path = os_path.split(os_path.abspath(__file__))[0]
root_path = os_path.join(our_path, "../..")
pid = os.getpid()
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('--input-library',
required=False,
default="galah_test_sample_4fs_hrs_50only",
dest="input_library",
help="The name of the spectrum library we are to read input spectra from. A subset of the stars "
"in the input library may optionally be selected by suffixing its name with a comma-separated "
"list of constraints in [] brackets. Use the syntax my_library[Teff=3000] to demand equality, "
"or [0<[Fe/H]<0.2] to specify a range. We do not currently support other operators like "
"[Teff>5000], but such ranges are easy to recast is a range, e.g. [5000<Teff<9999].")
parser.add_argument('--output-library',
required=False,
default="galah_test_sample_4fs_hrs_convolved",
dest="output_library",
help="The name of the spectrum library we are to feed the convolved spectra into.")
parser.add_argument('--workspace', dest='workspace', default="",
help="Directory where we expect to find spectrum libraries.")
parser.add_argument('--width',
required=False,
default="1.7",
dest="width",
help="The width of the half-ellipse convolution function.")
parser.add_argument('--kernel',
choices=["gaussian", "half_ellipse"],
required=False,
default="gaussian",
dest="kernel",
help="Select the convolution kernel to use.")
parser.add_argument('--create',
action='store_true',
dest="create",
help="Create a clean spectrum library to feed output spectra into. Will throw an error if "
"a spectrum library already exists with the same name.")
parser.add_argument('--no-create',
action='store_false',
dest="create",
help="Do not create a clean spectrum library to feed output spectra into.")
parser.set_defaults(create=True)
parser.add_argument('--db-in-tmp',
action='store_true',
dest="db_in_tmp",
help="Symlink database into /tmp while we're putting data into it (for performance). "
"Don't mess with this option unless you know what you're doing.")
parser.add_argument('--no-db-in-tmp',
action='store_false',
dest="db_in_tmp",
help="Do not symlink database into /tmp while we're putting data into it. Recommended")
parser.set_defaults(db_in_tmp=False)
parser.add_argument('--log-file',
required=False,
default="/tmp/half_ellipse_convolution_{}.log".format(pid),
dest="log_to",
help="Specify a log file where we log our progress.")
args = parser.parse_args()
logger.info("Adding {} convolution to spectra from <{}>, going into <{}>".format(args.kernel,
args.input_library,
args.output_library))
# Set path to workspace where we create libraries of spectra
workspace = args.workspace if args.workspace else os_path.join(our_path, "../../../workspace")
os.system("mkdir -p {}".format(workspace))
# Open input SpectrumLibrary, and search for flux normalised spectra meeting our filtering constraints
spectra = SpectrumLibrarySqlite.open_and_search(library_spec=args.input_library,
workspace=workspace,
extra_constraints={}
)
# Get a list of the spectrum IDs which we were returned
input_library, input_spectra_ids, input_spectra_constraints = [spectra[i] for i in ("library", "items", "constraints")]
# Create new spectrum library for output
library_name = re.sub("/", "_", args.output_library)
library_path = os_path.join(workspace, library_name)
output_library = SpectrumLibrarySqlite(path=library_path, create=args.create)
# We may want to symlink the sqlite3 database file into /tmp for performance reasons
# This bit of crack-on-a-stick is only useful if /tmp is on a ram disk, though...
if args.db_in_tmp:
del output_library
os.system("mv {} /tmp/tmp_{}.db".format(os_path.join(library_path, "index.db"), library_name))
os.system("ln -s /tmp/tmp_{}.db {}".format(library_name, os_path.join(library_path, "index.db")))
output_library = SpectrumLibrarySqlite(path=library_path, create=False)
# Parse the half-ellipse width that the user specified on the command line
kernel_width = float(args.width)
# Create half-ellipse convolution function
convolution_raster = np.arange(-5, 5.1)
if args.kernel == "half_ellipse":
convolution_kernel = np.sqrt(np.maximum(0, 1 - convolution_raster ** 2 / kernel_width ** 2))
elif args.kernel == "gaussian":
convolution_kernel = (norm.cdf((convolution_raster + 0.5) / kernel_width) -
norm.cdf((convolution_raster - 0.5) / kernel_width))
else:
assert False, "Unknown convolution kernel <{}>".format(args.kernel)
# Normalise convolution kernel
convolution_kernel /= sum(convolution_kernel)
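# Worked example of the kernel above (sketch, using the script's defaults): with
# kernel_width = 1.7 and convolution_raster = [-5, -4, ..., 5], each Gaussian weight is
# the probability mass of a unit-wide pixel,
#   norm.cdf((x + 0.5) / 1.7) - norm.cdf((x - 0.5) / 1.7),
# and dividing by the sum makes the weights total 1 so the convolution conserves flux.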
# Start making a log file
with open(args.log_to, "w") as result_log:
# Loop over spectra to process
for input_spectrum_id in input_spectra_ids:
logger.info("Working on <{}>".format(input_spectrum_id['filename']))
# Open Spectrum data from disk
input_spectrum_array = input_library.open(ids=input_spectrum_id['specId'])
# Turn SpectrumArray object into a Spectrum object
input_spectrum = input_spectrum_array.extract_item(0)
# Look up the unique ID of the star we've just loaded
# Newer spectrum libraries have a uid field which is guaranteed unique; for older spectrum libraries use
# Starname instead.
# Work out which field we're using (uid or Starname)
spectrum_matching_field = 'uid' if 'uid' in input_spectrum.metadata else 'Starname'
# Look up the unique ID of this object
object_name = input_spectrum.metadata[spectrum_matching_field]
# Write log message
result_log.write("\n[{}] {}... ".format(time.asctime(), object_name))
result_log.flush()
# Convolve spectrum
flux_data = input_spectrum.values
flux_data_convolved = np.convolve(a=flux_data, v=convolution_kernel, mode='same')
flux_errors = input_spectrum.value_errors
flux_errors_convolved = np.convolve(a=flux_errors, v=convolution_kernel, mode='same')
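        # Note: mode='same' keeps both convolved arrays the same length as the input spectrum.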
output_spectrum = Spectrum(wavelengths=input_spectrum.wavelengths,
values=flux_data_convolved,
value_errors=flux_errors_convolved,
metadata=input_spectrum.metadata
)
# Import degraded spectra into output spectrum library
output_library.insert(spectra=output_spectrum,
filenames=input_spectrum_id['filename'],
metadata_list={"convolution_width": kernel_width,
"convolution_kernel": args.kernel})
# If we put database in /tmp while adding entries to it, now return it to original location
if args.db_in_tmp:
del output_library
os.system("mv /tmp/tmp_{}.db {}".format(library_name, os_path.join(library_path, "index.db")))
|
dcf21/4most-4gp-scripts
|
src/scripts/degrade_spectra/convolve_library.py
|
Python
|
mit
| 8,999
|
[
"Gaussian"
] |
cd944badaab119e41e7cbd463109dfcddc9984405f81162243dcb4413ca04980
|
# -*- coding: utf-8 -*-
import vtk

def VtkDibujaLineas(nmbActor):
    # Defines the actor used to draw the line elements.
    # Note: ``ugrid`` and ``renderer`` are expected to be defined in the enclosing module/caller scope.
    ugridMapper = vtk.vtkDataSetMapper()
    ugridMapper.SetInput(ugrid)  # SetInput() returns None, so the mapper must be created first and kept
    nmbActor = vtk.vtkActor()
    nmbActor.SetMapper(ugridMapper)
    nmbActor.GetProperty().SetColor(0, 0, 0)
    nmbActor.GetProperty().SetRepresentationToWireframe()
    renderer.AddActor(nmbActor)
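
# Illustrative (hypothetical) usage, assuming the calling module has already created ``ugrid`` and
# ``renderer``; the name passed in is rebound to a fresh vtkActor inside the function:
#   VtkDibujaLineas("lineActor")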
|
lcpt/xc
|
python_modules/postprocess/xcVtk/CAD_model/vtk_plot_lines.py
|
Python
|
gpl-3.0
| 352
|
[
"VTK"
] |
06464ebeb55a0dcb2a282d5739e2dc65c3ed8ce7851b2a1b81a44c0dbd697459
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import open_connect.connect_core.utils.models
import django_extensions.db.fields
import django.utils.timezone
from django.conf import settings
from pytz import common_timezones
from open_connect.connect_core.utils.location import STATES
import open_connect.accounts.models
TIMEZONE_CHOICES = [(tz, tz) for tz in common_timezones if tz.startswith('US/')]
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.TextField(max_length=200, unique=True, verbose_name='username')),
('email', models.EmailField(help_text=b'The email account notifications are sent to. This will not change the email address you use to login.', unique=True, max_length=254, verbose_name='Notification Email')),
('is_staff', models.BooleanField(default=False, verbose_name='staff status')),
('is_active', models.BooleanField(default=True, verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('modified_at', models.DateTimeField(auto_now=True)),
('first_name', models.CharField(max_length=255, blank=True)),
('last_name', models.CharField(max_length=255, blank=True)),
('biography', models.TextField(blank=True)),
('timezone', models.CharField(default=b'US/Central', max_length=255, choices=TIMEZONE_CHOICES)),
('uuid', django_extensions.db.fields.UUIDField(max_length=36, editable=False, blank=True)),
('unsubscribed', models.BooleanField(default=False)),
('is_banned', models.BooleanField(default=False)),
('group_notification_period', models.CharField(default=b'immediate', max_length=50, verbose_name='Default Notification Setting', choices=[(b'none', b"Don't send email notifications"), (b'daily', b'Send a daily digest'), (b'immediate', b'Send me an email for every new message')])),
('direct_notification_period', models.CharField(default=b'immediate', max_length=50, choices=[(b'none', b"Don't send email notifications"), (b'daily', b'Send a daily digest'), (b'immediate', b'Send me an email for every new message')])),
('moderator_notification_period', models.IntegerField(default=1, help_text=b'Minimum time between notifications of new messages to moderate', verbose_name='Moderation Notification Time Period', choices=[(1, b'Hourly'), (4, b'Every 4 Hours'), (12, b'Every 12 Hours'), (24, b'Once Per Day'), (0, b'No New Moderation Notifications')])),
('phone', models.CharField(max_length=30, blank=True)),
('zip_code', models.CharField(max_length=10, blank=True)),
('state', models.CharField(blank=True, max_length=2, choices=[(state, state) for state in STATES])),
('facebook_url', models.URLField(blank=True)),
('twitter_handle', models.CharField(blank=True, max_length=20, validators=[open_connect.accounts.models.validate_twitter_handle])),
('website_url', models.URLField(blank=True)),
('invite_verified', models.BooleanField(default=True)),
('show_groups_on_profile', models.BooleanField(default=True, help_text='Can we display the groups you belong to on your public profile?')),
('tos_accepted_at', models.DateTimeField(null=True, blank=True)),
('ucoc_accepted_at', models.DateTimeField(null=True, blank=True)),
('has_viewed_tutorial', models.BooleanField(default=False)),
('receive_group_join_notifications', models.BooleanField(default=True, help_text='Would you like to receive notifications when new users join your groups?')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'permissions': (('can_view_banned', 'Can view banned users.'), ('can_ban', 'Can ban users.'), ('can_unban', 'Can unban users.'), ('can_view_user_report', 'Can view user report.'), ('can_view_group_report', 'Can view group report.'), ('can_impersonate', 'Can impersonate other users.'), ('can_moderate_all_messages', 'Can moderate all messages.'), ('can_initiate_direct_messages', 'Can initiate direct messages.'), ('can_modify_permissions', 'Can modify user permissions.'))
},
bases=(open_connect.connect_core.utils.models.CacheMixinModel, models.Model),
),
migrations.CreateModel(
name='Invite',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('email', models.EmailField(unique=True, max_length=254)),
('is_staff', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
('consumed_at', models.DateTimeField(null=True, blank=True)),
('notified', models.DateTimeField(null=True, editable=False)),
('code', models.CharField(default=open_connect.accounts.models.generate_unique_invite_code, max_length=32)),
('consumed_by', models.ForeignKey(related_name='consumed_invite', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('created_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'get_latest_by': 'created_at',
'permissions': (('email_invites', 'Email Invites To Users'),),
},
bases=(open_connect.connect_core.utils.models.CacheMixinModel, models.Model),
),
migrations.CreateModel(
name='Visit',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('ip_address', models.GenericIPAddressField(null=True, blank=True)),
('user_agent', models.TextField(blank=True)),
('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'abstract': False,
},
bases=(open_connect.connect_core.utils.models.CacheMixinModel, models.Model),
),
]
|
lpatmo/actionify_the_news
|
open_connect/accounts/migrations/0001_initial.py
|
Python
|
mit
| 7,323
|
[
"VisIt"
] |
9e620ca0ce869fd9d66aa0fdef8bd6c42367296744f73b657b1ff97282207e1c
|
"""Collection of DIRAC useful file related modules.
.. warning::
By default on Error they return None.
"""
#pylint: skip-file
## getGlobbedFiles gives "RuntimeError: maximum recursion depth exceeded" in pylint
import os
import hashlib
import random
import glob
import sys
import re
import errno
__RCSID__ = "$Id$"
def mkDir( path ):
""" Emulate 'mkdir -p path' (if path exists already, don't raise an exception)
"""
try:
if os.path.isdir(path):
return
os.makedirs( path )
except OSError as osError:
if osError.errno == errno.EEXIST and os.path.isdir( path ):
pass
else:
raise
def mkLink( src, dst ):
""" Protected creation of simbolic link
"""
try:
os.symlink(src, dst)
except OSError as osError:
if osError.errno == errno.EEXIST and os.path.islink(dst) and os.path.realpath(dst) == src:
pass
else:
raise
def makeGuid( fileName = None ):
"""Utility to create GUID. If a filename is provided the
GUID will correspond to its content's hexadecimal md5 checksum.
Otherwise a random seed is used to create the GUID.
The format is capitalized 8-4-4-4-12.
.. warning::
Could return None in case of OSError or IOError.
:param string fileName: name of file
"""
myMd5 = hashlib.md5()
if fileName:
try:
with open( fileName, 'r' ) as fd:
data = fd.read( 10 * 1024 * 1024 )
myMd5.update( data )
except:
return None
else:
myMd5.update( str( random.getrandbits( 128 ) ) )
md5HexString = myMd5.hexdigest().upper()
return generateGuid( md5HexString, "MD5" )
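# Illustrative example (not in the original module): for an empty file the content md5 is
# d41d8cd98f00b204e9800998ecf8427e, so makeGuid() would return
# "D41D8CD9-8F00-B204-E980-0998ECF8427E" (capitalised and split 8-4-4-4-12).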
def generateGuid( checksum, checksumtype ):
""" Generate a GUID based on the file checksum
"""
if checksum:
if checksumtype == "MD5":
checksumString = checksum
elif checksumtype == "Adler32":
checksumString = str( checksum ).zfill( 32 )
else:
checksumString = ''
if checksumString:
guid = "%s-%s-%s-%s-%s" % ( checksumString[0:8],
checksumString[8:12],
checksumString[12:16],
checksumString[16:20],
checksumString[20:32] )
guid = guid.upper()
return guid
# Failed to use the check sum, generate a new guid
myMd5 = hashlib.md5()
myMd5.update( str( random.getrandbits( 128 ) ) )
md5HexString = myMd5.hexdigest()
guid = "%s-%s-%s-%s-%s" % ( md5HexString[0:8],
md5HexString[8:12],
md5HexString[12:16],
md5HexString[16:20],
md5HexString[20:32] )
guid = guid.upper()
return guid
def checkGuid( guid ):
"""Checks whether a supplied GUID is of the correct format.
The guid is a string of 36 characters [0-9A-F] long split into 5 parts of length 8-4-4-4-12.
.. warning::
As we are using GUID produced by various services and some of them could not follow
convention, this function is passing by a guid which can be made of lower case chars or even just
have 5 parts of proper length with whatever chars.
:param string guid: string to be checked
:return: True (False) if supplied string is (not) a valid GUID.
"""
reGUID = re.compile( "^[0-9A-F]{8}(-[0-9A-F]{4}){3}-[0-9A-F]{12}$" )
if reGUID.match( guid.upper() ):
return True
else:
guid = [ len( x ) for x in guid.split( "-" ) ]
if ( guid == [ 8, 4, 4, 4, 12 ] ):
return True
return False
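# Illustrative examples (not part of the original module):
#   checkGuid( "11223344-AABB-CCDD-EEFF-001122334455" )  -> True   (8-4-4-4-12 hex pattern)
#   checkGuid( "not-a-guid" )                            -> False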
def getSize( fileName ):
"""Get size of a file.
:param string fileName: name of file to be checked
  The os module documents that only OSError can be thrown,
  and OSError is the only exception caught here.
  .. warning::
    On OSError it returns -1.
"""
try:
return os.stat( fileName )[6]
except OSError:
return - 1
def getGlobbedTotalSize( files ):
"""Get total size of a list of files or a single file.
Globs the parameter to allow regular expressions.
:params list files: list or tuple of strings of files
"""
totalSize = 0
if isinstance( files, (list, tuple) ):
for entry in files:
size = getGlobbedTotalSize( entry )
if size == -1:
size = 0
totalSize += size
else:
for path in glob.glob( files ):
if os.path.isdir( path ):
for content in os.listdir( path ):
totalSize += getGlobbedTotalSize( os.path.join( path, content ) )
if os.path.isfile( path ):
size = getSize( path )
if size == -1:
size = 0
totalSize += size
return totalSize
def getGlobbedFiles( files ):
"""Get list of files or a single file.
Globs the parameter to allow regular expressions.
:params list files: list or tuple of strings of files
"""
globbedFiles = []
if isinstance( files, ( list, tuple ) ):
for entry in files:
globbedFiles += getGlobbedFiles( entry )
else:
for path in glob.glob( files ):
if os.path.isdir( path ):
for content in os.listdir( path ):
globbedFiles += getGlobbedFiles( os.path.join( path, content ) )
if os.path.isfile( path ):
globbedFiles.append( path )
return globbedFiles
def getCommonPath( files ):
"""Get the common path for all files in the file list.
:param files: list of strings with paths
:type files: python:list
"""
def properSplit( dirPath ):
"""Splitting of path to drive and path parts for non-Unix file systems.
:param string dirPath: path
"""
nDrive, nPath = os.path.splitdrive( dirPath )
return [ nDrive ] + [ d for d in nPath.split( os.sep ) if d.strip() ]
if not files:
return ""
commonPath = properSplit( files[0] )
for fileName in files:
if os.path.isdir( fileName ):
dirPath = fileName
else:
dirPath = os.path.dirname( fileName )
nPath = properSplit( dirPath )
tPath = []
for i in range( min( len( commonPath ), len( nPath ) ) ):
if commonPath[ i ] != nPath[ i ]:
break
      tPath.append( commonPath[ i ] )
if not tPath:
return ""
commonPath = tPath
return tPath[0] + os.sep + os.path.join( *tPath[1:] )
def getMD5ForFiles( fileList ):
"""Calculate md5 for the content of all the files.
:param fileList: list of paths
:type fileList: python:list
"""
fileList.sort()
hashMD5 = hashlib.md5()
for filePath in fileList:
if os.path.isdir( filePath ):
continue
with open( filePath, "rb" ) as fd:
buf = fd.read( 4096 )
while buf:
hashMD5.update( buf )
buf = fd.read( 4096 )
return hashMD5.hexdigest()
if __name__ == "__main__":
for p in sys.argv[1:]:
print "%s : %s bytes" % ( p, getGlobbedTotalSize( p ) )
|
Andrew-McNab-UK/DIRAC
|
Core/Utilities/File.py
|
Python
|
gpl-3.0
| 6,776
|
[
"DIRAC"
] |
baf02942089adeeac09a6882a3c7e1054a427b82970dc3727c0fba7d03a88ae9
|
#!/usr/bin/env python
import numpy as np
# Eigenvalues a, b, c
e_a = 1.0
e_b = 0.6
e_c = 0.4
eigenvalues = [[e_a, e_b, e_c],
[-e_a, e_b, e_c],
[-e_a, -e_b, e_c],
[-e_a, e_b, -e_c],
[e_a, -e_b, e_c],
[e_a, -e_b, -e_c],
[e_a, e_b, -e_c],
[-e_a, -e_b, -e_c]]
with open('StrainFlavorsEigenvalues.txt', 'w') as f:
for eigenval in eigenvalues:
for ii in range(2):
f.write('{0:g}\t'.format(eigenval[ii]))
f.write('{0:g}\n'.format(eigenval[2]))
angles = [0.0]
while(angles[-1] < 300.0):
angles.append(angles[-1] + 60.0)
with open('StrainFlavorsAngles.txt', 'w') as f:
for a in angles:
f.write('{0}\n'.format(a))
with open('StrainFlavorsStrain.vtk', 'w') as f:
f.write('# vtk DataFile Version 2.0\n')
f.write('Different strain flavors.\n')
f.write('ASCII\n')
f.write('DATASET STRUCTURED_POINTS\n')
f.write('DIMENSIONS ' + str(len(angles)) + ' ' + str(len(eigenvalues)) + ' 1\n')
f.write('ORIGIN 0.0 0.0 0.0\n')
f.write('SPACING 1.0 1.0 1.0\n')
f.write('\nPOINT_DATA ' + str(len(angles) * len(eigenvalues)) + '\n')
f.write('TENSORS strain double\n')
for eigenval in eigenvalues:
for theta in angles:
angle = np.pi/180*theta
eigenvector_a = np.array([np.cos(angle), np.sin(angle), 0.0])
eigenvector_a.shape = (3,1)
eigenvector_b = np.array([np.cos(angle+np.pi/2), np.sin(angle+np.pi/2), 0.0])
eigenvector_b.shape = (3,1)
eigenvector_c = np.array([0.0, 0.0, 1.0])
eigenvector_c.shape = (3,1)
lam = np.concatenate((eigenvector_a, eigenvector_b, eigenvector_c), axis=1)
tensor = np.dot(np.dot(lam,np.identity(3)*eigenval),np.linalg.inv(lam))
for row in tensor:
f.write('{0:.20g} {1:.20g} {2:.20g}\n'.format(row[0], row[1], row[2]))
f.write('\n')
|
thewtex/VTKSignedTensor
|
Testing/Data/Input/GenerateStrainFlavors.py
|
Python
|
apache-2.0
| 1,940
|
[
"VTK"
] |
9513aec63344ae0e56f2c7f810088f4d597dc15d9341d6ebc60c51b515efa2e9
|
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
import calendar
from collections import deque
from datetime import datetime
from google.protobuf.descriptor import FieldDescriptor
from navitiacommon import response_pb2, type_pb2
from itertools import izip
def str_to_time_stamp(str):
"""
convert a string to a posix timestamp
the string must be in the YYYYMMDDTHHMMSS format
    like 20170523T124500
"""
date = datetime.strptime(str, "%Y%m%dT%H%M%S")
return date_to_timestamp(date)
def date_to_timestamp(date):
"""
    convert a datetime object to a posix timestamp (number of seconds since 1970/1/1)
"""
return int(calendar.timegm(date.utctimetuple()))
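# Illustrative example (not in the original source): str_to_time_stamp("20000101T000000")
# returns 946684800, the POSIX timestamp of 2000-01-01T00:00:00 UTC.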
def walk_dict(tree, visitor):
"""
depth first search on a dict.
call the visit(elem) method on the visitor for each node
if the visitor returns True, stop the search
>>> bob = {'tutu': 1,
... 'tata': [1, 2],
... 'toto': {'bob':12, 'bobette': 13, 'nested_bob': {'bob': 3}},
... 'tete': ('tuple1', ['ltuple1', 'ltuple2']),
... 'titi': [{'a':1}, {'b':1}]}
>>> def my_visitor(name, val):
... print "{}={}".format(name, val)
>>> walk_dict(bob, my_visitor)
titi={'b': 1}
b=1
titi={'a': 1}
a=1
tete=ltuple2
tete=ltuple1
tete=tuple1
tutu=1
toto={'bobette': 13, 'bob': 12, 'nested_bob': {'bob': 3}}
nested_bob={'bob': 3}
bob=3
bob=12
bobette=13
tata=2
tata=1
>>> def my_stoper_visitor(name, val):
... print "{}={}".format(name, val)
... if name == 'tete':
... return True
>>> walk_dict(bob, my_stoper_visitor)
titi={'b': 1}
b=1
titi={'a': 1}
a=1
tete=ltuple2
"""
queue = deque()
def add_elt(name, elt, first=False):
if isinstance(elt, (list, tuple)):
for val in elt:
queue.append((name, val))
elif hasattr(elt, 'iteritems'):
for k, v in elt.iteritems():
queue.append((k, v))
        elif first:  # for the first elt, we add it even if it is not a collection
queue.append((name, elt))
add_elt("main", tree, first=True)
while queue:
elem = queue.pop()
#we don't want to visit the list, we'll visit each node separately
if not isinstance(elem[1], (list, tuple)):
if visitor(elem[0], elem[1]) is True:
#we stop the search if the visitor returns True
break
#for list and tuple, the name is the parent's name
add_elt(elem[0], elem[1])
def walk_protobuf(pb_object, visitor):
"""
Walk on a protobuf and call the visitor for each nodes
>>> journeys = response_pb2.Response()
>>> journey_standard = journeys.journeys.add()
>>> journey_standard.type = "none"
>>> journey_standard.duration = 1
>>> journey_standard.nb_transfers = 2
>>> s = journey_standard.sections.add()
>>> s.duration = 3
>>> s = journey_standard.sections.add()
>>> s.duration = 4
>>> journey_rapid = journeys.journeys.add()
>>> journey_rapid.duration = 5
>>> journey_rapid.nb_transfers = 6
>>> s = journey_rapid.sections.add()
>>> s.duration = 7
>>>
>>> from collections import defaultdict
>>> types_counter = defaultdict(int)
>>> def visitor(name, val):
... types_counter[type(val)] +=1
>>>
>>> walk_protobuf(journeys, visitor)
>>> types_counter[response_pb2.Response]
1
>>> types_counter[response_pb2.Journey]
2
>>> types_counter[response_pb2.Section]
3
>>> types_counter[int] # and 7 int in all
7
"""
queue = deque()
def add_elt(name, elt):
try:
fields = elt.ListFields()
except AttributeError:
return
for field, value in fields:
if field.label == FieldDescriptor.LABEL_REPEATED:
for v in value:
queue.append((field.name, v))
else:
queue.append((field.name, value))
# add_elt("main", pb_object)
queue.append(('main', pb_object))
while queue:
elem = queue.pop()
visitor(elem[0], elem[1])
add_elt(elem[0], elem[1])
def realtime_level_to_pbf(level):
if level == 'base_schedule':
return type_pb2.BASE_SCHEDULE
elif level == 'adapted_schedule':
return type_pb2.ADAPTED_SCHEDULE
elif level == 'realtime':
return type_pb2.REALTIME
else:
raise ValueError('Impossible to convert in pbf')
#we can't use reverse(enumerate(list)) without creating a temporary
#list, so we define our own reverse enumerate
def reverse_enumerate(l):
return izip(xrange(len(l)-1, -1, -1), reversed(l))
def pb_del_if(l, pred):
'''
Delete the elements such as pred(e) is true in a protobuf list.
Return the number of elements deleted.
'''
nb = 0
for i, e in reverse_enumerate(l):
if pred(e):
del l[i]
nb += 1
return nb
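# (Deleting in reverse index order via reverse_enumerate keeps the indices of the elements still to be
#  visited valid while items are removed from the protobuf repeated field.)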
|
TeXitoi/navitia
|
source/jormungandr/jormungandr/utils.py
|
Python
|
agpl-3.0
| 6,163
|
[
"VisIt"
] |
e9225d873a164971436af36249beb8a142c71227e03dedbd65aa3c97a7d9f084
|
try:
from ase.optimize.optimize import Optimizer as ase_Optimizer
except ImportError:
# Fall back to old placement
from ase.optimize import Optimizer as ase_Optimizer
from asap3 import parallelpossible as asap_parallel
if asap_parallel:
from asap3.mpi import world
class Optimizer(ase_Optimizer):
def converged(self, forces=None):
"""Did the optimization converge?"""
if forces is None:
forces = self.atoms.get_forces()
mxf2 = (forces**2).sum(axis=1).max()
if asap_parallel:
mxf2 = world.max(mxf2)
return mxf2 < self.fmax**2
|
auag92/n2dm
|
Asap-3.8.4/Python/asap3/optimize/__init__.py
|
Python
|
mit
| 611
|
[
"ASE"
] |
91ba330ab0d900f8f2c3002f9fb3d54958969f8b2f1ce87e8c85f5a03549c600
|
"""
This script parses an ENA metadata file in XML format and prints a subset of information.
Usage: python parse_ENA_sampleInfo_XML.py ERP000909.xml > samples.txt
Input: an XML file exported for a list of ERS accession numbers from ENA using the REST URLs API. For example, one can download an XML file
for sample ERS086023 using http://www.ebi.ac.uk/ena/data/view/ERS086023&display=xml.
Output: a tab-delimited text file containing information retrieved from the XML file.
study_accession, sample_accession, secondary_sample_accession, experiment_accession, run_accession, Isolate_ID, Host, Place_of_isolation, Year_of_isolation
Author of this version: Yu Wan (wanyuac@gmail.com, https://github.com/wanyuac)
Edition history: 6-7, 11 August 2015
Licence: GNU GPL 2.1
"""
import sys
import xml.etree.ElementTree as xmlTree
def get_domains(sample):
study = BioSample = ERS = experiment = run = isolate = strain = host = place = year = "NA" # default value of all fields
for domain in sample:
if domain.tag == "IDENTIFIERS":
BioSample, ERS = sample[0][1].text, sample[0][0].text # <tag>text</tag>
if domain.tag == "SAMPLE_LINKS":
study = sample[4][0][0][1].text # visit nested elements with indices
experiment = sample[4][1][0][1].text
run = sample[4][2][0][1].text
if domain.tag == "SAMPLE_ATTRIBUTES": # This domain may be variable in terms of attributes
for attribute in domain:
if attribute[0].text == "collection_date":
year = attribute[1].text
elif attribute[0].text == "isolate":
isolate = attribute[1].text
elif attribute[0].text == "specific_host":
host = attribute[1].text
elif attribute[0].text == "country":
place = attribute[1].text
elif attribute[0].text == "strain":
strain = attribute[1].text
return [study, BioSample, ERS, experiment, run, isolate, strain, host, place, year]
def main():
file = sys.argv[1]
xml = xmlTree.parse(file).getroot() # parse an XML into a tree of elements
# print the header line
print "\t".join(["study_accession", "sample_accession", "secondary_sample_accession", "experiment_accession", "run_accession", "Isolate_ID", "Strain", "Host", "Place_of_isolation", "Year_of_isolation"])
for sample in xml:
print "\t".join(get_domains(sample))
return
if __name__ == '__main__':
main()
|
wanyuac/BINF_toolkit
|
parse_ENA_sampleInfo_XML.py
|
Python
|
gpl-3.0
| 2,321
|
[
"VisIt"
] |
1f2ce62dd48ceb3de7494a706ec501c700d616135604187040fce7f7ca3342d4
|
# -*- coding: latin-1 -*-
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Hexa: creation of hexahedra
import hexablock
import os
#---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
# ======================================================= make_grid
def make_grid (doc) :
ori = doc.addVertex ( 0, 0, 0)
vz = doc.addVector ( 0, 0, 1)
vx = doc.addVector ( 1 ,0, 0)
dr = 1
da = 360
dl = 1
nr = 1
na = 6
nl = 1
grid = doc.makeCylindrical (ori, vx,vz, dr,da,dl, nr,na,nl, False)
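    # (dr, da, dl) are presumably the radial, angular and axial steps and (nr, na, nl) the number of
    # subdivisions in each direction: here one radial layer, six sectors sweeping 360 degrees
    # (60 degrees each) and one layer along the axis.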
doc .saveVtk ("transfo1.vtk")
return grid
# ======================================================= test_translation
def test_translation () :
doc = hexablock.addDocument ("default")
grid = make_grid (doc)
devant = doc.addVector (10, 0, 0)
grid2 = doc.makeTranslation (grid, devant)
doc .saveVtk ("transfo2.vtk")
return doc
# ======================================================= test_scale
def test_scale () :
doc = hexablock.addDocument ("default")
grid = make_grid (doc)
dest = doc.addVertex (15, 0, 0)
grid2 = doc.makeScale (grid, dest, 0.5)
doc .saveVtk ("transfo3.vtk")
return doc
# ======================================================= test_sym_point
def test_sym_point () :
doc = hexablock.addDocument ("default")
grid = make_grid (doc)
orig = doc.addVertex (5, 0, 0)
grid2 = doc.makeSymmetryPoint (grid, orig)
doc .saveVtk ("transfo4.vtk")
return doc
# ======================================================= test_sym_line
def test_sym_line () :
doc = hexablock.addDocument ("default")
grid = make_grid (doc)
orig = doc.addVertex (5, 0, 0)
dir = doc.addVector (0, 0, 1);
grid2 = doc.makeSymmetryLine (grid, orig, dir)
doc .saveVtk ("transfo5.vtk")
return doc
# ======================================================= test_sym_plan
def test_sym_plan () :
doc = hexablock.addDocument ("default")
grid = make_grid (doc)
orig = doc.addVertex (5, 0, 0)
dir = doc.addVector (1, 0, 0);
grid2 = doc.makeSymmetryPlane (grid, orig, dir)
doc .saveVtk ("transfo6.vtk")
return doc
# ================================================================= Begin
### doc = test_translation ()
doc = test_scale ()
### doc = test_sym_point ()
### doc = test_sym_line ()
### doc = test_sym_plan ()
law = doc.addLaw("Uniform", 4)
for j in range(doc.countPropagation()):
propa = doc.getPropagation(j)
propa.setLaw(law)
mesh_hexas = hexablock.mesh(doc, "maillage:hexas")
|
FedoraScientific/salome-hexablock
|
src/TEST_PY/test_unit/test_transfo.py
|
Python
|
lgpl-2.1
| 3,413
|
[
"VTK"
] |
3a28e4ac16f9e37f2513af49923a1bb9424d2fff72f09a52dbbafff641bca677
|
# coding: utf-8
"""
================
Superflux onsets
================
This notebook demonstrates how to recover the Superflux onset detection algorithm of
`Boeck and Widmer, 2013 <http://dafx13.nuim.ie/papers/09.dafx2013_submission_12.pdf>`_
from librosa.
This algorithm improves onset detection accuracy in the presence of vibrato.
"""
# Code source: Brian McFee
# License: ISC
##################################################
# We'll need numpy and matplotlib for this example
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display
######################################################
# The method works fine for longer signals, but the
# results are harder to visualize.
y, sr = librosa.load(librosa.ex('trumpet', hq=True),
sr=44100)
####################################################
# These parameters are taken directly from the paper
n_fft = 1024
hop_length = int(librosa.time_to_samples(1./200, sr=sr))
lag = 2
n_mels = 138
fmin = 27.5
fmax = 16000.
max_size = 3
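# (Assumed interpretation of the superflux-specific parameters: ``lag`` is the frame offset used when
#  differencing the spectrogram, and ``max_size`` is the width, in mel bands, of the local maximum
#  filter that makes the detector robust to vibrato.)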
########################################################
# The paper uses a log-frequency representation, but for
# simplicity, we'll use a Mel spectrogram instead.
S = librosa.feature.melspectrogram(y, sr=sr, n_fft=n_fft,
hop_length=hop_length,
fmin=fmin,
fmax=fmax,
n_mels=n_mels)
fig, ax = plt.subplots()
librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
y_axis='mel', x_axis='time', sr=sr,
hop_length=hop_length, fmin=fmin, fmax=fmax, ax=ax)
################################################################
# Now we'll compute the onset strength envelope and onset events
# using the librosa defaults.
odf_default = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
onset_default = librosa.onset.onset_detect(y=y, sr=sr, hop_length=hop_length,
units='time')
#########################################
# And similarly with the superflux method
odf_sf = librosa.onset.onset_strength(S=librosa.power_to_db(S, ref=np.max),
sr=sr,
hop_length=hop_length,
lag=lag, max_size=max_size)
onset_sf = librosa.onset.onset_detect(onset_envelope=odf_sf,
sr=sr,
hop_length=hop_length,
units='time')
######################################################################
# If you look carefully, the default onset detector (top sub-plot) has
# several false positives in high-vibrato regions, eg around 0.62s or
# 1.80s.
#
# The superflux method (middle plot) is less susceptible to vibrato, and
# does not detect onset events at those points.
# sphinx_gallery_thumbnail_number = 2
fig, ax = plt.subplots(nrows=3, sharex=True)
frame_time = librosa.frames_to_time(np.arange(len(odf_default)),
sr=sr,
hop_length=hop_length)
librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
y_axis='mel', x_axis='time', sr=sr,
hop_length=hop_length, fmin=fmin, fmax=fmax, ax=ax[2])
ax[2].set(xlim=[0, 5.0])
ax[0].plot(frame_time, odf_default, label='Spectral flux')
ax[0].vlines(onset_default, 0, odf_default.max(), label='Onsets')
ax[0].legend()
ax[0].label_outer()
ax[1].plot(frame_time, odf_sf, color='g', label='Superflux')
ax[1].vlines(onset_sf, 0, odf_sf.max(), label='Onsets')
ax[1].legend()
ax[1].label_outer()
|
bmcfee/librosa
|
docs/examples/plot_superflux.py
|
Python
|
isc
| 3,764
|
[
"Brian"
] |
432c6c768892ff53e0d2cce7f0651f5c833c4e222e8754c4f8ffc09679a8d97a
|
"""
Support code for 0alias scripts.
@since: 0.28
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, SafeException
from zeroinstall import support
_old_template = '''#!/bin/sh
if [ "$*" = "--versions" ]; then
exec 0launch -gd '%s' "$@"
else
exec 0launch %s '%s' "$@"
fi
'''
_template = '''#!/bin/sh
exec 0launch %s'%s' "$@"
'''
class NotAnAliasScript(SafeException):
pass
class ScriptInfo(object):
"""@since: 1.3"""
uri = None
main = None
command = 'run'
# For backwards compatibility
def __iter__(self):
return iter([self.uri, self.main])
def parse_script_header(stream):
"""Parse a 0alias script, if possible.
This does the same as L{parse_script}, except with an existing stream.
The stream position at exit is undefined.
@type stream: file
@rtype: L{ScriptInfo}
@since: 1.12"""
try:
stream.seek(0)
template_header = _template[:_template.index("%s'")]
actual_header = stream.read(len(template_header))
stream.seek(0)
if template_header == actual_header:
# If it's a 0alias script, it should be quite short!
rest = stream.read()
line = rest.split('\n')[1]
else:
old_template_header = \
_old_template[:_old_template.index("-gd '")]
actual_header = stream.read(len(old_template_header))
if old_template_header != actual_header:
return None
rest = stream.read()
line = rest.split('\n')[2]
except UnicodeDecodeError:
return None
info = ScriptInfo()
split = line.rfind("' '")
if split != -1:
# We have a --main or --command
info.uri = line[split + 3:].split("'")[0]
start, value = line[:split].split("'", 1)
option = start.split('--', 1)[1].strip()
value = value.replace("'\\''", "'")
if option == 'main':
info.main = value
elif option == 'command':
info.command = value or None
else:
return None
else:
info.uri = line.split("'", 2)[1]
return info
def parse_script(pathname):
"""Extract the URI and main values from a 0alias script.
@param pathname: the script to be examined
@type pathname: str
@return: information about the alias script
@rtype: L{ScriptInfo}
@raise NotAnAliasScript: if we can't parse the script"""
with open(pathname, 'rt') as stream:
info = parse_script_header(stream)
if info is None:
raise NotAnAliasScript(_("'%s' does not look like a script created by 0alias") % pathname)
return info
def write_script(stream, interface_uri, main = None, command = None):
"""Write a shell script to stream that will launch the given program.
@param stream: the stream to write to
@type stream: file
@param interface_uri: the program to launch
@type interface_uri: str
@param main: the --main argument to pass to 0launch, if any
@type main: str | None
@param command: the --command argument to pass to 0launch, if any
@type command: str | None"""
assert "'" not in interface_uri
assert "\\" not in interface_uri
assert main is None or command is None, "Can't set --main and --command together"
if main is not None:
option = "--main '%s' " % main.replace("'", "'\\''")
elif command is not None:
option = "--command '%s' " % command.replace("'", "'\\''")
else:
option = ""
stream.write(support.unicode(_template) % (option, interface_uri))
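# Illustrative output (hypothetical values): write_script(stream, 'http://example.com/prog.xml', main='bin/prog')
# writes a two-line shell script whose second line is:
#   exec 0launch --main 'bin/prog' 'http://example.com/prog.xml' "$@"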
|
rammstein/0install
|
zeroinstall/alias.py
|
Python
|
lgpl-2.1
| 3,280
|
[
"VisIt"
] |
8aad941964e457376a6bb688aca07abf1ec3e319fa72af908986c77adb6cc3da
|
# encoding: utf-8
# Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from flask import logging
import pybreaker
import requests as requests
from jormungandr import cache, app
from jormungandr.realtime_schedule.realtime_proxy import RealtimeProxy, RealtimeProxyError
from jormungandr.schedule import RealTimePassage
import xml.etree.ElementTree as et
import aniso8601
from datetime import datetime
class Siri(RealtimeProxy):
"""
Class managing calls to siri external service providing real-time next passages
"""
def __init__(self, id, service_url, requestor_ref,
object_id_tag=None, destination_id_tag=None, instance=None, timeout=10, **kwargs):
self.service_url = service_url
self.requestor_ref = requestor_ref # login for siri
self.timeout = timeout #timeout in seconds
self.rt_system_id = id
self.object_id_tag = object_id_tag if object_id_tag else id
self.destination_id_tag = destination_id_tag
self.instance = instance
self.breaker = pybreaker.CircuitBreaker(fail_max=app.config.get('CIRCUIT_BREAKER_MAX_SIRI_FAIL', 5),
reset_timeout=app.config.get('CIRCUIT_BREAKER_SIRI_TIMEOUT_S', 60))
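        # (pybreaker behaviour, for context: after fail_max consecutive failures the breaker opens and
        #  subsequent calls fail fast with CircuitBreakerError until reset_timeout seconds pass and a
        #  half-open trial call is allowed.)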
def __repr__(self):
"""
used as the cache key. we use the rt_system_id to share the cache between servers in production
"""
return self.rt_system_id
def _get_next_passage_for_route_point(self, route_point, count, from_dt, current_dt):
stop = route_point.fetch_stop_id(self.object_id_tag)
request = self._make_request(monitoring_ref=stop, dt=from_dt, count=count)
if not request:
return None
siri_response = self._call_siri(request)
if not siri_response or siri_response.status_code != 200:
raise RealtimeProxyError('invalid response')
logging.getLogger(__name__).debug('siri for {}: {}'.format(stop, siri_response.text))
return self._get_passages(siri_response.content, route_point)
def status(self):
return {
'id': self.rt_system_id,
'timeout': self.timeout,
'circuit_breaker': {
'current_state': self.breaker.current_state,
'fail_counter': self.breaker.fail_counter,
'reset_timeout': self.breaker.reset_timeout
},
}
def _get_passages(self, xml, route_point):
ns = {'siri': 'http://www.siri.org.uk/siri'}
try:
root = et.fromstring(xml)
except et.ParseError as e:
logging.getLogger(__name__).exception("invalid xml")
raise RealtimeProxyError('invalid xml')
stop = route_point.fetch_stop_id(self.object_id_tag)
line = route_point.fetch_line_id(self.object_id_tag)
route = route_point.fetch_route_id(self.object_id_tag)
next_passages = []
for visit in root.findall('.//siri:MonitoredStopVisit', ns):
cur_stop = visit.find('.//siri:StopPointRef', ns).text
if stop != cur_stop:
continue
cur_line = visit.find('.//siri:LineRef', ns).text
if line != cur_line:
continue
cur_route = visit.find('.//siri:DirectionName', ns).text
if route != cur_route:
continue
cur_destination = visit.find('.//siri:DestinationName', ns).text
cur_dt = visit.find('.//siri:ExpectedDepartureTime', ns).text
cur_dt = aniso8601.parse_datetime(cur_dt)
next_passages.append(RealTimePassage(cur_dt, cur_destination))
return next_passages
@cache.memoize(app.config['CACHE_CONFIGURATION'].get('TIMEOUT_SIRI', 60))
def _call_siri(self, request):
encoded_request = request.encode('UTF-8')
headers = {
"Content-Type": "text/xml; charset=UTF-8",
"Content-Length": len(encoded_request)
}
logging.getLogger(__name__).debug('siri RT service, post at {}: {}'.format(self.service_url, request))
try:
return self.breaker.call(requests.post,
url=self.service_url,
headers=headers,
data=encoded_request,
verify=False,
timeout=self.timeout)
except pybreaker.CircuitBreakerError as e:
logging.getLogger(__name__).error('siri RT service dead, using base '
'schedule (error: {}'.format(e))
raise RealtimeProxyError('circuit breaker open')
except requests.Timeout as t:
logging.getLogger(__name__).error('siri RT service timeout, using base '
'schedule (error: {}'.format(t))
raise RealtimeProxyError('timeout')
except Exception as e:
logging.getLogger(__name__).exception('siri RT error, using base schedule')
raise RealtimeProxyError(str(e))
def _make_request(self, dt, count, monitoring_ref):
message_identifier='IDontCare'
request = """<?xml version="1.0" encoding="UTF-8"?>
<x:Envelope xmlns:x="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:wsd="http://wsdl.siri.org.uk" xmlns:siri="http://www.siri.org.uk/siri">
<x:Header/>
<x:Body>
<GetStopMonitoring xmlns="http://wsdl.siri.org.uk" xmlns:siri="http://www.siri.org.uk/siri">
<ServiceRequestInfo xmlns="">
<siri:RequestTimestamp>{dt}</siri:RequestTimestamp>
<siri:RequestorRef>{RequestorRef}</siri:RequestorRef>
<siri:MessageIdentifier>{MessageIdentifier}</siri:MessageIdentifier>
</ServiceRequestInfo>
<Request version="1.3" xmlns="">
<siri:RequestTimestamp>{dt}</siri:RequestTimestamp>
<siri:MessageIdentifier>{MessageIdentifier}</siri:MessageIdentifier>
<siri:MonitoringRef>{MonitoringRef}</siri:MonitoringRef>
<siri:MaximumStopVisits>{count}</siri:MaximumStopVisits>
</Request>
<RequestExtension xmlns=""/>
</GetStopMonitoring>
</x:Body>
</x:Envelope>
""".format(dt=datetime.utcfromtimestamp(dt).isoformat(),
count=count,
RequestorRef=self.requestor_ref,
MessageIdentifier=message_identifier,
MonitoringRef=monitoring_ref)
return request
|
antoine-de/navitia
|
source/jormungandr/jormungandr/realtime_schedule/siri.py
|
Python
|
agpl-3.0
| 7,891
|
[
"VisIt"
] |
f4bed81153e629028e3e8e576fe52126e14c58dc8021bbac82465455c6fae1cc
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import pickle
import numpy as np
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element, Species
from pymatgen.core.sites import PeriodicSite, Site
from pymatgen.electronic_structure.core import Magmom
from pymatgen.util.testing import PymatgenTest
class SiteTest(PymatgenTest):
def setUp(self):
self.ordered_site = Site("Fe", [0.25, 0.35, 0.45])
self.disordered_site = Site({"Fe": 0.5, "Mn": 0.5}, [0.25, 0.35, 0.45])
self.propertied_site = Site("Fe2+", [0.25, 0.35, 0.45], {"magmom": 5.1, "charge": 4.2})
self.propertied_magmomvector_site = Site(
"Fe2+",
[0.25, 0.35, 0.45],
{"magmom": Magmom([2.6, 2.6, 3.5]), "charge": 4.2},
)
self.dummy_site = Site("X", [0, 0, 0])
def test_properties(self):
self.assertRaises(AttributeError, getattr, self.disordered_site, "specie")
self.assertIsInstance(self.ordered_site.specie, Element)
self.assertEqual(self.propertied_site.properties["magmom"], 5.1)
self.assertEqual(self.propertied_site.properties["charge"], 4.2)
def test_to_from_dict(self):
d = self.disordered_site.as_dict()
site = Site.from_dict(d)
self.assertEqual(site, self.disordered_site)
self.assertNotEqual(site, self.ordered_site)
d = self.propertied_site.as_dict()
site = Site.from_dict(d)
self.assertEqual(site.properties["magmom"], 5.1)
self.assertEqual(site.properties["charge"], 4.2)
d = self.propertied_magmomvector_site.as_dict()
site = Site.from_dict(d)
self.assertEqual(site.properties["magmom"], Magmom([2.6, 2.6, 3.5]))
self.assertEqual(site.properties["charge"], 4.2)
d = self.dummy_site.as_dict()
site = Site.from_dict(d)
self.assertEqual(site.species, self.dummy_site.species)
def test_hash(self):
self.assertEqual(self.ordered_site.__hash__(), 26)
self.assertEqual(self.disordered_site.__hash__(), 51)
def test_cmp(self):
self.assertTrue(self.ordered_site > self.disordered_site)
def test_distance(self):
osite = self.ordered_site
self.assertAlmostEqual(np.linalg.norm([0.25, 0.35, 0.45]), osite.distance_from_point([0, 0, 0]))
self.assertAlmostEqual(osite.distance(self.disordered_site), 0)
def test_pickle(self):
o = pickle.dumps(self.propertied_site)
self.assertEqual(pickle.loads(o), self.propertied_site)
def test_setters(self):
self.disordered_site.species = "Cu"
self.assertEqual(self.disordered_site.species, Composition("Cu"))
self.disordered_site.x = 1.25
self.disordered_site.y = 1.35
self.assertEqual(self.disordered_site.coords[0], 1.25)
self.assertEqual(self.disordered_site.coords[1], 1.35)
def set_bad_species():
self.disordered_site.species = {"Cu": 0.5, "Gd": 0.6}
self.assertRaises(ValueError, set_bad_species)
class PeriodicSiteTest(PymatgenTest):
def setUp(self):
self.lattice = Lattice.cubic(10.0)
self.si = Element("Si")
self.site = PeriodicSite("Fe", [0.25, 0.35, 0.45], self.lattice)
self.site2 = PeriodicSite({"Si": 0.5}, [0, 0, 0], self.lattice)
self.assertEqual(
self.site2.species,
Composition({Element("Si"): 0.5}),
"Inconsistent site created!",
)
self.propertied_site = PeriodicSite(
Species("Fe", 2),
[0.25, 0.35, 0.45],
self.lattice,
properties={"magmom": 5.1, "charge": 4.2},
)
self.dummy_site = PeriodicSite("X", [0, 0, 0], self.lattice)
def test_properties(self):
"""
Test the properties for a site
"""
self.assertEqual(self.site.a, 0.25)
self.assertEqual(self.site.b, 0.35)
self.assertEqual(self.site.c, 0.45)
self.assertEqual(self.site.x, 2.5)
self.assertEqual(self.site.y, 3.5)
self.assertEqual(self.site.z, 4.5)
self.assertTrue(self.site.is_ordered)
self.assertFalse(self.site2.is_ordered)
self.assertEqual(self.propertied_site.properties["magmom"], 5.1)
self.assertEqual(self.propertied_site.properties["charge"], 4.2)
def test_distance(self):
other_site = PeriodicSite("Fe", np.array([0, 0, 0]), self.lattice)
self.assertAlmostEqual(self.site.distance(other_site), 6.22494979899, 5)
def test_distance_from_point(self):
self.assertNotAlmostEqual(self.site.distance_from_point([0.1, 0.1, 0.1]), 6.22494979899, 5)
self.assertAlmostEqual(self.site.distance_from_point([0.1, 0.1, 0.1]), 6.0564015718906887, 5)
def test_distance_and_image(self):
other_site = PeriodicSite("Fe", np.array([1, 1, 1]), self.lattice)
(distance, image) = self.site.distance_and_image(other_site)
self.assertAlmostEqual(distance, 6.22494979899, 5)
self.assertTrue(([-1, -1, -1] == image).all())
(distance, image) = self.site.distance_and_image(other_site, [1, 0, 0])
self.assertAlmostEqual(distance, 19.461500456028563, 5)
# Test that old and new distance algo give the same ans for
# "standard lattices"
lattice = Lattice(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
site1 = PeriodicSite("Fe", np.array([0.01, 0.02, 0.03]), lattice)
site2 = PeriodicSite("Fe", np.array([0.99, 0.98, 0.97]), lattice)
self.assertAlmostEqual(
get_distance_and_image_old(site1, site2)[0],
site1.distance_and_image(site2)[0],
)
lattice = Lattice.from_parameters(1, 0.01, 1, 10, 10, 10)
site1 = PeriodicSite("Fe", np.array([0.01, 0.02, 0.03]), lattice)
site2 = PeriodicSite("Fe", np.array([0.99, 0.98, 0.97]), lattice)
self.assertTrue(get_distance_and_image_old(site1, site2)[0] > site1.distance_and_image(site2)[0])
site2 = PeriodicSite("Fe", np.random.rand(3), lattice)
(dist_old, jimage_old) = get_distance_and_image_old(site1, site2)
(dist_new, jimage_new) = site1.distance_and_image(site2)
self.assertTrue(
dist_old - dist_new > -1e-8,
"New distance algo should give smaller answers!",
)
self.assertFalse(
(abs(dist_old - dist_new) < 1e-8) ^ (jimage_old == jimage_new).all(),
"If old dist == new dist, images must be the same!",
)
latt = Lattice.from_parameters(3.0, 3.1, 10.0, 2.96, 2.0, 1.0)
site = PeriodicSite("Fe", [0.1, 0.1, 0.1], latt)
site2 = PeriodicSite("Fe", [0.99, 0.99, 0.99], latt)
(dist, img) = site.distance_and_image(site2)
self.assertAlmostEqual(dist, 0.15495358379511573)
self.assertEqual(list(img), [-11, 6, 0])
def test_is_periodic_image(self):
other = PeriodicSite("Fe", np.array([1.25, 2.35, 4.45]), self.lattice)
self.assertTrue(
self.site.is_periodic_image(other),
"This other site should be a periodic image.",
)
other = PeriodicSite("Fe", np.array([1.25, 2.35, 4.46]), self.lattice)
self.assertFalse(
self.site.is_periodic_image(other),
"This other site should not be a periodic image.",
)
other = PeriodicSite("Fe", np.array([1.25, 2.35, 4.45]), Lattice.rhombohedral(2, 60))
self.assertFalse(
self.site.is_periodic_image(other),
"Different lattices should not be periodic images.",
)
def test_equality(self):
other_site = PeriodicSite("Fe", np.array([1, 1, 1]), self.lattice)
self.assertTrue(self.site.__eq__(self.site))
self.assertFalse(other_site.__eq__(self.site))
self.assertFalse(self.site.__ne__(self.site))
self.assertTrue(other_site.__ne__(self.site))
def test_as_from_dict(self):
d = self.site2.as_dict()
site = PeriodicSite.from_dict(d)
self.assertEqual(site, self.site2)
self.assertNotEqual(site, self.site)
d = self.propertied_site.as_dict()
site3 = PeriodicSite({"Si": 0.5, "Fe": 0.5}, [0, 0, 0], self.lattice)
d = site3.as_dict()
site = PeriodicSite.from_dict(d)
self.assertEqual(site.species, site3.species)
d = self.dummy_site.as_dict()
site = PeriodicSite.from_dict(d)
self.assertEqual(site.species, self.dummy_site.species)
def test_to_unit_cell(self):
site = PeriodicSite("Fe", np.array([1.25, 2.35, 4.46]), self.lattice)
site.to_unit_cell(in_place=True)
val = [0.25, 0.35, 0.46]
self.assertArrayAlmostEqual(site.frac_coords, val)
def test_setters(self):
site = self.propertied_site
site.species = "Cu"
self.assertEqual(site.species, Composition("Cu"))
site.x = 1.25
site.y = 1.35
self.assertEqual(site.coords[0], 1.25)
self.assertEqual(site.coords[1], 1.35)
self.assertEqual(site.a, 0.125)
self.assertEqual(site.b, 0.135)
site.lattice = Lattice.cubic(100)
self.assertEqual(site.x, 12.5)
def set_bad_species():
site.species = {"Cu": 0.5, "Gd": 0.6}
self.assertRaises(ValueError, set_bad_species)
site.frac_coords = [0, 0, 0.1]
self.assertArrayAlmostEqual(site.coords, [0, 0, 10])
site.coords = [1.5, 3.25, 5]
self.assertArrayAlmostEqual(site.frac_coords, [0.015, 0.0325, 0.05])
def test_repr(self):
self.assertEqual(
self.propertied_site.__repr__(),
"PeriodicSite: Fe2+ (2.5000, 3.5000, 4.5000) [0.2500, 0.3500, 0.4500]",
)
def get_distance_and_image_old(site1, site2, jimage=None):
"""
Gets distance between two sites assuming periodic boundary conditions.
If the index jimage of two sites atom j is not specified it selects the
j image nearest to the i atom and returns the distance and jimage
indices in terms of lattice vector translations. If the index jimage of
atom j is specified it returns the distance between the i atom and the
specified jimage atom, the given jimage is also returned.
Args:
other:
other site to get distance from.
jimage:
specific periodic image in terms of lattice translations,
e.g., [1,0,0] implies to take periodic image that is one
a-lattice vector away. If jimage is None, the image that is
nearest to the site is found.
Returns:
(distance, jimage):
distance and periodic lattice translations of the other site
for which the distance applies.
.. note::
Assumes the primitive cell vectors are sufficiently not skewed such
that the condition \\|a\\|cos(ab_angle) < \\|b\\| for all possible cell
vector pairs. ** this method does not check this condition **
"""
if jimage is None:
# Old algorithm
jimage = -np.array(np.around(site2.frac_coords - site1.frac_coords), int)
mapped_vec = site1.lattice.get_cartesian_coords(jimage + site2.frac_coords - site1.frac_coords)
dist = np.linalg.norm(mapped_vec)
return dist, jimage
if __name__ == "__main__":
import unittest
unittest.main()
|
materialsproject/pymatgen
|
pymatgen/core/tests/test_sites.py
|
Python
|
mit
| 11,467
|
[
"pymatgen"
] |
cab47e17b33ace8159ca3659c8277e16310612fa435192dcb3ae19ce773d6535
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import numpy as np
import tempfile
import os
import mdtraj as md
from mdtraj.formats import HDF5TrajectoryFile
from mdtraj.testing import eq
import pytest
try:
from simtk import unit as units
HAVE_UNITS = True
except ImportError:
HAVE_UNITS = False
needs_units = pytest.mark.skipif(not HAVE_UNITS, reason='requires simtk.units')
fd, temp = tempfile.mkstemp(suffix='.h5')
def teardown_module(module):
"""remove the temporary file created by tests in this file
this gets automatically called by nose"""
os.close(fd)
os.unlink(temp)
def test_write_coordinates():
coordinates = np.random.randn(4, 10,3)
with HDF5TrajectoryFile(temp, 'w') as f:
f.write(coordinates)
with HDF5TrajectoryFile(temp) as f:
assert eq(f.root.coordinates[:], coordinates)
assert eq(str(f.root.coordinates.attrs['units']), 'nanometers')
def test_write_coordinates_reshape():
coordinates = np.random.randn(10,3)
with HDF5TrajectoryFile(temp, 'w') as f:
f.write(coordinates)
with HDF5TrajectoryFile(temp) as f:
assert eq(f.root.coordinates[:], coordinates.reshape(1,10,3))
assert eq(str(f.root.coordinates.attrs['units']), 'nanometers')
def test_write_multiple():
coordinates = np.random.randn(4, 10,3)
with HDF5TrajectoryFile(temp, 'w') as f:
f.write(coordinates)
f.write(coordinates)
with HDF5TrajectoryFile(temp) as f:
assert eq(f.root.coordinates[:], np.vstack((coordinates, coordinates)))
def test_write_inconsistent():
coordinates = np.random.randn(4, 10,3)
with HDF5TrajectoryFile(temp, 'w') as f:
f.write(coordinates)
# since the first frames we saved didn't contain velocities, we
# can't save more velocities
with pytest.raises(ValueError):
f.write(coordinates, velocities=coordinates)
def test_write_inconsistent_2():
coordinates = np.random.randn(4, 10,3)
with HDF5TrajectoryFile(temp, 'w') as f:
f.write(coordinates, velocities=coordinates)
# we're saving a deficient set of data, since before we wrote
# more information.
with pytest.raises(ValueError):
f.write(coordinates)
@needs_units
def test_write_units():
# simtk.units are automatically converted into MD units for storage on disk
coordinates = units.Quantity(np.random.randn(4, 10,3), units.angstroms)
velocities = units.Quantity(np.random.randn(4, 10,3), units.angstroms/units.year)
with HDF5TrajectoryFile(temp, 'w') as f:
f.write(coordinates, velocities=velocities)
with HDF5TrajectoryFile(temp) as f:
assert eq(f.root.coordinates[:], coordinates.value_in_unit(units.nanometers))
assert eq(str(f.root.coordinates.attrs['units']), 'nanometers')
assert eq(f.root.velocities[:], velocities.value_in_unit(units.nanometers/units.picosecond))
assert eq(str(f.root.velocities.attrs['units']), 'nanometers/picosecond')
def test_write_units2():
from mdtraj.utils import unit
coordinates = unit.quantity.Quantity(np.random.randn(4, 10,3),
unit.unit_definitions.angstroms)
velocities = unit.quantity.Quantity(np.random.randn(4, 10,3),
unit.unit_definitions.angstroms/unit.unit_definitions.year)
with HDF5TrajectoryFile(temp, 'w') as f:
f.write(coordinates, velocities=velocities)
with HDF5TrajectoryFile(temp) as f:
assert eq(f.root.coordinates[:], coordinates.value_in_unit(unit.unit_definitions.nanometers))
assert eq(str(f.root.coordinates.attrs['units']), 'nanometers')
assert eq(f.root.velocities[:], velocities.value_in_unit(unit.unit_definitions.nanometers/unit.unit_definitions.picosecond))
assert eq(str(f.root.velocities.attrs['units']), 'nanometers/picosecond')
@needs_units
def test_write_units_mismatch():
    velocities = units.Quantity(np.random.randn(4, 10,3), units.angstroms/units.picosecond)
with HDF5TrajectoryFile(temp, 'w') as f:
# if you try to write coordinates that are unitted and not
# in the correct units, we find that
with pytest.raises(TypeError):
            f.write(coordinates=velocities)
def test_topology(get_fn):
top = md.load_pdb(get_fn('native.pdb')).topology
with HDF5TrajectoryFile(temp, 'w') as f:
f.topology = top
with HDF5TrajectoryFile(temp) as f:
assert f.topology == top
def test_constraints():
c = np.array([(1,2,3.5)], dtype=np.dtype([('atom1', np.int32), ('atom2', np.int32), ('distance', np.float32)]))
with HDF5TrajectoryFile(temp, 'w') as f:
f.constraints = c
with HDF5TrajectoryFile(temp) as f:
assert eq(f.constraints, c)
def test_constraints2():
c = np.array([(1,2,3.5)], dtype=np.dtype([('atom1', np.int32), ('atom2', np.int32), ('distance', np.float32)]))
with HDF5TrajectoryFile(temp, 'w') as f:
f.constraints = c
f.constraints = c
with HDF5TrajectoryFile(temp) as f:
assert eq(f.constraints, c)
def test_read_0():
coordinates = np.random.randn(4, 10,3)
with HDF5TrajectoryFile(temp, 'w') as f:
f.write(coordinates, alchemicalLambda=np.array([1,2,3,4]))
with HDF5TrajectoryFile(temp) as f:
got = f.read()
assert eq(got.coordinates, coordinates)
assert eq(got.velocities, None)
assert eq(got.alchemicalLambda, np.array([1,2,3,4]))
@needs_units
def test_read_1():
coordinates = units.Quantity(np.random.randn(4, 10,3), units.angstroms)
velocities = units.Quantity(np.random.randn(4, 10,3), units.angstroms/units.years)
with HDF5TrajectoryFile(temp, 'w') as f:
f.write(coordinates, velocities=velocities)
with HDF5TrajectoryFile(temp) as f:
got = f.read()
assert eq(got.coordinates, coordinates.value_in_unit(units.nanometers))
assert eq(got.velocities, velocities.value_in_unit(units.nanometers/units.picoseconds))
def test_read_slice_0():
coordinates = np.random.randn(4, 10,3)
with HDF5TrajectoryFile(temp, 'w') as f:
f.write(coordinates, alchemicalLambda=np.array([1,2,3,4]))
with HDF5TrajectoryFile(temp) as f:
got = f.read(n_frames=2)
assert eq(got.coordinates, coordinates[:2])
assert eq(got.velocities, None)
assert eq(got.alchemicalLambda, np.array([1,2]))
def test_read_slice_1():
coordinates = np.random.randn(4, 10,3)
with HDF5TrajectoryFile(temp, 'w') as f:
f.write(coordinates)
with HDF5TrajectoryFile(temp) as f:
got = f.read(n_frames=2)
assert eq(got.coordinates, coordinates[:2])
assert eq(got.velocities, None)
got = f.read(n_frames=2)
assert eq(got.coordinates, coordinates[2:])
assert eq(got.velocities, None)
def test_read_slice_2():
coordinates = np.random.randn(4, 10,3)
with HDF5TrajectoryFile(temp, 'w') as f:
f.write(coordinates, alchemicalLambda=np.arange(4))
with HDF5TrajectoryFile(temp) as f:
got = f.read(atom_indices=np.array([0,1]))
assert eq(got.coordinates, coordinates[:, [0,1], :])
assert eq(got.alchemicalLambda, np.arange(4))
def test_read_slice_3():
coordinates = np.random.randn(4, 10,3)
with HDF5TrajectoryFile(temp, 'w') as f:
f.write(coordinates, alchemicalLambda=np.arange(4))
with HDF5TrajectoryFile(temp) as f:
got = f.read(stride=2, atom_indices=np.array([0,1]))
assert eq(got.coordinates, coordinates[::2, [0,1], :])
assert eq(got.alchemicalLambda, np.arange(4)[::2])
def test_do_overwrite():
with open(temp, 'w') as f:
f.write('a')
with HDF5TrajectoryFile(temp, 'w', force_overwrite=True) as f:
f.write(np.random.randn(10,5,3))
def test_vsite_elements(get_fn):
# Test case for issue #265
pdb_filename = get_fn('GG-tip4pew.pdb')
trj = md.load(pdb_filename)
trj.save_hdf5(temp)
trj2 = md.load(temp, top=pdb_filename)
def test_dont_overwrite():
with open(temp, 'w') as f:
f.write('a')
with pytest.raises(IOError):
with HDF5TrajectoryFile(temp, 'w', force_overwrite=False) as f:
f.write(np.random.randn(10,5,3))
def test_attributes():
constraints = np.zeros(10, dtype=[('atom1', np.int32), ('atom2', np.int32), ('distance', np.float32)])
with HDF5TrajectoryFile(temp, 'w') as f:
f.title = 'mytitle'
f.reference = 'myreference'
f.forcefield = 'amber99'
f.randomState = 'sdf'
f.application = 'openmm'
f.constraints = constraints
with HDF5TrajectoryFile(temp) as g:
eq(g.title, 'mytitle')
eq(g.reference, 'myreference')
eq(g.forcefield, 'amber99')
eq(g.randomState, 'sdf')
eq(g.application, 'openmm')
eq(g.constraints, constraints)
def test_append():
x1 = np.random.randn(10,5,3)
x2 = np.random.randn(8,5,3)
with HDF5TrajectoryFile(temp, 'w') as f:
f.write(x1)
with HDF5TrajectoryFile(temp, 'a') as f:
f.write(x2)
with HDF5TrajectoryFile(temp) as f:
eq(f.root.coordinates[:], np.concatenate((x1,x2)))
|
leeping/mdtraj
|
tests/test_hdf5.py
|
Python
|
lgpl-2.1
| 10,192
|
[
"MDTraj",
"OpenMM"
] |
e3f3e0d4c69ceef4d42f903af06391992e0c73498af5b4b2a446af5a4493acf2
|
import nas
import sys
import lib_nas
#import Lib_NAS_Rnas_IA_
# _bias_2_4 v2.0
# Neural structure, 2 bits -- 4 adaptation patterns
# @author: Jose Luis Prado Seoane ---IT Security Researcher & Developer
# Research: Theory of Neural Systems (A.I.) --TSNB 808565016
# Neural networks and their convergence towards models and implementations oriented to
# cybersecurity
#
# Description: **********************************************************************************
# Adaptive 2-bit neural structure with (1)-->(n+1) variable patterns on the
# inputs. The sequencer is external to the structure itself and builds a netlist so that it can
# be integrated into an adjoining cluster within a data area for pattern detection**************
#
#          ------RNA------<>
#          {       |
#          {       |+++++(structure_0:bias:semaphores:BAMS:correlators...)
#          {       |             |
#          {       |             |++++++(neuron_0 + neuron_(n+1):NETLIST)
#          {       |
#          {       |+++++(structure_(n+1)..
#          {
#          {    : CLUSTER/S
# @info:
# @structure: Class -- 2 bits : 4 patterns detected
# @info: The neural structure is not debugged in code; external debugger via @tokens
class _bias_2_4(): # @structure _bias_01_net
#CONSTRUCTOR:
def __init__(self,_bias_):
self.debug=1
if self.debug==1:
print"";print ("Ejecutando " + self.__class__.__name__) + "...\n"
self.__neuron=nas.neuron(_bias_)
self.__bias=int(_bias_)
self.__base_pattern=[]
for base_pattern in range(4):
self.__base_pattern.append(0)
    # @info: Pattern inputs --cor
def _memory_pattern_(self,_dendrite_0_,_dendrite_1_,_pattern_):
pattern=[]
self._set_(pattern,_pattern_)
lib_nas._layer_1_1_(self.__neuron,self.__bias,lib_nas._memory_(self.__bias))
self.__neuron.ibn_(_dendrite_0_,_dendrite_1_)
_rtn = lib_nas._pattern_nas(_dendrite_0_,_dendrite_1_,self.__neuron._out_,self.__base_pattern)
lib_nas._layer_1_1_(self.__neuron,self.__bias,lib_nas._memory_(_rtn))
self.__neuron.ibn_(_dendrite_0_,_dendrite_1_)
return self.__neuron._out_
    # @info: adaptation map
def _mapper_(self,_pattern_):
valores=[0,1];mapa=[];pattern=[]
print "";print "<--mapper-->";print "2 inputs:4 pattern"
self._set_(pattern,_pattern_)
for x in valores:
for y in valores:
mapa.append("%s%s" %(x,y))
for x in range(len(mapa)):
print "(%s)---%s:->%s" %(x,mapa[x],self.__base_pattern[x])
print ""
    # @info: Reset dumps and _memory_pattern
def _set_(self,pattern,_pattern_):
for token in range(len(self.__base_pattern)):self.__base_pattern[token]=0
for token in _pattern_:pattern.append(int(token))
for token in pattern: self.__base_pattern[token]=1
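    # Worked example (added note, not in the original source): calling
    # self._set_(pattern, "03") clears the base pattern, appends the ints 0 and 3
    # to `pattern`, and flags those positions, leaving self.__base_pattern as
    # [1, 0, 0, 1] -- i.e. the 2-bit inputs 00 and 11 are the learned patterns
    # out of the four possible combinations listed by _mapper_().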
#--------------------------------------------------------------------------------app()
# @info: app_local
def _init_net_3_(_bias_):
n0 = _bias_2_4(_bias_)
while True:
try:
            _pattern=raw_input ('DESIRED PATTERN: ')
            if _pattern==":q":
                break
if len(_pattern)==4:
learning=lib_nas._correlator(_pattern)
n0._mapper_(learning)
for x in range(4):
den_0_io= int(raw_input ('den_0: '))
den_1_io= int(raw_input ('den_1: '))
neuron_out=n0._memory_pattern_(den_0_io,den_1_io,learning)
print "------------------> %s" %neuron_out
else:
print "Patron de aprendizaje Bias : 4 tokens ej.- 0101"
except:
print "Parametro de aprendizaje incorrecto Bias"
_init_net_3_(0)
# @info: app_local
def _init_net_2_(_bias_):
n0 = _bias_2_4(_bias_)
while True:
try:
            _pattern=raw_input ('DESIRED PATTERN: ')
            if _pattern==":q":
                break
if len(_pattern)<=4:
n0._mapper_(_pattern)
for x in range(4):
den_0_io= int(raw_input ('den_0: '))
den_1_io= int(raw_input ('den_1: '))
neuron_out=n0._memory_pattern_(den_0_io,den_1_io,_pattern)
print "------------------> %s" %neuron_out
else:
print "Maximo patron de aprendizaje Bias : 4 tokens"
except:
print "Parametro de aprendizaje incorrecto Bias:0123"
#_init_net_2_(0)
|
ciudadano72/TSNB_redes_neuronales
|
bias24.py
|
Python
|
gpl-3.0
| 4,681
|
[
"NEURON"
] |
5df06c625fe497c7fcb7f6de3bdc6a8f4eaf4f5dc7e2aca44d0c225d5bd16d07
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import re
import textwrap
from twisted.internet import defer
from twisted.trial import unittest
from buildbot import config
from buildbot.process import properties
from buildbot.process import remotetransfer
from buildbot.process.results import EXCEPTION
from buildbot.process.results import FAILURE
from buildbot.process.results import SKIPPED
from buildbot.process.results import SUCCESS
from buildbot.process.results import WARNINGS
from buildbot.steps import shell
from buildbot.test.fake.remotecommand import Expect
from buildbot.test.fake.remotecommand import ExpectRemoteRef
from buildbot.test.fake.remotecommand import ExpectShell
from buildbot.test.util import config as configmixin
from buildbot.test.util import steps
from buildbot.test.util.misc import TestReactorMixin
class TestShellCommandExecution(steps.BuildStepMixin,
configmixin.ConfigErrorsMixin,
TestReactorMixin,
unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def assertLegacySummary(self, step, running, done=None):
done = done or running
self.assertEqual(
(step._getLegacySummary(done=False),
step._getLegacySummary(done=True)),
(running, done))
def test_doStepIf_False(self):
self.setupStep(
shell.ShellCommand(command="echo hello", doStepIf=False))
self.expectOutcome(result=SKIPPED,
state_string="'echo hello' (skipped)")
return self.runStep()
def test_constructor_args_kwargs(self):
# this is an ugly way to define an API, but for now check that
# the RemoteCommand arguments are properly passed on
step = shell.ShellCommand(workdir='build', command="echo hello",
want_stdout=0, logEnviron=False)
self.assertEqual(step.remote_kwargs, dict(want_stdout=0,
logEnviron=False,
workdir='build',
usePTY=None))
def test_constructor_args_validity(self):
# this checks that an exception is raised for invalid arguments
with self.assertRaisesConfigError(
"Invalid argument(s) passed to RemoteShellCommand: "):
shell.ShellCommand(workdir='build', command="echo Hello World",
wrongArg1=1, wrongArg2='two')
def test_getLegacySummary_from_empty_command(self):
# this is more of a regression test for a potential failure, really
step = shell.ShellCommand(workdir='build', command=' ')
step.rendered = True
self.assertLegacySummary(step, None)
def test_getLegacySummary_from_short_command(self):
step = shell.ShellCommand(workdir='build', command="true")
step.rendered = True
self.assertLegacySummary(step, "'true'")
def test_getLegacySummary_from_short_command_list(self):
step = shell.ShellCommand(workdir='build', command=["true"])
step.rendered = True
self.assertLegacySummary(step, "'true'")
def test_getLegacySummary_from_med_command(self):
step = shell.ShellCommand(command="echo hello")
step.rendered = True
self.assertLegacySummary(step, "'echo hello'")
def test_getLegacySummary_from_med_command_list(self):
step = shell.ShellCommand(command=["echo", "hello"])
step.rendered = True
self.assertLegacySummary(step, "'echo hello'")
def test_getLegacySummary_from_long_command(self):
step = shell.ShellCommand(command="this is a long command")
step.rendered = True
self.assertLegacySummary(step, "'this is ...'")
def test_getLegacySummary_from_long_command_list(self):
step = shell.ShellCommand(command="this is a long command".split())
step.rendered = True
self.assertLegacySummary(step, "'this is ...'")
def test_getLegacySummary_from_nested_command_list(self):
step = shell.ShellCommand(command=["this", ["is", "a"], "nested"])
step.rendered = True
self.assertLegacySummary(step, "'this is ...'")
def test_getLegacySummary_from_nested_command_tuples(self):
step = shell.ShellCommand(command=["this", ("is", "a"), "nested"])
step.rendered = True
self.assertLegacySummary(step, "'this is ...'")
def test_getLegacySummary_from_nested_command_list_empty(self):
step = shell.ShellCommand(command=["this", [], ["is", "a"], "nested"])
step.rendered = True
self.assertLegacySummary(step, "'this is ...'")
def test_getLegacySummary_from_nested_command_list_deep(self):
step = shell.ShellCommand(command=[["this", [[["is", ["a"]]]]]])
step.rendered = True
self.assertLegacySummary(step, "'this is ...'")
def test_getLegacySummary_custom(self):
step = shell.ShellCommand(command="echo hello",
description=["echoing"],
descriptionDone=["echoed"])
step.rendered = True
self.assertLegacySummary(step, None) # handled by parent class
def test_getLegacySummary_with_suffix(self):
step = shell.ShellCommand(
command="echo hello", descriptionSuffix="suffix")
step.rendered = True
self.assertLegacySummary(step, "'echo hello' suffix")
def test_getLegacySummary_unrendered_WithProperties(self):
step = shell.ShellCommand(command=properties.WithProperties(''))
step.rendered = True
self.assertLegacySummary(step, None)
def test_getLegacySummary_unrendered_custom_new_style_class_renderable(self):
step = shell.ShellCommand(command=object())
step.rendered = True
self.assertLegacySummary(step, None)
def test_getLegacySummary_unrendered_custom_old_style_class_renderable(self):
class C:
pass
step = shell.ShellCommand(command=C())
step.rendered = True
self.assertLegacySummary(step, None)
def test_getLegacySummary_unrendered_WithProperties_list(self):
step = shell.ShellCommand(
command=['x', properties.WithProperties(''), 'y'])
step.rendered = True
self.assertLegacySummary(step, "'x y'")
def test_run_simple(self):
self.setupStep(
shell.ShellCommand(workdir='build', command="echo hello"))
self.expectCommands(
ExpectShell(workdir='build', command='echo hello')
+ 0
)
self.expectOutcome(result=SUCCESS, state_string="'echo hello'")
return self.runStep()
def test_run_list(self):
self.setupStep(
shell.ShellCommand(workdir='build',
command=['trial', '-b', '-B', 'buildbot.test']))
self.expectCommands(
ExpectShell(workdir='build',
command=['trial', '-b', '-B', 'buildbot.test'])
+ 0
)
self.expectOutcome(result=SUCCESS,
state_string="'trial -b ...'")
return self.runStep()
def test_run_nested_description(self):
self.setupStep(
shell.ShellCommand(workdir='build',
command=properties.FlattenList(
['trial', ['-b', '-B'], 'buildbot.test']),
descriptionDone=properties.FlattenList(
['test', ['done']]),
descriptionSuffix=properties.FlattenList(['suff', ['ix']])))
self.expectCommands(
ExpectShell(workdir='build',
command=['trial', '-b', '-B', 'buildbot.test'])
+ 0
)
self.expectOutcome(result=SUCCESS,
state_string='test done suff ix')
return self.runStep()
def test_run_nested_command(self):
self.setupStep(
shell.ShellCommand(workdir='build',
command=['trial', ['-b', '-B'], 'buildbot.test']))
self.expectCommands(
ExpectShell(workdir='build',
command=['trial', '-b', '-B', 'buildbot.test'])
+ 0
)
self.expectOutcome(result=SUCCESS,
state_string="'trial -b ...'")
return self.runStep()
def test_run_nested_deeply_command(self):
self.setupStep(
shell.ShellCommand(workdir='build',
command=[['trial', ['-b', ['-B']]], 'buildbot.test']))
self.expectCommands(
ExpectShell(workdir='build',
command=['trial', '-b', '-B', 'buildbot.test'])
+ 0
)
self.expectOutcome(result=SUCCESS,
state_string="'trial -b ...'")
return self.runStep()
def test_run_nested_empty_command(self):
self.setupStep(
shell.ShellCommand(workdir='build',
command=['trial', [], '-b', [], 'buildbot.test']))
self.expectCommands(
ExpectShell(workdir='build',
command=['trial', '-b', 'buildbot.test'])
+ 0
)
self.expectOutcome(result=SUCCESS,
state_string="'trial -b ...'")
return self.runStep()
def test_run_env(self):
self.setupStep(
shell.ShellCommand(workdir='build', command="echo hello"),
worker_env=dict(DEF='HERE'))
self.expectCommands(
ExpectShell(workdir='build', command='echo hello',
env=dict(DEF='HERE'))
+ 0
)
self.expectOutcome(result=SUCCESS)
return self.runStep()
def test_run_env_override(self):
self.setupStep(
shell.ShellCommand(workdir='build', env={'ABC': '123'},
command="echo hello"),
worker_env=dict(ABC='XXX', DEF='HERE'))
self.expectCommands(
ExpectShell(workdir='build', command='echo hello',
env=dict(ABC='123', DEF='HERE'))
+ 0
)
self.expectOutcome(result=SUCCESS)
return self.runStep()
def test_run_usePTY(self):
self.setupStep(
shell.ShellCommand(workdir='build', command="echo hello",
usePTY=False))
self.expectCommands(
ExpectShell(workdir='build', command='echo hello',
usePTY=False)
+ 0
)
self.expectOutcome(result=SUCCESS)
return self.runStep()
def test_run_usePTY_old_worker(self):
self.setupStep(
shell.ShellCommand(workdir='build', command="echo hello",
usePTY=True),
worker_version=dict(shell='1.1'))
self.expectCommands(
ExpectShell(workdir='build', command='echo hello')
+ 0
)
self.expectOutcome(result=SUCCESS)
return self.runStep()
def test_run_decodeRC(self, rc=1, results=WARNINGS, extra_text=" (warnings)"):
self.setupStep(
shell.ShellCommand(workdir='build', command="echo hello",
decodeRC={1: WARNINGS}))
self.expectCommands(
ExpectShell(workdir='build', command='echo hello')
+ rc
)
self.expectOutcome(
result=results, state_string="'echo hello'" + extra_text)
return self.runStep()
def test_run_decodeRC_defaults(self):
return self.test_run_decodeRC(2, FAILURE, extra_text=" (failure)")
def test_run_decodeRC_defaults_0_is_failure(self):
return self.test_run_decodeRC(0, FAILURE, extra_text=" (failure)")
def test_missing_command_error(self):
# this checks that an exception is raised for invalid arguments
with self.assertRaisesConfigError(
"ShellCommand's `command' argument is not specified"):
shell.ShellCommand()
class TreeSize(steps.BuildStepMixin, TestReactorMixin, unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_run_success(self):
self.setupStep(shell.TreeSize())
self.expectCommands(
ExpectShell(workdir='wkdir',
command=['du', '-s', '-k', '.'])
+ ExpectShell.log('stdio', stdout='9292 .\n')
+ 0
)
self.expectOutcome(result=SUCCESS,
state_string="treesize 9292 KiB")
self.expectProperty('tree-size-KiB', 9292)
return self.runStep()
def test_run_misparsed(self):
self.setupStep(shell.TreeSize())
self.expectCommands(
ExpectShell(workdir='wkdir',
command=['du', '-s', '-k', '.'])
            + ExpectShell.log('stdio', stdout='abcdef\n')
+ 0
)
self.expectOutcome(result=WARNINGS,
state_string="treesize unknown (warnings)")
return self.runStep()
def test_run_failed(self):
self.setupStep(shell.TreeSize())
self.expectCommands(
ExpectShell(workdir='wkdir',
command=['du', '-s', '-k', '.'])
+ ExpectShell.log('stdio', stderr='abcdef\n')
+ 1
)
self.expectOutcome(result=FAILURE,
state_string="treesize unknown (failure)")
return self.runStep()
class SetPropertyFromCommand(steps.BuildStepMixin, TestReactorMixin,
unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_constructor_conflict(self):
with self.assertRaises(config.ConfigErrors):
shell.SetPropertyFromCommand(property='foo', extract_fn=lambda: None)
def test_run_property(self):
self.setupStep(
shell.SetPropertyFromCommand(property="res", command="cmd"))
self.expectCommands(
ExpectShell(workdir='wkdir',
command="cmd")
+ ExpectShell.log('stdio', stdout='\n\nabcdef\n')
+ 0
)
self.expectOutcome(result=SUCCESS,
state_string="property 'res' set")
self.expectProperty("res", "abcdef") # note: stripped
self.expectLogfile('property changes', r"res: " + repr('abcdef'))
return self.runStep()
def test_renderable_workdir(self):
self.setupStep(
shell.SetPropertyFromCommand(property="res", command="cmd",
workdir=properties.Interpolate('wkdir')))
self.expectCommands(
ExpectShell(workdir='wkdir',
command="cmd")
+ ExpectShell.log('stdio', stdout='\n\nabcdef\n')
+ 0
)
self.expectOutcome(result=SUCCESS,
state_string="property 'res' set")
self.expectProperty("res", "abcdef") # note: stripped
self.expectLogfile('property changes', r"res: " + repr('abcdef'))
return self.runStep()
def test_run_property_no_strip(self):
self.setupStep(shell.SetPropertyFromCommand(property="res", command="cmd",
strip=False))
self.expectCommands(
ExpectShell(workdir='wkdir',
command="cmd")
+ ExpectShell.log('stdio', stdout='\n\nabcdef\n')
+ 0
)
self.expectOutcome(result=SUCCESS,
state_string="property 'res' set")
self.expectProperty("res", "\n\nabcdef\n")
self.expectLogfile('property changes', r"res: " + repr('\n\nabcdef\n'))
return self.runStep()
def test_run_failure(self):
self.setupStep(
shell.SetPropertyFromCommand(property="res", command="blarg"))
self.expectCommands(
ExpectShell(workdir='wkdir',
command="blarg")
+ ExpectShell.log('stdio', stderr='cannot blarg: File not found')
+ 1
)
self.expectOutcome(result=FAILURE,
state_string="'blarg' (failure)")
self.expectNoProperty("res")
return self.runStep()
def test_run_extract_fn(self):
def extract_fn(rc, stdout, stderr):
self.assertEqual(
(rc, stdout, stderr), (0, 'startend\n', 'STARTEND\n'))
return dict(a=1, b=2)
self.setupStep(
shell.SetPropertyFromCommand(extract_fn=extract_fn, command="cmd"))
self.expectCommands(
ExpectShell(workdir='wkdir',
command="cmd")
+ ExpectShell.log('stdio', stdout='start', stderr='START')
+ ExpectShell.log('stdio', stdout='end')
+ ExpectShell.log('stdio', stderr='END')
+ 0
)
self.expectOutcome(result=SUCCESS,
state_string="2 properties set")
self.expectLogfile('property changes', 'a: 1\nb: 2')
self.expectProperty("a", 1)
self.expectProperty("b", 2)
return self.runStep()
def test_run_extract_fn_cmdfail(self):
def extract_fn(rc, stdout, stderr):
self.assertEqual((rc, stdout, stderr), (3, '', ''))
return dict(a=1, b=2)
self.setupStep(
shell.SetPropertyFromCommand(extract_fn=extract_fn, command="cmd"))
self.expectCommands(
ExpectShell(workdir='wkdir',
command="cmd")
+ 3
)
# note that extract_fn *is* called anyway
self.expectOutcome(result=FAILURE,
state_string="2 properties set (failure)")
self.expectLogfile('property changes', 'a: 1\nb: 2')
return self.runStep()
def test_run_extract_fn_cmdfail_empty(self):
def extract_fn(rc, stdout, stderr):
self.assertEqual((rc, stdout, stderr), (3, '', ''))
return dict()
self.setupStep(
shell.SetPropertyFromCommand(extract_fn=extract_fn, command="cmd"))
self.expectCommands(
ExpectShell(workdir='wkdir',
command="cmd")
+ 3
)
# note that extract_fn *is* called anyway, but returns no properties
self.expectOutcome(result=FAILURE,
state_string="'cmd' (failure)")
return self.runStep()
@defer.inlineCallbacks
def test_run_extract_fn_exception(self):
def extract_fn(rc, stdout, stderr):
raise RuntimeError("oh noes")
self.setupStep(
shell.SetPropertyFromCommand(extract_fn=extract_fn, command="cmd"))
self.expectCommands(
ExpectShell(workdir='wkdir',
command="cmd")
+ 0
)
# note that extract_fn *is* called anyway, but returns no properties
self.expectOutcome(result=EXCEPTION,
state_string="'cmd' (exception)")
yield self.runStep()
self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)
def test_error_both_set(self):
"""
If both ``extract_fn`` and ``property`` are defined,
``SetPropertyFromCommand`` reports a config error.
"""
with self.assertRaises(config.ConfigErrors):
shell.SetPropertyFromCommand(command=["echo", "value"],
property="propname",
extract_fn=lambda x: {"propname": "hello"})
def test_error_none_set(self):
"""
If neither ``extract_fn`` and ``property`` are defined,
``SetPropertyFromCommand`` reports a config error.
"""
with self.assertRaises(config.ConfigErrors):
shell.SetPropertyFromCommand(command=["echo", "value"])
class PerlModuleTest(steps.BuildStepMixin, TestReactorMixin, unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_new_version_success(self):
self.setupStep(shell.PerlModuleTest(command="cmd"))
self.expectCommands(
ExpectShell(workdir='wkdir',
command="cmd")
+ ExpectShell.log('stdio', stdout=textwrap.dedent("""\
This junk ignored
Test Summary Report
Result: PASS
Tests: 10 Failed: 0
Tests: 10 Failed: 0
Files=93, Tests=20"""))
+ 0
)
self.expectOutcome(result=SUCCESS, state_string='20 tests 20 passed')
return self.runStep()
def test_new_version_warnings(self):
self.setupStep(shell.PerlModuleTest(command="cmd",
warningPattern='^OHNOES'))
self.expectCommands(
ExpectShell(workdir='wkdir',
command="cmd")
+ ExpectShell.log('stdio', stdout=textwrap.dedent("""\
This junk ignored
Test Summary Report
-------------------
foo.pl (Wstat: 0 Tests: 10 Failed: 0)
Failed test: 0
OHNOES 1
OHNOES 2
Files=93, Tests=20, 0 wallclock secs ...
Result: PASS"""))
+ 0
)
self.expectOutcome(
result=WARNINGS,
state_string='20 tests 20 passed 2 warnings (warnings)')
return self.runStep()
def test_new_version_failed(self):
self.setupStep(shell.PerlModuleTest(command="cmd"))
self.expectCommands(
ExpectShell(workdir='wkdir',
command="cmd")
+ ExpectShell.log('stdio', stdout=textwrap.dedent("""\
foo.pl .. 1/4"""))
+ ExpectShell.log('stdio', stderr=textwrap.dedent("""\
# Failed test 2 in foo.pl at line 6
# foo.pl line 6 is: ok(0);"""))
+ ExpectShell.log('stdio', stdout=textwrap.dedent("""\
foo.pl .. Failed 1/4 subtests
Test Summary Report
-------------------
foo.pl (Wstat: 0 Tests: 4 Failed: 1)
Failed test: 0
Files=1, Tests=4, 0 wallclock secs ( 0.06 usr 0.01 sys + 0.03 cusr
0.01 csys = 0.11 CPU)
Result: FAIL"""))
+ ExpectShell.log('stdio', stderr=textwrap.dedent("""\
Failed 1/1 test programs. 1/4 subtests failed."""))
+ 1
)
self.expectOutcome(result=FAILURE,
state_string='4 tests 3 passed 1 failed (failure)')
return self.runStep()
def test_old_version_success(self):
self.setupStep(shell.PerlModuleTest(command="cmd"))
self.expectCommands(
ExpectShell(workdir='wkdir',
command="cmd")
+ ExpectShell.log('stdio', stdout=textwrap.dedent("""\
This junk ignored
All tests successful
Files=10, Tests=20, 100 wall blah blah"""))
+ 0
)
self.expectOutcome(result=SUCCESS,
state_string='20 tests 20 passed')
return self.runStep()
def test_old_version_failed(self):
self.setupStep(shell.PerlModuleTest(command="cmd"))
self.expectCommands(
ExpectShell(workdir='wkdir',
command="cmd")
+ ExpectShell.log('stdio', stdout=textwrap.dedent("""\
This junk ignored
Failed 1/1 test programs, 3/20 subtests failed."""))
+ 1
)
self.expectOutcome(result=FAILURE,
state_string='20 tests 17 passed 3 failed (failure)')
return self.runStep()
class SetPropertyDeprecation(unittest.TestCase):
"""
Tests for L{shell.SetProperty}
"""
def test_deprecated(self):
"""
Accessing L{shell.SetProperty} reports a deprecation error.
"""
shell.SetProperty
warnings = self.flushWarnings([self.test_deprecated])
self.assertEqual(len(warnings), 1)
self.assertIdentical(warnings[0]['category'], DeprecationWarning)
self.assertEqual(warnings[0]['message'],
"buildbot.steps.shell.SetProperty was deprecated in Buildbot 0.8.8: "
"It has been renamed to SetPropertyFromCommand"
)
class Configure(unittest.TestCase):
def test_class_attrs(self):
# nothing too exciting here, but at least make sure the class is
# present
step = shell.Configure()
self.assertEqual(step.command, ['./configure'])
class WarningCountingShellCommand(steps.BuildStepMixin,
configmixin.ConfigErrorsMixin,
TestReactorMixin,
unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_no_warnings(self):
self.setupStep(shell.WarningCountingShellCommand(workdir='w',
command=['make']))
self.expectCommands(
ExpectShell(workdir='w',
command=["make"])
+ ExpectShell.log('stdio', stdout='blarg success!')
+ 0
)
self.expectOutcome(result=SUCCESS)
self.expectProperty("warnings-count", 0)
return self.runStep()
def test_default_pattern(self):
self.setupStep(shell.WarningCountingShellCommand(command=['make']))
self.expectCommands(
ExpectShell(workdir='wkdir',
command=["make"])
+ ExpectShell.log('stdio',
stdout='normal: foo\nwarning: blarg!\n'
'also normal\nWARNING: blarg!\n')
+ 0
)
self.expectOutcome(result=WARNINGS)
self.expectProperty("warnings-count", 2)
self.expectLogfile("warnings (2)",
"warning: blarg!\nWARNING: blarg!\n")
return self.runStep()
def test_custom_pattern(self):
self.setupStep(shell.WarningCountingShellCommand(command=['make'],
warningPattern=r"scary:.*"))
self.expectCommands(
ExpectShell(workdir='wkdir',
command=["make"])
+ ExpectShell.log('stdio',
stdout='scary: foo\nwarning: bar\nscary: bar')
+ 0
)
self.expectOutcome(result=WARNINGS)
self.expectProperty("warnings-count", 2)
self.expectLogfile("warnings (2)", "scary: foo\nscary: bar\n")
return self.runStep()
def test_maxWarnCount(self):
self.setupStep(shell.WarningCountingShellCommand(command=['make'],
maxWarnCount=9))
self.expectCommands(
ExpectShell(workdir='wkdir',
command=["make"])
+ ExpectShell.log('stdio', stdout='warning: noo!\n' * 10)
+ 0
)
self.expectOutcome(result=FAILURE)
self.expectProperty("warnings-count", 10)
return self.runStep()
def test_fail_with_warnings(self):
self.setupStep(shell.WarningCountingShellCommand(command=['make']))
self.expectCommands(
ExpectShell(workdir='wkdir',
command=["make"])
+ ExpectShell.log('stdio', stdout='warning: I might fail')
+ 3
)
self.expectOutcome(result=FAILURE)
self.expectProperty("warnings-count", 1)
self.expectLogfile("warnings (1)", "warning: I might fail\n")
return self.runStep()
def test_warn_with_decoderc(self):
self.setupStep(shell.WarningCountingShellCommand(command=['make'], decodeRC={3: WARNINGS}))
self.expectCommands(
ExpectShell(workdir='wkdir',
command=["make"],
)
+ ExpectShell.log('stdio', stdout='I might fail with rc')
+ 3
)
self.expectOutcome(result=WARNINGS)
self.expectProperty("warnings-count", 0)
return self.runStep()
def do_test_suppressions(self, step, supps_file='', stdout='',
exp_warning_count=0, exp_warning_log='',
exp_exception=False, props=None):
self.setupStep(step)
if props is not None:
for key in props:
self.build.setProperty(key, props[key], "")
# Invoke the expected callbacks for the suppression file upload. Note
# that this assumes all of the remote_* are synchronous, but can be
# easily adapted to suit if that changes (using inlineCallbacks)
def upload_behavior(command):
writer = command.args['writer']
writer.remote_write(supps_file)
writer.remote_close()
command.rc = 0
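        # A hedged sketch (not part of the original test) of how upload_behavior
        # might look if the remote_* methods ever became asynchronous, as the
        # comment above suggests -- assuming they would then return Deferreds:
        #
        #     @defer.inlineCallbacks
        #     def upload_behavior(command):
        #         writer = command.args['writer']
        #         yield writer.remote_write(supps_file)
        #         yield writer.remote_close()
        #         command.rc = 0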
if supps_file is not None:
self.expectCommands(
# step will first get the remote suppressions file
Expect('uploadFile', dict(blocksize=32768, maxsize=None,
workersrc='supps', workdir='wkdir',
writer=ExpectRemoteRef(remotetransfer.StringFileWriter)))
+ Expect.behavior(upload_behavior),
# and then run the command
ExpectShell(workdir='wkdir',
command=["make"])
+ ExpectShell.log('stdio', stdout=stdout)
+ 0
)
else:
self.expectCommands(
ExpectShell(workdir='wkdir',
command=["make"])
+ ExpectShell.log('stdio', stdout=stdout)
+ 0
)
if exp_exception:
self.expectOutcome(result=EXCEPTION,
state_string="'make' (exception)")
else:
if exp_warning_count != 0:
self.expectOutcome(result=WARNINGS,
state_string="'make' (warnings)")
self.expectLogfile("warnings (%d)" % exp_warning_count,
exp_warning_log)
else:
self.expectOutcome(result=SUCCESS,
state_string="'make'")
self.expectProperty("warnings-count", exp_warning_count)
return self.runStep()
def test_suppressions(self):
step = shell.WarningCountingShellCommand(command=['make'],
suppressionFile='supps')
supps_file = textwrap.dedent("""\
# example suppressions file
amar.c : .*unused variable.*
holding.c : .*invalid access to non-static.*
""").strip()
stdout = textwrap.dedent("""\
/bin/sh ../libtool --tag=CC --silent --mode=link gcc blah
/bin/sh ../libtool --tag=CC --silent --mode=link gcc blah
amar.c: In function 'write_record':
amar.c:164: warning: unused variable 'x'
amar.c:164: warning: this should show up
/bin/sh ../libtool --tag=CC --silent --mode=link gcc blah
/bin/sh ../libtool --tag=CC --silent --mode=link gcc blah
holding.c: In function 'holding_thing':
holding.c:984: warning: invalid access to non-static 'y'
""")
exp_warning_log = textwrap.dedent("""\
amar.c:164: warning: this should show up
""")
return self.do_test_suppressions(step, supps_file, stdout, 1,
exp_warning_log)
def test_suppressions_directories(self):
def warningExtractor(step, line, match):
return line.split(':', 2)
step = shell.WarningCountingShellCommand(command=['make'],
suppressionFile='supps',
warningExtractor=warningExtractor)
supps_file = textwrap.dedent("""\
# these should be suppressed:
amar-src/amar.c : XXX
.*/server-src/.* : AAA
# these should not, as the dirs do not match:
amar.c : YYY
server-src.* : BBB
""").strip()
# note that this uses the unicode smart-quotes that gcc loves so much
stdout = textwrap.dedent("""\
make: Entering directory \u2019amar-src\u2019
amar.c:164: warning: XXX
amar.c:165: warning: YYY
make: Leaving directory 'amar-src'
make: Entering directory "subdir"
make: Entering directory 'server-src'
make: Entering directory `one-more-dir`
holding.c:999: warning: BBB
holding.c:1000: warning: AAA
""")
exp_warning_log = textwrap.dedent("""\
amar.c:165: warning: YYY
holding.c:999: warning: BBB
""")
return self.do_test_suppressions(step, supps_file, stdout, 2,
exp_warning_log)
def test_suppressions_directories_custom(self):
def warningExtractor(step, line, match):
return line.split(':', 2)
step = shell.WarningCountingShellCommand(command=['make'],
suppressionFile='supps',
warningExtractor=warningExtractor,
directoryEnterPattern="^IN: (.*)",
directoryLeavePattern="^OUT:")
supps_file = "dir1/dir2/abc.c : .*"
stdout = textwrap.dedent("""\
IN: dir1
IN: decoy
OUT: decoy
IN: dir2
abc.c:123: warning: hello
""")
return self.do_test_suppressions(step, supps_file, stdout, 0, '')
def test_suppressions_linenos(self):
def warningExtractor(step, line, match):
return line.split(':', 2)
step = shell.WarningCountingShellCommand(command=['make'],
suppressionFile='supps',
warningExtractor=warningExtractor)
supps_file = "abc.c:.*:100-199\ndef.c:.*:22"
stdout = textwrap.dedent("""\
abc.c:99: warning: seen 1
abc.c:150: warning: unseen
def.c:22: warning: unseen
abc.c:200: warning: seen 2
""")
exp_warning_log = textwrap.dedent("""\
abc.c:99: warning: seen 1
abc.c:200: warning: seen 2
""")
return self.do_test_suppressions(step, supps_file, stdout, 2,
exp_warning_log)
@defer.inlineCallbacks
def test_suppressions_warningExtractor_exc(self):
def warningExtractor(step, line, match):
raise RuntimeError("oh noes")
step = shell.WarningCountingShellCommand(command=['make'],
suppressionFile='supps',
warningExtractor=warningExtractor)
# need at least one supp to trigger warningExtractor
supps_file = 'x:y'
stdout = "abc.c:99: warning: seen 1"
yield self.do_test_suppressions(step, supps_file, stdout,
exp_exception=True)
self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)
def test_suppressions_addSuppression(self):
# call addSuppression "manually" from a subclass
class MyWCSC(shell.WarningCountingShellCommand):
def start(self):
self.addSuppression([('.*', '.*unseen.*', None, None)])
return super().start()
def warningExtractor(step, line, match):
return line.split(':', 2)
step = MyWCSC(command=['make'], suppressionFile='supps',
warningExtractor=warningExtractor)
stdout = textwrap.dedent("""\
abc.c:99: warning: seen 1
abc.c:150: warning: unseen
abc.c:200: warning: seen 2
""")
exp_warning_log = textwrap.dedent("""\
abc.c:99: warning: seen 1
abc.c:200: warning: seen 2
""")
return self.do_test_suppressions(step, '', stdout, 2,
exp_warning_log)
def test_suppressions_suppressionsParameter(self):
def warningExtractor(step, line, match):
return line.split(':', 2)
supps = (
("abc.c", ".*", 100, 199),
("def.c", ".*", 22, 22),
)
step = shell.WarningCountingShellCommand(command=['make'],
suppressionList=supps,
warningExtractor=warningExtractor)
stdout = textwrap.dedent("""\
abc.c:99: warning: seen 1
abc.c:150: warning: unseen
def.c:22: warning: unseen
abc.c:200: warning: seen 2
""")
exp_warning_log = textwrap.dedent("""\
abc.c:99: warning: seen 1
abc.c:200: warning: seen 2
""")
return self.do_test_suppressions(step, None, stdout, 2,
exp_warning_log)
def test_suppressions_suppressionsRenderableParameter(self):
def warningExtractor(step, line, match):
return line.split(':', 2)
supps = (
("abc.c", ".*", 100, 199),
("def.c", ".*", 22, 22),
)
step = shell.WarningCountingShellCommand(
command=['make'],
suppressionList=properties.Property("suppressionsList"),
warningExtractor=warningExtractor)
stdout = textwrap.dedent("""\
abc.c:99: warning: seen 1
abc.c:150: warning: unseen
def.c:22: warning: unseen
abc.c:200: warning: seen 2
""")
exp_warning_log = textwrap.dedent("""\
abc.c:99: warning: seen 1
abc.c:200: warning: seen 2
""")
return self.do_test_suppressions(step, None, stdout, 2,
exp_warning_log, props={"suppressionsList": supps})
def test_warnExtractFromRegexpGroups(self):
step = shell.WarningCountingShellCommand(command=['make'])
we = shell.WarningCountingShellCommand.warnExtractFromRegexpGroups
line, pat, exp_file, exp_lineNo, exp_text = \
('foo:123:text', '(.*):(.*):(.*)', 'foo', 123, 'text')
self.assertEqual(we(step, line, re.match(pat, line)),
(exp_file, exp_lineNo, exp_text))
def test_missing_command_error(self):
# this checks that an exception is raised for invalid arguments
with self.assertRaisesConfigError(
"WarningCountingShellCommand's `command' argument is not "
"specified"):
shell.WarningCountingShellCommand()
class Compile(steps.BuildStepMixin, TestReactorMixin, unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_class_args(self):
# since this step is just a pre-configured WarningCountingShellCommand,
        # there's not much to test!
step = self.setupStep(shell.Compile())
self.assertEqual(step.name, "compile")
self.assertTrue(step.haltOnFailure)
self.assertTrue(step.flunkOnFailure)
self.assertEqual(step.description, ["compiling"])
self.assertEqual(step.descriptionDone, ["compile"])
self.assertEqual(step.command, ["make", "all"])
class Test(steps.BuildStepMixin, configmixin.ConfigErrorsMixin,
TestReactorMixin,
unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
self.setUpBuildStep()
def tearDown(self):
self.tearDownBuildStep()
def test_setTestResults(self):
step = self.setupStep(shell.Test())
step.setTestResults(total=10, failed=3, passed=5, warnings=3)
self.assertEqual(step.statistics, {
'tests-total': 10,
'tests-failed': 3,
'tests-passed': 5,
'tests-warnings': 3,
})
# ensure that they're additive
step.setTestResults(total=1, failed=2, passed=3, warnings=4)
self.assertEqual(step.statistics, {
'tests-total': 11,
'tests-failed': 5,
'tests-passed': 8,
'tests-warnings': 7,
})
def test_describe_not_done(self):
step = self.setupStep(shell.Test())
step.rendered = True
self.assertEqual(step.describe(), None)
def test_describe_done(self):
step = self.setupStep(shell.Test())
step.rendered = True
step.statistics['tests-total'] = 93
step.statistics['tests-failed'] = 10
step.statistics['tests-passed'] = 20
step.statistics['tests-warnings'] = 30
self.assertEqual(step.describe(done=True),
['93 tests', '20 passed', '30 warnings', '10 failed'])
def test_describe_done_no_total(self):
step = self.setupStep(shell.Test())
step.rendered = True
step.statistics['tests-total'] = 0
step.statistics['tests-failed'] = 10
step.statistics['tests-passed'] = 20
step.statistics['tests-warnings'] = 30
# describe calculates 60 = 10+20+30
self.assertEqual(step.describe(done=True),
['60 tests', '20 passed', '30 warnings', '10 failed'])
|
anish/buildbot
|
master/buildbot/test/unit/test_steps_shell.py
|
Python
|
gpl-2.0
| 43,758
|
[
"exciting"
] |
c96c339b186350e793ecf3480fbc7aee6a0ab1e650f26a99ed6a25db23f08c31
|
#!/usr/bin/env python
"""
Python implementation of common model fitting operations to
analyse protein folding data. Simply automates some fitting
and value calculation. Will be extended to include phi-value
analysis and other common calculations.
Allows for quick model evaluation and plotting.
Also tried to make this somewhat abstract and modular to
enable more interesting calculations, such as Ising models.
Requirements (recommended python 2.7+):
- numpy
- scipy
- matplotlib
Lowe, A.R. 2015
"""
import os
import csv
import inspect
from collections import OrderedDict
import numpy as np
from scipy import optimize
from scipy.stats import t as t_distrb
# pyfolding imports
from . import utils
from . import constants
from .plotting import *
__author__ = "Alan R. Lowe"
__email__ = "a.lowe@ucl.ac.uk"
__version__ = constants.VERSION
# by default turn off autoscrolling if it exists
utils.disable_autoscroll()
# set up a global temperature object
temperature = utils.__Temperature()
"""
===========================================================
FILE I/O OPERATIONS
===========================================================
"""
def read_kinetic_data(directory=None, filename=None):
""" Read in kinetic data in the form of an .csv worksheet. It
should be arranged such that each file is a different protein,
and columns represent the following:
[den] k1 k2 ...
This function then returns a chevron object with the data
"""
reader = utils.DataImporter(datatype='Chevron')
return reader.load(os.path.join(directory,filename))
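# Illustrative usage of the reader above (the directory and file name are
# made-up placeholders, not files shipped with pyfolding):
#
#     chevron = read_kinetic_data(directory="data", filename="WT_chevron.csv")
#     print(chevron.phases)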
def read_equilibrium_data(directory=None, filename=None):
""" Read in an equilbrium denaturation curve from a .csv
worksheet. It should be arranged such that each file is a
different protein, and columns represent the following:
[den] unfolding
This function then returns an equilbrium curve object.
"""
reader = utils.DataImporter(datatype='EquilibriumDenaturationCurve')
return reader.load(os.path.join(directory,filename))
def read_generic_data(directory=None, filename=None):
""" Read in a generic dataset from a .csv
worksheet. It should be arranged such that each file is a
different protein, and columns represent the following:
x y_0 y_1 ....
This function then returns a generic data object.
"""
reader = utils.DataImporter()
return reader.load(os.path.join(directory,filename))
"""
===========================================================
SETTING CALCULATION TEMPERATURE
===========================================================
"""
def set_temperature(value=constants.TEMPERATURE_CELSIUS):
""" Set the temperature.
Args:
temperature: set the temperature in celsius
Returns:
None
Usage:
>> pyfolding.set_temperature( 10.2 )
"""
temperature.temperature = value
print("Set temperature to {0:2.2f}\u00B0C".format(value))
print("(NOTE: Careful, this sets the temperature for all subsequent calculations)")
"""
===========================================================
BASE CLASSES
===========================================================
"""
class DataTemplate(object):
""" DataTemplate
    Base class for chevrons, equilibrium denaturation curves and generic data.
    Takes care of common functions such as fitting of models.
    Data is stored internally as a dictionary and an associated list of labels, for example:
    labels = ['denaturant', 'k1', 'k2']
    data = {'x': {'k1': [], 'k2': []}, 'y': {'k1': [], 'k2': []}}
Subclassed objects may use these data in different ways, for example as
Chevron plots or Equilibrium denaturation curves.
Usage:
>> data['k1']
returns a tuple of x['k1'] and y['k1']
Properties:
datasets - return a list of datasets in the model
fit_func - return/set the fit function for the dataset
fit_func_args - return the fit function arguments
fit_params - return the final parameters following the fit
results - a FitResult object following fitting
Members:
Notes:
"""
def __init__(self):
# store the raw data in a dictionary
self.labels = []
self.data = {}
# store associated fit functions
self.__fit_func = None
self.__fit = None
self.__fit_residuals = None
self.components = None
def initialise(self):
raise NotImplementedError
def __getitem__(self, dataset):
""" Return an XY pair from the dataset, based on the label """
if not isinstance(dataset, str):
            raise TypeError('Dataset must be specified as a string')
if dataset not in self.datasets:
raise ValueError('Dataset {0:s} not found'.format(dataset))
return ( np.array(self.data['x'][dataset], dtype='float'),
np.array(self.data['y'][dataset], dtype='float') )
@property
def datasets(self): return self.labels[1:]
@property
def fit_func(self): return self.__fit_func.name
@fit_func.setter
def fit_func(self, fit_func=None):
if hasattr(fit_func, "__call__"):
self.__fit_func = fit_func()
else:
raise AttributeError("Fit function must be callable")
@property
def fit_func_args(self):
if self.__fit_func:
return self.__fit_func.fit_func_args
@property
def fit_params(self):
return [p.value for p in self.__fit.fit_params]
@property
def results(self):
return self.__fit
@results.setter
def results(self, result):
if not isinstance(result, FitResult):
raise TypeError("Results must be of type FitResult")
print("Warning: overwriting fit result for {0:s}".format(self))
self.__fit = result
def fit(self, p0=None, const=None):
""" Fit the data to the defined model. Use p0 to introduce the estimated
start values.
"""
if self.__fit_func:
# reset components
self.components = None
# set the default fitting parameters
if not p0: p0 = self.__fit_func.default_params
# set up the fit
f = GlobalFit()
f.fit_funcs = [self.__fit_func]
if const: f.constants = [const]
f.shared = [] # no shared parameters by default
f.x = [self.x]
f.y = [self.y]
f.ID = [self.ID]
out, covar = f.fit( p0=p0 )
self.__fit = f.results[0]
if hasattr(self.__fit_func, "components"):
self.components = self.__fit_func.components(constants.XSIM, *out.tolist())
else:
raise AttributeError("Fit function must be defined first.")
self.__fit.display()
@property
def fitted_x(self):
raise DeprecationWarning("This feature will be deprecated soon.")
@property
def fitted(self):
raise DeprecationWarning("This feature will be deprecated soon.")
def print_fit_params(self):
raise DeprecationWarning("This feature will be deprecated soon.")
if isinstance(self.fit_params, np.ndarray):
print(self.fit_params)
def plot(self, **kwargs):
""" Plot a simple figure of the data, this is context dependent
title='', marker='wo', display_fit=True
"""
# make this cleaner by calling an independent function. User can also
# call these functions
if isinstance(self, Chevron):
plot_chevron(self, **kwargs)
elif isinstance(self, EquilibriumDenaturationCurve):
plot_equilibrium(self, **kwargs)
else:
plot_generic(self, **kwargs)
def save_fit(self, filename):
""" Export the fit. """
exporter = utils.FitExporter()
exporter.export(filename, self.results)
class Protein(object):
""" Protein wrapper object.
This class wraps different types of data and acts as a container object for
    a single protein. It can contain equilibrium, kinetic and other types of
    data. The object can be passed to higher-order functions, such as 'phi', that
    use multiple datasets for calculations.
    Properties:
        deltaG - the equilibrium deltaG value from equilibrium data
        kf_H20 - the observed folding rate in water
Notes:
None
"""
def __init__(self, ID=None):
self.ID = ID
self.chevron = None
self.equilibrium = None
self.other = None
@property
def deltaG(self): return self.equilibrium.deltaG
@property
def kf_H20(self): return self.chevron.results.y_fit[0]
class GenericData(DataTemplate):
""" A generic data model.
"""
def __init__(self, ID=None):
DataTemplate.__init__(self)
self.ID = ID
@property
def x(self): return self[self.datasets[0]][0]
@property
def y(self): return self[self.datasets[0]][1]
@property
def y_raw(self): return self.y
def initialise(self):
pass
class Chevron(DataTemplate):
""" Chevron plot for protein folding kinetics.
Args:
Methods:
Notes:
"""
def __init__(self, ID=None):
DataTemplate.__init__(self)
self.ID = ID
self.__midpoint = None
@property
def denaturant_label(self): return self.labels[0]
@property
def phases(self): return self.datasets
@property
def rates(self): return {k:self[k][1] for k in self.phases}
@property
def denaturant(self): return {k:self[k][0] for k in self.phases}
@property
def x(self): return np.array(self.denaturant[self.phases[0]])
@property
def y(self): return np.array(np.log(self.rates[self.phases[0]]))
@property
def y_raw(self): return np.array(self.rates[self.phases[0]])
@property
def midpoint(self):
""" Return a calculated midpoint for the chevron. Unless
we have set one using equilibrium data.
"""
if not self.__midpoint and self.denaturant:
return self.denaturant['k1'][ np.argmin(self.rates['k1']) ]
else:
return self.__midpoint
@midpoint.setter
def midpoint(self, midpoint=0.0):
        if isinstance(midpoint, float) and 0. < midpoint < 10.:
            self.__midpoint = midpoint
        else:
            raise Exception("Midpoint must be a float and 0<x<10")
def unfolding_limb(self, phase=None):
""" Return only the unfolding limb data
"""
if not phase:
phase = self.phases[0]
elif phase not in self.phases:
return None
denaturant, rates = [], []
for d,r in zip(self.denaturant[phase], self.rate(phase)):
if d > self.midpoint:
denaturant.append(d)
rates.append(r)
return denaturant, rates
def refolding_limb(self, phase=None):
""" Return only the refolding limb data
"""
if not phase:
phase = self.phases[0]
elif phase not in self.phases:
return None
denaturant, rates = [], []
for d,r in zip(self.denaturant[phase], self.rate(phase)):
if d <= self.midpoint:
denaturant.append(d)
rates.append(r)
return denaturant, rates
def chevron(self, phase=None):
""" Return the entire phase of a chevron
"""
if not phase:
phase = self.phases[0]
elif phase not in self.phases:
return None
return self.denaturant[phase], self.rate(phase)
def rate(self, phase=None):
return np.log(self.rates[phase])
class EquilibriumDenaturationCurve(DataTemplate):
""" Equilibrium Denaturation curve
Args:
Methods:
Notes:
"""
def __init__(self, ID=None):
DataTemplate.__init__(self)
self.ID = ID
@property
def denaturant_label(self): return self.labels[0]
@property
def curves(self): return self.datasets
@property
def signal(self): return {k:self[k][1] for k in self.curves}
@property
def denaturant(self): return {k:self[k][0] for k in self.curves}
@property
def x(self): return np.array(self.denaturant[self.curves[0]])
@property
def y(self): return np.array(self.signal[self.curves[0]])
@property
def y_raw(self): return self.y
@property
def normalised(self):
""" TODO(arl): Return a normalised equilbrium curve.
"""
raise NotImplementedError
@property
def m_value(self):
if isinstance(self.fit_params, list):
return self.fit_params[ self.fit_func_args.index('m') ]
return None
@property
def midpoint(self):
if isinstance(self.fit_params, list):
return self.fit_params[ self.fit_func_args.index('d50') ]
else:
return None
@property
def two_state(self):
""" Return whether this is a two state model or not """
return 'd50' in self.fit_func_args
def point(self, fraction_folded=0.5):
""" Return the denaturant concentration for a particular
fraction folded. Assumes a two-state transition since I
had to derive this equation by hand.
"""
if self.m_value and self.midpoint:
if fraction_folded<0. or fraction_folded>1.:
raise ValueError("Fraction folded must be in the range 0.<x<1.")
return (np.log((1.-fraction_folded)/fraction_folded) / self.m_value) + self.midpoint
else:
return None
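    # Added note on the expression above: assuming the usual two-state form for
    # the fraction folded, f = 1 / (1 + exp(m*([D] - d50))) (with any RT factor
    # absorbed into m), rearranging gives (1 - f)/f = exp(m*([D] - d50)) and
    # hence [D] = ln((1 - f)/f)/m + d50, which is the value returned by point().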
@property
def deltaG(self):
""" Return the deltaG value based on the fit of the data """
if self.m_value and self.midpoint:
return self.m_value * self.midpoint
else:
return None
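    # Added note: this follows from the linear free-energy relationship assumed
    # by two-state fits, deltaG([D]) = deltaG_H2O - m*[D]. At the midpoint
    # [D] = d50 the free energy is zero, so deltaG_H2O = m * d50, which is what
    # the property above returns.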
"""
===========================================================
MODEL FITTING FUNCTIONS
===========================================================
"""
def FIT_ERROR(x):
""" Return a generic fit error """
if isinstance(x, np.ndarray):
return np.ones(x.shape)*constants.FITTING_PENALTY
else:
return None
class FitParameter(object):
""" Object to store parameter error information """
def __init__(self, name, value, param_type='free'):
self.name = name
self.value = value
self.type = param_type
self.DoF = None
self.SE = 0
self.CI = [-np.inf, np.inf]
self.covar = None
self.r_squared = None
@property
def name(self): return self.__name
@name.setter
def name(self, arg_name):
if not isinstance(arg_name, str):
raise TypeError('Arg name must be of type string')
self.__name = arg_name
@property
def type(self): return self.__type
@type.setter
def type(self, arg_type):
if not isinstance(arg_type, str):
raise TypeError('Arg type must be of type string')
if arg_type not in ['free', 'shared', 'constant']:
raise ValueError('Arg type must be either free, shared or constant')
self.__type = arg_type
@property
def CI_low(self): return self.CI[0]
@property
def CI_high(self): return self.CI[1]
class GlobalFit(object):
""" GlobalFit
Wrapper function to perform global fitting. This acts as a wrapper for
multiple FitModels, enabling the user to pair datasets and models and share
data or arguments.
For each fit function, a list of arguments is compiled. Those belonging to
the shared or constant type are set respectively.
    Note that a single or individual fit is just a special case of a global fit
    where there are no shared values and only one dataset. This wrapper can be
    used for that purpose too...
    Weighting of fits is supported via the weights property. The weights are
    passed as the sigma argument to curve_fit, and are specified as the
    standard deviations of the errors (assuming a Gaussian distribution).
Args:
x: concatenated x data
y: concatenated y data
weights: (optional)
Properties:
fit_funcs: the fit functions
constants: constants for the fitting
Members:
__call__: evaluates the fit functions
Notes:
"""
def __init__(self):
self.ID = []
self.x = []
self.y = []
self.__fit_funcs = []
self.__shared = []
self.__initialised = False
self.__params = None
self.__results = None
self.__weights = None
self.covar = None
@property
def fit_funcs(self): return self.__fit_funcs
@fit_funcs.setter
def fit_funcs(self, fit_funcs):
for fit_func in fit_funcs:
if not hasattr(fit_func, "__call__"): continue
# append it and instantiate it
if isinstance(fit_func, FitModel):
self.__fit_funcs.append(fit_func)
else:
self.__fit_funcs.append( fit_func() )
@property
def constants(self):
return [f.constants for f in self.__fit_funcs]
@constants.setter
def constants(self, const=None):
if len(const) != len(self.__fit_funcs):
raise ValueError("Number of constants should be the same as number"
" of fit functions")
for constant, fit_func in zip(const, self.__fit_funcs):
fit_func.constants = constant
@property
def shared(self):
return self.__shared
@shared.setter
def shared(self, shared_args=[]):
""" Set the shared arguments for the global fit """
if not isinstance(shared_args, (list, tuple)):
raise TypeError('Shared args must be of type list or tuple')
if not all([isinstance(a, str) for a in shared_args]):
raise TypeError('Shared args must be a list of strings.')
# TODO(arl): check that these shared params exist in the fit functions
# and report an error if incorrect...
self.__shared = list(set(shared_args))
@property
def weights(self):
return self.__weights
@weights.setter
def weights(self, weights):
""" Set weights for the global fit. These should be defined as
standard deviations of errors in ydata. """
        if weights is None:
            self.__weights = None
            return
if not isinstance(weights, (list,tuple)):
raise TypeError('Weights must be of type list or tuple')
if not all(isinstance(w, (np.ndarray, list)) for w in weights):
raise TypeError('Weights must be a list of numpy arrays or lists')
self.__weights = weights
@property
def fit_weights(self):
""" Check and return the weights for fitting """
# check weights
if self.weights is not None:
assert(len(self.weights) == len(self.x) == len(self.y))
return np.concatenate([w for w in self.weights])
return None
@property
def params(self): return self.__params
def __call__(self, *args):
""" Dummy call for all fit functions """
if not self.__initialised: self.initialise()
x = args[0]
fit_args = args[1:]
# now set the values of the objects
for p, p_val in zip(self.params, fit_args):
self.__params[p].value = p_val
ret = np.array(())
for i, fit_func in enumerate(self.fit_funcs):
ret = np.append( ret, self.eval_func(i) )
return ret
def initialise(self):
""" Set up all of the shared, constant and free parameters """
if len(self.ID) != len(self.x):
self.ID = ['protein_{0:d}'.format(i) for i in range(len(self.x))]
shared = {s:FitParameter(s, 0.0, param_type='shared') for s in self.shared}
# set up an ordered dictionary of the parameter objects
all_params = OrderedDict(shared)
for f in self.fit_funcs:
fit_func_params = []
const = [c[0] for c in f.constants]
for arg in f.fit_func_args:
if arg in shared:
fit_func_params.append(shared[arg])
elif arg in const:
c_val = f.constants[const.index(arg)][1]
fit_func_params.append(FitParameter(arg, c_val, param_type='constant'))
else:
fit_func_params.append(FitParameter(arg, 0.0, param_type='free'))
f.rick_and_morty = fit_func_params
# print f.name, [(g.name, g.type) for g in f.rick_and_morty]
# now make the master list of params
for i, f in enumerate(self.fit_funcs):
for p in f.rick_and_morty:
if p.type=='shared' and p.name not in all_params:
all_params[p.name] = p
elif p.type not in ('shared','constant'):
# all_params[p.name+'_'+str(i)] = p
all_params[p.name+'_{'+self.ID[i]+'}'] = p
# save this ordered dict for later
self.__params = all_params
# set the flag so that we don't do this again
self.__initialised = True
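        # Example of the resulting naming scheme (added note; the IDs are
        # whatever the caller supplies): with ID = ['WT', 'V10A'] and a free
        # parameter 'm', the master dict gains keys 'm_{WT}' and 'm_{V10A}',
        # whereas a shared parameter keeps a single entry under its own name.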
def eval_func(self, i):
""" Evaluate the fit function """
        if i<0 or i>=len(self.fit_funcs):
raise ValueError('Cannot evaluate fit function {0:d}'.format(i))
fit_func = self.fit_funcs[i]
x_this = np.array( self.x[i] )
args_this = [a.value for a in fit_func.rick_and_morty]
return fit_func(x_this, *args_this)
def fit(self, p0=[], bounds=None):
""" Run the fit. """
# check a few things for consistency
assert(len(self.x) == len(self.y))
# concatenate the xy data
x = np.concatenate([x for x in self.x])
y = np.concatenate([y for y in self.y])
# fit the data
if bounds:
out, covar = optimize.curve_fit(self, x, y, p0=p0, bounds=bounds,
max_nfev=20000, absolute_sigma=True,
sigma=self.fit_weights)
else:
out, covar = optimize.curve_fit(self, x, y, p0=p0, maxfev=20000,
absolute_sigma=True,
sigma=self.fit_weights)
# now finalise and set up the results
self.all_residuals = residuals(y_data=y, y_fit=self(x, *out))
self.finalise(out, covar)
return out, covar
def finalise(self, out, covar):
""" Take the results of the fitting, set the parameter values and
calculate errors.
"""
# put the parameter values in
for i, p in enumerate(self.params):
self.params[p].value = out[i]
self.params[p].covar = covar[i,i]
self.covar = covar
self.__results = []
# set up the fit result objects
for i,f in enumerate(self.fit_funcs):
result = FitResult(fit_name=f.name, fit_params=f.rick_and_morty)
result.ID = self.ID[i]
result.method = "pyfolding.GlobalFit and scipy.optimize.curve_fit"
result.y = self.eval_func(i)
result.x_fit = np.linspace(np.min([0.]+self.x[i].tolist()), np.max(self.x[i]),100)
result.y_fit = f(result.x_fit, *[a.value for a in f.rick_and_morty])
result.covar = covar
result.residuals = residuals(y_data=self.y[i], y_fit=result.y)
result.r_squared = r_squared(y_data=self.y[i], y_fit=result.y)
result.all_residuals = self.all_residuals
self.__results.append(result)
@property
def results(self):
return self.__results
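# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): a GlobalFit shares named parameters across
# several datasets while the remaining parameters are fitted per dataset. The
# attribute assignments below are assumptions inferred from the setters above
# (fit_funcs, x, y, shared, fit), not a verbatim PyFolding recipe:
#
#   gf = GlobalFit()
#   gf.fit_funcs = [models.TwoStateEquilibrium(), models.TwoStateEquilibrium()]
#   gf.x, gf.y = [x1, x2], [y1, y2]
#   gf.shared = ['m']                       # 'm' is fitted once, globally
#   popt, pcov = gf.fit(p0=[1.5, 5.0, 5.0]) # shared param first, then free ones
#   for result in gf.results:
#       result.display()
# ---------------------------------------------------------------------------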
class FitResult(object):
""" Fitting result object.
This is an internal class that collates fit results and enables calculation
of errors, residuals and other fun things.
Args:
fit_name: a name for the fit result (e.g. TwoStateChevron)
fit_params: the fit parameters
ID: The identifier of the protein
Properties:
method: the name of the optimisation algorithm used
errors: the calculated errors (SEM) for the fit arguments
details: return a zipped list of argument, value, error tuples
standard_error: the standard_error of the overall fit
covar: covariance matrix following optimisation
residuals: residuals of fit to data
all_residuals: all residuals for a global fit (same as residuals if an
individual fit)
r_squared: r^2 value for the fit
Members:
display: return a formatted output of the fitting statistics
Notes:
TODO(arl): implement an export function
"""
def __init__(self, fit_name=None, fit_params=None):
self.ID = None
self.fit_params = fit_params
self.name = fit_name
self.covar = None
self.residuals = None
self.all_residuals = None
self.r_squared = None
self.x_fit = None
self.y_fit = None
self.__method = "scipy.optimize.curve_fit"
@property
def method(self): return self.__method
@method.setter
def method(self, method=None):
if not isinstance(method, str):
raise TypeError("FitResult: Method must be a string")
self.__method = method
def display(self):
""" Print the errors and fit values """
table_width = max([len("Model: "+self.name), len(" Fitting results "), 80])
nl = 0
for p in self.details:
nl = max(nl, len(p.name))
print("="*table_width)
print(" Fitting results")
print("="*table_width)
if self.ID: print(" ID: {0:s}".format(self.ID))
print(" Model: {0:s}".format(self.name))
print(" Optimiser: {0:s}".format(self.__method))
print(" Temperature: {0:2.2f}\u00B0C\n".format(temperature.temperature))
for p in self.details:
self.display_row(p, nl)
print("-"*table_width)
print(" R^2: \t{0:2.5f}".format(self.r_squared))
print(" DOF: \t{0:d}".format(self.DoF))
print("|SS|: \t{0:2.2e}".format(self.SS))
print("="*table_width)
print("\n")
def display_row(self, p, max_name_len):
""" Take a parameter and display a row of the table """
p_name = p.name.ljust(max_name_len)
if p.type == 'constant':
print(" ({0:s}) {1:s} {2:>2.5e}".format(p.type[0], p_name, p.value))
return
print(" ({0:s}) {1:s} {2:>2.5e} \u00B1 {3:<2.5e}" \
" \t {6:d}\u0025 CI[{4:>2.5e}, {5:>2.5e}]".format(p.type[0],
p_name, p.value, p.SE, p.CI_low, p.CI_high,
int(constants.CONFIDENCE_INTERVAL)))
def confidence(self, i):
""" Return the 95 per cent confidence interval for a fitted parameter
https://stats.stackexchange.com/questions/72047/when-fitting-a-curve-how-do-i-calculate-the-95-confidence-interval-for-my-fitt
BestFit(Pi) +/- t(95%, DoF) * SE(Pi)
NOTES:
TODO(arl): make this a user defined interval
"""
ci = constants.CONFIDENCE_INTERVAL / 100.0
# two-sided critical t-value for the requested confidence level
t_crit = t_distrb.ppf(1.0 - (1.0 - ci) / 2.0, self.DoF)
conf = t_crit * self.SE(i)
return (self.fit_params[i].value-conf, self.fit_params[i].value+conf)
def SE(self, i):
""" Return the SE for parameter i
SE(Pi) = sqrt[ (SS/DF) * Cov(i,i) ]
"""
SE = np.sqrt( (self.SS / self.DoF) * self.fit_params[i].covar )
return SE
@property
def DoF(self):
""" Return the number of degrees of freedom, essentially the difference
between the number of data points and the number of fit parameters
"""
return len(self.all_residuals) - len(self.fit_params)
@property
def SS(self):
""" Sum of squared residuals """
# SS = np.matrix(self.all_residuals) * np.matrix(self.all_residuals).T
SS = np.sum(self.all_residuals**2)
return SS
@property
def details(self):
""" Return a zipped list of the fit arguments, values and errors """
details = []
for i, f in enumerate(self.fit_params):
if f.type == 'constant':
details.append(f)
continue
f.DoF = self.DoF
f.SE = self.SE(i)
f.CI = self.confidence(i)
# f.covar = self.covar[i,i]
details.append(f)
return details
@property
def standard_error(self):
""" Return the standard error of the fit """
return np.std(self.residuals) / np.sqrt(1.*len(self.residuals))
def export(self, filename):
raise NotImplementedError
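# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the PyFolding API): how the standard error
# and confidence interval reported by FitResult are derived from the residuals
# and the covariance matrix. Inputs are hypothetical; only numpy and scipy are
# assumed. This mirrors SE(Pi) = sqrt[(SS/DoF) * Cov(i,i)] and
# CI(Pi) = value +/- t(CI%, DoF) * SE(Pi) used above.
def _example_parameter_error(covar_ii, all_residuals, n_params, ci_percent=95.0):
    """ Return (SE, CI half-width) for a single fitted parameter. """
    from scipy.stats import t as t_dist
    SS = np.sum(np.asarray(all_residuals) ** 2)      # sum of squared residuals
    DoF = len(all_residuals) - n_params              # degrees of freedom
    SE = np.sqrt((SS / DoF) * covar_ii)              # standard error of the parameter
    t_crit = t_dist.ppf(1.0 - (1.0 - ci_percent / 100.0) / 2.0, DoF)
    return SE, t_crit * SE
# ---------------------------------------------------------------------------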
class FitModel(object):
""" FitModel class
A generic fit model to enable a common core set of functions
but specific new member functions to be enabled in derived
classes.
Can define parameters in this manner:
('kf',0), ('mf',1), ... in order to enable parameter sharing
in global fitting. By default the model just gets the params
in the order they are defined in the function definition.
Note: this must be subclassed to work.
Args:
constants: constants for fitting
Properties:
params: the parameters of the fit model
name: the name of the fit model
default_params: the default starting parameters for the fit
fit_func_args: the names of the fit function arguments
equation: a LaTeX formatted string of the model
Methods:
__call__: evaluates the fit function with the given args
print_equation: (static) prints the equation to the stdout
fit_func: (not defined) the actual fit function
error_func: (not defined) the error function
Notes:
"""
def __init__(self):
self.__params = None
self.__param_names = None
self.__default_params = None
self.fit_params = None
self.fit_covar = None
self.constants = []
# has this model been verified
self.verified = False
@property
def params(self): return self.__params
@params.setter
def params(self, params=None):
if isinstance(params, tuple):
self.__params, self.__param_names = [], []
for key,value in params:
self.__param_names.append(key)
self.__params.append(value)
else:
raise TypeError("Fit parameters must be a tuple")
@property
def name(self): return self.__class__.__name__
def __call__(self, x, *args):
""" Parse the fit arguments and pass onto the
fitting function
"""
# fit_args = self.get_fit_params(x, *args)
# return self.error_func( self.fit_func(x, *fit_args) )
return self.error_func( self.fit_func(x, *args) )
def fit_func(self, x, *args):
""" The fit function should be defined here """
raise Exception("Fit function must be defined")
def error_func(self, y):
""" The error function should be defined here """
return y
def get_fit_params(self, x, *args):
fit_args = [args[v] for v in self.__params]
# if we have constants replace the arguments
# with the constants
if self.constants:
for arg, constant in self.constants:
if arg in self.__param_names:
idx = self.__params[ self.__param_names.index(arg) ]
fit_args[idx] = constant
return fit_args
@property
def default_params(self):
""" Give back either the set starting parameters,
or set all to 1.0
"""
if isinstance(self.__default_params, np.ndarray):
return self.__default_params
else:
return np.ones((len(self.params),1))
@default_params.setter
def default_params(self, params):
if isinstance(params, np.ndarray):
self.__default_params = params
@property
def fit_func_args(self):
# return inspect.getargspec(self.fit_func).args[2:]
return inspect.getfullargspec(self.fit_func).args[2:]
@property
def equation(self):
raise NotImplementedError
# @staticmethod
def print_equation(self):
# FIXED(arl): no longer requires IPython
if 'ipykernel' not in sys.modules:
print(self.equation)
return
# if we are in an IPython shell or Jupyter notebook, use the LaTeX
# display for the equation
from IPython.display import display, Math, Latex
display(Math(self.equation))
return None
def info(self):
self.print_equation()
print(self.__doc__)
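# ---------------------------------------------------------------------------
# Minimal sketch of subclassing FitModel (illustrative; a hypothetical straight
# line, not one of the PyFolding models). Only fit_func needs to be supplied;
# fit_func_args is recovered by introspection of its signature, so
# _ExampleLinearModel().fit_func_args == ['m', 'c'].
class _ExampleLinearModel(FitModel):
    """ y = m*x + c """
    def __init__(self):
        FitModel.__init__(self)
        self.default_params = np.array([1.0, 0.0])
    def fit_func(self, x, m, c):
        return m * np.asarray(x) + c
    @property
    def equation(self):
        return r'y = mx + c'
# ---------------------------------------------------------------------------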
def r_squared(y_data=None, y_fit=None):
return 1. - np.sum((y_data - y_fit)**2) / np.sum((y_data - np.mean(y_data))**2)
def residuals(y_data=None, y_fit=None):
return y_data - y_fit
def phi(ref_protein, mut_protein):
""" Makes this easier to use! """
from .phi import phi
return phi(ref_protein, mut_protein)
"""
===========================================================
TEST FUNCTION
===========================================================
"""
def test(protein_ID='Simulated protein'):
"""
Test function to make sure that PyFolding is installed correctly
and functioning as it should. Generates a simulated data set
using known parameters and noise, and then fits and plots the
data comparing these to the ground truth.
"""
from . import models
# initialise the data structures
chevron = Chevron(ID=protein_ID)
equilibrium = EquilibriumDenaturationCurve(ID=protein_ID)
acceptable_error = 1e-2
truth = {'eq':[1.5, 5.], 'kin': [100., 1., 0.005, 1.]}
# denaturant concentrations
den = np.linspace(0.,10.,100)
# generate a two-state equilibrium curve, with Gaussian noise
# alpha_f, beta_f, alpha_u, beta_u, m, d50
eq_model = models.TwoStateEquilibrium()
eq_raw = eq_model.fit_func(den, *truth['eq'])
eq_sim = eq_raw + np.random.randn(100,)*0.01
equilibrium.labels = ['[Denaturant] (M)', 'e1']
equilibrium.data = {'x':{'e1':den}, 'y':{'e1':eq_sim}}
equilibrium.fit_func = models.TwoStateEquilibrium
# generate a two-state chevron curve, with Gaussian noise
# kf, mf, ku, mu
kin_model = models.TwoStateChevron()
kin_raw = kin_model.fit_func(den, *truth['kin'])
kin_sim = np.exp( np.log(kin_raw) + np.random.randn(100,)*0.001 )
chevron.labels = ['[Denaturant] (M)', 'k1']
chevron.data = {'x':{'k1':den}, 'y':{'k1':kin_sim}}
chevron.fit_func = models.TwoStateChevron
# fit the equilibrium data to a two-state model
equilibrium.fit()
# use the midpoint (D_50) of the equilibrium curve as the kinetic midpoint
chevron.midpoint = equilibrium.midpoint
# now fit the chevron to a two-state model
chevron.fit()
# get the parameters and check that they are the same as the
# ground truth set
for p_truth, p_fit in zip(truth['eq'], equilibrium.fit_params):
if (p_truth - p_fit)**2 > acceptable_error:
raise ValueError("PyFolding self-test failed. Fitting error ({0:f}) exceeds \
bounds ({1:f}) \n".format((p_truth - p_fit)**2, acceptable_error))
for p_truth, p_fit in zip(truth['kin'], chevron.fit_params):
if (p_truth - p_fit)**2 > acceptable_error:
raise ValueError("PyFolding self-test failed. Fitting error ({0:f}) exceeds \
bounds ({1:f}) \n".format((p_truth - p_fit)**2, acceptable_error))
print('SUCCESS - Test completed!')
# # plot the output
# if plot_output:
# plot_figure(equilibrium, chevron, display=True)
if __name__ == "__main__":
test()
|
quantumjot/PyFolding
|
pyfolding/core.py
|
Python
|
mit
| 35,921
|
[
"Gaussian"
] |
6c8a4dcb5ca85d5aad638f50ab45af7e19016be3df924dbe97b2377ecdcf252b
|
#!/usr/bin/python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Tests for alchemical factory in `alchemy.py`.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
from __future__ import print_function
import os
import sys
import zlib
import pickle
import itertools
from functools import partial
import nose
import scipy
from nose.plugins.attrib import attr
from openmmtools import testsystems, forces
from openmmtools.constants import kB
from openmmtools.alchemy import *
logger = logging.getLogger(__name__)
# =============================================================================
# CONSTANTS
# =============================================================================
temperature = 300.0 * unit.kelvin # reference temperature
# MAX_DELTA = 0.01 * kB * temperature # maximum allowable deviation
MAX_DELTA = 1.0 * kB * temperature # maximum allowable deviation
GLOBAL_ENERGY_UNIT = unit.kilojoules_per_mole # controls printed units
GLOBAL_ALCHEMY_PLATFORM = None # This is used in every energy calculation.
# GLOBAL_ALCHEMY_PLATFORM = openmm.Platform.getPlatformByName('OpenCL') # DEBUG: Use OpenCL over CPU platform for testing since OpenCL is deterministic, while CPU is not
# =============================================================================
# TESTING UTILITIES
# =============================================================================
def create_context(system, integrator, platform=None):
"""Create a Context.
If platform is None, GLOBAL_ALCHEMY_PLATFORM is used.
"""
if platform is None:
platform = GLOBAL_ALCHEMY_PLATFORM
if platform is not None:
context = openmm.Context(system, integrator, platform)
else:
context = openmm.Context(system, integrator)
return context
def compute_energy(system, positions, platform=None, force_group=-1):
"""Compute energy of the system in the given positions.
Parameters
----------
platform : openmm.Platform or None, optional
If None, the global GLOBAL_ALCHEMY_PLATFORM will be used.
force_group : int flag or set of int, optional
Passed to the groups argument of Context.getState().
"""
timestep = 1.0 * unit.femtoseconds
integrator = openmm.VerletIntegrator(timestep)
context = create_context(system, integrator, platform)
context.setPositions(positions)
state = context.getState(getEnergy=True, groups=force_group)
potential = state.getPotentialEnergy()
del context, integrator, state
return potential
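# Illustrative usage (a sketch, assuming an openmmtools test system such as
# testsystems.AlanineDipeptideVacuum, with testsystems imported at the top of
# this module):
#   testsystem = testsystems.AlanineDipeptideVacuum()
#   potential = compute_energy(testsystem.system, testsystem.positions)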
def minimize(system, positions, platform=None, tolerance=1.0*unit.kilocalories_per_mole/unit.angstroms, maxIterations=500):
"""Minimize the energy of the given system.
Parameters
----------
platform : openmm.Platform or None, optional
If None, the global GLOBAL_ALCHEMY_PLATFORM will be used.
tolerance : openmm.unit.Quantity with units compatible with energy/distance, optional, default = 1*kilocalories_per_mole/angstroms
Minimization tolerance
maxIterations : int, optional, default=500
Maximum number of iterations for minimization
Returns
-------
minimized_positions : openmm.Quantity with shape [nparticle,3] with units compatible with distance
The energy-minimized positions.
"""
timestep = 1.0 * unit.femtoseconds
integrator = openmm.VerletIntegrator(timestep)
context = create_context(system, integrator, platform)
context.setPositions(positions)
openmm.LocalEnergyMinimizer.minimize(context, tolerance, maxIterations)
minimized_positions = context.getState(getPositions=True).getPositions(asNumpy=True)
del context, integrator
return minimized_positions
def compute_force_energy(system, positions, force_name):
"""Compute the energy of the force with the given name."""
system = copy.deepcopy(system) # Copy to avoid modifications
force_name_index = 1
found_force = False
# Separate force group of force_name from all others.
for force in system.getForces():
if force.__class__.__name__ == force_name:
force.setForceGroup(force_name_index)
found_force = True
else:
force.setForceGroup(0)
if not found_force:
return None
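# Context.getState(groups=...) also accepts an integer bitmask, so
# 2**force_name_index selects only the force group assigned above.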
force_energy = compute_energy(system, positions, force_group=2**force_name_index)
del system
return force_energy
def assert_almost_equal(energy1, energy2, err_msg):
delta = energy1 - energy2
err_msg += ' interactions do not match! Reference {}, alchemical {},' \
' difference {}'.format(energy1, energy2, delta)
assert abs(delta) < MAX_DELTA, err_msg
def turn_off_nonbonded(system, sterics=False, electrostatics=False,
exceptions=False, only_atoms=frozenset()):
"""Turn off sterics and/or electrostatics interactions.
This affects only NonbondedForce and non-alchemical CustomNonbondedForces.
If `exceptions` is True, only the exceptions are turned off.
Also supports systems that have gone through replace_reaction_field.
The `system` must have only nonbonded forces.
If `only_atoms` is specified, only those atoms will be turned off.
"""
if len(only_atoms) == 0: # if empty, turn off all particles
only_atoms = set(range(system.getNumParticles()))
epsilon_coeff = 0.0 if sterics else 1.0
charge_coeff = 0.0 if electrostatics else 1.0
if exceptions: # Turn off exceptions
force_idx, nonbonded_force = forces.find_forces(system, openmm.NonbondedForce, only_one=True)
# Exceptions.
for exception_index in range(nonbonded_force.getNumExceptions()):
iatom, jatom, charge, sigma, epsilon = nonbonded_force.getExceptionParameters(exception_index)
if iatom in only_atoms or jatom in only_atoms:
nonbonded_force.setExceptionParameters(exception_index, iatom, jatom,
charge_coeff*charge, sigma, epsilon_coeff*epsilon)
# Offset exceptions.
for offset_index in range(nonbonded_force.getNumExceptionParameterOffsets()):
(parameter, exception_index, chargeprod_scale,
sigma_scale, epsilon_scale) = nonbonded_force.getExceptionParameterOffset(offset_index)
iatom, jatom, _, _, _ = nonbonded_force.getExceptionParameters(exception_index)
if iatom in only_atoms or jatom in only_atoms:
nonbonded_force.setExceptionParameterOffset(offset_index, parameter, exception_index,
charge_coeff*chargeprod_scale, sigma_scale,
epsilon_coeff*epsilon_scale)
else:
# Turn off particle interactions
for force in system.getForces():
# Handle only a Nonbonded and a CustomNonbonded (for RF).
if not (isinstance(force, openmm.CustomNonbondedForce) and 'lambda' not in force.getEnergyFunction() or
isinstance(force, openmm.NonbondedForce)):
continue
# Particle interactions.
for particle_index in range(force.getNumParticles()):
if particle_index in only_atoms:
# Convert tuple parameters to list to allow changes.
parameters = list(force.getParticleParameters(particle_index))
parameters[0] *= charge_coeff # charge
try: # CustomNonbondedForce
force.setParticleParameters(particle_index, parameters)
except TypeError: # NonbondedForce
parameters[2] *= epsilon_coeff # epsilon
force.setParticleParameters(particle_index, *parameters)
# Offset particle interactions.
if isinstance(force, openmm.NonbondedForce):
for offset_index in range(force.getNumParticleParameterOffsets()):
(parameter, particle_index, charge_scale,
sigma_scale, epsilon_scale) = force.getParticleParameterOffset(offset_index)
if particle_index in only_atoms:
force.setParticleParameterOffset(offset_index, parameter, particle_index,
charge_coeff*charge_scale, sigma_scale,
epsilon_coeff*epsilon_scale)
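# Illustrative usage (sketch): decouple only the electrostatics of atoms 0-2,
# leaving their sterics untouched:
#   turn_off_nonbonded(system, electrostatics=True, only_atoms={0, 1, 2})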
def dissect_nonbonded_energy(reference_system, positions, alchemical_atoms, other_alchemical_atoms):
"""Dissect the nonbonded energy contributions of the reference system
by atom group and sterics/electrostatics.
This works also for systems objects whose CutoffPeriodic force
has been replaced by a CustomNonbondedForce to set c_rf = 0.
Parameters
----------
reference_system : openmm.System
The reference system with the NonbondedForce to dissect.
positions : openmm.unit.Quantity of dimension [nparticles,3] with units compatible with Angstroms
The positions to test.
alchemical_atoms : set of int
The indices of the alchemical atoms.
other_alchemical_atoms : set of int
The indices of the alchemical atoms in other alchemical regions
Returns
-------
tuple of openmm.unit.Quantity with units compatible with kJ/mol
All contributions to the potential energy of NonbondedForce in the order:
nn_particle_sterics: particle sterics interactions between nonalchemical atoms
aa_particle_sterics: particle sterics interactions between alchemical atoms
na_particle_sterics: particle sterics interactions between nonalchemical-alchemical atoms
nn_particle_electro: (direct space) particle electrostatics interactions between nonalchemical atoms
aa_particle_electro: (direct space) particle electrostatics interactions between alchemical atoms
na_particle_electro: (direct space) particle electrostatics interactions between nonalchemical-alchemical atoms
nn_exception_sterics: particle sterics 1,4 exceptions between nonalchemical atoms
aa_exception_sterics: particle sterics 1,4 exceptions between alchemical atoms
na_exception_sterics: particle sterics 1,4 exceptions between nonalchemical-alchemical atoms
nn_exception_electro: particle electrostatics 1,4 exceptions between nonalchemical atoms
aa_exception_electro: particle electrostatics 1,4 exceptions between alchemical atoms
na_exception_electro: particle electrostatics 1,4 exceptions between nonalchemical-alchemical atoms
nn_reciprocal_energy: electrostatics of reciprocal space between nonalchemical atoms
aa_reciprocal_energy: electrostatics of reciprocal space between alchemical atoms
na_reciprocal_energy: electrostatics of reciprocal space between nonalchemical-alchemical atoms
"""
all_alchemical_atoms = set(alchemical_atoms).union(other_alchemical_atoms)
nonalchemical_atoms = set(range(reference_system.getNumParticles())).difference(all_alchemical_atoms)
# Remove all forces but NonbondedForce and eventually the
# CustomNonbondedForce used to model reaction field.
reference_system = copy.deepcopy(reference_system) # don't modify original system
forces_to_remove = list()
for force_index, force in enumerate(reference_system.getForces()):
force.setForceGroup(0)
if isinstance(force, openmm.NonbondedForce):
force.setReciprocalSpaceForceGroup(30) # separate PME reciprocal from direct space
# We keep only CustomNonbondedForces that are not alchemically modified.
elif not (isinstance(force, openmm.CustomNonbondedForce) and
'lambda' not in force.getEnergyFunction()):
forces_to_remove.append(force_index)
for force_index in reversed(forces_to_remove):
reference_system.removeForce(force_index)
assert len(reference_system.getForces()) <= 2
# Compute particle interactions between different groups of atoms
# ----------------------------------------------------------------
# Turn off other alchemical regions
if len(other_alchemical_atoms) > 0:
turn_off_nonbonded(reference_system, sterics=True, electrostatics=True, only_atoms=other_alchemical_atoms)
turn_off_nonbonded(reference_system, sterics=True, electrostatics=True, exceptions=True, only_atoms=other_alchemical_atoms)
system = copy.deepcopy(reference_system)
# Compute total energy from nonbonded interactions
tot_energy = compute_energy(system, positions)
tot_reciprocal_energy = compute_energy(system, positions, force_group={30})
# Compute contributions from particle sterics
turn_off_nonbonded(system, sterics=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_particle_sterics = compute_energy(system, positions)
system = copy.deepcopy(reference_system) # Restore alchemical sterics
turn_off_nonbonded(system, sterics=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_particle_sterics = compute_energy(system, positions)
turn_off_nonbonded(system, sterics=True)
tot_energy_no_particle_sterics = compute_energy(system, positions)
tot_particle_sterics = tot_energy - tot_energy_no_particle_sterics
nn_particle_sterics = tot_energy_no_alchem_particle_sterics - tot_energy_no_particle_sterics
aa_particle_sterics = tot_energy_no_nonalchem_particle_sterics - tot_energy_no_particle_sterics
na_particle_sterics = tot_particle_sterics - nn_particle_sterics - aa_particle_sterics
# Compute contributions from particle electrostatics
system = copy.deepcopy(reference_system) # Restore sterics
turn_off_nonbonded(system, electrostatics=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_particle_electro = compute_energy(system, positions)
nn_reciprocal_energy = compute_energy(system, positions, force_group={30})
system = copy.deepcopy(reference_system) # Restore alchemical electrostatics
turn_off_nonbonded(system, electrostatics=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_particle_electro = compute_energy(system, positions)
aa_reciprocal_energy = compute_energy(system, positions, force_group={30})
turn_off_nonbonded(system, electrostatics=True)
tot_energy_no_particle_electro = compute_energy(system, positions)
na_reciprocal_energy = tot_reciprocal_energy - nn_reciprocal_energy - aa_reciprocal_energy
tot_particle_electro = tot_energy - tot_energy_no_particle_electro
nn_particle_electro = tot_energy_no_alchem_particle_electro - tot_energy_no_particle_electro
aa_particle_electro = tot_energy_no_nonalchem_particle_electro - tot_energy_no_particle_electro
na_particle_electro = tot_particle_electro - nn_particle_electro - aa_particle_electro
nn_particle_electro -= nn_reciprocal_energy
aa_particle_electro -= aa_reciprocal_energy
na_particle_electro -= na_reciprocal_energy
# Compute exceptions between different groups of atoms
# -----------------------------------------------------
# Compute contributions from exceptions sterics
system = copy.deepcopy(reference_system) # Restore particle interactions
turn_off_nonbonded(system, sterics=True, exceptions=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_exception_sterics = compute_energy(system, positions)
system = copy.deepcopy(reference_system) # Restore alchemical sterics
turn_off_nonbonded(system, sterics=True, exceptions=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_exception_sterics = compute_energy(system, positions)
turn_off_nonbonded(system, sterics=True, exceptions=True)
tot_energy_no_exception_sterics = compute_energy(system, positions)
tot_exception_sterics = tot_energy - tot_energy_no_exception_sterics
nn_exception_sterics = tot_energy_no_alchem_exception_sterics - tot_energy_no_exception_sterics
aa_exception_sterics = tot_energy_no_nonalchem_exception_sterics - tot_energy_no_exception_sterics
na_exception_sterics = tot_exception_sterics - nn_exception_sterics - aa_exception_sterics
# Compute contributions from exceptions electrostatics
system = copy.deepcopy(reference_system) # Restore exceptions sterics
turn_off_nonbonded(system, electrostatics=True, exceptions=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_exception_electro = compute_energy(system, positions)
system = copy.deepcopy(reference_system) # Restore alchemical electrostatics
turn_off_nonbonded(system, electrostatics=True, exceptions=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_exception_electro = compute_energy(system, positions)
turn_off_nonbonded(system, electrostatics=True, exceptions=True)
tot_energy_no_exception_electro = compute_energy(system, positions)
tot_exception_electro = tot_energy - tot_energy_no_exception_electro
nn_exception_electro = tot_energy_no_alchem_exception_electro - tot_energy_no_exception_electro
aa_exception_electro = tot_energy_no_nonalchem_exception_electro - tot_energy_no_exception_electro
na_exception_electro = tot_exception_electro - nn_exception_electro - aa_exception_electro
assert tot_particle_sterics == nn_particle_sterics + aa_particle_sterics + na_particle_sterics
assert_almost_equal(tot_particle_electro, nn_particle_electro + aa_particle_electro +
na_particle_electro + nn_reciprocal_energy + aa_reciprocal_energy + na_reciprocal_energy,
'Inconsistency during dissection of nonbonded contributions:')
assert tot_exception_sterics == nn_exception_sterics + aa_exception_sterics + na_exception_sterics
assert tot_exception_electro == nn_exception_electro + aa_exception_electro + na_exception_electro
assert_almost_equal(tot_energy, tot_particle_sterics + tot_particle_electro +
tot_exception_sterics + tot_exception_electro,
'Inconsistency during dissection of nonbonded contributions:')
return nn_particle_sterics, aa_particle_sterics, na_particle_sterics,\
nn_particle_electro, aa_particle_electro, na_particle_electro,\
nn_exception_sterics, aa_exception_sterics, na_exception_sterics,\
nn_exception_electro, aa_exception_electro, na_exception_electro,\
nn_reciprocal_energy, aa_reciprocal_energy, na_reciprocal_energy
def compute_direct_space_correction(nonbonded_force, alchemical_atoms, positions):
"""
Compute the correction added by OpenMM to the direct space to account for
exception in reciprocal space energy.
Parameters
----------
nonbonded_force : openmm.NonbondedForce
The nonbonded force to compute the direct space correction.
alchemical_atoms : set
Set of alchemical particles in the force.
positions : numpy.array
Position of the particles.
Returns
-------
aa_correction : openmm.unit.Quantity with units compatible with kJ/mol
The correction to the direct space caused by exceptions between alchemical atoms.
na_correction : openmm.unit.Quantity with units compatible with kJ/mol
The correction to the direct space caused by exceptions between nonalchemical-alchemical atoms.
"""
energy_unit = unit.kilojoule_per_mole
aa_correction = 0.0
na_correction = 0.0
# Convert quantity positions into floats.
if isinstance(positions, unit.Quantity):
positions = positions.value_in_unit_system(unit.md_unit_system)
# If there is no reciprocal space, the correction is 0.0
if nonbonded_force.getNonbondedMethod() not in [openmm.NonbondedForce.Ewald, openmm.NonbondedForce.PME]:
return aa_correction * energy_unit, na_correction * energy_unit
# Get alpha ewald parameter
alpha_ewald, _, _, _ = nonbonded_force.getPMEParameters()
if alpha_ewald / alpha_ewald.unit == 0.0:
cutoff_distance = nonbonded_force.getCutoffDistance()
tolerance = nonbonded_force.getEwaldErrorTolerance()
alpha_ewald = (1.0 / cutoff_distance) * np.sqrt(-np.log(2.0*tolerance))
alpha_ewald = alpha_ewald.value_in_unit_system(unit.md_unit_system)
assert alpha_ewald != 0.0
for exception_id in range(nonbonded_force.getNumExceptions()):
# Get particles parameters in md unit system
iatom, jatom, _, _, _ = nonbonded_force.getExceptionParameters(exception_id)
icharge, _, _ = nonbonded_force.getParticleParameters(iatom)
jcharge, _, _ = nonbonded_force.getParticleParameters(jatom)
icharge = icharge.value_in_unit_system(unit.md_unit_system)
jcharge = jcharge.value_in_unit_system(unit.md_unit_system)
# Compute the correction and take care of numerical instabilities
r = np.linalg.norm(positions[iatom] - positions[jatom]) # distance between atoms
alpha_r = alpha_ewald * r
if alpha_r > 1e-6:
correction = ONE_4PI_EPS0 * icharge * jcharge * scipy.special.erf(alpha_r) / r
else: # for small alpha_r we linearize erf(): erf(x) ~ 2x/sqrt(pi), so erf(alpha*r)/r -> 2*alpha/sqrt(pi)
correction = ONE_4PI_EPS0 * alpha_ewald * icharge * jcharge * 2.0 / np.sqrt(np.pi)
# Assign correction to correct group
if iatom in alchemical_atoms and jatom in alchemical_atoms:
aa_correction += correction
elif iatom in alchemical_atoms or jatom in alchemical_atoms:
na_correction += correction
return aa_correction * energy_unit, na_correction * energy_unit
def is_alchemical_pme_treatment_exact(alchemical_system):
"""Return True if the given alchemical system models PME exactly."""
# If exact PME is here, the NonbondedForce defines a
# lambda_electrostatics variable.
_, nonbonded_force = forces.find_forces(alchemical_system, openmm.NonbondedForce,
only_one=True)
for parameter_idx in range(nonbonded_force.getNumGlobalParameters()):
parameter_name = nonbonded_force.getGlobalParameterName(parameter_idx)
# With multiple alchemical regions, lambda_electrostatics might have a suffix.
if parameter_name.startswith('lambda_electrostatics'):
return True
return False
# =============================================================================
# SUBROUTINES FOR TESTING
# =============================================================================
def compare_system_energies(reference_system, alchemical_system, alchemical_regions, positions):
"""Check that the energies of reference and alchemical systems are close.
This takes care of ignoring the reciprocal space when the nonbonded
method is an Ewald method.
"""
if not isinstance(alchemical_regions, list):
alchemical_regions = [alchemical_regions]
# Default we compare the energy of all groups.
force_group = -1
# Check nonbonded method. Comparing with PME is more complicated
# because the alchemical system with direct-space treatment of PME
# does not take into account the reciprocal space.
force_idx, nonbonded_force = forces.find_forces(reference_system, openmm.NonbondedForce, only_one=True)
nonbonded_method = nonbonded_force.getNonbondedMethod()
is_direct_space_pme = (nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald] and
not is_alchemical_pme_treatment_exact(alchemical_system))
if is_direct_space_pme:
# Separate the reciprocal space force in a different group.
reference_system = copy.deepcopy(reference_system)
alchemical_system = copy.deepcopy(alchemical_system)
for system in [reference_system, alchemical_system]:
for force in system.getForces():
force.setForceGroup(0)
if isinstance(force, openmm.NonbondedForce):
force.setReciprocalSpaceForceGroup(31)
# We compare only the direct space energy
force_group = {0}
# Compute the reciprocal space correction added to the direct space
# energy due to the exceptions of the alchemical atoms.
aa_correction = 0.0 * unit.kilojoule_per_mole
na_correction = 0.0 * unit.kilojoule_per_mole
for region in alchemical_regions:
alchemical_atoms = region.alchemical_atoms
aa, na = compute_direct_space_correction(nonbonded_force, alchemical_atoms, positions)
aa_correction += aa
na_correction += na
# Compute potential of the direct space.
potentials = [compute_energy(system, positions, force_group=force_group)
for system in [reference_system, alchemical_system]]
# Add the direct space correction.
if is_direct_space_pme:
potentials.append(aa_correction + na_correction)
else:
potentials.append(0.0 * GLOBAL_ENERGY_UNIT)
# Check that error is small.
delta = potentials[1] - potentials[2] - potentials[0]
if abs(delta) > MAX_DELTA:
print("========")
for description, potential in zip(['reference', 'alchemical', 'PME correction'], potentials):
print("{}: {} ".format(description, potential))
print("delta : {}".format(delta))
err_msg = "Maximum allowable deviation exceeded (was {:.8f} kcal/mol; allowed {:.8f} kcal/mol)."
raise Exception(err_msg.format(delta / unit.kilocalories_per_mole, MAX_DELTA / unit.kilocalories_per_mole))
def check_multi_interacting_energy_components(reference_system, alchemical_system, alchemical_regions, positions):
"""wrapper around check_interacting_energy_components for multiple regions
Parameters
----------
reference_system : openmm.System
The reference system.
alchemical_system : openmm.System
The alchemically modified system to test.
alchemical_regions : AlchemicalRegion.
The alchemically modified region.
positions : n_particlesx3 array-like of openmm.unit.Quantity
The positions to test (units of length).
Note
----------
Interactions between alchemical regions are not tested here.
Alchemical regions are assumed to be non-interacting.
"""
all_alchemical_atoms = set()
for region in alchemical_regions:
for atom in region.alchemical_atoms:
all_alchemical_atoms.add(atom)
for region in alchemical_regions:
check_interacting_energy_components(
reference_system, alchemical_system, region, positions,
all_alchemical_atoms, multi_regions=True)
def check_interacting_energy_components(reference_system, alchemical_system, alchemical_regions, positions,
all_alchemical_atoms=None, multi_regions=False):
"""Compare full and alchemically-modified system energies by energy component.
Parameters
----------
reference_system : openmm.System
The reference system.
alchemical_system : openmm.System
The alchemically modified system to test.
alchemical_regions : AlchemicalRegion.
The alchemically modified region.
positions : n_particlesx3 array-like of openmm.unit.Quantity
The positions to test (units of length).
multi_regions : boolean
Indicates if multiple regions are being tested
"""
energy_unit = unit.kilojoule_per_mole
reference_system = copy.deepcopy(reference_system)
alchemical_system = copy.deepcopy(alchemical_system)
is_exact_pme = is_alchemical_pme_treatment_exact(alchemical_system)
# Find nonbonded method
_, nonbonded_force = forces.find_forces(reference_system, openmm.NonbondedForce, only_one=True)
nonbonded_method = nonbonded_force.getNonbondedMethod()
# Get energy components of reference system's nonbonded force
if multi_regions:
other_alchemical_atoms = all_alchemical_atoms.difference(alchemical_regions.alchemical_atoms)
print("Dissecting reference system's nonbonded force for region {}".format(alchemical_regions.name))
else:
other_alchemical_atoms = set()
print("Dissecting reference system's nonbonded force")
energy_components = dissect_nonbonded_energy(reference_system, positions,
alchemical_regions.alchemical_atoms, other_alchemical_atoms)
nn_particle_sterics, aa_particle_sterics, na_particle_sterics,\
nn_particle_electro, aa_particle_electro, na_particle_electro,\
nn_exception_sterics, aa_exception_sterics, na_exception_sterics,\
nn_exception_electro, aa_exception_electro, na_exception_electro,\
nn_reciprocal_energy, aa_reciprocal_energy, na_reciprocal_energy = energy_components
# Dissect unmodified nonbonded force in alchemical system
if multi_regions:
print("Dissecting alchemical system's unmodified nonbonded force for region {}".format(alchemical_regions.name))
else:
print("Dissecting alchemical system's unmodified nonbonded force")
energy_components = dissect_nonbonded_energy(alchemical_system, positions,
alchemical_regions.alchemical_atoms, other_alchemical_atoms)
unmod_nn_particle_sterics, unmod_aa_particle_sterics, unmod_na_particle_sterics,\
unmod_nn_particle_electro, unmod_aa_particle_electro, unmod_na_particle_electro,\
unmod_nn_exception_sterics, unmod_aa_exception_sterics, unmod_na_exception_sterics,\
unmod_nn_exception_electro, unmod_aa_exception_electro, unmod_na_exception_electro,\
unmod_nn_reciprocal_energy, unmod_aa_reciprocal_energy, unmod_na_reciprocal_energy = energy_components
# Get alchemically-modified energy components
if multi_regions:
print("Computing alchemical system components energies for region {}".format(alchemical_regions.name))
else:
print("Computing alchemical system components energies")
alchemical_state = AlchemicalState.from_system(alchemical_system, parameters_name_suffix=alchemical_regions.name)
alchemical_state.set_alchemical_parameters(1.0)
energy_components = AbsoluteAlchemicalFactory.get_energy_components(alchemical_system, alchemical_state,
positions, platform=GLOBAL_ALCHEMY_PLATFORM)
if multi_regions:
region_label = ' for region {}'.format(alchemical_regions.name)
else:
region_label = ''
# Sterics particle and exception interactions are always modeled with a custom force.
na_custom_particle_sterics = energy_components['alchemically modified NonbondedForce for non-alchemical/alchemical sterics' + region_label]
aa_custom_particle_sterics = energy_components['alchemically modified NonbondedForce for alchemical/alchemical sterics' + region_label]
na_custom_exception_sterics = energy_components['alchemically modified BondForce for non-alchemical/alchemical sterics exceptions' + region_label]
aa_custom_exception_sterics = energy_components['alchemically modified BondForce for alchemical/alchemical sterics exceptions' + region_label]
# With exact treatment of PME, we use the NonbondedForce offset for electrostatics.
try:
na_custom_particle_electro = energy_components['alchemically modified NonbondedForce for non-alchemical/alchemical electrostatics' + region_label]
aa_custom_particle_electro = energy_components['alchemically modified NonbondedForce for alchemical/alchemical electrostatics' + region_label]
na_custom_exception_electro = energy_components['alchemically modified BondForce for non-alchemical/alchemical electrostatics exceptions' + region_label]
aa_custom_exception_electro = energy_components['alchemically modified BondForce for alchemical/alchemical electrostatics exceptions' + region_label]
except KeyError:
assert is_exact_pme
# Test that all NonbondedForce contributions match
# -------------------------------------------------
# All contributions from alchemical atoms in unmodified nonbonded force are turned off
err_msg = 'Non-zero contribution from unmodified NonbondedForce alchemical atoms: '
assert_almost_equal(unmod_aa_particle_sterics, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_particle_sterics, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_aa_exception_sterics, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_exception_sterics, 0.0 * energy_unit, err_msg)
if not is_exact_pme:
# With exact PME treatment these are tested below.
assert_almost_equal(unmod_aa_particle_electro, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_particle_electro, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_aa_reciprocal_energy, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_reciprocal_energy, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_aa_exception_electro, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_exception_electro, 0.0 * energy_unit, err_msg)
# Check sterics interactions match
assert_almost_equal(nn_particle_sterics, unmod_nn_particle_sterics,
'Non-alchemical/non-alchemical atoms particle sterics' + region_label)
assert_almost_equal(nn_exception_sterics, unmod_nn_exception_sterics,
'Non-alchemical/non-alchemical atoms exceptions sterics' + region_label)
assert_almost_equal(aa_particle_sterics, aa_custom_particle_sterics,
'Alchemical/alchemical atoms particle sterics' + region_label)
assert_almost_equal(aa_exception_sterics, aa_custom_exception_sterics,
'Alchemical/alchemical atoms exceptions sterics' + region_label)
assert_almost_equal(na_particle_sterics, na_custom_particle_sterics,
'Non-alchemical/alchemical atoms particle sterics' + region_label)
assert_almost_equal(na_exception_sterics, na_custom_exception_sterics,
'Non-alchemical/alchemical atoms exceptions sterics' + region_label)
# Check electrostatics interactions
assert_almost_equal(nn_particle_electro, unmod_nn_particle_electro,
'Non-alchemical/non-alchemical atoms particle electrostatics' + region_label)
assert_almost_equal(nn_exception_electro, unmod_nn_exception_electro,
'Non-alchemical/non-alchemical atoms exceptions electrostatics' + region_label)
# With exact treatment of PME, the electrostatics of alchemical-alchemical
# atoms is modeled with NonbondedForce offsets.
if is_exact_pme:
# Reciprocal space.
assert_almost_equal(aa_reciprocal_energy, unmod_aa_reciprocal_energy,
'Alchemical/alchemical atoms reciprocal space energy' + region_label)
assert_almost_equal(na_reciprocal_energy, unmod_na_reciprocal_energy,
'Non-alchemical/alchemical atoms reciprocal space energy' + region_label)
# Direct space.
assert_almost_equal(aa_particle_electro, unmod_aa_particle_electro,
'Alchemical/alchemical atoms particle electrostatics' + region_label)
assert_almost_equal(na_particle_electro, unmod_na_particle_electro,
'Non-alchemical/alchemical atoms particle electrostatics' + region_label)
# Exceptions.
assert_almost_equal(aa_exception_electro, unmod_aa_exception_electro,
'Alchemical/alchemical atoms exceptions electrostatics' + region_label)
assert_almost_equal(na_exception_electro, unmod_na_exception_electro,
'Non-alchemical/alchemical atoms exceptions electrostatics' + region_label)
# With direct space PME, the custom forces model only the
# direct space of alchemical-alchemical interactions.
else:
# Get direct space correction due to reciprocal space exceptions
aa_correction, na_correction = compute_direct_space_correction(nonbonded_force,
alchemical_regions.alchemical_atoms,
positions)
aa_particle_electro += aa_correction
na_particle_electro += na_correction
# Check direct space energy
assert_almost_equal(aa_particle_electro, aa_custom_particle_electro,
'Alchemical/alchemical atoms particle electrostatics' + region_label)
assert_almost_equal(na_particle_electro, na_custom_particle_electro,
'Non-alchemical/alchemical atoms particle electrostatics' + region_label)
# Check exceptions.
assert_almost_equal(aa_exception_electro, aa_custom_exception_electro,
'Alchemical/alchemical atoms exceptions electrostatics' + region_label)
assert_almost_equal(na_exception_electro, na_custom_exception_electro,
'Non-alchemical/alchemical atoms exceptions electrostatics' + region_label)
# With Ewald methods, the NonbondedForce should always hold the
# reciprocal space energy of nonalchemical-nonalchemical atoms.
if nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald]:
# Reciprocal space.
assert_almost_equal(nn_reciprocal_energy, unmod_nn_reciprocal_energy,
'Non-alchemical/non-alchemical atoms reciprocal space energy')
else:
# Reciprocal space energy should be null in this case
assert nn_reciprocal_energy == unmod_nn_reciprocal_energy == 0.0 * energy_unit
assert aa_reciprocal_energy == unmod_aa_reciprocal_energy == 0.0 * energy_unit
assert na_reciprocal_energy == unmod_na_reciprocal_energy == 0.0 * energy_unit
# Check forces other than nonbonded
# ----------------------------------
for force_name in ['HarmonicBondForce', 'HarmonicAngleForce', 'PeriodicTorsionForce',
'GBSAOBCForce', 'CustomGBForce']:
alchemical_forces_energies = [energy for label, energy in energy_components.items() if force_name in label]
reference_force_energy = compute_force_energy(reference_system, positions, force_name)
# There should be no force in the alchemical system if force_name is missing from the reference
if reference_force_energy is None:
assert len(alchemical_forces_energies) == 0, str(alchemical_forces_energies)
continue
# Check that the energies match
tot_alchemical_forces_energies = 0.0 * energy_unit
for energy in alchemical_forces_energies:
tot_alchemical_forces_energies += energy
assert_almost_equal(reference_force_energy, tot_alchemical_forces_energies,
'{} energy '.format(force_name))
def check_multi_noninteracting_energy_components(reference_system, alchemical_system, alchemical_regions, positions):
"""wrapper around check_noninteracting_energy_components for multiple regions
Parameters
----------
reference_system : openmm.System
The reference system (not alchemically modified).
alchemical_system : openmm.System
The alchemically modified system to test.
alchemical_regions : AlchemicalRegion.
The alchemically modified region.
positions : n_particlesx3 array-like of openmm.unit.Quantity
The positions to test (units of length).
"""
for region in alchemical_regions:
check_noninteracting_energy_components(reference_system, alchemical_system, region, positions, True)
def check_noninteracting_energy_components(reference_system, alchemical_system, alchemical_regions, positions, multi_regions=False):
"""Check non-interacting energy components are zero when appropriate.
Parameters
----------
reference_system : openmm.System
The reference system (not alchemically modified).
alchemical_system : openmm.System
The alchemically modified system to test.
alchemical_regions : AlchemicalRegion.
The alchemically modified region.
positions : n_particlesx3 array-like of openmm.unit.Quantity
The positions to test (units of length).
multi_regions : boolean
Indicates if multiple regions are being tested
"""
alchemical_system = copy.deepcopy(alchemical_system)
is_exact_pme = is_alchemical_pme_treatment_exact(alchemical_system)
# Set state to non-interacting.
alchemical_state = AlchemicalState.from_system(alchemical_system, parameters_name_suffix=alchemical_regions.name)
alchemical_state.set_alchemical_parameters(0.0)
energy_components = AbsoluteAlchemicalFactory.get_energy_components(alchemical_system, alchemical_state,
positions, platform=GLOBAL_ALCHEMY_PLATFORM)
def assert_zero_energy(label):
# Handle multiple alchemical regions.
if multi_regions:
label = label + ' for region ' + alchemical_regions.name
# Testing energy component of each region.
print('testing {}'.format(label))
value = energy_components[label]
assert abs(value / GLOBAL_ENERGY_UNIT) == 0.0, ("'{}' should have zero energy in annihilated alchemical"
" state, but energy is {}").format(label, str(value))
# Check that non-alchemical/alchemical particle interactions and 1,4 exceptions have been annihilated
assert_zero_energy('alchemically modified BondForce for non-alchemical/alchemical sterics exceptions')
assert_zero_energy('alchemically modified NonbondedForce for non-alchemical/alchemical sterics')
if is_exact_pme:
assert 'alchemically modified NonbondedForce for non-alchemical/alchemical electrostatics' not in energy_components
assert 'alchemically modified BondForce for non-alchemical/alchemical electrostatics exceptions' not in energy_components
else:
assert_zero_energy('alchemically modified NonbondedForce for non-alchemical/alchemical electrostatics')
assert_zero_energy('alchemically modified BondForce for non-alchemical/alchemical electrostatics exceptions')
# Check that alchemical/alchemical particle interactions and 1,4 exceptions have been annihilated
if alchemical_regions.annihilate_sterics:
assert_zero_energy('alchemically modified NonbondedForce for alchemical/alchemical sterics')
assert_zero_energy('alchemically modified BondForce for alchemical/alchemical sterics exceptions')
if alchemical_regions.annihilate_electrostatics:
if is_exact_pme:
assert 'alchemically modified NonbondedForce for alchemical/alchemical electrostatics' not in energy_components
assert 'alchemically modified BondForce for alchemical/alchemical electrostatics exceptions' not in energy_components
else:
assert_zero_energy('alchemically modified NonbondedForce for alchemical/alchemical electrostatics')
assert_zero_energy('alchemically modified BondForce for alchemical/alchemical electrostatics exceptions')
# Check valence terms
for force_name in ['HarmonicBondForce', 'HarmonicAngleForce', 'PeriodicTorsionForce']:
force_label = 'alchemically modified ' + force_name
if force_label in energy_components:
assert_zero_energy(force_label)
# Check implicit solvent force.
for force_name in ['CustomGBForce', 'GBSAOBCForce']:
label = 'alchemically modified ' + force_name
# Check if the system has an implicit solvent force.
try:
alchemical_energy = energy_components[label]
except KeyError: # No implicit solvent.
continue
# If all alchemical particles are modified, the alchemical energy should be zero.
if len(alchemical_regions.alchemical_atoms) == reference_system.getNumParticles():
assert_zero_energy(label)
continue
# Otherwise compare the alchemical energy with a
# reference system with only non-alchemical particles.
# Find implicit solvent force in reference system.
for reference_force in reference_system.getForces():
if reference_force.__class__.__name__ == force_name:
break
system = openmm.System()
force = reference_force.__class__()
# For custom GB forces, we need to copy all computed values,
# energy terms, parameters, tabulated functions and exclusions.
if isinstance(force, openmm.CustomGBForce):
for index in range(reference_force.getNumPerParticleParameters()):
name = reference_force.getPerParticleParameterName(index)
force.addPerParticleParameter(name)
for index in range(reference_force.getNumComputedValues()):
computed_value = reference_force.getComputedValueParameters(index)
force.addComputedValue(*computed_value)
for index in range(reference_force.getNumEnergyTerms()):
energy_term = reference_force.getEnergyTermParameters(index)
force.addEnergyTerm(*energy_term)
for index in range(reference_force.getNumGlobalParameters()):
name = reference_force.getGlobalParameterName(index)
default_value = reference_force.getGlobalParameterDefaultValue(index)
force.addGlobalParameter(name, default_value)
for function_index in range(reference_force.getNumTabulatedFunctions()):
name = reference_force.getTabulatedFunctionName(function_index)
function = reference_force.getTabulatedFunction(function_index)
function_copy = copy.deepcopy(function)
force.addTabulatedFunction(name, function_copy)
for exclusion_index in range(reference_force.getNumExclusions()):
particles = reference_force.getExclusionParticles(exclusion_index)
force.addExclusion(*particles)
# Create a system with only the non-alchemical particles.
for particle_index in range(reference_system.getNumParticles()):
if particle_index not in alchemical_regions.alchemical_atoms:
# Add particle to System.
mass = reference_system.getParticleMass(particle_index)
system.addParticle(mass)
# Add particle to Force.
parameters = reference_force.getParticleParameters(particle_index)
try: # GBSAOBCForce
force.addParticle(*parameters)
except (TypeError, NotImplementedError): # CustomGBForce
force.addParticle(parameters)
system.addForce(force)
# Get positions for all non-alchemical particles.
non_alchemical_positions = [pos for i, pos in enumerate(positions)
if i not in alchemical_regions.alchemical_atoms]
# Compute reference force energy.
reference_force_energy = compute_force_energy(system, non_alchemical_positions, force_name)
assert_almost_equal(reference_force_energy, alchemical_energy,
'reference {}, alchemical {}'.format(reference_force_energy, alchemical_energy))
def check_split_force_groups(system, region_names=None):
"""Check that force groups are split correctly."""
if region_names is None:
region_names = []
# Separate forces groups by lambda parameters that AlchemicalState supports.
for region in region_names:
force_groups_by_lambda = {}
lambdas_by_force_group = {}
for force, lambda_name, _ in AlchemicalState._get_system_controlled_parameters(
system, parameters_name_suffix=region):
force_group = force.getForceGroup()
try:
force_groups_by_lambda[lambda_name].add(force_group)
except KeyError:
force_groups_by_lambda[lambda_name] = {force_group}
try:
lambdas_by_force_group[force_group].add(lambda_name)
except KeyError:
lambdas_by_force_group[force_group] = {lambda_name}
# Check that force group 0 doesn't hold alchemical forces.
assert 0 not in force_groups_by_lambda
# There are as many alchemical force groups as not-None lambda variables.
alchemical_state = AlchemicalState.from_system(system, parameters_name_suffix=region)
valid_lambdas = {lambda_name for lambda_name in alchemical_state._get_controlled_parameters(parameters_name_suffix=region)
if getattr(alchemical_state, lambda_name) is not None}
assert valid_lambdas == set(force_groups_by_lambda.keys())
# Check that force groups and lambda variables are in 1-to-1 correspondence.
assert len(force_groups_by_lambda) == len(lambdas_by_force_group)
for d in [force_groups_by_lambda, lambdas_by_force_group]:
for value in d.values():
assert len(value) == 1
# With exact treatment of PME, the NonbondedForce must
# be in the lambda_electrostatics force group.
if is_alchemical_pme_treatment_exact(system):
force_idx, nonbonded_force = forces.find_forces(system, openmm.NonbondedForce, only_one=True)
assert force_groups_by_lambda['lambda_electrostatics_{}'.format(region)] == {nonbonded_force.getForceGroup()}
# =============================================================================
# BENCHMARKING AND DEBUG FUNCTIONS
# =============================================================================
def benchmark(reference_system, alchemical_regions, positions, nsteps=500,
timestep=1.0*unit.femtoseconds):
"""
Benchmark performance of alchemically modified system relative to original system.
Parameters
----------
reference_system : openmm.System
The reference System object to compare with.
alchemical_regions : AlchemicalRegion
The region to alchemically modify.
positions : n_particlesx3 array-like of openmm.unit.Quantity
The initial positions (units of distance).
nsteps : int, optional
Number of molecular dynamics steps to use for benchmarking (default is 500).
timestep : openmm.unit.Quantity, optional
Timestep to use for benchmarking (units of time, default is 1.0*unit.femtoseconds).
"""
timer = utils.Timer()
# Create the perturbed system.
factory = AbsoluteAlchemicalFactory()
timer.start('Create alchemical system')
alchemical_system = factory.create_alchemical_system(reference_system, alchemical_regions)
timer.stop('Create alchemical system')
# Create an alchemically-perturbed state corresponding to nearly fully-interacting.
# NOTE: We use a lambda slightly smaller than 1.0 because the AbsoluteAlchemicalFactory
# may not use Custom*Force softcore versions if lambda = 1.0 identically.
alchemical_state = AlchemicalState.from_system(alchemical_system)
alchemical_state.set_alchemical_parameters(1.0 - 1.0e-6)
# Create integrators.
reference_integrator = openmm.VerletIntegrator(timestep)
alchemical_integrator = openmm.VerletIntegrator(timestep)
# Create contexts for sampling.
if GLOBAL_ALCHEMY_PLATFORM:
reference_context = openmm.Context(reference_system, reference_integrator, GLOBAL_ALCHEMY_PLATFORM)
alchemical_context = openmm.Context(alchemical_system, alchemical_integrator, GLOBAL_ALCHEMY_PLATFORM)
else:
reference_context = openmm.Context(reference_system, reference_integrator)
alchemical_context = openmm.Context(alchemical_system, alchemical_integrator)
reference_context.setPositions(positions)
alchemical_context.setPositions(positions)
# Make sure all kernels are compiled.
reference_integrator.step(1)
alchemical_integrator.step(1)
# Run simulations.
print('Running reference system...')
timer.start('Run reference system')
reference_integrator.step(nsteps)
timer.stop('Run reference system')
print('Running alchemical system...')
timer.start('Run alchemical system')
alchemical_integrator.step(nsteps)
timer.stop('Run alchemical system')
print('Done.')
timer.report_timing()
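# Illustrative usage sketch: a minimal way to drive benchmark() with one of the
# openmmtools test systems already used in this module. The helper name below is
# hypothetical and the function is never invoked automatically.
def _example_benchmark_lj_cluster(nsteps=100):
    """Benchmark the alchemical overhead for a tiny Lennard-Jones cluster."""
    cluster = testsystems.LennardJonesCluster()
    region = AlchemicalRegion(alchemical_atoms=range(2))
    benchmark(cluster.system, region, cluster.positions, nsteps=nsteps)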
def benchmark_alchemy_from_pdb():
"""CLI entry point for benchmarking alchemical performance from a PDB file.
"""
logging.basicConfig(level=logging.DEBUG)
import mdtraj
import argparse
try:
from openmm import app
except ImportError: # OpenMM < 7.6
from simtk.openmm import app
parser = argparse.ArgumentParser(description='Benchmark performance of alchemically-modified system.')
parser.add_argument('-p', '--pdb', metavar='PDBFILE', type=str, action='store', required=True,
help='PDB file to benchmark; only protein forcefields supported for now (no small molecules)')
parser.add_argument('-s', '--selection', metavar='SELECTION', type=str, action='store', default='not water',
help='MDTraj DSL describing alchemical region (default: "not water")')
parser.add_argument('-n', '--nsteps', metavar='STEPS', type=int, action='store', default=1000,
help='Number of benchmarking steps (default: 1000)')
args = parser.parse_args()
# Read the PDB file
print('Loading PDB file...')
pdbfile = app.PDBFile(args.pdb)
print('Loading forcefield...')
forcefield = app.ForceField('amber99sbildn.xml', 'tip3p.xml')
print('Adding missing hydrogens...')
modeller = app.Modeller(pdbfile.topology, pdbfile.positions)
modeller.addHydrogens(forcefield)
print('Creating System...')
reference_system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.PME)
# Minimize
print('Minimizing...')
positions = minimize(reference_system, modeller.positions)
# Select alchemical regions
mdtraj_topology = mdtraj.Topology.from_openmm(modeller.topology)
alchemical_atoms = mdtraj_topology.select(args.selection)
alchemical_region = AlchemicalRegion(alchemical_atoms=alchemical_atoms)
print('There are %d atoms in the alchemical region.' % len(alchemical_atoms))
# Benchmark
print('Benchmarking...')
benchmark(reference_system, alchemical_region, positions, nsteps=args.nsteps, timestep=1.0*unit.femtoseconds)
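# Illustrative driver sketch for the argparse entry point above. The PDB path is a
# placeholder; sys.argv is patched because benchmark_alchemy_from_pdb() parses it directly.
def _example_run_pdb_benchmark(pdb_path='input.pdb', nsteps=500):
    """Call benchmark_alchemy_from_pdb() programmatically (hypothetical helper)."""
    import sys
    sys.argv = ['benchmark_alchemy_from_pdb', '--pdb', pdb_path, '--nsteps', str(nsteps)]
    benchmark_alchemy_from_pdb()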
def overlap_check(reference_system, alchemical_system, positions, nsteps=50, nsamples=200,
cached_trajectory_filename=None, name=""):
"""
Test overlap between reference system and alchemical system by running a short simulation.
Parameters
----------
reference_system : openmm.System
The reference System object to compare with.
alchemical_system : openmm.System
Alchemically-modified system.
    positions : n_particles x 3 array-like of openmm.unit.Quantity
The initial positions (units of distance).
nsteps : int, optional
Number of molecular dynamics steps between samples (default is 50).
nsamples : int, optional
        Number of samples to collect (default is 200).
cached_trajectory_filename : str, optional, default=None
If not None, this file will be used to cache intermediate results with pickle.
    name : str, optional, default=""
Name of test system being evaluated.
"""
temperature = 300.0 * unit.kelvin
pressure = 1.0 * unit.atmospheres
collision_rate = 5.0 / unit.picoseconds
timestep = 2.0 * unit.femtoseconds
kT = kB * temperature
# Minimize
positions = minimize(reference_system, positions)
# Add a barostat if possible.
reference_system = copy.deepcopy(reference_system)
if reference_system.usesPeriodicBoundaryConditions():
reference_system.addForce(openmm.MonteCarloBarostat(pressure, temperature))
# Create integrators.
reference_integrator = openmm.LangevinIntegrator(temperature, collision_rate, timestep)
alchemical_integrator = openmm.VerletIntegrator(timestep)
# Create contexts.
reference_context = create_context(reference_system, reference_integrator)
alchemical_context = create_context(alchemical_system, alchemical_integrator)
# Initialize data structure or load if from cache.
# du_n[n] is the potential energy difference of sample n.
if cached_trajectory_filename is not None:
try:
with open(cached_trajectory_filename, 'rb') as f:
data = pickle.load(f)
except FileNotFoundError:
data = dict(du_n=[])
# Create directory if it doesn't exist.
directory = os.path.dirname(cached_trajectory_filename)
if not os.path.exists(directory):
os.makedirs(directory)
else:
positions = data['positions']
reference_context.setPeriodicBoxVectors(*data['box_vectors'])
else:
data = dict(du_n=[])
# Collect simulation data.
iteration = len(data['du_n'])
reference_context.setPositions(positions)
print()
for sample in range(iteration, nsamples):
print('\rSample {}/{}'.format(sample+1, nsamples), end='')
sys.stdout.flush()
# Run dynamics.
reference_integrator.step(nsteps)
# Get reference energies.
reference_state = reference_context.getState(getEnergy=True, getPositions=True)
reference_potential = reference_state.getPotentialEnergy()
if np.isnan(reference_potential/kT):
raise Exception("Reference potential is NaN")
# Get alchemical energies.
alchemical_context.setPeriodicBoxVectors(*reference_state.getPeriodicBoxVectors())
alchemical_context.setPositions(reference_state.getPositions(asNumpy=True))
alchemical_state = alchemical_context.getState(getEnergy=True)
alchemical_potential = alchemical_state.getPotentialEnergy()
if np.isnan(alchemical_potential/kT):
raise Exception("Alchemical potential is NaN")
# Update and cache data.
data['du_n'].append((alchemical_potential - reference_potential) / kT)
if cached_trajectory_filename is not None:
# Save only last iteration positions and vectors.
data['positions'] = reference_state.getPositions()
data['box_vectors'] = reference_state.getPeriodicBoxVectors()
with open(cached_trajectory_filename, 'wb') as f:
pickle.dump(data, f)
    # Discard data up to equilibration and subsample.
du_n = np.array(data['du_n'])
from pymbar import timeseries, EXP
t0, g, Neff = timeseries.detectEquilibration(du_n)
indices = timeseries.subsampleCorrelatedData(du_n, g=g)
du_n = du_n[indices]
# Compute statistics.
DeltaF, dDeltaF = EXP(du_n)
# Raise an exception if the error is larger than 3kT.
MAX_DEVIATION = 3.0 # kT
report = ('\nDeltaF = {:12.3f} +- {:12.3f} kT ({:3.2f} samples, g = {:3.1f}); '
'du mean {:.3f} kT stddev {:.3f} kT').format(DeltaF, dDeltaF, Neff, g, du_n.mean(), du_n.std())
print(report)
if dDeltaF > MAX_DEVIATION:
raise Exception(report)
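# Minimal sketch of the statistics step used in overlap_check(): the pymbar EXP
# (Zwanzig) estimator applied to synthetic, uncorrelated energy differences in kT.
# The helper name and the numbers are illustrative only.
def _example_exp_estimate(n_samples=1000):
    """Return (DeltaF, dDeltaF) in kT for synthetic du_n samples (hypothetical helper)."""
    from pymbar import EXP
    du_n = np.random.normal(loc=2.0, scale=0.5, size=n_samples)
    return EXP(du_n)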
def rstyle(ax):
"""Styles x,y axes to appear like ggplot2
Must be called after all plot and axis manipulation operations have been
carried out (needs to know final tick spacing)
From:
http://nbviewer.ipython.org/github/wrobstory/climatic/blob/master/examples/ggplot_styling_for_matplotlib.ipynb
"""
import pylab
import matplotlib
import matplotlib.pyplot as plt
#Set the style of the major and minor grid lines, filled blocks
ax.grid(True, 'major', color='w', linestyle='-', linewidth=1.4)
ax.grid(True, 'minor', color='0.99', linestyle='-', linewidth=0.7)
ax.patch.set_facecolor('0.90')
ax.set_axisbelow(True)
#Set minor tick spacing to 1/2 of the major ticks
ax.xaxis.set_minor_locator((pylab.MultipleLocator((plt.xticks()[0][1] - plt.xticks()[0][0]) / 2.0)))
ax.yaxis.set_minor_locator((pylab.MultipleLocator((plt.yticks()[0][1] - plt.yticks()[0][0]) / 2.0)))
#Remove axis border
for child in ax.get_children():
if isinstance(child, matplotlib.spines.Spine):
child.set_alpha(0)
#Restyle the tick lines
for line in ax.get_xticklines() + ax.get_yticklines():
line.set_markersize(5)
line.set_color("gray")
line.set_markeredgewidth(1.4)
#Remove the minor tick lines
for line in (ax.xaxis.get_ticklines(minor=True) +
ax.yaxis.get_ticklines(minor=True)):
line.set_markersize(0)
#Only show bottom left ticks, pointing out of axis
plt.rcParams['xtick.direction'] = 'out'
plt.rcParams['ytick.direction'] = 'out'
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
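# Small usage sketch for rstyle(): it must be applied after all plotting calls so the
# final tick spacing is known. The helper name and plotted data are placeholders.
def _example_rstyle_plot():
    """Make a throwaway styled plot (hypothetical helper)."""
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot(range(10), [x ** 2 for x in range(10)], 'k.')
    rstyle(ax)
    return fig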
def lambda_trace(reference_system, alchemical_regions, positions, nsteps=100):
"""
Compute potential energy as a function of lambda.
"""
# Create a factory to produce alchemical intermediates.
factory = AbsoluteAlchemicalFactory()
alchemical_system = factory.create_alchemical_system(reference_system, alchemical_regions)
alchemical_state = AlchemicalState.from_system(alchemical_system)
# Take equally-sized steps.
delta = 1.0 / nsteps
# Compute unmodified energy.
u_original = compute_energy(reference_system, positions)
# Scan through lambda values.
lambda_i = np.zeros([nsteps+1], np.float64) # lambda values for u_i
# u_i[i] is the potential energy for lambda_i[i]
u_i = unit.Quantity(np.zeros([nsteps+1], np.float64), unit.kilocalories_per_mole)
for i in range(nsteps+1):
lambda_i[i] = 1.0-i*delta
alchemical_state.set_alchemical_parameters(lambda_i[i])
alchemical_state.apply_to_system(alchemical_system)
u_i[i] = compute_energy(alchemical_system, positions)
logger.info("{:12.9f} {:24.8f} kcal/mol".format(lambda_i[i], u_i[i] / GLOBAL_ENERGY_UNIT))
# Write figure as PDF.
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
with PdfPages('lambda-trace.pdf') as pdf:
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)
plt.plot(1, u_original / unit.kilocalories_per_mole, 'ro', label='unmodified')
plt.plot(lambda_i, u_i / unit.kilocalories_per_mole, 'k.', label='alchemical')
plt.title('T4 lysozyme L99A + p-xylene : AMBER96 + OBC GBSA')
plt.ylabel('potential (kcal/mol)')
plt.xlabel('lambda')
ax.legend()
rstyle(ax)
pdf.savefig() # saves the current figure into a pdf page
plt.close()
def generate_trace(test_system):
    # Build an AlchemicalRegion from the ligand atoms so the call matches lambda_trace's signature.
    alchemical_region = AlchemicalRegion(alchemical_atoms=test_system['ligand_atoms'])
    lambda_trace(test_system['test'].system, alchemical_region, test_system['test'].positions)
# =============================================================================
# TEST ALCHEMICAL FACTORY SUITE
# =============================================================================
def test_resolve_alchemical_region():
"""Test the method AbsoluteAlchemicalFactory._resolve_alchemical_region."""
test_cases = [
(testsystems.AlanineDipeptideVacuum(), range(22), 9, 36, 48),
(testsystems.AlanineDipeptideVacuum(), range(11, 22), 4, 21, 31),
(testsystems.LennardJonesCluster(), range(27), 0, 0, 0)
]
for i, (test_case, atoms, n_bonds, n_angles, n_torsions) in enumerate(test_cases):
system = test_case.system
# Default arguments are converted to empty list.
alchemical_region = AlchemicalRegion(alchemical_atoms=atoms)
resolved_region = AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
for region in ['bonds', 'angles', 'torsions']:
assert getattr(resolved_region, 'alchemical_' + region) == set()
# Numpy arrays are converted to sets.
alchemical_region = AlchemicalRegion(alchemical_atoms=np.array(atoms),
alchemical_bonds=np.array(range(n_bonds)),
alchemical_angles=np.array(range(n_angles)),
alchemical_torsions=np.array(range(n_torsions)))
resolved_region = AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
for region in ['atoms', 'bonds', 'angles', 'torsions']:
assert isinstance(getattr(resolved_region, 'alchemical_' + region), frozenset)
# Bonds, angles and torsions are inferred correctly.
alchemical_region = AlchemicalRegion(alchemical_atoms=atoms, alchemical_bonds=True,
alchemical_angles=True, alchemical_torsions=True)
resolved_region = AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
for j, region in enumerate(['bonds', 'angles', 'torsions']):
assert len(getattr(resolved_region, 'alchemical_' + region)) == test_cases[i][j+2]
    # An exception is raised if indices are not part of the system.
alchemical_region = AlchemicalRegion(alchemical_atoms=[10000000])
with nose.tools.assert_raises(ValueError):
AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
# An exception is raised if nothing is defined.
alchemical_region = AlchemicalRegion()
with nose.tools.assert_raises(ValueError):
AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
class TestAbsoluteAlchemicalFactory(object):
"""Test AbsoluteAlchemicalFactory class."""
@classmethod
def setup_class(cls):
"""Create test systems and shared objects."""
cls.define_systems()
cls.define_regions()
cls.generate_cases()
@classmethod
def define_systems(cls):
"""Create shared test systems in cls.test_systems for the test suite."""
cls.test_systems = dict()
# Basic test systems: Lennard-Jones and water particles only.
# Test also dispersion correction and switch off ("on" values
# for these options are tested in HostGuestExplicit system).
cls.test_systems['LennardJonesCluster'] = testsystems.LennardJonesCluster()
cls.test_systems['LennardJonesFluid with dispersion correction'] = \
testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=True)
cls.test_systems['TIP3P WaterBox with reaction field, no switch, no dispersion correction'] = \
testsystems.WaterBox(dispersion_correction=False, switch=False, nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['TIP4P-EW WaterBox and NaCl with PME'] = \
testsystems.WaterBox(nonbondedMethod=openmm.app.PME, model='tip4pew', ionic_strength=200*unit.millimolar)
# Vacuum and implicit.
cls.test_systems['AlanineDipeptideVacuum'] = testsystems.AlanineDipeptideVacuum()
cls.test_systems['AlanineDipeptideImplicit'] = testsystems.AlanineDipeptideImplicit()
cls.test_systems['TolueneImplicitOBC2'] = testsystems.TolueneImplicitOBC2()
cls.test_systems['TolueneImplicitGBn'] = testsystems.TolueneImplicitGBn()
# Explicit test system: PME and CutoffPeriodic.
#cls.test_systems['AlanineDipeptideExplicit with CutoffPeriodic'] = \
# testsystems.AlanineDipeptideExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['HostGuestExplicit with PME'] = \
testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.PME)
cls.test_systems['HostGuestExplicit with CutoffPeriodic'] = \
testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
@classmethod
def define_regions(cls):
"""Create shared AlchemicalRegions for test systems in cls.test_regions."""
cls.test_regions = dict()
cls.test_regions['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(2))
cls.test_regions['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10))
cls.test_regions['Toluene'] = AlchemicalRegion(alchemical_atoms=range(6)) # Only partially modified.
cls.test_regions['AlanineDipeptide'] = AlchemicalRegion(alchemical_atoms=range(22))
cls.test_regions['HostGuestExplicit'] = AlchemicalRegion(alchemical_atoms=range(126, 156))
cls.test_regions['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(0,3))
# Modify ions.
for atom in cls.test_systems['TIP4P-EW WaterBox and NaCl with PME'].topology.atoms():
if atom.name in ['Na', 'Cl']:
cls.test_regions['TIP4P-EW WaterBox and NaCl'] = AlchemicalRegion(alchemical_atoms=range(atom.index, atom.index+1))
break
@classmethod
def generate_cases(cls):
"""Generate all test cases in cls.test_cases combinatorially."""
cls.test_cases = dict()
direct_space_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='direct-space',
alchemical_rf_treatment='switched')
exact_pme_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='exact')
# We generate all possible combinations of annihilate_sterics/electrostatics
# for each test system. We also annihilate bonds, angles and torsions every
        # 3 test cases so that we test it at least once for each test system and for
# each combination of annihilate_sterics/electrostatics.
n_test_cases = 0
for test_system_name, test_system in cls.test_systems.items():
# Find standard alchemical region.
for region_name, region in cls.test_regions.items():
if region_name in test_system_name:
break
assert region_name in test_system_name, test_system_name
# Find nonbonded method.
force_idx, nonbonded_force = forces.find_forces(test_system.system, openmm.NonbondedForce, only_one=True)
nonbonded_method = nonbonded_force.getNonbondedMethod()
# Create all combinations of annihilate_sterics/electrostatics.
for annihilate_sterics, annihilate_electrostatics in itertools.product((True, False), repeat=2):
# Create new region that we can modify.
test_region = region._replace(annihilate_sterics=annihilate_sterics,
annihilate_electrostatics=annihilate_electrostatics)
# Create test name.
test_case_name = test_system_name[:]
if annihilate_sterics:
test_case_name += ', annihilated sterics'
if annihilate_electrostatics:
test_case_name += ', annihilated electrostatics'
# Annihilate bonds and angles every three test_cases.
if n_test_cases % 3 == 0:
test_region = test_region._replace(alchemical_bonds=True, alchemical_angles=True,
alchemical_torsions=True)
test_case_name += ', annihilated bonds, angles and torsions'
# Add different softcore parameters every five test_cases.
if n_test_cases % 5 == 0:
test_region = test_region._replace(softcore_alpha=1.0, softcore_beta=1.0, softcore_a=1.0, softcore_b=1.0,
softcore_c=1.0, softcore_d=1.0, softcore_e=1.0, softcore_f=1.0)
test_case_name += ', modified softcore parameters'
# Pre-generate alchemical system.
alchemical_system = direct_space_factory.create_alchemical_system(test_system.system, test_region)
# Add test case.
cls.test_cases[test_case_name] = (test_system, alchemical_system, test_region)
n_test_cases += 1
# If we don't use softcore electrostatics and we annihilate charges
# we can test also exact PME treatment. We don't increase n_test_cases
# purposely to keep track of which tests are added above.
if (test_region.softcore_beta == 0.0 and annihilate_electrostatics and
nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald]):
alchemical_system = exact_pme_factory.create_alchemical_system(test_system.system, test_region)
test_case_name += ', exact PME'
cls.test_cases[test_case_name] = (test_system, alchemical_system, test_region)
# If the test system uses reaction field replace reaction field
# of the reference system to allow comparisons.
if nonbonded_method == openmm.NonbondedForce.CutoffPeriodic:
forcefactories.replace_reaction_field(test_system.system, return_copy=False,
switch_width=direct_space_factory.switch_width)
def filter_cases(self, condition_func, max_number=None):
"""Return the list of test cases that satisfy condition_func(test_case_name)."""
if max_number is None:
max_number = len(self.test_cases)
test_cases = {}
for test_name, test_case in self.test_cases.items():
if condition_func(test_name):
test_cases[test_name] = test_case
if len(test_cases) >= max_number:
break
return test_cases
def test_split_force_groups(self):
"""Forces having different lambda variables should have a different force group."""
# Select 1 implicit, 1 explicit, and 1 exact PME explicit test case randomly.
test_cases = self.filter_cases(lambda x: 'Implicit' in x, max_number=1)
test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' in x, max_number=1))
test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' not in x, max_number=1))
for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():
f = partial(check_split_force_groups, alchemical_system)
f.description = "Testing force splitting among groups of {}".format(test_name)
yield f
def test_fully_interacting_energy(self):
"""Compare the energies of reference and fully interacting alchemical system."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(compare_system_energies, test_system.system,
alchemical_system, alchemical_region, test_system.positions)
f.description = "Testing fully interacting energy of {}".format(test_name)
yield f
def test_noninteracting_energy_components(self):
"""Check all forces annihilated/decoupled when their lambda variables are zero."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(check_noninteracting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Testing non-interacting energy of {}".format(test_name)
yield f
@attr('slow')
def test_fully_interacting_energy_components(self):
"""Test interacting state energy by force component."""
# This is a very expensive but very informative test. We can
# run this locally when test_fully_interacting_energies() fails.
test_cases = self.filter_cases(lambda x: 'Explicit' in x)
for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():
f = partial(check_interacting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Testing energy components of %s..." % test_name
yield f
@attr('slow')
def test_platforms(self):
"""Test interacting and noninteracting energies on all platforms."""
global GLOBAL_ALCHEMY_PLATFORM
old_global_platform = GLOBAL_ALCHEMY_PLATFORM
# Do not repeat tests on the platform already tested.
if old_global_platform is None:
default_platform_name = utils.get_fastest_platform().getName()
else:
default_platform_name = old_global_platform.getName()
platforms = [platform for platform in utils.get_available_platforms()
if platform.getName() != default_platform_name]
# Test interacting and noninteracting energies on all platforms.
for platform in platforms:
GLOBAL_ALCHEMY_PLATFORM = platform
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(compare_system_energies, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Test fully interacting energy of {} on {}".format(test_name, platform.getName())
yield f
f = partial(check_noninteracting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Test non-interacting energy of {} on {}".format(test_name, platform.getName())
yield f
# Restore global platform
GLOBAL_ALCHEMY_PLATFORM = old_global_platform
@attr('slow')
def test_overlap(self):
"""Tests overlap between reference and alchemical systems."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
#cached_trajectory_filename = os.path.join(os.environ['HOME'], '.cache', 'alchemy', 'tests',
# test_name + '.pickle')
cached_trajectory_filename = None
f = partial(overlap_check, test_system.system, alchemical_system, test_system.positions,
cached_trajectory_filename=cached_trajectory_filename, name=test_name)
f.description = "Testing reference/alchemical overlap for {}".format(test_name)
yield f
class TestMultiRegionAbsoluteAlchemicalFactory(TestAbsoluteAlchemicalFactory):
"""Test AbsoluteAlchemicalFactory class using multiple regions."""
@classmethod
def define_systems(cls):
"""Create shared test systems in cls.test_systems for the test suite."""
cls.test_systems = dict()
# Basic test systems: Lennard-Jones and water particles only.
# Test also dispersion correction and switch off ("on" values
# for these options are tested in HostGuestExplicit system).
cls.test_systems['LennardJonesCluster'] = testsystems.LennardJonesCluster()
cls.test_systems['LennardJonesFluid with dispersion correction'] = \
testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=True)
cls.test_systems['TIP3P WaterBox with reaction field, no switch, no dispersion correction'] = \
testsystems.WaterBox(dispersion_correction=False, switch=False, nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['HostGuestExplicit with PME'] = \
testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.PME)
cls.test_systems['HostGuestExplicit with CutoffPeriodic'] = \
testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
@classmethod
def define_regions(cls):
"""Create shared AlchemicalRegions for test systems in cls.test_regions."""
cls.test_region_zero = dict()
cls.test_region_one = dict()
cls.test_region_two = dict()
cls.test_region_zero['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(2), name='zero')
cls.test_region_one['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(2,4), name='one')
cls.test_region_two['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(4,6), name='two')
cls.test_region_zero['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10), name='zero')
cls.test_region_one['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10,20), name='one')
cls.test_region_two['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(20,30), name='two')
cls.test_region_zero['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(3), name='zero')
cls.test_region_one['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(3,6), name='one')
cls.test_region_two['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(6,9), name='two')
#Three regions push HostGuest system beyond 32 force groups
cls.test_region_zero['HostGuestExplicit'] = AlchemicalRegion(alchemical_atoms=range(126, 156), name='zero')
cls.test_region_one['HostGuestExplicit'] = AlchemicalRegion(alchemical_atoms=range(156,160), name='one')
cls.test_region_two['HostGuestExplicit'] = None
@classmethod
def generate_cases(cls):
"""Generate all test cases in cls.test_cases combinatorially."""
cls.test_cases = dict()
direct_space_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='direct-space',
alchemical_rf_treatment='switched')
exact_pme_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='exact')
# We generate all possible combinations of annihilate_sterics/electrostatics
# for each test system. We also annihilate bonds, angles and torsions every
        # 3 test cases so that we test it at least once for each test system and for
# each combination of annihilate_sterics/electrostatics.
n_test_cases = 0
for test_system_name, test_system in cls.test_systems.items():
# Find standard alchemical region zero.
for region_name_zero, region_zero in cls.test_region_zero.items():
if region_name_zero in test_system_name:
break
assert region_name_zero in test_system_name, test_system_name
# Find standard alchemical region one.
for region_name_one, region_one in cls.test_region_one.items():
if region_name_one in test_system_name:
break
assert region_name_one in test_system_name, test_system_name
# Find standard alchemical region two.
for region_name_two, region_two in cls.test_region_two.items():
if region_name_two in test_system_name:
break
assert region_name_two in test_system_name, test_system_name
assert region_name_zero == region_name_one and region_name_one == region_name_two
#We only want two regions for HostGuest or we get too many force groups
if 'HostGuestExplicit' in region_name_one:
test_regions = [region_zero, region_one]
else:
test_regions = [region_zero, region_one, region_two]
# Find nonbonded method.
force_idx, nonbonded_force = forces.find_forces(test_system.system, openmm.NonbondedForce, only_one=True)
nonbonded_method = nonbonded_force.getNonbondedMethod()
# Create all combinations of annihilate_sterics/electrostatics.
for annihilate_sterics, annihilate_electrostatics in itertools.product((True, False), repeat=2):
# Create new region that we can modify.
for i, test_region in enumerate(test_regions):
test_regions[i] = test_region._replace(annihilate_sterics=annihilate_sterics,
annihilate_electrostatics=annihilate_electrostatics)
# Create test name.
test_case_name = test_system_name[:]
if annihilate_sterics:
test_case_name += ', annihilated sterics'
if annihilate_electrostatics:
test_case_name += ', annihilated electrostatics'
# Annihilate bonds and angles every three test_cases.
if n_test_cases % 3 == 0:
for i, test_region in enumerate(test_regions):
test_regions[i] = test_region._replace(alchemical_bonds=True, alchemical_angles=True,
alchemical_torsions=True)
test_case_name += ', annihilated bonds, angles and torsions'
# Add different softcore parameters every five test_cases.
if n_test_cases % 5 == 0:
for i, test_region in enumerate(test_regions):
test_regions[i] = test_region._replace(softcore_alpha=1.0, softcore_beta=1.0, softcore_a=1.0, softcore_b=1.0,
softcore_c=1.0, softcore_d=1.0, softcore_e=1.0, softcore_f=1.0)
test_case_name += ', modified softcore parameters'
#region_interactions = frozenset(itertools.combinations(range(len(test_regions)), 2))
# Pre-generate alchemical system.
alchemical_system = direct_space_factory.create_alchemical_system(test_system.system, alchemical_regions = test_regions)
# Add test case.
cls.test_cases[test_case_name] = (test_system, alchemical_system, test_regions)
n_test_cases += 1
# If we don't use softcore electrostatics and we annihilate charges
# we can test also exact PME treatment. We don't increase n_test_cases
# purposely to keep track of which tests are added above.
if (test_regions[1].softcore_beta == 0.0 and annihilate_electrostatics and
nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald]):
alchemical_system = exact_pme_factory.create_alchemical_system(test_system.system, alchemical_regions = test_regions)
test_case_name += ', exact PME'
cls.test_cases[test_case_name] = (test_system, alchemical_system, test_regions)
# If the test system uses reaction field replace reaction field
# of the reference system to allow comparisons.
if nonbonded_method == openmm.NonbondedForce.CutoffPeriodic:
forcefactories.replace_reaction_field(test_system.system, return_copy=False,
switch_width=direct_space_factory.switch_width)
def test_split_force_groups(self):
"""Forces having different lambda variables should have a different force group."""
# Select 1 implicit, 1 explicit, and 1 exact PME explicit test case randomly.
test_cases = self.filter_cases(lambda x: 'Implicit' in x, max_number=1)
test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' in x, max_number=1))
test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' not in x, max_number=1))
for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():
region_names = []
for region in alchemical_region:
region_names.append(region.name)
f = partial(check_split_force_groups, alchemical_system, region_names)
f.description = "Testing force splitting among groups of {}".format(test_name)
yield f
def test_noninteracting_energy_components(self):
"""Check all forces annihilated/decoupled when their lambda variables are zero."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(check_multi_noninteracting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Testing non-interacting energy of {}".format(test_name)
yield f
@attr('slow')
def test_platforms(self):
"""Test interacting and noninteracting energies on all platforms."""
global GLOBAL_ALCHEMY_PLATFORM
old_global_platform = GLOBAL_ALCHEMY_PLATFORM
# Do not repeat tests on the platform already tested.
if old_global_platform is None:
default_platform_name = utils.get_fastest_platform().getName()
else:
default_platform_name = old_global_platform.getName()
platforms = [platform for platform in utils.get_available_platforms()
if platform.getName() != default_platform_name]
# Test interacting and noninteracting energies on all platforms.
for platform in platforms:
GLOBAL_ALCHEMY_PLATFORM = platform
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(compare_system_energies, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Test fully interacting energy of {} on {}".format(test_name, platform.getName())
yield f
f = partial(check_multi_noninteracting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Test non-interacting energy of {} on {}".format(test_name, platform.getName())
yield f
# Restore global platform
GLOBAL_ALCHEMY_PLATFORM = old_global_platform
@attr('slow')
def test_fully_interacting_energy_components(self):
"""Test interacting state energy by force component."""
# This is a very expensive but very informative test. We can
# run this locally when test_fully_interacting_energies() fails.
test_cases = self.filter_cases(lambda x: 'Explicit' in x)
for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():
f = partial(check_multi_interacting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Testing energy components of %s..." % test_name
yield f
class TestDispersionlessAlchemicalFactory(object):
"""
Only test overlap for dispersionless alchemical factory, since energy agreement
will be poor.
"""
@classmethod
def setup_class(cls):
"""Create test systems and shared objects."""
cls.define_systems()
cls.define_regions()
cls.generate_cases()
@classmethod
def define_systems(cls):
"""Create test systems and shared objects."""
cls.test_systems = dict()
cls.test_systems['LennardJonesFluid with dispersion correction'] = \
testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=True)
@classmethod
def define_regions(cls):
"""Create shared AlchemicalRegions for test systems in cls.test_regions."""
cls.test_regions = dict()
cls.test_regions['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10))
@classmethod
def generate_cases(cls):
"""Generate all test cases in cls.test_cases combinatorially."""
cls.test_cases = dict()
factory = AbsoluteAlchemicalFactory(disable_alchemical_dispersion_correction=True)
# We generate all possible combinations of annihilate_sterics/electrostatics
# for each test system. We also annihilate bonds, angles and torsions every
        # 3 test cases so that we test it at least once for each test system and for
# each combination of annihilate_sterics/electrostatics.
n_test_cases = 0
for test_system_name, test_system in cls.test_systems.items():
# Find standard alchemical region.
for region_name, region in cls.test_regions.items():
if region_name in test_system_name:
break
assert region_name in test_system_name
# Create all combinations of annihilate_sterics.
            for annihilate_sterics in (True, False):
region = region._replace(annihilate_sterics=annihilate_sterics,
annihilate_electrostatics=True)
# Create test name.
test_case_name = test_system_name[:]
if annihilate_sterics:
test_case_name += ', annihilated sterics'
# Pre-generate alchemical system
alchemical_system = factory.create_alchemical_system(test_system.system, region)
cls.test_cases[test_case_name] = (test_system, alchemical_system, region)
n_test_cases += 1
def test_overlap(self):
"""Tests overlap between reference and alchemical systems."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
#cached_trajectory_filename = os.path.join(os.environ['HOME'], '.cache', 'alchemy', 'tests',
# test_name + '.pickle')
cached_trajectory_filename = None
f = partial(overlap_check, test_system.system, alchemical_system, test_system.positions,
cached_trajectory_filename=cached_trajectory_filename, name=test_name)
f.description = "Testing reference/alchemical overlap for no alchemical dispersion {}".format(test_name)
yield f
@attr('slow')
class TestAbsoluteAlchemicalFactorySlow(TestAbsoluteAlchemicalFactory):
"""Test AbsoluteAlchemicalFactory class with a more comprehensive set of systems."""
@classmethod
def define_systems(cls):
"""Create test systems and shared objects."""
cls.test_systems = dict()
cls.test_systems['LennardJonesFluid without dispersion correction'] = \
testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=False)
cls.test_systems['DischargedWaterBox with reaction field, no switch, no dispersion correction'] = \
testsystems.DischargedWaterBox(dispersion_correction=False, switch=False,
nonbondedMethod=openmm.app.CutoffPeriodic)
        cls.test_systems['WaterBox with reaction field, no switch, dispersion correction'] = \
            testsystems.WaterBox(dispersion_correction=True, switch=False, nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['WaterBox with reaction field, switch, no dispersion correction'] = \
testsystems.WaterBox(dispersion_correction=False, switch=True, nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['WaterBox with PME, switch, dispersion correction'] = \
testsystems.WaterBox(dispersion_correction=True, switch=True, nonbondedMethod=openmm.app.PME)
# Big systems.
cls.test_systems['LysozymeImplicit'] = testsystems.LysozymeImplicit()
cls.test_systems['DHFRExplicit with reaction field'] = \
testsystems.DHFRExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['SrcExplicit with PME'] = \
testsystems.SrcExplicit(nonbondedMethod=openmm.app.PME)
cls.test_systems['SrcExplicit with reaction field'] = \
testsystems.SrcExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['SrcImplicit'] = testsystems.SrcImplicit()
@classmethod
def define_regions(cls):
super(TestAbsoluteAlchemicalFactorySlow, cls).define_regions()
cls.test_regions['WaterBox'] = AlchemicalRegion(alchemical_atoms=range(3))
cls.test_regions['LysozymeImplicit'] = AlchemicalRegion(alchemical_atoms=range(2603, 2621))
cls.test_regions['DHFRExplicit'] = AlchemicalRegion(alchemical_atoms=range(0, 2849))
cls.test_regions['Src'] = AlchemicalRegion(alchemical_atoms=range(0, 21))
# =============================================================================
# TEST ALCHEMICAL STATE
# =============================================================================
class TestAlchemicalState(object):
"""Test AlchemicalState compatibility with CompoundThermodynamicState."""
@classmethod
def setup_class(cls):
"""Create test systems and shared objects."""
alanine_vacuum = testsystems.AlanineDipeptideVacuum()
alanine_explicit = testsystems.AlanineDipeptideExplicit()
factory = AbsoluteAlchemicalFactory()
factory_exact_pme = AbsoluteAlchemicalFactory(alchemical_pme_treatment='exact')
cls.alanine_alchemical_atoms = list(range(22))
cls.alanine_test_system = alanine_explicit
# System with only lambda_sterics and lambda_electrostatics.
alchemical_region = AlchemicalRegion(alchemical_atoms=cls.alanine_alchemical_atoms)
alchemical_alanine_system = factory.create_alchemical_system(alanine_vacuum.system, alchemical_region)
cls.alanine_state = states.ThermodynamicState(alchemical_alanine_system,
temperature=300*unit.kelvin)
# System with lambda_sterics and lambda_electrostatics and exact PME treatment.
alchemical_alanine_system_exact_pme = factory_exact_pme.create_alchemical_system(alanine_explicit.system,
alchemical_region)
cls.alanine_state_exact_pme = states.ThermodynamicState(alchemical_alanine_system_exact_pme,
temperature=300*unit.kelvin,
pressure=1.0*unit.atmosphere)
# System with all lambdas.
alchemical_region = AlchemicalRegion(alchemical_atoms=cls.alanine_alchemical_atoms,
alchemical_torsions=True, alchemical_angles=True,
alchemical_bonds=True)
fully_alchemical_alanine_system = factory.create_alchemical_system(alanine_vacuum.system, alchemical_region)
cls.full_alanine_state = states.ThermodynamicState(fully_alchemical_alanine_system,
temperature=300*unit.kelvin)
# Test case: (ThermodynamicState, defined_lambda_parameters)
cls.test_cases = [
(cls.alanine_state, {'lambda_sterics', 'lambda_electrostatics'}),
(cls.alanine_state_exact_pme, {'lambda_sterics', 'lambda_electrostatics'}),
(cls.full_alanine_state, {'lambda_sterics', 'lambda_electrostatics', 'lambda_bonds',
'lambda_angles', 'lambda_torsions'})
]
@staticmethod
def test_constructor():
"""Test AlchemicalState constructor behave as expected."""
# Raise an exception if parameter is not recognized.
with nose.tools.assert_raises(AlchemicalStateError):
AlchemicalState(lambda_electro=1.0)
# Properties are initialized correctly.
test_cases = [{},
{'lambda_sterics': 0.5, 'lambda_angles': 0.5},
{'lambda_electrostatics': 1.0}]
for test_kwargs in test_cases:
alchemical_state = AlchemicalState(**test_kwargs)
for parameter in AlchemicalState._get_controlled_parameters():
if parameter in test_kwargs:
assert getattr(alchemical_state, parameter) == test_kwargs[parameter]
else:
assert getattr(alchemical_state, parameter) is None
def test_from_system_constructor(self):
"""Test AlchemicalState.from_system constructor."""
# A non-alchemical system raises an error.
with nose.tools.assert_raises(AlchemicalStateError):
AlchemicalState.from_system(testsystems.AlanineDipeptideVacuum().system)
# Valid parameters are 1.0 by default in AbsoluteAlchemicalFactory,
# and all the others must be None.
for state, defined_lambdas in self.test_cases:
alchemical_state = AlchemicalState.from_system(state.system)
for parameter in AlchemicalState._get_controlled_parameters():
property_value = getattr(alchemical_state, parameter)
if parameter in defined_lambdas:
assert property_value == 1.0, '{}: {}'.format(parameter, property_value)
else:
assert property_value is None, '{}: {}'.format(parameter, property_value)
@staticmethod
def test_equality_operator():
"""Test equality operator between AlchemicalStates."""
state1 = AlchemicalState(lambda_electrostatics=1.0)
state2 = AlchemicalState(lambda_electrostatics=1.0)
state3 = AlchemicalState(lambda_electrostatics=0.9)
state4 = AlchemicalState(lambda_electrostatics=0.9, lambda_sterics=1.0)
assert state1 == state2
assert state2 != state3
assert state3 != state4
def test_apply_to_system(self):
"""Test method AlchemicalState.apply_to_system()."""
# Do not modify cached test cases.
test_cases = copy.deepcopy(self.test_cases)
# Test precondition: all parameters are 1.0.
for state, defined_lambdas in test_cases:
kwargs = dict.fromkeys(defined_lambdas, 1.0)
alchemical_state = AlchemicalState(**kwargs)
assert alchemical_state == AlchemicalState.from_system(state.system)
# apply_to_system() modifies the state.
for state, defined_lambdas in test_cases:
kwargs = dict.fromkeys(defined_lambdas, 0.5)
alchemical_state = AlchemicalState(**kwargs)
system = state.system
alchemical_state.apply_to_system(system)
system_state = AlchemicalState.from_system(system)
assert system_state == alchemical_state
# Raise an error if an extra parameter is defined in the system.
for state, defined_lambdas in test_cases:
defined_lambdas = set(defined_lambdas) # Copy
defined_lambdas.pop() # Remove one element.
kwargs = dict.fromkeys(defined_lambdas, 1.0)
alchemical_state = AlchemicalState(**kwargs)
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.apply_to_system(state.system)
# Raise an error if an extra parameter is defined in the state.
for state, defined_lambdas in test_cases:
if 'lambda_bonds' in defined_lambdas:
continue
defined_lambdas = set(defined_lambdas) # Copy
defined_lambdas.add('lambda_bonds') # Add extra parameter.
kwargs = dict.fromkeys(defined_lambdas, 1.0)
alchemical_state = AlchemicalState(**kwargs)
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.apply_to_system(state.system)
def test_check_system_consistency(self):
"""Test method AlchemicalState.check_system_consistency()."""
# A system is consistent with itself.
alchemical_state = AlchemicalState.from_system(self.alanine_state.system)
alchemical_state.check_system_consistency(self.alanine_state.system)
# Raise error if system has MORE lambda parameters.
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.check_system_consistency(self.full_alanine_state.system)
# Raise error if system has LESS lambda parameters.
alchemical_state = AlchemicalState.from_system(self.full_alanine_state.system)
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.check_system_consistency(self.alanine_state.system)
# Raise error if system has different lambda values.
alchemical_state.lambda_bonds = 0.5
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.check_system_consistency(self.full_alanine_state.system)
def test_apply_to_context(self):
"""Test method AlchemicalState.apply_to_context."""
integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
# Raise error if Context has more parameters than AlchemicalState.
alchemical_state = AlchemicalState.from_system(self.alanine_state.system)
context = self.full_alanine_state.create_context(copy.deepcopy(integrator))
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.apply_to_context(context)
del context
# Raise error if AlchemicalState is applied to a Context with missing parameters.
alchemical_state = AlchemicalState.from_system(self.full_alanine_state.system)
context = self.alanine_state.create_context(copy.deepcopy(integrator))
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.apply_to_context(context)
del context
# Correctly sets Context's parameters.
for state in [self.full_alanine_state, self.alanine_state_exact_pme]:
alchemical_state = AlchemicalState.from_system(state.system)
context = state.create_context(copy.deepcopy(integrator))
alchemical_state.set_alchemical_parameters(0.5)
alchemical_state.apply_to_context(context)
for parameter_name, parameter_value in context.getParameters().items():
if parameter_name in alchemical_state._parameters:
assert parameter_value == 0.5
del context
def test_standardize_system(self):
"""Test method AlchemicalState.standardize_system."""
test_cases = [self.full_alanine_state, self.alanine_state_exact_pme]
for state in test_cases:
# First create a non-standard system.
system = copy.deepcopy(state.system)
alchemical_state = AlchemicalState.from_system(system)
alchemical_state.set_alchemical_parameters(0.5)
alchemical_state.apply_to_system(system)
# Test pre-condition: The state of the System has been changed.
assert AlchemicalState.from_system(system).lambda_electrostatics == 0.5
# Check that _standardize_system() sets all parameters back to 1.0.
alchemical_state._standardize_system(system)
standard_alchemical_state = AlchemicalState.from_system(system)
assert alchemical_state != standard_alchemical_state
for parameter_name, value in alchemical_state._parameters.items():
standard_value = getattr(standard_alchemical_state, parameter_name)
assert (value is None and standard_value is None) or (standard_value == 1.0)
def test_find_force_groups_to_update(self):
"""Test method AlchemicalState._find_force_groups_to_update."""
test_cases = [self.full_alanine_state, self.alanine_state_exact_pme]
for thermodynamic_state in test_cases:
system = copy.deepcopy(thermodynamic_state.system)
alchemical_state = AlchemicalState.from_system(system)
alchemical_state2 = copy.deepcopy(alchemical_state)
# Each lambda should be separated in its own force group.
expected_force_groups = {}
for force, lambda_name, _ in AlchemicalState._get_system_controlled_parameters(
system, parameters_name_suffix=None):
expected_force_groups[lambda_name] = force.getForceGroup()
integrator = openmm.VerletIntegrator(2.0*unit.femtoseconds)
context = create_context(system, integrator)
# No force group should be updated if we don't move.
assert alchemical_state._find_force_groups_to_update(context, alchemical_state2, memo={}) == set()
# Change the lambdas one by one and check that the method
# recognize that the force group energy must be updated.
for lambda_name in AlchemicalState._get_controlled_parameters():
# Check that the system defines the global variable.
if getattr(alchemical_state, lambda_name) is None:
continue
# Change the current state.
setattr(alchemical_state2, lambda_name, 0.0)
force_group = expected_force_groups[lambda_name]
assert alchemical_state._find_force_groups_to_update(context, alchemical_state2, memo={}) == {force_group}
setattr(alchemical_state2, lambda_name, 1.0) # Reset current state.
del context
def test_alchemical_functions(self):
"""Test alchemical variables and functions work correctly."""
system = copy.deepcopy(self.full_alanine_state.system)
alchemical_state = AlchemicalState.from_system(system)
# Add two alchemical variables to the state.
alchemical_state.set_function_variable('lambda', 1.0)
alchemical_state.set_function_variable('lambda2', 0.5)
assert alchemical_state.get_function_variable('lambda') == 1.0
assert alchemical_state.get_function_variable('lambda2') == 0.5
# Cannot call an alchemical variable as a supported parameter.
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.set_function_variable('lambda_sterics', 0.5)
# Assign string alchemical functions to parameters.
alchemical_state.lambda_sterics = AlchemicalFunction('lambda')
alchemical_state.lambda_electrostatics = AlchemicalFunction('(lambda + lambda2) / 2.0')
assert alchemical_state.lambda_sterics == 1.0
assert alchemical_state.lambda_electrostatics == 0.75
# Setting alchemical variables updates alchemical parameter as well.
alchemical_state.set_function_variable('lambda2', 0)
assert alchemical_state.lambda_electrostatics == 0.5
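    # Illustrative sketch of the function-variable mechanism exercised above: tie several
    # lambdas to a single master variable. The name 'master', the chosen values, and the
    # helper name are arbitrary; the underscore prefix keeps it out of test collection.
    def _example_master_lambda_schedule(self):
        alchemical_state = AlchemicalState.from_system(self.full_alanine_state.system)
        alchemical_state.set_function_variable('master', 0.5)
        alchemical_state.lambda_sterics = AlchemicalFunction('master')
        alchemical_state.lambda_electrostatics = AlchemicalFunction('master / 2')
        assert alchemical_state.lambda_sterics == 0.5
        assert alchemical_state.lambda_electrostatics == 0.25
        return alchemical_state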
# ---------------------------------------------------
# Integration tests with CompoundThermodynamicStates
# ---------------------------------------------------
def test_constructor_compound_state(self):
"""The AlchemicalState is set on construction of the CompoundState."""
test_cases = copy.deepcopy(self.test_cases)
# Test precondition: the original systems are in fully interacting state.
for state, defined_lambdas in test_cases:
system_state = AlchemicalState.from_system(state.system)
kwargs = dict.fromkeys(defined_lambdas, 1.0)
assert system_state == AlchemicalState(**kwargs)
# CompoundThermodynamicState set the system state in constructor.
for state, defined_lambdas in test_cases:
kwargs = dict.fromkeys(defined_lambdas, 0.5)
alchemical_state = AlchemicalState(**kwargs)
compound_state = states.CompoundThermodynamicState(state, [alchemical_state])
system_state = AlchemicalState.from_system(compound_state.system)
assert system_state == alchemical_state
def test_lambda_properties_compound_state(self):
"""Lambda properties setters/getters work in the CompoundState system."""
test_cases = copy.deepcopy(self.test_cases)
for state, defined_lambdas in test_cases:
alchemical_state = AlchemicalState.from_system(state.system)
compound_state = states.CompoundThermodynamicState(state, [alchemical_state])
# Defined properties can be assigned and read.
for parameter_name in defined_lambdas:
assert getattr(compound_state, parameter_name) == 1.0
setattr(compound_state, parameter_name, 0.5)
assert getattr(compound_state, parameter_name) == 0.5
# System global variables are updated correctly
system_alchemical_state = AlchemicalState.from_system(compound_state.system)
for parameter_name in defined_lambdas:
assert getattr(system_alchemical_state, parameter_name) == 0.5
# Same for parameters setters.
compound_state.set_alchemical_parameters(1.0)
system_alchemical_state = AlchemicalState.from_system(compound_state.system)
for parameter_name in defined_lambdas:
assert getattr(compound_state, parameter_name) == 1.0
assert getattr(system_alchemical_state, parameter_name) == 1.0
# Same for alchemical variables setters.
compound_state.set_function_variable('lambda', 0.25)
for parameter_name in defined_lambdas:
setattr(compound_state, parameter_name, AlchemicalFunction('lambda'))
system_alchemical_state = AlchemicalState.from_system(compound_state.system)
for parameter_name in defined_lambdas:
assert getattr(compound_state, parameter_name) == 0.25
assert getattr(system_alchemical_state, parameter_name) == 0.25
def test_set_system_compound_state(self):
"""Setting inconsistent system in compound state raise errors."""
alanine_state = copy.deepcopy(self.alanine_state)
alchemical_state = AlchemicalState.from_system(alanine_state.system)
compound_state = states.CompoundThermodynamicState(alanine_state, [alchemical_state])
# We create an inconsistent state that has different parameters.
incompatible_state = copy.deepcopy(alchemical_state)
incompatible_state.lambda_electrostatics = 0.5
# Setting an inconsistent alchemical system raise an error.
system = compound_state.system
incompatible_state.apply_to_system(system)
with nose.tools.assert_raises(AlchemicalStateError):
compound_state.system = system
# Same for set_system when called with default arguments.
with nose.tools.assert_raises(AlchemicalStateError):
compound_state.set_system(system)
# This doesn't happen if we fix the state.
compound_state.set_system(system, fix_state=True)
assert AlchemicalState.from_system(compound_state.system) != incompatible_state
def test_method_compatibility_compound_state(self):
"""Compatibility between states is handled correctly in compound state."""
test_cases = [self.alanine_state, self.alanine_state_exact_pme]
# An incompatible state has a different set of defined lambdas.
full_alanine_state = copy.deepcopy(self.full_alanine_state)
alchemical_state_incompatible = AlchemicalState.from_system(full_alanine_state.system)
compound_state_incompatible = states.CompoundThermodynamicState(full_alanine_state,
[alchemical_state_incompatible])
for state in test_cases:
state = copy.deepcopy(state)
alchemical_state = AlchemicalState.from_system(state.system)
compound_state = states.CompoundThermodynamicState(state, [alchemical_state])
# A compatible state has the same defined lambda parameters,
# but their values can be different.
alchemical_state_compatible = copy.deepcopy(alchemical_state)
assert alchemical_state.lambda_electrostatics != 0.5 # Test pre-condition.
alchemical_state_compatible.lambda_electrostatics = 0.5
compound_state_compatible = states.CompoundThermodynamicState(copy.deepcopy(state),
[alchemical_state_compatible])
# Test states compatibility.
assert compound_state.is_state_compatible(compound_state_compatible)
assert not compound_state.is_state_compatible(compound_state_incompatible)
# Test context compatibility.
integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
context = compound_state_compatible.create_context(copy.deepcopy(integrator))
assert compound_state.is_context_compatible(context)
context = compound_state_incompatible.create_context(copy.deepcopy(integrator))
assert not compound_state.is_context_compatible(context)
@staticmethod
def _check_compatibility(state1, state2, context_state1, is_compatible):
"""Check the compatibility of states and contexts between 2 states."""
# Compatibility should be commutative
assert state1.is_state_compatible(state2) is is_compatible
assert state2.is_state_compatible(state1) is is_compatible
# Test context incompatibility is commutative.
context_state2 = state2.create_context(openmm.VerletIntegrator(1.0*unit.femtosecond))
assert state2.is_context_compatible(context_state1) is is_compatible
assert state1.is_context_compatible(context_state2) is is_compatible
del context_state2
def test_method_reduced_potential_compound_state(self):
"""Test CompoundThermodynamicState.reduced_potential_at_states() method.
Computing the reduced potential singularly and with the class
method should give the same result.
"""
# Build a mixed collection of compatible and incompatible thermodynamic states.
thermodynamic_states = [
copy.deepcopy(self.alanine_state),
copy.deepcopy(self.alanine_state_exact_pme)
]
alchemical_states = [
AlchemicalState(lambda_electrostatics=1.0, lambda_sterics=1.0),
AlchemicalState(lambda_electrostatics=0.5, lambda_sterics=1.0),
AlchemicalState(lambda_electrostatics=0.5, lambda_sterics=0.0),
AlchemicalState(lambda_electrostatics=1.0, lambda_sterics=1.0)
]
compound_states = []
for thermo_state in thermodynamic_states:
for alchemical_state in alchemical_states:
compound_states.append(states.CompoundThermodynamicState(
copy.deepcopy(thermo_state), [copy.deepcopy(alchemical_state)]))
# Group thermodynamic states by compatibility.
compatible_groups, _ = states.group_by_compatibility(compound_states)
assert len(compatible_groups) == 2
# Compute the reduced potentials.
expected_energies = []
obtained_energies = []
for compatible_group in compatible_groups:
# Create context.
integrator = openmm.VerletIntegrator(2.0*unit.femtoseconds)
context = compatible_group[0].create_context(integrator)
context.setPositions(self.alanine_test_system.positions[:compatible_group[0].n_particles])
# Compute with single-state method.
for state in compatible_group:
state.apply_to_context(context)
expected_energies.append(state.reduced_potential(context))
# Compute with multi-state method.
compatible_energies = states.ThermodynamicState.reduced_potential_at_states(context, compatible_group)
# The first and the last state must be equal.
assert np.isclose(compatible_energies[0], compatible_energies[-1])
obtained_energies.extend(compatible_energies)
assert np.allclose(np.array(expected_energies), np.array(obtained_energies))
def test_serialization(self):
"""Test AlchemicalState serialization alone and in a compound state."""
alchemical_state = AlchemicalState(lambda_electrostatics=0.5, lambda_angles=None)
alchemical_state.set_function_variable('lambda', 0.0)
alchemical_state.lambda_sterics = AlchemicalFunction('lambda')
# Test serialization/deserialization of AlchemicalState.
serialization = utils.serialize(alchemical_state)
deserialized_state = utils.deserialize(serialization)
original_pickle = pickle.dumps(alchemical_state)
deserialized_pickle = pickle.dumps(deserialized_state)
assert original_pickle == deserialized_pickle
# Test serialization/deserialization of AlchemicalState in CompoundState.
test_cases = [copy.deepcopy(self.alanine_state), copy.deepcopy(self.alanine_state_exact_pme)]
for thermodynamic_state in test_cases:
compound_state = states.CompoundThermodynamicState(thermodynamic_state, [alchemical_state])
# The serialized system is standard.
serialization = utils.serialize(compound_state)
serialized_standard_system = serialization['thermodynamic_state']['standard_system']
# Decompress the serialized_system
serialized_standard_system = zlib.decompress(serialized_standard_system).decode(
states.ThermodynamicState._ENCODING)
assert serialized_standard_system.__hash__() == compound_state._standard_system_hash
# The object is deserialized correctly.
deserialized_state = utils.deserialize(serialization)
assert pickle.dumps(compound_state) == pickle.dumps(deserialized_state)
# =============================================================================
# MAIN FOR MANUAL DEBUGGING
# =============================================================================
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
|
choderalab/openmmtools
|
openmmtools/tests/test_alchemy.py
|
Python
|
mit
| 122,836
|
[
"MDTraj",
"OpenMM"
] |
d237d0a3b933053e0ae8c85a3774804d73f074e12c5d087bd7972f1b1724e469
|
# -*- coding: utf-8 -*-
"""Transport functions for `Fraunhofer's OrientDB <http://graphstore.scai.fraunhofer.de>`_.
`Fraunhofer <https://www.scai.fraunhofer.de/en/business-research-areas/bioinformatics.html>`_ hosts
an instance of `OrientDB <https://orientdb.com/>`_ that contains BEL in a schema similar to
:mod:`pybel.io.umbrella_nodelink`. However, they include custom relations that do not come
from a controlled vocabulary, and have not made the schema, ETL scripts, or documentation available.
Unlike BioDati and BEL Commons, the Fraunhofer OrientDB does not allow for uploads, so only
a single function :func:`pybel.from_fraunhofer_orientdb` is provided by PyBEL.
"""
import logging
from typing import Any, Iterable, Mapping, Optional
from urllib.parse import quote_plus
import requests
from pyparsing import ParseException
from .. import constants as pc
from ..parser import BELParser
from ..struct import BELGraph
__all__ = [
"from_fraunhofer_orientdb",
]
logger = logging.getLogger(__name__)
def from_fraunhofer_orientdb( # noqa:S107
database: str = "covid",
user: str = "covid_user",
password: str = "covid",
query: Optional[str] = None,
) -> BELGraph:
"""Get a BEL graph from the Fraunhofer OrientDB.
:param database: The OrientDB database to connect to
:param user: The user to connect to OrientDB
:param password: The password to connect to OrientDB
:param query: The query to run. Defaults to the URL encoded version of ``select from E``,
where ``E`` is all edges in the OrientDB edge database. Likely does not need to be changed,
except in the case of selecting specific subsets of edges. Make sure you URL encode it
properly, because OrientDB's RESTful API puts it in the URL's path.
    By default, this function connects to the ``covid`` database, which corresponds to the
COVID-19 Knowledge Graph [0]_. If other databases in the Fraunhofer OrientDB are
published and demo username/password combinations are given, the following table will
be updated.
+----------+------------+----------+
| Database | Username | Password |
+==========+============+==========+
| covid | covid_user | covid |
+----------+------------+----------+
The ``covid`` database can be downloaded and converted to a BEL graph like this:
.. code-block:: python
import pybel
graph = pybel.from_fraunhofer_orientdb(
database='covid',
user='covid_user',
password='covid',
)
graph.summarize()
However, because the source BEL scripts for the COVID-19 Knowledge Graph are available on
`GitHub <https://github.com/covid19kg/covid19kg>`_ and the authors pre-enabled it for PyBEL, it can
be downloaded with ``pip install git+https://github.com/covid19kg/covid19kg.git`` and used
with the following python code:
.. code-block:: python
import covid19kg
graph = covid19kg.get_graph()
graph.summarize()
.. warning::
It was initially planned to handle some of the non-standard relationships listed in the
Fraunhofer OrientDB's `schema <http://graphstore.scai.fraunhofer.de/studio/index.html#/database/covid/schema>`_
in their OrientDB Studio instance, but none of them actually appear in the only network that is accessible.
If this changes, please leave an issue at https://github.com/pybel/pybel/issues so it can be addressed.
.. [0] Domingo-Fernández, D., *et al.* (2020). `COVID-19 Knowledge Graph: a computable, multi-modal,
cause-and-effect knowledge model of COVID-19 pathophysiology
<https://doi.org/10.1101/2020.04.14.040667>`_. *bioRxiv* 2020.04.14.040667.
"""
graph = BELGraph(name="Fraunhofer OrientDB: {}".format(database))
parser = BELParser(graph, skip_validation=True)
results = _request_graphstore(database, user, password, select_query_template=query)
for result in results:
_parse_result(parser, result)
return graph
def _parse_result(parser: BELParser, result: Mapping[str, Any]) -> None:
citation_db, citation_id = pc.CITATION_TYPE_PUBMED, result.get("pmid")
if citation_id is None:
citation_db, citation_id = pc.CITATION_TYPE_PMC, result.get("pmc")
if citation_id is None:
if "citation" in result:
logger.warning(
"incorrect citation information for %s: %s",
result["@rid"],
result["citation"],
)
else:
logger.debug("no citation information for %s", result["@rid"])
return
parser.control_parser.clear()
parser.control_parser.citation_db = citation_db
parser.control_parser.citation_db_id = citation_id
parser.control_parser.evidence = result["evidence"]
parser.control_parser.annotations.update(result["annotation"])
source = result["in"]["bel"]
relation = result["@class"]
relation = RELATION_MAP.get(relation, relation)
target = result["out"]["bel"]
statement = " ".join([source, relation, target])
try:
parser.parseString(statement)
except ParseException:
logger.warning("could not parse %s", statement)
RELATION_MAP = {
"causes_no_change": pc.CAUSES_NO_CHANGE,
"positive_correlation": pc.POSITIVE_CORRELATION,
"negative_correlation": pc.NEGATIVE_CORRELATION,
"is_a": pc.IS_A,
"has_member": "hasMember",
"has_members": "hasMembers",
"has_component": "hasComponent",
"has_components": "hasComponents",
}
def _request_graphstore(
database: str,
user: str,
password: str,
count_query: Optional[str] = None,
select_query_template: Optional[str] = None,
page_size: int = 500,
base: str = "http://graphstore.scai.fraunhofer.de/query",
) -> Iterable[Mapping[str, Any]]:
"""Make an API call to the OrientDB."""
if count_query is None:
count_query = "select count(@rid) from E"
count_query = quote_plus(count_query)
count_url = "{base}/{database}/sql/{count_query}".format(base=base, database=database, count_query=count_query)
count_res = requests.get(count_url, auth=(user, password))
count = count_res.json()["result"][0]["count"]
logging.debug("fraunhofer orientdb has %d edges", count)
if select_query_template is None:
select_query_template = "select from E order by @rid limit {limit} offset {offset}"
offsets = count // page_size
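    # Page through the edge collection: one request per page_size window plus a
    # final (possibly partial) page, each offset by a multiple of page_size.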
for offset in range(offsets + 1):
select_query = select_query_template.format(limit=page_size, offset=offset * page_size)
logger.debug("query: %s", select_query)
select_query = quote_plus(select_query)
select_url = "{base}/{database}/sql/{select_query}/{page_size}/*:1".format(
base=base,
database=database,
select_query=select_query,
page_size=page_size,
)
res = requests.get(select_url, auth=(user, password))
res_json = res.json()
result = res_json["result"]
yield from result
|
pybel/pybel
|
src/pybel/io/fraunhofer_orientdb.py
|
Python
|
mit
| 7,079
|
[
"Pybel"
] |
8d81c431b5242300713fcc20005836448b71031bf012fbf83bd3b5d74bb82d6d
|
# -*- coding: utf-8 -*-
# from matplotlib import pyplot
#
# pyplot.plot([1, 3, 5, 7], [12, 5, 8, 11])
#
# pyplot.show()
import matplotlib.pyplot as plt
import numpy as np
# create some data to use for the plot
dt = 0.001
t = np.arange(0.0, 10.0, dt)
r = np.exp(-t[:1000]/0.05) # impulse response
x = np.random.randn(len(t))
s = np.convolve(x, r)[:len(x)]*dt # colored noise
# the main axes is subplot(111) by default
plt.plot(t, s)
plt.axis([0, 1, 1.1*np.amin(s), 2*np.amax(s)])
plt.xlabel('time (s)')
plt.ylabel('current (nA)')
plt.title('Gaussian colored noise')
# this is an inset axes over the main axes
a = plt.axes([.65, .6, .2, .2], facecolor='y')  # 'axisbg' was renamed to 'facecolor' in matplotlib 2.x
n, bins, patches = plt.hist(s, 400, density=True)  # 'normed' was replaced by 'density'
plt.title('Probability')
plt.xticks([])
plt.yticks([])
# this is another inset axes over the main axes
a = plt.axes([0.2, 0.6, .2, .2], facecolor='y')  # 'axisbg' renamed to 'facecolor'
plt.plot(t[:len(r)], r)
plt.title('Impulse response')
plt.xlim(0, 0.2)
plt.xticks([])
plt.yticks([])
plt.show()
|
mocne/PycharmProjects
|
HanderCode/handerCode/math.py
|
Python
|
mit
| 979
|
[
"Gaussian"
] |
c8bd3501778b8e02e82f2a78da2bbbf41268fc2a4e110c19566897c947db70c5
|
#!/usr/bin/python
# Copyright 2012 Aaron S. Joyner <aaron@joyner.ws>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lewis_class
import random
import unittest
class TestLewisClassScorer(unittest.TestCase):
def test_simple(self):
"""Test the basic behavior of the sorting and dividing."""
scores = [[7, 7], [9, 9], [4, 4], [8, 8], [6, 6], [1, 1], [3, 3], [5, 5],
[2, 2]]
grouped_scores = [[[9, 9], [8, 8], [7, 7]], # Group 0
[[6, 6], [5, 5], [4, 4]], # Group 1
[[3, 3], [2, 2], [1, 1]]] # Group 2
random.shuffle(scores)
output = lewis_class.LewisClassScorer(scores, num_classes=3,
scoring_fields=2)
self.assertEqual(grouped_scores, output)
def test_short_upper_class(self):
"""Test that the short class will be the upper class."""
scores = [[7, 7], [9, 9], [4, 4], [8, 8], [6, 6], [1, 1], [3, 3], [5, 5]]
grouped_scores = [[[9, 9], [8, 8]], # Group 0
[[7, 7], [6, 6], [5, 5]], # Group 1
[[4, 4], [3, 3], [1, 1]]] # Group 2
random.shuffle(scores)
output = lewis_class.LewisClassScorer(scores, num_classes=3,
scoring_fields=2)
self.assertEqual(grouped_scores, output)
scores = [[7, 7], [9, 9], [4, 4], [8, 8], [6, 6], [3, 3], [5, 5]]
grouped_scores = [[[9, 9], [8, 8], [7, 7]], # Group 0
[[6, 6], [5, 5], [4, 4], [3, 3]]] # Group 1
random.shuffle(scores)
output = lewis_class.LewisClassScorer(scores, num_classes=2,
scoring_fields=2)
self.assertEqual(grouped_scores, output)
def test_tie_uses_secondary(self):
scores = [[0, 9, 'Aaron'],
[0, 8, 'Brian'],
[0, 7, 'Chuck'],
[0, 6, 'Doris'],
[0, 5, 'Elena'],
[0, 4, 'Frank'],
[0, 3, 'Gavin'],
[0, 2, 'Hanna'],
[0, 1, 'Irine']]
grouped_scores = [[[0, 9, 'Aaron'], # Group 1
[0, 8, 'Brian'],
[0, 7, 'Chuck']],
[[0, 6, 'Doris'], # Group 2
[0, 5, 'Elena'],
[0, 4, 'Frank']],
[[0, 3, 'Gavin'], # Group 3
[0, 2, 'Hanna'],
[0, 1, 'Irine']]]
random.shuffle(scores)
output = lewis_class.LewisClassScorer(scores, num_classes=3,
scoring_fields=2)
self.assertEqual(grouped_scores, output)
def test_numerical_sorting(self):
"""Ensures that ints are sorted numerically not lexicaly."""
scores = [[3], [20], [100]]
grouped_scores = [[[100]], [[20]], [[3]]]
random.shuffle(scores)
output = lewis_class.LewisClassScorer(scores, num_classes=3,
scoring_fields=1)
self.assertEqual(grouped_scores, output)
def test_group_boundary_shifts_up(self):
scores = [[100, 'Jim'],
[99, 'Jan'],
[99, 'John'],
[98, 'Terry'],
[96, 'Eric'],
[96, 'Susie'],
[95, 'Dolly'],
[95, 'Mike'],
[94, 'Sam'],
[94, 'Dana'],
[93, 'Joshua'],
[93, 'Janie'],
[93, 'Debbie'],
[92, 'Lucy'],
[92, 'Patty'],
[91, 'Zelda'],
[91, 'George'],
[90, 'Paul'],
[90, 'Rita'],
[90, 'Ofelia'],
[90, 'Pamela'],
[89, 'Greg'],
[89, 'Art'],
[88, 'Olga'],
[85, 'Joseph'],
[85, 'Mary'],
[84, 'Will'],
[80, 'Lee'],
[79, 'Renee'],
[75, 'Jonathon'],
[74, 'Lisa'],
[70, 'Bart']]
grouped_scores = [[[100, 'Jim'],
[99, 'John'],
[99, 'Jan'],
[98, 'Terry'],
[96, 'Susie'],
[96, 'Eric']],
[[95, 'Mike'],
[95, 'Dolly'],
[94, 'Sam'],
[94, 'Dana'],
[93, 'Joshua'],
[93, 'Janie'],
[93, 'Debbie']],
[[92, 'Patty'],
[92, 'Lucy'],
[91, 'Zelda'],
[91, 'George']],
[[90, 'Rita'],
[90, 'Paul'],
[90, 'Pamela'],
[90, 'Ofelia'],
[89, 'Greg'],
[89, 'Art'],
[88, 'Olga']],
[[85, 'Mary'],
[85, 'Joseph'],
[84, 'Will'],
[80, 'Lee'],
[79, 'Renee'],
[75, 'Jonathon'],
[74, 'Lisa'],
[70, 'Bart']]]
self.maxDiff = None
random.shuffle(scores)
output = lewis_class.LewisClassScorer(scores, num_classes=5,
scoring_fields=1)
self.assertEqual(grouped_scores, output)
if __name__ == '__main__':
unittest.main()
|
asjoyner/lewis-class-scorer
|
lewis_class_test.py
|
Python
|
apache-2.0
| 5,963
|
[
"Brian"
] |
9952456b89ce93114f1f4d618effd1421e3bafc14b48ad03e5659c96711fbc8e
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`slidecontroller` module contains arguably the most important part of OpenLP - the slide controller
"""
import os
import logging
import copy
from collections import deque
from PyQt4 import QtCore, QtGui
from openlp.core.lib import OpenLPToolbar, Receiver, ItemCapabilities, ServiceItem, ImageSource, SlideLimits, \
ServiceItemAction, Settings, Registry, UiStrings, ScreenList, build_icon, build_html, translate
from openlp.core.ui import HideMode, MainDisplay, Display, DisplayControllerType
from openlp.core.lib.ui import create_action
from openlp.core.utils.actions import ActionList, CategoryOrder
log = logging.getLogger(__name__)
class DisplayController(QtGui.QWidget):
"""
Controller is a general display controller widget.
"""
def __init__(self, parent, isLive=False):
"""
Set up the general Controller.
"""
QtGui.QWidget.__init__(self, parent)
self.isLive = isLive
self.display = None
self.controllerType = DisplayControllerType.Plugin
def sendToPlugins(self, *args):
"""
        This is the generic function to send a signal for control widgets
        created from within other plugins.
        This function is needed to catch the current controller.
"""
sender = self.sender().objectName() if self.sender().objectName() else self.sender().text()
controller = self
Receiver.send_message('%s' % sender, [controller, args])
class SlideController(DisplayController):
"""
SlideController is the slide controller widget. This widget is what the
user uses to control the displaying of verses/slides/etc on the screen.
"""
def __init__(self, parent, isLive=False):
"""
Set up the Slide Controller.
"""
DisplayController.__init__(self, parent, isLive)
self.screens = ScreenList()
try:
self.ratio = float(self.screens.current[u'size'].width()) / float(self.screens.current[u'size'].height())
except ZeroDivisionError:
self.ratio = 1
self.loopList = [
u'playSlidesMenu',
u'loopSeparator',
u'delaySpinBox'
]
self.audioList = [
u'songMenu',
u'audioPauseItem',
u'audioTimeLabel'
]
self.wideMenu = [
u'blankScreenButton',
u'themeScreenButton',
u'desktopScreenButton'
]
self.hideMenuList = [
u'hideMenu'
]
self.timer_id = 0
self.songEdit = False
self.selectedRow = 0
self.serviceItem = None
self.slide_limits = None
self.updateSlideLimits()
self.panel = QtGui.QWidget(parent.controlSplitter)
self.slideList = {}
# Layout for holding panel
self.panelLayout = QtGui.QVBoxLayout(self.panel)
self.panelLayout.setSpacing(0)
self.panelLayout.setMargin(0)
# Type label for the top of the slide controller
self.typeLabel = QtGui.QLabel(self.panel)
if self.isLive:
Registry().register(u'live_controller', self)
self.typeLabel.setText(UiStrings().Live)
self.split = 1
self.typePrefix = u'live'
self.keypress_queue = deque()
self.keypress_loop = False
self.category = UiStrings().LiveToolbar
ActionList.get_instance().add_category(unicode(self.category), CategoryOrder.standardToolbar)
else:
Registry().register(u'preview_controller', self)
self.typeLabel.setText(UiStrings().Preview)
self.split = 0
self.typePrefix = u'preview'
self.category = None
self.typeLabel.setStyleSheet(u'font-weight: bold; font-size: 12pt;')
self.typeLabel.setAlignment(QtCore.Qt.AlignCenter)
self.panelLayout.addWidget(self.typeLabel)
# Splitter
self.splitter = QtGui.QSplitter(self.panel)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.panelLayout.addWidget(self.splitter)
# Actual controller section
self.controller = QtGui.QWidget(self.splitter)
self.controller.setGeometry(QtCore.QRect(0, 0, 100, 536))
self.controller.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum))
self.controllerLayout = QtGui.QVBoxLayout(self.controller)
self.controllerLayout.setSpacing(0)
self.controllerLayout.setMargin(0)
# Controller list view
self.previewListWidget = QtGui.QTableWidget(self.controller)
self.previewListWidget.setColumnCount(1)
self.previewListWidget.horizontalHeader().setVisible(False)
self.previewListWidget.setColumnWidth(0, self.controller.width())
self.previewListWidget.isLive = self.isLive
self.previewListWidget.setObjectName(u'previewListWidget')
self.previewListWidget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.previewListWidget.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.previewListWidget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.previewListWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.previewListWidget.setAlternatingRowColors(True)
self.controllerLayout.addWidget(self.previewListWidget)
# Build the full toolbar
self.toolbar = OpenLPToolbar(self)
sizeToolbarPolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizeToolbarPolicy.setHorizontalStretch(0)
sizeToolbarPolicy.setVerticalStretch(0)
sizeToolbarPolicy.setHeightForWidth(self.toolbar.sizePolicy().hasHeightForWidth())
self.toolbar.setSizePolicy(sizeToolbarPolicy)
self.previousItem = create_action(self, u'previousItem_' + self.typePrefix,
text=translate('OpenLP.SlideController', 'Previous Slide'), icon=u':/slides/slide_previous.png',
tooltip=translate('OpenLP.SlideController', 'Move to previous.'),
shortcuts=[QtCore.Qt.Key_Up, QtCore.Qt.Key_PageUp], context=QtCore.Qt.WidgetWithChildrenShortcut,
category=self.category, triggers=self.onSlideSelectedPrevious)
self.toolbar.addAction(self.previousItem)
self.nextItem = create_action(self, u'nextItem_' + self.typePrefix,
text=translate('OpenLP.SlideController', 'Next Slide'), icon=u':/slides/slide_next.png',
tooltip=translate('OpenLP.SlideController', 'Move to next.'),
shortcuts=[QtCore.Qt.Key_Down, QtCore.Qt.Key_PageDown], context=QtCore.Qt.WidgetWithChildrenShortcut,
category=self.category, triggers=self.onSlideSelectedNextAction)
self.toolbar.addAction(self.nextItem)
self.toolbar.addSeparator()
self.controllerType = DisplayControllerType.Preview
if self.isLive:
self.controllerType = DisplayControllerType.Live
# Hide Menu
self.hideMenu = QtGui.QToolButton(self.toolbar)
self.hideMenu.setObjectName(u'hideMenu')
self.hideMenu.setText(translate('OpenLP.SlideController', 'Hide'))
self.hideMenu.setPopupMode(QtGui.QToolButton.MenuButtonPopup)
self.hideMenu.setMenu(QtGui.QMenu(translate('OpenLP.SlideController', 'Hide'), self.toolbar))
self.toolbar.addToolbarWidget(self.hideMenu)
self.blankScreen = create_action(self, u'blankScreen',
text=translate('OpenLP.SlideController', 'Blank Screen'), icon=u':/slides/slide_blank.png',
checked=False, shortcuts=[QtCore.Qt.Key_Period], category=self.category, triggers=self.onBlankDisplay)
self.themeScreen = create_action(self, u'themeScreen',
text=translate('OpenLP.SlideController', 'Blank to Theme'), icon=u':/slides/slide_theme.png',
checked=False, shortcuts=[QtGui.QKeySequence(u'T')], category=self.category,
triggers=self.onThemeDisplay)
self.desktopScreen = create_action(self, u'desktopScreen',
text=translate('OpenLP.SlideController', 'Show Desktop'), icon=u':/slides/slide_desktop.png',
checked=False, shortcuts=[QtGui.QKeySequence(u'D')], category=self.category,
triggers=self.onHideDisplay)
self.hideMenu.setDefaultAction(self.blankScreen)
self.hideMenu.menu().addAction(self.blankScreen)
self.hideMenu.menu().addAction(self.themeScreen)
self.hideMenu.menu().addAction(self.desktopScreen)
# Wide menu of display control buttons.
self.blankScreenButton = QtGui.QToolButton(self.toolbar)
self.blankScreenButton.setObjectName(u'blankScreenButton')
self.toolbar.addToolbarWidget(self.blankScreenButton)
self.blankScreenButton.setDefaultAction(self.blankScreen)
self.themeScreenButton = QtGui.QToolButton(self.toolbar)
self.themeScreenButton.setObjectName(u'themeScreenButton')
self.toolbar.addToolbarWidget(self.themeScreenButton)
self.themeScreenButton.setDefaultAction(self.themeScreen)
self.desktopScreenButton = QtGui.QToolButton(self.toolbar)
self.desktopScreenButton.setObjectName(u'desktopScreenButton')
self.toolbar.addToolbarWidget(self.desktopScreenButton)
self.desktopScreenButton.setDefaultAction(self.desktopScreen)
self.toolbar.addToolbarAction(u'loopSeparator', separator=True)
# Play Slides Menu
self.playSlidesMenu = QtGui.QToolButton(self.toolbar)
self.playSlidesMenu.setObjectName(u'playSlidesMenu')
self.playSlidesMenu.setText(translate('OpenLP.SlideController', 'Play Slides'))
self.playSlidesMenu.setPopupMode(QtGui.QToolButton.MenuButtonPopup)
self.playSlidesMenu.setMenu(QtGui.QMenu(translate('OpenLP.SlideController', 'Play Slides'), self.toolbar))
self.toolbar.addToolbarWidget(self.playSlidesMenu)
self.playSlidesLoop = create_action(self, u'playSlidesLoop', text=UiStrings().PlaySlidesInLoop,
icon=u':/media/media_time.png', checked=False, shortcuts=[],
category=self.category, triggers=self.onPlaySlidesLoop)
self.playSlidesOnce = create_action(self, u'playSlidesOnce', text=UiStrings().PlaySlidesToEnd,
icon=u':/media/media_time.png', checked=False, shortcuts=[],
category=self.category, triggers=self.onPlaySlidesOnce)
if Settings().value(self.parent().advancedSettingsSection + u'/slide limits') == SlideLimits.Wrap:
self.playSlidesMenu.setDefaultAction(self.playSlidesLoop)
else:
self.playSlidesMenu.setDefaultAction(self.playSlidesOnce)
self.playSlidesMenu.menu().addAction(self.playSlidesLoop)
self.playSlidesMenu.menu().addAction(self.playSlidesOnce)
# Loop Delay Spinbox
self.delaySpinBox = QtGui.QSpinBox()
self.delaySpinBox.setObjectName(u'delaySpinBox')
self.delaySpinBox.setRange(1, 180)
self.delaySpinBox.setSuffix(UiStrings().Seconds)
self.delaySpinBox.setToolTip(translate('OpenLP.SlideController', 'Delay between slides in seconds.'))
self.toolbar.addToolbarWidget(self.delaySpinBox)
else:
self.toolbar.addToolbarAction(u'goLive', icon=u':/general/general_live.png',
tooltip=translate('OpenLP.SlideController', 'Move to live.'), triggers=self.onGoLive)
self.toolbar.addToolbarAction(u'addToService', icon=u':/general/general_add.png',
tooltip=translate('OpenLP.SlideController', 'Add to Service.'), triggers=self.onPreviewAddToService)
self.toolbar.addSeparator()
self.toolbar.addToolbarAction(u'editSong', icon=u':/general/general_edit.png',
tooltip=translate('OpenLP.SlideController', 'Edit and reload song preview.'), triggers=self.onEditSong)
self.controllerLayout.addWidget(self.toolbar)
# Build the Media Toolbar
self.media_controller.register_controller(self)
if self.isLive:
# Build the Song Toolbar
self.songMenu = QtGui.QToolButton(self.toolbar)
self.songMenu.setObjectName(u'songMenu')
self.songMenu.setText(translate('OpenLP.SlideController', 'Go To'))
self.songMenu.setPopupMode(QtGui.QToolButton.InstantPopup)
self.songMenu.setMenu(QtGui.QMenu(translate('OpenLP.SlideController', 'Go To'), self.toolbar))
self.toolbar.addToolbarWidget(self.songMenu)
# Stuff for items with background audio.
self.audioPauseItem = self.toolbar.addToolbarAction(u'audioPauseItem',
icon=u':/slides/media_playback_pause.png', text=translate('OpenLP.SlideController', 'Pause Audio'),
tooltip=translate('OpenLP.SlideController', 'Pause audio.'),
checked=False, visible=False, category=self.category, context=QtCore.Qt.WindowShortcut,
shortcuts=[], triggers=self.onAudioPauseClicked)
self.audioMenu = QtGui.QMenu(translate('OpenLP.SlideController', 'Background Audio'), self.toolbar)
self.audioPauseItem.setMenu(self.audioMenu)
self.audioPauseItem.setParent(self.toolbar)
self.toolbar.widgetForAction(self.audioPauseItem).setPopupMode(
QtGui.QToolButton.MenuButtonPopup)
self.nextTrackItem = create_action(self, u'nextTrackItem', text=UiStrings().NextTrack,
icon=u':/slides/media_playback_next.png',
tooltip=translate('OpenLP.SlideController', 'Go to next audio track.'),
category=self.category, shortcuts=[], triggers=self.onNextTrackClicked)
self.audioMenu.addAction(self.nextTrackItem)
self.trackMenu = self.audioMenu.addMenu(translate('OpenLP.SlideController', 'Tracks'))
self.audioTimeLabel = QtGui.QLabel(u' 00:00 ', self.toolbar)
self.audioTimeLabel.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignHCenter)
self.audioTimeLabel.setStyleSheet(
u'background-color: palette(background); '
u'border-top-color: palette(shadow); '
u'border-left-color: palette(shadow); '
u'border-bottom-color: palette(light); '
u'border-right-color: palette(light); '
u'border-radius: 3px; border-style: inset; '
u'border-width: 1; font-family: monospace; margin: 2px;'
)
self.audioTimeLabel.setObjectName(u'audioTimeLabel')
self.toolbar.addToolbarWidget(self.audioTimeLabel)
self.toolbar.setWidgetVisible(self.audioList, False)
# Screen preview area
self.previewFrame = QtGui.QFrame(self.splitter)
self.previewFrame.setGeometry(QtCore.QRect(0, 0, 300, 300 * self.ratio))
self.previewFrame.setMinimumHeight(100)
self.previewFrame.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored,
QtGui.QSizePolicy.Label))
self.previewFrame.setFrameShape(QtGui.QFrame.StyledPanel)
self.previewFrame.setFrameShadow(QtGui.QFrame.Sunken)
self.previewFrame.setObjectName(u'previewFrame')
self.grid = QtGui.QGridLayout(self.previewFrame)
self.grid.setMargin(8)
self.grid.setObjectName(u'grid')
self.slideLayout = QtGui.QVBoxLayout()
self.slideLayout.setSpacing(0)
self.slideLayout.setMargin(0)
self.slideLayout.setObjectName(u'SlideLayout')
self.previewDisplay = Display(self, self.isLive, self)
self.previewDisplay.setGeometry(QtCore.QRect(0, 0, 300, 300))
self.previewDisplay.screen = {u'size': self.previewDisplay.geometry()}
self.previewDisplay.setup()
self.slideLayout.insertWidget(0, self.previewDisplay)
self.previewDisplay.hide()
# Actual preview screen
self.slidePreview = QtGui.QLabel(self)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.slidePreview.sizePolicy().hasHeightForWidth())
self.slidePreview.setSizePolicy(sizePolicy)
self.slidePreview.setFrameShape(QtGui.QFrame.Box)
self.slidePreview.setFrameShadow(QtGui.QFrame.Plain)
self.slidePreview.setLineWidth(1)
self.slidePreview.setScaledContents(True)
self.slidePreview.setObjectName(u'slidePreview')
self.slideLayout.insertWidget(0, self.slidePreview)
self.grid.addLayout(self.slideLayout, 0, 0, 1, 1)
if self.isLive:
self.current_shortcut = u''
self.shortcutTimer = QtCore.QTimer()
self.shortcutTimer.setObjectName(u'shortcutTimer')
self.shortcutTimer.setSingleShot(True)
shortcuts = [{u'key': u'V', u'configurable': True,
u'text': translate('OpenLP.SlideController', 'Go to "Verse"')},
{u'key': u'C', u'configurable': True,
u'text': translate('OpenLP.SlideController', 'Go to "Chorus"')},
{u'key': u'B', u'configurable': True,
u'text': translate('OpenLP.SlideController', 'Go to "Bridge"')},
{u'key': u'P', u'configurable': True,
u'text': translate('OpenLP.SlideController',
'Go to "Pre-Chorus"')},
{u'key': u'I', u'configurable': True,
u'text': translate('OpenLP.SlideController', 'Go to "Intro"')},
{u'key': u'E', u'configurable': True,
u'text': translate('OpenLP.SlideController', 'Go to "Ending"')},
{u'key': u'O', u'configurable': True,
u'text': translate('OpenLP.SlideController', 'Go to "Other"')}]
shortcuts += [{u'key': unicode(number)} for number in range(10)]
self.previewListWidget.addActions([create_action(self,
u'shortcutAction_%s' % s[u'key'], text=s.get(u'text'),
shortcuts=[QtGui.QKeySequence(s[u'key'])],
context=QtCore.Qt.WidgetWithChildrenShortcut,
category=self.category if s.get(u'configurable') else None,
triggers=self._slideShortcutActivated) for s in shortcuts])
QtCore.QObject.connect(
self.shortcutTimer, QtCore.SIGNAL(u'timeout()'),
self._slideShortcutActivated)
# Signals
QtCore.QObject.connect(self.previewListWidget, QtCore.SIGNAL(u'clicked(QModelIndex)'), self.onSlideSelected)
if self.isLive:
QtCore.QObject.connect(Receiver.get_receiver(),
QtCore.SIGNAL(u'slidecontroller_live_spin_delay'), self.receiveSpinDelay)
QtCore.QObject.connect(Receiver.get_receiver(),
QtCore.SIGNAL(u'slidecontroller_toggle_display'), self.toggleDisplay)
self.toolbar.setWidgetVisible(self.loopList, False)
self.toolbar.setWidgetVisible(self.wideMenu, False)
else:
QtCore.QObject.connect(self.previewListWidget,
QtCore.SIGNAL(u'doubleClicked(QModelIndex)'), self.onGoLiveClick)
self.toolbar.setWidgetVisible([u'editSong'], False)
if self.isLive:
self.setLiveHotkeys(self)
self.__addActionsToWidget(self.previewListWidget)
else:
self.previewListWidget.addActions([self.nextItem, self.previousItem])
QtCore.QObject.connect(Receiver.get_receiver(),
QtCore.SIGNAL(u'slidecontroller_%s_stop_loop' % self.typePrefix), self.onStopLoop)
QtCore.QObject.connect(Receiver.get_receiver(),
QtCore.SIGNAL(u'slidecontroller_%s_next' % self.typePrefix), self.onSlideSelectedNext)
QtCore.QObject.connect(Receiver.get_receiver(),
QtCore.SIGNAL(u'slidecontroller_%s_previous' % self.typePrefix), self.onSlideSelectedPrevious)
QtCore.QObject.connect(Receiver.get_receiver(),
QtCore.SIGNAL(u'slidecontroller_%s_change' % self.typePrefix), self.onSlideChange)
QtCore.QObject.connect(Receiver.get_receiver(),
QtCore.SIGNAL(u'slidecontroller_%s_set' % self.typePrefix), self.onSlideSelectedIndex)
QtCore.QObject.connect(Receiver.get_receiver(),
QtCore.SIGNAL(u'slidecontroller_%s_blank' % self.typePrefix), self.onSlideBlank)
QtCore.QObject.connect(Receiver.get_receiver(),
QtCore.SIGNAL(u'slidecontroller_%s_unblank' % self.typePrefix), self.onSlideUnblank)
QtCore.QObject.connect(Receiver.get_receiver(),
QtCore.SIGNAL(u'slidecontroller_update_slide_limits'), self.updateSlideLimits)
def _slideShortcutActivated(self):
"""
        Called when a shortcut has been activated to jump to a chorus, verse,
        etc.
        **Note**: This implementation is based on shortcuts, but it behaves
        like "key sequences": you have to press one key after the other and
        **not** at the same time.
        For example, to jump to "V3" you have to press "V" and then, within a
        time frame of 350ms, press "3".
"""
try:
from openlp.plugins.songs.lib import VerseType
SONGS_PLUGIN_AVAILABLE = True
except ImportError:
SONGS_PLUGIN_AVAILABLE = False
sender_name = self.sender().objectName()
verse_type = sender_name[15:] if sender_name[:15] == u'shortcutAction_' else u''
if SONGS_PLUGIN_AVAILABLE:
if verse_type == u'V':
self.current_shortcut = VerseType.TranslatedTags[VerseType.Verse]
elif verse_type == u'C':
self.current_shortcut = VerseType.TranslatedTags[VerseType.Chorus]
elif verse_type == u'B':
self.current_shortcut = VerseType.TranslatedTags[VerseType.Bridge]
elif verse_type == u'P':
self.current_shortcut = VerseType.TranslatedTags[VerseType.PreChorus]
elif verse_type == u'I':
self.current_shortcut = VerseType.TranslatedTags[VerseType.Intro]
elif verse_type == u'E':
self.current_shortcut = VerseType.TranslatedTags[VerseType.Ending]
elif verse_type == u'O':
self.current_shortcut = VerseType.TranslatedTags[VerseType.Other]
elif verse_type.isnumeric():
self.current_shortcut += verse_type
self.current_shortcut = self.current_shortcut.upper()
elif verse_type.isnumeric():
self.current_shortcut += verse_type
elif verse_type:
self.current_shortcut = verse_type
keys = self.slideList.keys()
matches = [match for match in keys
if match.startswith(self.current_shortcut)]
if len(matches) == 1:
self.shortcutTimer.stop()
self.current_shortcut = u''
self.__checkUpdateSelectedSlide(self.slideList[matches[0]])
self.slideSelected()
elif sender_name != u'shortcutTimer':
            # Start the timer as we did not have any match.
self.shortcutTimer.start(350)
else:
# The timer timed out.
if self.current_shortcut in keys:
                # We had more than one match, for example "V1" and "V10", but
                # "V1" was the slide we wanted to go to.
self.__checkUpdateSelectedSlide(self.slideList[self.current_shortcut])
self.slideSelected()
# Reset the shortcut.
self.current_shortcut = u''
def setLiveHotkeys(self, parent=None):
"""
Set the live hotkeys
"""
self.previousService = create_action(parent, u'previousService',
text=translate('OpenLP.SlideController', 'Previous Service'),
shortcuts=[QtCore.Qt.Key_Left], context=QtCore.Qt.WidgetWithChildrenShortcut, category=self.category,
triggers=self.servicePrevious)
self.nextService = create_action(parent, 'nextService',
text=translate('OpenLP.SlideController', 'Next Service'),
shortcuts=[QtCore.Qt.Key_Right], context=QtCore.Qt.WidgetWithChildrenShortcut, category=self.category,
triggers=self.serviceNext)
self.escapeItem = create_action(parent, 'escapeItem',
text=translate('OpenLP.SlideController', 'Escape Item'),
shortcuts=[QtCore.Qt.Key_Escape], context=QtCore.Qt.WidgetWithChildrenShortcut, category=self.category,
triggers=self.liveEscape)
def liveEscape(self):
"""
If you press ESC on the live screen it should close the display temporarily.
"""
self.display.setVisible(False)
self.media_controller.media_stop(self)
def toggleDisplay(self, action):
"""
Toggle the display settings triggered from remote messages.
"""
if action == u'blank' or action == u'hide':
self.onBlankDisplay(True)
elif action == u'theme':
self.onThemeDisplay(True)
elif action == u'desktop':
self.onHideDisplay(True)
elif action == u'show':
self.onBlankDisplay(False)
self.onThemeDisplay(False)
self.onHideDisplay(False)
def servicePrevious(self):
"""
Live event to select the previous service item from the service manager.
"""
self.keypress_queue.append(ServiceItemAction.Previous)
self._process_queue()
def serviceNext(self):
"""
Live event to select the next service item from the service manager.
"""
self.keypress_queue.append(ServiceItemAction.Next)
self._process_queue()
def _process_queue(self):
"""
        Process the service item request queue. Key presses can arrive
        faster than they can be processed, so a FIFO queue is used.
"""
if self.keypress_queue:
while len(self.keypress_queue) and not self.keypress_loop:
self.keypress_loop = True
keypressCommand = self.keypress_queue.popleft()
if keypressCommand == ServiceItemAction.Previous:
self.service_manager.previous_item()
elif keypressCommand == ServiceItemAction.PreviousLastSlide:
# Go to the last slide of the previous item
self.service_manager.previous_item(last_slide=True)
else:
self.service_manager.next_item()
self.keypress_loop = False
def screenSizeChanged(self):
"""
        The settings dialog has changed the screen size, so adjust the output
        and screen previews.
"""
# rebuild display as screen size changed
if self.display:
self.display.close()
self.display = MainDisplay(self, self.isLive, self)
self.display.setup()
if self.isLive:
self.__addActionsToWidget(self.display)
self.display.audioPlayer.connectSlot(QtCore.SIGNAL(u'tick(qint64)'), self.onAudioTimeRemaining)
# The SlidePreview's ratio.
try:
self.ratio = float(self.screens.current[u'size'].width()) / float(self.screens.current[u'size'].height())
except ZeroDivisionError:
self.ratio = 1
self.media_controller.setup_display(self.display, False)
self.previewSizeChanged()
self.previewDisplay.setup()
serviceItem = ServiceItem()
self.previewDisplay.webView.setHtml(build_html(serviceItem, self.previewDisplay.screen, None, self.isLive,
plugins=self.plugin_manager.plugins))
self.media_controller.setup_display(self.previewDisplay, True)
if self.serviceItem:
self.refreshServiceItem()
def __addActionsToWidget(self, widget):
"""
Add actions to the widget specified by `widget`
"""
widget.addActions([
self.previousItem, self.nextItem,
self.previousService, self.nextService,
self.escapeItem])
def previewSizeChanged(self):
"""
        Takes care of the SlidePreview's size. It is called when one of the
        splitters is moved or when the screen size is changed. Note that this
        method is (also) called frequently from the mainwindow *paintEvent*.
"""
if self.ratio < float(self.previewFrame.width()) / float(self.previewFrame.height()):
# We have to take the height as limit.
max_height = self.previewFrame.height() - self.grid.margin() * 2
self.slidePreview.setFixedSize(QtCore.QSize(max_height * self.ratio, max_height))
self.previewDisplay.setFixedSize(QtCore.QSize(max_height * self.ratio, max_height))
self.previewDisplay.screen = {
u'size': self.previewDisplay.geometry()}
else:
# We have to take the width as limit.
max_width = self.previewFrame.width() - self.grid.margin() * 2
self.slidePreview.setFixedSize(QtCore.QSize(max_width, max_width / self.ratio))
self.previewDisplay.setFixedSize(QtCore.QSize(max_width, max_width / self.ratio))
self.previewDisplay.screen = {
u'size': self.previewDisplay.geometry()}
# Make sure that the frames have the correct size.
self.previewListWidget.setColumnWidth(0,
self.previewListWidget.viewport().size().width())
if self.serviceItem:
# Sort out songs, bibles, etc.
if self.serviceItem.is_text():
self.previewListWidget.resizeRowsToContents()
else:
# Sort out image heights.
width = self.parent().controlSplitter.sizes()[self.split]
for framenumber in range(len(self.serviceItem.get_frames())):
self.previewListWidget.setRowHeight(framenumber, width / self.ratio)
self.onControllerSizeChanged(self.controller.width(), self.controller.height())
def onControllerSizeChanged(self, width, height):
"""
Change layout of display control buttons on controller size change
"""
if self.isLive:
if width > 300 and self.hideMenu.isVisible():
self.toolbar.setWidgetVisible(self.hideMenuList, False)
self.toolbar.setWidgetVisible(self.wideMenu)
elif width < 300 and not self.hideMenu.isVisible():
self.toolbar.setWidgetVisible(self.wideMenu, False)
self.toolbar.setWidgetVisible(self.hideMenuList)
def onSongBarHandler(self):
"""
        Handle a selection from the song menu by jumping to the requested verse.
"""
request = self.sender().text()
slide_no = self.slideList[request]
self.__updatePreviewSelection(slide_no)
self.slideSelected()
def receiveSpinDelay(self, value):
"""
Adjusts the value of the ``delaySpinBox`` to the given one.
"""
self.delaySpinBox.setValue(int(value))
def updateSlideLimits(self):
"""
Updates the Slide Limits variable from the settings.
"""
self.slide_limits = Settings().value(self.parent().advancedSettingsSection + u'/slide limits')
def enableToolBar(self, item):
"""
Allows the toolbars to be reconfigured based on Controller Type
and ServiceItem Type
"""
if self.isLive:
self.enableLiveToolBar(item)
else:
self.enablePreviewToolBar(item)
def enableLiveToolBar(self, item):
"""
Allows the live toolbar to be customised
"""
# Work-around for OS X, hide and then show the toolbar
# See bug #791050
self.toolbar.hide()
self.mediabar.hide()
self.songMenu.hide()
self.toolbar.setWidgetVisible(self.loopList, False)
# Reset the button
self.playSlidesOnce.setChecked(False)
self.playSlidesOnce.setIcon(build_icon(u':/media/media_time.png'))
self.playSlidesLoop.setChecked(False)
self.playSlidesLoop.setIcon(build_icon(u':/media/media_time.png'))
if item.is_text():
if Settings().value(self.parent().songsSettingsSection + u'/display songbar') and self.slideList:
self.songMenu.show()
if item.is_capable(ItemCapabilities.CanLoop) and len(item.get_frames()) > 1:
self.toolbar.setWidgetVisible(self.loopList)
if item.is_media():
self.mediabar.show()
self.previousItem.setVisible(not item.is_media())
self.nextItem.setVisible(not item.is_media())
# Work-around for OS X, hide and then show the toolbar
# See bug #791050
self.toolbar.show()
def enablePreviewToolBar(self, item):
"""
Allows the Preview toolbar to be customised
"""
# Work-around for OS X, hide and then show the toolbar
# See bug #791050
self.toolbar.hide()
self.mediabar.hide()
self.toolbar.setWidgetVisible([u'editSong'], False)
if item.is_capable(ItemCapabilities.CanEdit) and item.from_plugin:
self.toolbar.setWidgetVisible([u'editSong'])
elif item.is_media():
self.mediabar.show()
self.previousItem.setVisible(not item.is_media())
self.nextItem.setVisible(not item.is_media())
# Work-around for OS X, hide and then show the toolbar
# See bug #791050
self.toolbar.show()
def refreshServiceItem(self):
"""
Method to update the service item if the screen has changed
"""
log.debug(u'refreshServiceItem live = %s' % self.isLive)
if self.serviceItem.is_text() or self.serviceItem.is_image():
item = self.serviceItem
item.render()
self._processItem(item, self.selectedRow)
def add_service_item(self, item):
"""
Method to install the service item into the controller
Called by plugins
"""
log.debug(u'add_service_item live = %s' % self.isLive)
item.render()
slideno = 0
if self.songEdit:
slideno = self.selectedRow
self.songEdit = False
self._processItem(item, slideno)
def replaceServiceManagerItem(self, item):
"""
Replacement item following a remote edit
"""
if item == self.serviceItem:
self._processItem(item, self.previewListWidget.currentRow())
def addServiceManagerItem(self, item, slideno):
"""
Method to install the service item into the controller and
request the correct toolbar for the plugin.
Called by ServiceManager
"""
log.debug(u'addServiceManagerItem live = %s' % self.isLive)
# If no valid slide number is specified we take the first one, but we
# remember the initial value to see if we should reload the song or not
slidenum = slideno
if slideno == -1:
slidenum = 0
# If service item is the same as the current one, only change slide
if slideno >= 0 and item == self.serviceItem:
self.__checkUpdateSelectedSlide(slidenum)
self.slideSelected()
else:
self._processItem(item, slidenum)
if self.isLive and item.auto_play_slides_loop and item.timed_slide_interval > 0:
self.playSlidesLoop.setChecked(item.auto_play_slides_loop)
self.delaySpinBox.setValue(int(item.timed_slide_interval))
self.onPlaySlidesLoop()
elif self.isLive and item.auto_play_slides_once and item.timed_slide_interval > 0:
self.playSlidesOnce.setChecked(item.auto_play_slides_once)
self.delaySpinBox.setValue(int(item.timed_slide_interval))
self.onPlaySlidesOnce()
def _processItem(self, serviceItem, slideno):
"""
        Loads a ServiceItem into the system from the ServiceManager and
        displays the slide number passed.
"""
log.debug(u'processManagerItem live = %s' % self.isLive)
self.onStopLoop()
old_item = self.serviceItem
# take a copy not a link to the servicemanager copy.
self.serviceItem = copy.copy(serviceItem)
if old_item and self.isLive and old_item.is_capable(ItemCapabilities.ProvidesOwnDisplay):
self._resetBlank()
Receiver.send_message(u'%s_start' % serviceItem.name.lower(),
[serviceItem, self.isLive, self.hideMode(), slideno])
self.slideList = {}
width = self.parent().controlSplitter.sizes()[self.split]
self.previewListWidget.clear()
self.previewListWidget.setRowCount(0)
self.previewListWidget.setColumnWidth(0, width)
if self.isLive:
self.songMenu.menu().clear()
self.display.audioPlayer.reset()
self.setAudioItemsVisibility(False)
self.audioPauseItem.setChecked(False)
# If the current item has background audio
if self.serviceItem.is_capable(ItemCapabilities.HasBackgroundAudio):
log.debug(u'Starting to play...')
self.display.audioPlayer.addToPlaylist(self.serviceItem.background_audio)
self.trackMenu.clear()
for counter in range(len(self.serviceItem.background_audio)):
action = self.trackMenu.addAction(os.path.basename(self.serviceItem.background_audio[counter]))
action.setData(counter)
QtCore.QObject.connect(action, QtCore.SIGNAL(u'triggered(bool)'), self.onTrackTriggered)
self.display.audioPlayer.repeat = Settings().value(
self.parent().generalSettingsSection + u'/audio repeat list')
if Settings().value(self.parent().generalSettingsSection + u'/audio start paused'):
self.audioPauseItem.setChecked(True)
self.display.audioPlayer.pause()
else:
self.display.audioPlayer.play()
self.setAudioItemsVisibility(True)
row = 0
text = []
for framenumber, frame in enumerate(self.serviceItem.get_frames()):
self.previewListWidget.setRowCount(self.previewListWidget.rowCount() + 1)
item = QtGui.QTableWidgetItem()
slideHeight = 0
if self.serviceItem.is_text():
if frame[u'verseTag']:
# These tags are already translated.
verse_def = frame[u'verseTag']
verse_def = u'%s%s' % (verse_def[0], verse_def[1:])
two_line_def = u'%s\n%s' % (verse_def[0], verse_def[1:])
row = two_line_def
if verse_def not in self.slideList:
self.slideList[verse_def] = framenumber
if self.isLive:
self.songMenu.menu().addAction(verse_def,
self.onSongBarHandler)
else:
row += 1
self.slideList[unicode(row)] = row - 1
item.setText(frame[u'text'])
else:
label = QtGui.QLabel()
label.setMargin(4)
if serviceItem.is_media():
label.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
else:
label.setScaledContents(True)
if self.serviceItem.is_command():
label.setPixmap(QtGui.QPixmap(frame[u'image']))
else:
# If current slide set background to image
if framenumber == slideno:
self.serviceItem.bg_image_bytes = self.image_manager.get_image_bytes(frame[u'path'],
ImageSource.ImagePlugin)
image = self.image_manager.get_image(frame[u'path'], ImageSource.ImagePlugin)
label.setPixmap(QtGui.QPixmap.fromImage(image))
self.previewListWidget.setCellWidget(framenumber, 0, label)
slideHeight = width * (1 / self.ratio)
row += 1
self.slideList[unicode(row)] = row - 1
text.append(unicode(row))
self.previewListWidget.setItem(framenumber, 0, item)
if slideHeight:
self.previewListWidget.setRowHeight(framenumber, slideHeight)
self.previewListWidget.setVerticalHeaderLabels(text)
if self.serviceItem.is_text():
self.previewListWidget.resizeRowsToContents()
self.previewListWidget.setColumnWidth(0,
self.previewListWidget.viewport().size().width())
self.__updatePreviewSelection(slideno)
self.enableToolBar(serviceItem)
# Pass to display for viewing.
# Postpone image build, we need to do this later to avoid the theme
# flashing on the screen
if not self.serviceItem.is_image():
self.display.buildHtml(self.serviceItem)
if serviceItem.is_media():
self.onMediaStart(serviceItem)
self.slideSelected(True)
self.previewListWidget.setFocus()
if old_item:
# Close the old item after the new one is opened
# This avoids the service theme/desktop flashing on screen
# However opening a new item of the same type will automatically
# close the previous, so make sure we don't close the new one.
if old_item.is_command() and not serviceItem.is_command():
Receiver.send_message(u'%s_stop' % old_item.name.lower(), [old_item, self.isLive])
if old_item.is_media() and not serviceItem.is_media():
self.onMediaClose()
Receiver.send_message(u'slidecontroller_%s_started' % self.typePrefix, [serviceItem])
def __updatePreviewSelection(self, slideno):
"""
Utility method to update the selected slide in the list.
"""
if slideno > self.previewListWidget.rowCount():
self.previewListWidget.selectRow(
self.previewListWidget.rowCount() - 1)
else:
self.__checkUpdateSelectedSlide(slideno)
# Screen event methods
def onSlideSelectedIndex(self, message):
"""
Go to the requested slide
"""
index = int(message[0])
if not self.serviceItem:
return
if self.serviceItem.is_command():
Receiver.send_message(u'%s_slide' % self.serviceItem.name.lower(), [self.serviceItem, self.isLive, index])
self.updatePreview()
else:
self.__checkUpdateSelectedSlide(index)
self.slideSelected()
def mainDisplaySetBackground(self):
"""
        Allow the main display to be blanked at startup time.
"""
log.debug(u'mainDisplaySetBackground live = %s' % self.isLive)
display_type = Settings().value(self.parent().generalSettingsSection + u'/screen blank')
if self.screens.which_screen(self.window()) != self.screens.which_screen(self.display):
# Order done to handle initial conversion
if display_type == u'themed':
self.onThemeDisplay(True)
elif display_type == u'hidden':
self.onHideDisplay(True)
elif display_type == u'blanked':
self.onBlankDisplay(True)
else:
Receiver.send_message(u'live_display_show')
else:
self.liveEscape()
def onSlideBlank(self):
"""
Handle the slidecontroller blank event
"""
self.onBlankDisplay(True)
def onSlideUnblank(self):
"""
Handle the slidecontroller unblank event
"""
self.onBlankDisplay(False)
def onBlankDisplay(self, checked=None):
"""
Handle the blank screen button actions
"""
if checked is None:
checked = self.blankScreen.isChecked()
log.debug(u'onBlankDisplay %s' % checked)
self.hideMenu.setDefaultAction(self.blankScreen)
self.blankScreen.setChecked(checked)
self.themeScreen.setChecked(False)
self.desktopScreen.setChecked(False)
if checked:
Settings().setValue(self.parent().generalSettingsSection + u'/screen blank', u'blanked')
else:
Settings().remove(self.parent().generalSettingsSection + u'/screen blank')
self.blankPlugin()
self.updatePreview()
self.onToggleLoop()
def onThemeDisplay(self, checked=None):
"""
Handle the Theme screen button
"""
if checked is None:
checked = self.themeScreen.isChecked()
log.debug(u'onThemeDisplay %s' % checked)
self.hideMenu.setDefaultAction(self.themeScreen)
self.blankScreen.setChecked(False)
self.themeScreen.setChecked(checked)
self.desktopScreen.setChecked(False)
if checked:
Settings().setValue(self.parent().generalSettingsSection + u'/screen blank', u'themed')
else:
Settings().remove(self.parent().generalSettingsSection + u'/screen blank')
self.blankPlugin()
self.updatePreview()
self.onToggleLoop()
def onHideDisplay(self, checked=None):
"""
Handle the Hide screen button
"""
if checked is None:
checked = self.desktopScreen.isChecked()
log.debug(u'onHideDisplay %s' % checked)
self.hideMenu.setDefaultAction(self.desktopScreen)
self.blankScreen.setChecked(False)
self.themeScreen.setChecked(False)
self.desktopScreen.setChecked(checked)
if checked:
Settings().setValue(self.parent().generalSettingsSection + u'/screen blank', u'hidden')
else:
Settings().remove(self.parent().generalSettingsSection + u'/screen blank')
self.hidePlugin(checked)
self.updatePreview()
self.onToggleLoop()
def blankPlugin(self):
"""
Blank/Hide the display screen within a plugin if required.
"""
hide_mode = self.hideMode()
log.debug(u'blankPlugin %s ', hide_mode)
if self.serviceItem is not None:
if hide_mode:
if not self.serviceItem.is_command():
Receiver.send_message(u'live_display_hide', hide_mode)
Receiver.send_message(u'%s_blank' % self.serviceItem.name.lower(),
[self.serviceItem, self.isLive, hide_mode])
else:
if not self.serviceItem.is_command():
Receiver.send_message(u'live_display_show')
Receiver.send_message(u'%s_unblank' % self.serviceItem.name.lower(), [self.serviceItem, self.isLive])
else:
if hide_mode:
Receiver.send_message(u'live_display_hide', hide_mode)
else:
Receiver.send_message(u'live_display_show')
def hidePlugin(self, hide):
"""
Tell the plugin to hide the display screen.
"""
log.debug(u'hidePlugin %s ', hide)
if self.serviceItem is not None:
if hide:
Receiver.send_message(u'live_display_hide', HideMode.Screen)
Receiver.send_message(u'%s_hide' % self.serviceItem.name.lower(), [self.serviceItem, self.isLive])
else:
if not self.serviceItem.is_command():
Receiver.send_message(u'live_display_show')
Receiver.send_message(u'%s_unblank' % self.serviceItem.name.lower(), [self.serviceItem, self.isLive])
else:
if hide:
Receiver.send_message(u'live_display_hide', HideMode.Screen)
else:
Receiver.send_message(u'live_display_show')
def onSlideSelected(self):
"""
Slide selected in controller
"""
self.slideSelected()
def slideSelected(self, start=False):
"""
Generate the preview when you click on a slide.
        If this is the Live Controller, also display it on the screen.
"""
row = self.previewListWidget.currentRow()
self.selectedRow = 0
if -1 < row < self.previewListWidget.rowCount():
if self.serviceItem.is_command():
if self.isLive and not start:
Receiver.send_message(u'%s_slide' % self.serviceItem.name.lower(),
[self.serviceItem, self.isLive, row])
else:
to_display = self.serviceItem.get_rendered_frame(row)
if self.serviceItem.is_text():
self.display.text(to_display)
else:
if start:
self.display.buildHtml(self.serviceItem, to_display)
else:
self.display.image(to_display)
# reset the store used to display first image
self.serviceItem.bg_image_bytes = None
self.updatePreview()
self.selectedRow = row
self.__checkUpdateSelectedSlide(row)
Receiver.send_message(u'slidecontroller_%s_changed' % self.typePrefix, row)
self.display.setFocus()
def onSlideChange(self, row):
"""
The slide has been changed. Update the slidecontroller accordingly
"""
self.__checkUpdateSelectedSlide(row)
self.updatePreview()
Receiver.send_message(u'slidecontroller_%s_changed' % self.typePrefix, row)
def updatePreview(self):
"""
This updates the preview frame, for example after changing a slide or
using *Blank to Theme*.
"""
log.debug(u'updatePreview %s ' % self.screens.current[u'primary'])
if not self.screens.current[u'primary'] and self.serviceItem and \
self.serviceItem.is_capable(ItemCapabilities.ProvidesOwnDisplay):
# Grab now, but try again in a couple of seconds if slide change
# is slow
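            # Note: QTimer.singleShot expects an interval in milliseconds (an int),
            # so 0.5 and 2.5 here do not mean half a second and 2.5 seconds.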
QtCore.QTimer.singleShot(0.5, self.grabMainDisplay)
QtCore.QTimer.singleShot(2.5, self.grabMainDisplay)
else:
self.slidePreview.setPixmap(self.display.preview())
def grabMainDisplay(self):
"""
Creates an image of the current screen and updates the preview frame.
"""
winid = QtGui.QApplication.desktop().winId()
rect = self.screens.current[u'size']
winimg = QtGui.QPixmap.grabWindow(winid, rect.x(), rect.y(), rect.width(), rect.height())
self.slidePreview.setPixmap(winimg)
def onSlideSelectedNextAction(self, checked):
"""
Wrapper function from create_action so we can throw away the
incorrect parameter
"""
self.onSlideSelectedNext()
def onSlideSelectedNext(self, wrap=None):
"""
Go to the next slide.
"""
if not self.serviceItem:
return
Receiver.send_message(u'%s_next' % self.serviceItem.name.lower(), [self.serviceItem, self.isLive])
if self.serviceItem.is_command() and self.isLive:
self.updatePreview()
else:
row = self.previewListWidget.currentRow() + 1
if row == self.previewListWidget.rowCount():
if wrap is None:
if self.slide_limits == SlideLimits.Wrap:
row = 0
elif self.isLive and self.slide_limits == SlideLimits.Next:
self.serviceNext()
return
else:
row = self.previewListWidget.rowCount() - 1
elif wrap:
row = 0
else:
row = self.previewListWidget.rowCount() - 1
self.__checkUpdateSelectedSlide(row)
self.slideSelected()
def onSlideSelectedPrevious(self):
"""
Go to the previous slide.
"""
if not self.serviceItem:
return
Receiver.send_message(u'%s_previous' % self.serviceItem.name.lower(), [self.serviceItem, self.isLive])
if self.serviceItem.is_command() and self.isLive:
self.updatePreview()
else:
row = self.previewListWidget.currentRow() - 1
if row == -1:
if self.slide_limits == SlideLimits.Wrap:
row = self.previewListWidget.rowCount() - 1
elif self.isLive and self.slide_limits == SlideLimits.Next:
self.keypress_queue.append(ServiceItemAction.PreviousLastSlide)
self._process_queue()
return
else:
row = 0
self.__checkUpdateSelectedSlide(row)
self.slideSelected()
def __checkUpdateSelectedSlide(self, row):
"""
Check if this slide has been updated
"""
if row + 1 < self.previewListWidget.rowCount():
self.previewListWidget.scrollToItem(self.previewListWidget.item(row + 1, 0))
self.previewListWidget.selectRow(row)
def onToggleLoop(self):
"""
Toggles the loop state.
"""
hide_mode = self.hideMode()
if hide_mode is None and (self.playSlidesLoop.isChecked() or self.playSlidesOnce.isChecked()):
self.onStartLoop()
else:
self.onStopLoop()
def onStartLoop(self):
"""
Start the timer loop running and store the timer id
"""
if self.previewListWidget.rowCount() > 1:
self.timer_id = self.startTimer(int(self.delaySpinBox.value()) * 1000)
def onStopLoop(self):
"""
Stop the timer loop running
"""
if self.timer_id:
self.killTimer(self.timer_id)
self.timer_id = 0
def onPlaySlidesLoop(self, checked=None):
"""
Start or stop 'Play Slides in Loop'
"""
if checked is None:
checked = self.playSlidesLoop.isChecked()
else:
self.playSlidesLoop.setChecked(checked)
log.debug(u'onPlaySlidesLoop %s' % checked)
if checked:
self.playSlidesLoop.setIcon(build_icon(u':/media/media_stop.png'))
self.playSlidesLoop.setText(UiStrings().StopPlaySlidesInLoop)
self.playSlidesOnce.setIcon(build_icon(u':/media/media_time.png'))
self.playSlidesOnce.setText(UiStrings().PlaySlidesToEnd)
self.playSlidesMenu.setDefaultAction(self.playSlidesLoop)
self.playSlidesOnce.setChecked(False)
else:
self.playSlidesLoop.setIcon(build_icon(u':/media/media_time.png'))
self.playSlidesLoop.setText(UiStrings().PlaySlidesInLoop)
self.onToggleLoop()
def onPlaySlidesOnce(self, checked=None):
"""
Start or stop 'Play Slides to End'
"""
if checked is None:
checked = self.playSlidesOnce.isChecked()
else:
self.playSlidesOnce.setChecked(checked)
log.debug(u'onPlaySlidesOnce %s' % checked)
if checked:
self.playSlidesOnce.setIcon(build_icon(u':/media/media_stop.png'))
self.playSlidesOnce.setText(UiStrings().StopPlaySlidesToEnd)
self.playSlidesLoop.setIcon(build_icon(u':/media/media_time.png'))
self.playSlidesLoop.setText(UiStrings().PlaySlidesInLoop)
self.playSlidesMenu.setDefaultAction(self.playSlidesOnce)
self.playSlidesLoop.setChecked(False)
else:
self.playSlidesOnce.setIcon(build_icon(u':/media/media_time'))
self.playSlidesOnce.setText(UiStrings().PlaySlidesToEnd)
self.onToggleLoop()
def setAudioItemsVisibility(self, visible):
"""
Set the visibility of the audio controls
"""
self.toolbar.setWidgetVisible(self.audioList, visible)
def onAudioPauseClicked(self, checked):
"""
Pause the audio player
"""
if not self.audioPauseItem.isVisible():
return
if checked:
self.display.audioPlayer.pause()
else:
self.display.audioPlayer.play()
def timerEvent(self, event):
"""
If the timer event is for this window select next slide
"""
if event.timerId() == self.timer_id:
self.onSlideSelectedNext(self.playSlidesLoop.isChecked())
def onEditSong(self):
"""
Triggered from the preview display; requests that the service item be edited
"""
self.songEdit = True
new_item = Registry().get(self.serviceItem.name).onRemoteEdit(self.serviceItem.edit_id, True)
if new_item:
self.add_service_item(new_item)
def onPreviewAddToService(self):
"""
From the preview display request the Item to be added to service
"""
if self.serviceItem:
self.service_manager.add_service_item(self.serviceItem)
def onGoLiveClick(self):
"""
Triggered by clicking a slide in the preview list; sends the item live
if the 'double click live' setting is enabled.
"""
if Settings().value(u'advanced/double click live'):
# Live and Preview have issues if we have video or presentations
# playing in both at the same time.
if self.serviceItem.is_command():
Receiver.send_message(u'%s_stop' % self.serviceItem.name.lower(), [self.serviceItem, self.isLive])
if self.serviceItem.is_media():
self.onMediaClose()
self.onGoLive()
def onGoLive(self):
"""
If preview copy slide item to live
"""
row = self.previewListWidget.currentRow()
if -1 < row < self.previewListWidget.rowCount():
if self.serviceItem.from_service:
self.service_manager.preview_live(self.serviceItem.unique_identifier, row)
else:
self.live_controller.addServiceManagerItem(self.serviceItem, row)
def onMediaStart(self, item):
"""
Respond to the arrival of a media service item
"""
log.debug(u'SlideController onMediaStart')
self.media_controller.video(self.controllerType, item, self.hideMode())
if not self.isLive:
self.previewDisplay.show()
self.slidePreview.hide()
def onMediaClose(self):
"""
Respond to a request to close the Video
"""
log.debug(u'SlideController onMediaClose')
self.media_controller.media_reset(self)
self.previewDisplay.hide()
self.slidePreview.show()
def _resetBlank(self):
"""
Used by command items which provide their own displays to reset the
screen hide attributes
"""
hide_mode = self.hideMode()
if hide_mode == HideMode.Blank:
self.onBlankDisplay(True)
elif hide_mode == HideMode.Theme:
self.onThemeDisplay(True)
elif hide_mode == HideMode.Screen:
self.onHideDisplay(True)
else:
self.hidePlugin(False)
def hideMode(self):
"""
Determine what the hide mode should be according to the blank button
"""
if not self.isLive:
return None
elif self.blankScreen.isChecked():
return HideMode.Blank
elif self.themeScreen.isChecked():
return HideMode.Theme
elif self.desktopScreen.isChecked():
return HideMode.Screen
else:
return None
def onNextTrackClicked(self):
"""
Go to the next track when next is clicked
"""
self.display.audioPlayer.next()
def onAudioTimeRemaining(self, time):
"""
Update how much time is remaining
"""
seconds = self.display.audioPlayer.mediaObject.remainingTime() // 1000
minutes = seconds // 60
seconds %= 60
self.audioTimeLabel.setText(u' %02d:%02d ' % (minutes, seconds))
def onTrackTriggered(self):
"""
Start playing a track
"""
action = self.sender()
self.display.audioPlayer.goTo(action.data())
def _get_plugin_manager(self):
"""
Adds the plugin manager to the class dynamically
"""
if not hasattr(self, u'_plugin_manager'):
self._plugin_manager = Registry().get(u'plugin_manager')
return self._plugin_manager
plugin_manager = property(_get_plugin_manager)
def _get_image_manager(self):
"""
Adds the image manager to the class dynamically
"""
if not hasattr(self, u'_image_manager'):
self._image_manager = Registry().get(u'image_manager')
return self._image_manager
image_manager = property(_get_image_manager)
def _get_media_controller(self):
"""
Adds the media controller to the class dynamically
"""
if not hasattr(self, u'_media_controller'):
self._media_controller = Registry().get(u'media_controller')
return self._media_controller
media_controller = property(_get_media_controller)
def _get_service_manager(self):
"""
Adds the service manager to the class dynamically
"""
if not hasattr(self, u'_service_manager'):
self._service_manager = Registry().get(u'service_manager')
return self._service_manager
service_manager = property(_get_service_manager)
def _get_live_controller(self):
"""
Adds the live controller to the class dynamically
"""
if not hasattr(self, u'_live_controller'):
self._live_controller = Registry().get(u'live_controller')
return self._live_controller
live_controller = property(_get_live_controller)
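# Editorial note (not in the original file): each of the properties above lazily
# fetches a shared component from the Registry on first access and caches it on
# the instance, so e.g. `self.live_controller` only calls
# Registry().get(u'live_controller') once per SlideController instance.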
|
marmyshev/transitions
|
openlp/core/ui/slidecontroller.py
|
Python
|
gpl-2.0
| 64,513
|
[
"Brian"
] |
3f4b19330e8439824d0793d3588f0ad99b71aa79a433948893116b7c47f187c5
|
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes corresponding to Newick trees, also used for Nexus trees.
See classes in `Bio.Nexus`: Trees.Tree, Trees.NodeData, and Nodes.Chain.
"""
__docformat__ = "restructuredtext en"
import warnings
from Bio.Phylo import BaseTree
class Tree(BaseTree.Tree):
"""Newick Tree object."""
def __init__(self, root=None, rooted=False, id=None, name=None, weight=1.0):
BaseTree.Tree.__init__(self, root=root or Clade(),
rooted=rooted, id=id, name=name)
self.weight = weight
class Clade(BaseTree.Clade):
"""Newick Clade (sub-tree) object."""
def __init__(self, branch_length=1.0, name=None, clades=None,
confidence=None, comment=None):
BaseTree.Clade.__init__(self, branch_length=branch_length,
name=name, clades=clades, confidence=confidence)
self.comment = comment
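# --- Illustrative usage sketch (editorial addition, not part of the original
# Biopython module). It only uses the Tree and Clade classes defined above plus
# methods inherited from BaseTree; the clade names and branch lengths are
# made-up example values.
if __name__ == "__main__":
    root = Clade(name="root", clades=[
        Clade(branch_length=0.2, name="A"),
        Clade(branch_length=0.3, name="B"),
    ])
    tree = Tree(root=root, rooted=True, name="example")
    # get_terminals() is inherited from BaseTree.TreeMixin
    print([leaf.name for leaf in tree.get_terminals()])  # ['A', 'B']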
|
bryback/quickseq
|
genescript/Bio/Phylo/Newick.py
|
Python
|
mit
| 1,097
|
[
"Biopython"
] |
6709a6013a6aa01f66fd72dec5e880b839be87a6826b2a5eebac3e4316422c12
|
#Import packages that will be used
#GUI generation Packages
import wx
import wx.aui
from wx.lib.buttons import GenBitmapButton,GenBitmapToggleButton
#Operational Packages
import subprocess
import traceback
import types
import os
import sys
import errno
import numpy as np
from Bio import Entrez
from xml.dom import minidom
#----------------------------------------------------------------------------
class GuiFrame(wx.Frame):
""" A frame showing the contents of a single document. """
# ==========================================
# ===== Methods for Plug-in Management =====
# ==========================================
def GetName(self):
'''
Method to return name of tool
'''
return 'ImageJ'
def GetBMP(self):
'''
Method to return identifying image
'''
return r".\Utils\Icons\imagej.bmp"
def GetPlugIns(self):
'''
Method to discover and import the available eTools plugins
'''
self.PIlist = os.listdir(self.homeDir + r"\plugins\Tools\EtoolsPlugins")
sys.path.append(self.homeDir + r"\plugins\Tools\EtoolsPlugins")
self.toolPlugins=[]
for i,filePI in enumerate(self.PIlist):
(self.PIname, self.PIext) = os.path.splitext(filePI)
if self.PIext == '.py':
self.toolPlugins.append(__import__(str(self.PIname)))
def SetQuery(self,qseq):
self.SeqRecs = qseq
# ==========================================
# == Initialization and Window Management ==
# ==========================================
def __init__(self, parent, title, homeDir, cL):
self.homeDir = homeDir
Entrez.email = "bioGui@BioGUI.com"
"""
Standard constructor.
'parent' and 'title' are passed to the standard wx.Frame constructor.
'homeDir' is the application's home directory, used to locate plugins and icons.
"""
wx.Frame.__init__(self, parent, title='Entrez E-Utilities GUI', size=(1000, 620))
self.SetBackgroundColour('#FBFFCF')
self.GetPlugIns()
self.buttons = []
# Menu item IDs:
menu_OPTIONS = wx.NewId() # File menu items
menu_ETOOLS = wx.NewId() # eTools command
self.toolGoMenu=[] # Tools menu options.
for dummy in self.toolPlugins: # Iterate through all available tools
self.toolGoMenu.append(wx.NewId())
#menu_ABOUT = wx.NewId() # Help menu items.
# Setup our menu bar.
self.menuBar = wx.MenuBar()
#Setup options for the file menu
self.fileMenu = wx.Menu()
self.fileMenu.Append(wx.ID_NEW, "New\tCtrl-N", "Create a new window")
self.fileMenu.Append(wx.ID_OPEN, "Load...", "Load an existing eTools result")
self.fileMenu.Append(menu_ETOOLS, "ETOOL GO", "Perform eTools action")
self.fileMenu.Append(menu_OPTIONS, "Options...", "Options...")
self.fileMenu.AppendSeparator()
self.fileMenu.Append(wx.ID_SAVEAS, "Save As...")
self.fileMenu.AppendSeparator()
self.fileMenu.Append(wx.ID_EXIT, "Quit\tCtrl-Q")
#Setup the file menu
self.menuBar.Append(self.fileMenu, "File")
#Setup the Program type menu options
self.typeMenu = wx.Menu()
for itnum,tool in enumerate(self.toolPlugins):
self.typeMenu.Append(self.toolGoMenu[itnum], tool.etPlugin().GetName(), kind=wx.ITEM_RADIO)
#Setup the type menu
self.menuBar.Append(self.typeMenu, "Type")
# Create our toolbar.
tsize = (15,15)
self.toolbar = self.CreateToolBar(wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT)
self.toolbar.AddSimpleTool(
wx.ID_NEW, wx.Bitmap(self.homeDir + r"\Utils\Icons\Blank.bmp", wx.BITMAP_TYPE_BMP), "New")
self.toolbar.AddSimpleTool(
wx.ID_OPEN, wx.Bitmap(self.homeDir + r"\Utils\Icons\openFolder.bmp", wx.BITMAP_TYPE_BMP), "Open")
self.toolbar.AddSimpleTool(
wx.ID_SAVEAS, wx.Bitmap(self.homeDir + r"\Utils\Icons\Disk.bmp", wx.BITMAP_TYPE_BMP), "Save")
self.toolbar.AddSimpleTool(
wx.ID_SAVEAS, wx.Bitmap(self.homeDir + r"\Utils\Icons\diskCopied.bmp", wx.BITMAP_TYPE_BMP), "Save As...")
self.toolbar.AddSimpleTool(
wx.ID_EXIT, wx.Bitmap(self.homeDir + r"\Utils\Icons\RedX.bmp", wx.BITMAP_TYPE_BMP), "Exit")
self.toolbar.AddSeparator()
for itnum,tool in enumerate(self.toolPlugins):
name = tool.etPlugin().GetName()
BMP = tool.etPlugin().GetBMP(self.homeDir)
self.toolbar.AddSimpleTool(
self.toolGoMenu[itnum], wx.Bitmap(BMP, wx.BITMAP_TYPE_BMP), name)
self.toolbar.SetBackgroundColour('LIGHT GRAY')
self.toolbar.Realize()
# Associate menu/toolbar items with their handlers.
self.menuHandlers = [
(menu_ETOOLS, self.toolEXE),
(wx.ID_NEW, self.doNew),
(wx.ID_OPEN, self.doOpen),
(wx.ID_EXIT, self.doExit),
(wx.ID_SAVEAS, self.doSaveAs),
(menu_OPTIONS, self.doOptions),
]
tempIdNum = len(self.menuHandlers)
for itnum,tool in enumerate(self.toolPlugins):
self.menuHandlers.append((self.toolGoMenu[itnum], self.helpEXE))
self.lowID,dummy = self.menuHandlers[tempIdNum]
#Update Menu Bar with User Input
for combo in self.menuHandlers:
id, handler = combo[:2]
self.Bind(wx.EVT_MENU, handler, id = id)
if len(combo)>2:
self.Bind(wx.EVT_UPDATE_UI, combo[2], id = id)
#Set the menu bar
self.SetMenuBar(self.menuBar)
#Create user interface appearance
self.panelSQ = wx.Panel(self, -1, pos=(7,15), size=(976,70), style=wx.BORDER_RAISED)
self.panelSQ.SetBackgroundColour('#53728c')
self.panelRSLT = wx.Panel(self, -1, pos=(7,85), size=(976,440), style=wx.BORDER_RAISED)
self.panelRSLT.SetBackgroundColour('#53728c')
#Create user interface text boxes
#Create sequence input box and label. Allow input to be modified.
self.l1 = wx.StaticText(self.panelSQ, -1, "Query: ", pos=(445,10))
self.l1.SetForegroundColour('WHITE')
self.text1 = wx.TextCtrl(self.panelSQ, -1, "", size=(260, 53),
style=wx.TE_MULTILINE|wx.TE_PROCESS_ENTER, pos=(490,6))
wx.CallAfter(self.text1.SetInsertionPoint, 0)
#Create results output box and label. The box can be modified.
self.l2 = wx.StaticText(self.panelRSLT, -1, "Results: ", pos=(25,10))
self.l2.SetForegroundColour('WHITE')
self.text2 = []
self.text2.append(wx.TextCtrl(self.panelRSLT, -1, "", size=(892, 390),
style=wx.TE_MULTILINE|wx.TE_PROCESS_ENTER, pos=(75,10)))
wx.CallAfter(self.text2[0].SetInsertionPoint, 0)
#Create ETOOL GO action button
etGObutton = wx.Button(self.panelSQ, -1, "GO!", pos=(820,6), size=(75,25))
etGObutton.SetBackgroundColour('RED')
etGObutton.SetForegroundColour('WHITE')
self.Bind(wx.EVT_BUTTON, self.toolEXE, etGObutton)
#Create USE RESULTS action button
urGObutton = wx.Button(self.panelSQ, -1, "Use Results", pos=(820,35), size=(75,25))
urGObutton.SetBackgroundColour('WHITE')
urGObutton.SetForegroundColour('RED')
self.Bind(wx.EVT_BUTTON, self.doReuse, urGObutton)
#Create User Modifiable search parameters.
self.l3 = wx.StaticText(self.panelSQ, -1, "Database: ", pos=(195,10))
self.l3.SetForegroundColour('WHITE')
self.dbCB = wx.ComboBox(parent=self.panelSQ, id=-1, pos=(256,6),
choices=["All", "Books", "Cancer Chromosomes", "CDD","CoreNucleotide", "3D Domains",
"EST", "Gene", "Genome", "Genome Project", "dbGaP", "GENSAT", "GEO Datasets",
"GEO Profiles", "GSS", "HomoloGene", "Journnals", "MeSH", "NCBI Web Site",
"NLM Catalog", "OMIA", "OMIM", "PopSet", "Probe", "Protein",
"PubChem BioAssay", "PubChem Compound", "PubChem Substance", "PubMed",
"PubMed Central", "SNP", "Structure", "Taxonomy", "UniGene", "UniSTS"],
style=wx.CB_READONLY)
self.dbCB.SetSelection(0)
self.l4 = wx.StaticText(self.panelSQ, -1, "E-Utility: ", pos=(18,10))
self.l4.SetForegroundColour('WHITE')
etCbchoices = []
for tool in self.toolPlugins:
etCbchoices.append(tool.etPlugin().GetName())
self.etoolCB = wx.ComboBox(parent=self.panelSQ, id=-1, pos=(75,6), choices=etCbchoices, style=wx.CB_READONLY)
#Initialization of variables
self.curIDs=[]
#Additional methods
def doReuse(self,event):
print self.curIDs
if len(self.curIDs)>0:
idBox = reuseBox(self,self.curIDs)
idBox.ShowModal()
newID = idBox.getRet()
self.text1.Clear()
self.text1.write(newID)
else:
print 'holdup'
#File Menu Executables
def doSaveAs(self, event):
""" Respond to the "Save As" menu command.
"""
if self.fileName == None:
default = ""
else:
default = self.fileName
curDir = os.getcwd()
fileName = wx.FileSelector("Save File As", "Saving",
default_filename=default,
default_extension="xml",
wildcard="*.xml",
flags = wx.SAVE | wx.OVERWRITE_PROMPT)
if fileName == "": return # User cancelled.
fileName = os.path.join(os.getcwd(), fileName)
os.chdir(curDir)
title = os.path.basename(fileName)
self.SetTitle(title)
self.fileName = fileName
self.saveContents()
def doExit(self, event):
""" Respond to the "Quit" menu command.
"""
self.askIfUserWantsToSave("closing")
self.Destroy()
def doOptions(self, event):
""" Respond to the "Load" menu command.
"""
self.optList=self.optBox.getOpts(self.abet,self.bnum)
self.optBox.ShowModal()
id,handle=self.menuHandlers[self.optBox.getBType()+10]
handle(wx.EVT_MENU)
self.typeMenu.Check(id,True)
def doOpen(self, event):
""" Respond to the "Load" menu command.
"""
curDir = os.getcwd()
fileName = wx.FileSelector("Load File", default_extension=".fasta",
flags = wx.OPEN | wx.FILE_MUST_EXIST)
if fileName == "": return
fileName = os.path.join(os.getcwd(), fileName)
os.chdir(curDir)
fasta_string = open(fileName).read()
self.text1.write(fasta_string)
def doNew(self, event):
""" Respond to the "New" menu command.
"""
newFrame = GuiFrame(None, -1, self.homeDir, None) # pass homeDir through; cL is not used in __init__
newFrame.Show(True)
def saveContents(self):
""" Save the contents of our document to disk.
"""
try:
objData = []
for obj in self.contents:
objData.append([obj.__class__, obj.getData()])
f = open(self.fileName, "wb")
#cPickle.dump(objData, f)
e = open("my_blast.xml")
te = e.read()
f.write(te)
e.close()
f.close()
#self._adjustMenus()
except:
response = wx.MessageBox("Unable to load " + self.fileName + ".",
"Error", wx.OK|wx.ICON_ERROR, self)
def askIfUserWantsToSave(self, action):
""" Give the user the opportunity to save the current document.
'action' is a string describing the action about to be taken. If
the user wants to save the document, it is saved immediately. If
the user cancels, we return False.
"""
response = wx.MessageBox("Save changes before " + action + "?",
"Confirm", wx.YES_NO | wx.CANCEL, self)
if response == wx.YES:
if self.fileName == None:
fileName = wx.FileSelector("Save File As", "Saving",
default_extension="psk",
wildcard="*.psk",
flags = wx.SAVE | wx.OVERWRITE_PROMPT)
if fileName == "": return False # User cancelled.
self.fileName = fileName
#self.saveContents()
return True
elif response == wx.NO:
return True # User doesn't want changes saved.
elif response == wx.CANCEL:
return False # User cancelled.
#Type Menu Executables
def helpEXE(self,event):
tempId = event.GetId()
self.bnum=tempId-self.lowID
self.ccmd = self.toolPlugins[self.bnum].etPlugin().pluginEXE()
self.typeMenu.Check(tempId,True)
#eTools Executable
def toolEXE(self, evt):
dbName = self.dbCB.GetValue()
if dbName == 'All':
dbName = ''
idName = self.text1.GetValue()
if idName == '':
self.text2[0].write('please enter a query')
else:
Btn = evt.GetEventObject()
print self.toolPlugins[self.etoolCB.GetSelection()].etPlugin().GetName()
self.curIDs = self.toolPlugins[self.etoolCB.GetSelection()].etPlugin().GetExec(self,dbName,idName)
#----------------------------------------------------------------------------
'''
#Code to start gui
class MyApp(wx.App):
def OnInit(self):
frame = GuiFrame(None,-1,r'C:\Users\francis\Documents\Monguis\BioGui')
self.SetTopWindow(frame)
frame.Show(True)
return True
'''
class reuseBox(wx.Dialog):
""" A frame showing the alignment of the selected Blast Record alignment"""
# ==========================================
# == Initialisation and Window Management ==
# ==========================================
def __init__(self, parent, idChoices):
wx.Dialog.__init__(self, parent, title='ID selection', size=(200, 100))
selTxt = wx.StaticText(self, -1, "Please select ID:", pos=(5,10))
selTxt.SetForegroundColour('BLACK')
self.selBool = False
self.retkey=idChoices[0]
self.idCB = wx.ComboBox(parent=self, id=-1, pos=(100,10), choices=idChoices, style=wx.CB_READONLY)
#Create return button
retbutton = wx.Button(self, -1, "OK", pos=(40,50), size=(50,25))
retbutton.SetBackgroundColour('RED')
retbutton.SetForegroundColour('WHITE')
self.Bind(wx.EVT_BUTTON, self.retMain, retbutton)
def retMain(self,evt):
self.retkey = self.idCB.GetValue()
self.Destroy()
def getRet(self):
return self.retkey
def GetName():
'''
Method to return name of tool
'''
return 'ImageJ'
def GetBMP():
'''
Method to return identifying image
'''
return r".\Utils\Icons\imagej.bmp"
'''
global app
app = MyApp(redirect=True)
app.MainLoop()
'''
|
fxb22/BioGUI
|
plugins/Tools/ImageJgui.py
|
Python
|
gpl-2.0
| 15,688
|
[
"BLAST"
] |
af67588ec7f1ea2265f4e4cf26bcfeb54a296a1a090ff09dad45146707965daf
|
''' Copyright (c) 2013 Potential Ventures Ltd
Copyright (c) 2013 SolarFlare Communications Inc
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Potential Ventures Ltd,
SolarFlare Communications Inc nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''
"""
Set of general generic generators
"""
import math
import random
from cocotb.decorators import public
@public
def repeat(obj, nrepeat=None):
"""Generator to repeatedly yield the same object
Args:
obj (any): The object to yield
Kwargs:
nrepeat (int): The number of times to repeatedly yield obj
"""
if nrepeat is None:
while True:
yield obj
else:
for i in range(nrepeat):
yield obj
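# Worked example (editorial addition, not in the original source):
# list(repeat("x", nrepeat=2)) evaluates to ["x", "x"], while repeat("x") with
# no count yields "x" forever.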
@public
def combine(generators):
"""
Generator for serially combining multiple generators together
Args:
generators (iterable): Generators to combine together
"""
for gen in generators:
for item in gen:
yield item
@public
def gaussian(mean, sigma):
"""
Generate values drawn from a Gaussian distribution indefinitely
Args:
mean (int/float): mean value
sigma (int/float): standard deviation
"""
while True:
yield random.gauss(mean, sigma)
@public
def sine_wave(amplitude, w, offset=0):
"""
Generates a sine wave that repeats forever
Args:
amplitude (int/float): peak deviation of the function from zero
w (int/float): number of samples per period of the wave
offset (int/float): constant offset added to each sample
Yields:
floats that form a sine wave
"""
twoPiF_DIV_sampleRate = math.pi * 2
while True:
for idx in (i / float(w) for i in range(int(w))):
yield amplitude*math.sin(twoPiF_DIV_sampleRate * idx) + offset
def get_generators(module):
"""Return an iterator which yields all the generators in a module
Args:
module (python module): The module to get the generators from
"""
return (getattr(module, gen) for gen in module.__all__)
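# --- Usage sketch (editorial addition, not part of the original cocotb module).
# It only exercises the generators defined above; the amplitude, counts and
# sample sizes are arbitrary demonstration values, and it assumes cocotb is
# importable so the module-level imports succeed.
if __name__ == "__main__":
    from itertools import islice
    # Three constant samples, then five points of a sine wave with amplitude
    # 2.0 sampled at 10 points per period.
    samples = list(combine([repeat(0.0, nrepeat=3),
                            islice(sine_wave(2.0, 10), 5)]))
    print(samples)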
|
stuarthodgson/cocotb
|
cocotb/generators/__init__.py
|
Python
|
bsd-3-clause
| 3,358
|
[
"Gaussian"
] |
2f1bc6d6fcd9afa1e929ac48f99d25d838d981f9ed3455fee55b448ee8553b7d
|
from .AbstractLinker import AbstractLinker
from .utils import common_subprocess, get_input_file_type, get_text, dict_to_csv, eprint
from .normalize import Normalizer
from .OPSIN import OPSIN
from rdkit.Chem import MolFromSmiles, MolToInchi, InchiToInchiKey
from pubchempy import get_compounds, BadRequestError, NotFoundError, PubChemHTTPError, ResponseParseError, ServerError, TimeoutError, PubChemPyError
from chemspipy import ChemSpider
from collections import ChainMap, OrderedDict
import logging
from tempfile import NamedTemporaryFile
import os
import re
import bisect
from time import sleep
logging.basicConfig(format="[%(levelname)s - %(filename)s:%(funcName)s:%(lineno)s] %(message)s")
verbosity_levels = {
0: 100,
1: logging.WARNING,
2: logging.INFO
}
CHEMSPOT_VERSION = "2.0"
class ChemSpot(AbstractLinker):
"""
Represents the ChemSpot software and acts as a linker between Python and command-line interface of ChemSpot.
ChemSpot version: 2.0
ChemSpot is a tool for chemical Named Entity Recognition. It assigns to each chemical entity one of these classes:
"SYSTEMATIC", "IDENTIFIER", "FORMULA", "TRIVIAL", "ABBREVIATION", "FAMILY", "MULTIPLE"
More information here: https://www.informatik.hu-berlin.de/de/forschung/gebiete/wbi/resources/chemspot/chemspot
ChemSpot is very memory-consuming, so dictionary and ID lookup are disabled by default. Only the CRF, OpenNLP sentence and
multiclass models are used by default.
The maximum memory used by the Java process is set to 8 GB by default. It is strongly recommended to use a swap file on an SSD disk when
available memory is under 8 GB (see https://www.digitalocean.com/community/tutorials/how-to-add-swap-space-on-ubuntu-16-04 for more details).
**To show the meaning of options:** ::
chemspot = ChemSpot()
print(chemspot.help()) # this will show the output of "$ chemspot -h"
print(chemspot._OPTIONS_REAL) # this will show the mapping between ChemSpot class and real ChemSpot parameters
Attributes
----------
_OPTIONS_REAL : dict
Internal dict which maps the passed options to real ChemSpot command-line arguments. Static attribute.
options : dict
Get or set options.
options_internal : dict
Return dict with options having internal names.
path_to_binary : str
Path to ChemSpot binary.
Methods
-------
process
Process the input file with ChemSpot.
help
Return ChemSpot help message.
"""
_OPTIONS_REAL = {
"path_to_crf": ("-m", "{}"),
"path_to_nlp": ("-s", "{}"),
"path_to_dict": ("-d", "{}"),
"path_to_ids": ("-i", "{}"),
"path_to_multiclass": ("-M", "{}"),
#"n_threads": ("-T", "{}"),
"iob_format": ("-I", "")
}
# matches ion and charge e.g. from "Cu(2+)"
RE_ION = re.compile(r"^\s*(?P<ion>[A-Z][a-z]?)\s*\((?P<charge>-?\+?i+\+?-?|-?\+?I+\+?-?|\d+\+|\d+-|\+\d+|-\d+|\++|-+)\)\s*$")
# matches charge digit or its signs
RE_CHARGE = re.compile(r"(?P<roman>i+|I+)|(?P<digit>\d+)|(?P<signs>^\++|-+$)")
logger = logging.getLogger("chemspot")
def __init__(self,
path_to_binary: str = "chemspot",
path_to_crf: str = "",
path_to_nlp: str = "",
path_to_dict: str = "",
path_to_ids: str = "",
path_to_multiclass: str = "multiclass.bin",
tessdata_path: str = "",
#n_threads: int = 0,
max_memory: int = 8,
verbosity: int = 1):
"""
Parameters
----------
path_to_binary : str
path_to_crf : str
Path to a CRF model file (internal default model file will be used if not provided).
path_to_nlp : str
Path to a OpenNLP sentence model file (internal default model file will be used if not provided).
path_to_dict : str
Path to a zipped set of brics dictionary automata. Disabled by default, set to 'dict.zip' to use default
dictionary.
path_to_ids : str
Path to a zipped tab-separated text file representing a map of terms to ids. Disabled by default,
set to `ids.zip` to use default IDs.
path_to_multiclass : str
Path to a multi-class model file. Enabled by default.
tessdata_path : str
Path to directory with Tesseract language data. If empty, the TESSDATA_PREFIX environment variable will be used.
max_memory : int
Maximum amount of memory [GB] which can be used by Java process.
verbosity : int
This class's verbosity. Values: 0, 1, 2
"""
if verbosity > 2:
verbosity = 2
elif verbosity not in verbosity_levels:
verbosity = 1
self.logger.setLevel(verbosity_levels[verbosity])
self.verbosity = verbosity
self.path_to_binary = path_to_binary
if not path_to_dict:
path_to_dict = "\"''\""
elif path_to_dict == "dict.zip" and "CHEMSPOT_DATA_PATH" in os.environ:
path_to_dict = "{}/{}".format(os.environ["CHEMSPOT_DATA_PATH"], "dict.zip")
if not path_to_ids:
path_to_ids = "\"''\""
elif path_to_ids == "ids.zip" and "CHEMSPOT_DATA_PATH" in os.environ:
path_to_ids = "{}/{}".format(os.environ["CHEMSPOT_DATA_PATH"], "ids.zip")
if path_to_multiclass == "multiclass.bin" and "CHEMSPOT_DATA_PATH" in os.environ:
path_to_multiclass = "{}/{}".format(os.environ["CHEMSPOT_DATA_PATH"], "multiclass.bin")
elif not path_to_multiclass:
path_to_multiclass = "\"''\""
if tessdata_path:
os.environ["TESSDATA_PREFIX"] = tessdata_path
self.re_ion = self.RE_ION
self.re_charge = self.RE_CHARGE
_, self.options, self.options_internal = self.build_commands(locals(), self._OPTIONS_REAL, path_to_binary)
self.options_internal["max_memory"] = max_memory
def set_options(self, options: dict):
"""
Sets the options passed in dict. Keys are the same as optional parameters in ChemSpot constructor (__init__()).
Parameters
----------
options
Dict of new options.
"""
_, self.options, self.options_internal = self.build_commands(options, self._OPTIONS_REAL, self.path_to_binary)
@staticmethod
def version() -> str:
"""
Returns
-------
str
ChemSpot version.
"""
return CHEMSPOT_VERSION
def help(self) -> str:
"""
Returns
-------
str
ChemSpot help message.
"""
stdout, stderr, _ = common_subprocess([self.path_to_binary, "1"])
if stderr:
return stderr
else:
return stdout
def process(self,
input_text: str = "",
input_file: str = "",
output_file: str = "",
output_file_sdf: str = "",
sdf_append: bool = False,
input_type: str = "",
lang: str = "eng",
paged_text: bool = False,
format_output: bool = True,
opsin_types: list = None,
standardize_mols: bool = True,
convert_ions: bool = True,
write_header: bool = True,
iob_format: bool = False,
dry_run: bool = False,
csv_delimiter: str = ";",
normalize_text: bool = True,
remove_duplicates: bool = False,
annotate: bool = True,
annotation_sleep: int = 2,
chemspider_token: str = "",
continue_on_failure: bool = False) -> OrderedDict:
r"""
Process the input file with ChemSpot.
Parameters
----------
input_text : str
String to be processed by ChemSpot.
input_file : str
Path to file to be processed by ChemSpot.
output_file : str
File to write output in.
output_file_sdf : str
File to write SDF output in. SDF is from OPSIN converted entities.
sdf_append : bool
If True, append new molecules to an existing SDF file, or create a new one if it doesn't exist. SDF is from OPSIN-converted entities.
input_type : str
| When empty, input (MIME) type will be determined from magic bytes.
| Or you can specify "pdf", "pdf_scan", "image" or "text" and magic bytes check will be skipped.
lang : str
| Language which will Tesseract use for OCR. Available languages: https://github.com/tesseract-ocr/tessdata
| Multiple languages can be specified with "+" character, i.e. "eng+bul+fra".
paged_text : bool
If True and `input_type` is "text" or `input_text` is provided, try to assign pages to chemical entities.
ASCII control character 12 (Form Feed, '\f') is expected between pages.
format_output : bool
| If True, the value of "content" key of returned dict will be list of OrderedDicts.
| If True and `output_file` is set, the CSV file will be written.
| If False, the value of "content" key of returned dict will be None.
opsin_types : list
| List of ChemSpot entity types. Entities of types in this list will be converted with OPSIN. If you don't want
to convert entities, pass empty list.
| OPSIN is designed to convert IUPAC names to linear notation (SMILES etc.) so default value of `opsin_types`
is ["SYSTEMATIC"] (these should be only IUPAC names).
| ChemSpot entity types: "SYSTEMATIC", "IDENTIFIER", "FORMULA", "TRIVIAL", "ABBREVIATION", "FAMILY", "MULTIPLE"
standardize_mols : bool
If True, use molvs (https://github.com/mcs07/MolVS) to standardize molecules converted by OPSIN.
convert_ions : bool
If True, try to convert ion entities (e.g. "Ni(II)") to SMILES. Entities matching ion regex won't be converted
with OPSIN.
write_header : bool
If True and `output_file` is set and `format_output` is True, write a CSV header row:
"smiles", "bond_length", "resolution", "confidence", "learn", "page", "coordinates"
iob_format : bool
If True, output will be in IOB format.
dry_run : bool
If True, only return list of commands to be called by subprocess.
csv_delimiter : str
Delimiter for output CSV file.
normalize_text : bool
If True, normalize text before performing NER. It is strongly recommended to do so, because without normalization
ChemSpot can produce unpredictable results which cannot be parsed.
remove_duplicates : bool
If True, remove duplicated chemical entities. Note that some entities-compounds can have different names, but
same notation (SMILES, InChI etc.). This will only remove entities with same names. Not applicable for IOB format.
annotate : bool
| If True, try to annotate entities in PubChem and ChemSpider. Compound IDs will be assigned by searching with
each identifier, separately for entity name, SMILES etc.
| If entity has InChI key yet, prefer it in searching.
| If "*" is present in SMILES, skip annotation.
| If textual entity has single result in DB when searched by name, fill in missing identifiers (SMILES etc.).
annotation_sleep: int
How many seconds to sleep between annotation of each entity. It's for preventing overloading of databases.
chemspider_token : str
Your personal token for accessing the ChemSpider API (needed for annotation). Make account there to obtain it.
continue_on_failure : bool
| If True, continue running even if ChemSpot returns non-zero exit code.
| If False and error occurs, print it and return.
Returns
-------
dict
Keys:
- stdout: str ... standard output from ChemSpot
- stderr: str ... standard error output from ChemSpot
- exit_code: int ... exit code from ChemSpot
- content
- list of OrderedDicts ... when `format_output` is True
- None ... when `format_output` is False
- normalized_text : str
"""
if opsin_types is None:
opsin_types = ["SYSTEMATIC"]
if input_text and input_file:
input_file = ""
self.logger.warning("Both 'input_text' and 'input_file' are set, but 'input_text' will be prefered.")
elif not input_text and not input_file:
raise ValueError("One of 'input_text' or 'input_file' must be set.")
if not input_type and not input_text:
possible_input_types = ["pdf", "image", "text"]
input_type = get_input_file_type(input_file)
if input_type not in possible_input_types:
raise ValueError("Input file type ({}) is not one of {}".format(input_type, possible_input_types))
elif input_type and not input_text:
possible_input_types = ["pdf", "pdf_scan", "image", "text"]
if input_type not in possible_input_types:
raise ValueError("Unknown 'input_type'. Possible 'input_type' values are {}".format(possible_input_types))
if input_type in ["pdf", "pdf_scan", "image"]:
input_text, _ = get_text(input_file, input_type, lang=lang, tessdata_prefix=os.environ["TESSDATA_PREFIX"])
input_file = ""
if annotate and not chemspider_token:
self.logger.warning("Cannot perform annotation in ChemSpider: 'chemspider_token' is empty.")
options = ChainMap({k: v for k, v in {"iob_format": iob_format}.items() if v},
self.options_internal)
output_file_temp = None
commands, _, _ = self.build_commands(options, self._OPTIONS_REAL, self.path_to_binary)
commands.insert(1, str(self.options_internal["max_memory"]))
commands.append("-t")
if normalize_text:
normalizer = Normalizer(strip=True, collapse=True, hyphens=True, quotes=True, slashes=True, tildes=True, ellipsis=True)
if input_file:
with open(input_file, mode="r") as f:
input_text = f.read()
input_text = normalizer(input_text)
if not input_text:
raise UserWarning("'input_text' is empty after normalization.")
input_text = self.normalize_text(text=input_text)
input_file_normalized = NamedTemporaryFile(mode="w", encoding="utf-8")
input_file_normalized.write(input_text)
input_file_normalized.flush()
input_file = input_file_normalized.name
else:
if input_text:
input_file_temp = NamedTemporaryFile(mode="w", encoding="utf-8")
input_file_temp.write(input_text)
input_file_temp.flush()
input_file = input_file_temp.name
commands.append(os.path.abspath(input_file))
commands.append("-o")
if format_output:
output_file_temp = NamedTemporaryFile(mode="w", encoding="utf-8")
commands.append(os.path.abspath(output_file_temp.name))
else:
commands.append(os.path.abspath(output_file))
if dry_run:
return " ".join(commands)
stdout, stderr, exit_code = common_subprocess(commands)
if "OutOfMemoryError" in stderr:
raise RuntimeError("ChemSpot memory error: {}".format(stderr))
to_return = {"stdout": stdout, "stderr": stderr, "exit_code": exit_code, "content": None,
"normalized_text": input_text if normalize_text else None}
if not continue_on_failure and exit_code > 0:
self.logger.warning("ChemSpot error:")
eprint("\n\t".join("\n{}".format(stderr).splitlines()))
return to_return
if normalize_text:
to_return["normalized_text"] = input_text
if not format_output:
return to_return
elif format_output:
with open(output_file_temp.name, mode="r", encoding="utf-8") as f:
output_chs = f.read()
entities = self.parse_chemspot_iob(text=output_chs) if iob_format else self.parse_chemspot(text=output_chs)
to_return["content"] = entities
if remove_duplicates and not iob_format:
seen = set()
seen_add = seen.add
to_return["content"] = [x for x in to_return["content"] if not (x["entity"] in seen or seen_add(x["entity"]))]
if input_type in ["pdf", "pdf_scan"] or paged_text:
page_ends = []
for i, page in enumerate(input_text.split("\f")):
if page.strip():
try:
page_ends.append(page_ends[-1] + len(page) - 1)
except IndexError:
page_ends.append(len(page) - 1)
if opsin_types:
if convert_ions:
to_convert = [x["entity"] for x in to_return["content"] if x["type"] in opsin_types and not self.re_ion.match(x["entity"])]
else:
to_convert = [x["entity"] for x in to_return["content"] if x["type"] in opsin_types]
if to_convert:
opsin = OPSIN(verbosity=self.verbosity)
opsin_converted = opsin.process(input=to_convert, output_formats=["smiles", "inchi", "inchikey"],
standardize_mols=standardize_mols, output_file_sdf=output_file_sdf,
sdf_append=sdf_append)
opsin_converted = iter(opsin_converted["content"])
else:
self.logger.info("Nothing to convert with OPSIN.")
if annotate:
chemspider = ChemSpider(chemspider_token) if chemspider_token else None
for i, ent in enumerate(to_return["content"]):
if input_type in ["pdf", "pdf_scan"] or paged_text:
ent["page"] = str(bisect.bisect_left(page_ends, int(ent["start"])) + 1)
if convert_ions:
match_ion = self.re_ion.match(ent["entity"])
if match_ion:
match_ion = match_ion.groupdict()
match_charge = self.re_charge.search(match_ion["charge"])
if match_charge:
match_charge = match_charge.groupdict()
if match_charge["roman"]:
smiles = "[{}+{}]".format(match_ion["ion"], len(match_charge["roman"]))
elif match_charge["digit"]:
if "+" in match_ion["charge"]:
smiles = "[{}+{}]".format(match_ion["ion"], match_charge["digit"])
elif "-" in match_ion["charge"]:
smiles = "[{}-{}]".format(match_ion["ion"], match_charge["digit"])
elif match_charge["signs"]:
smiles = "[{}{}{}]".format(match_ion["ion"], match_charge["signs"][0],
len(match_charge["signs"]))
mol = MolFromSmiles(smiles)
if mol:
inchi = MolToInchi(mol)
if inchi:
ent.update(OrderedDict(
[("smiles", smiles), ("inchi", inchi), ("inchikey", InchiToInchiKey(inchi))]))
else:
ent.update(OrderedDict([("smiles", smiles), ("inchi", ""), ("inchikey", "")]))
else:
ent.update(OrderedDict([("smiles", ""), ("inchi", ""), ("inchikey", "")]))
else:
ent.update(OrderedDict([("smiles", ""), ("inchi", ""), ("inchikey", "")]))
if opsin_types and to_convert:
if ent["entity"] in to_convert:
ent_opsin = next(opsin_converted)
ent.update(OrderedDict([("smiles", ent_opsin["smiles"]), ("inchi", ent_opsin["inchi"]),
("inchikey", ent_opsin["inchikey"]), ("opsin_error", ent_opsin["error"])]))
elif convert_ions and self.re_ion.match(ent["entity"]):
ent.update(OrderedDict([("opsin_error", "")]))
elif (convert_ions and not self.re_ion.match(ent["entity"])) or (not convert_ions and ent["entity"] not in to_convert):
ent.update(OrderedDict([("smiles", ""), ("inchi", ""), ("inchikey", ""), ("opsin_error", "")]))
# TODO: this should be simplified...looks like garbage code
if annotate:
self.logger.info("Annotating entity {}/{}...".format(i + 1, len(to_return["content"])))
ent.update(OrderedDict([("pch_cids_by_inchikey", ""), ("chs_cids_by_inchikey", ""),
("pch_cids_by_name", ""), ("chs_cids_by_name", ""),
("pch_cids_by_smiles", ""), ("chs_cids_by_smiles", ""),
("pch_cids_by_inchi", ""), ("chs_cids_by_inchi", ""),
("pch_cids_by_formula", ""),
("pch_iupac_name", ""), ("chs_common_name", ""),
("pch_synonyms", "")]))
# do "double-annotation": some entities can be found in only one DB, updated and then searched in second DB
found_in_pch = False
found_in_chs = False
for _ in range(2):
results = []
# prefer InChI key
if "inchikey" in ent and ent["inchikey"]:
try:
results = get_compounds(ent["inchikey"], "inchikey")
if results:
if len(results) == 1:
result = results[0]
synonyms = result.synonyms
if synonyms:
ent["pch_synonyms"] = "\"{}\"".format("\",\"".join(synonyms))
ent["pch_iupac_name"] = result.iupac_name
if not found_in_chs:
ent["smiles"] = result.canonical_smiles or ent["smiles"]
ent["inchi"] = result.inchi or ent["inchi"]
ent["inchikey"] = result.inchikey or ent["inchikey"]
ent["pch_cids_by_inchikey"] = "\"{}\"".format(",".join([str(c.cid) for c in results]))
except (BadRequestError, NotFoundError, PubChemHTTPError, ResponseParseError, ServerError, TimeoutError, PubChemPyError):
pass
results = chemspider.search(ent["inchikey"]) if chemspider_token else []
if results:
if len(results) == 1:
result = results[0]
ent["chs_common_name"] = result.common_name
if not found_in_pch:
ent["smiles"] = result.smiles or ent["smiles"]
ent["inchi"] = result.stdinchi or ent["inchi"]
ent["inchikey"] = result.stdinchikey or ent["inchikey"]
ent["chs_cids_by_inchikey"] = "\"{}\"".format(",".join([str(c.csid) for c in results]))
else:
if (not found_in_pch and not found_in_chs) or (not found_in_pch and found_in_chs):
try:
results = get_compounds(ent["entity"] or ent["abbreviation"], "name")
if results:
if len(results) == 1:
found_in_pch = True
result = results[0]
synonyms = result.synonyms
if synonyms:
ent["pch_synonyms"] = "\"{}\"".format("\",\"".join(synonyms))
# only update identifiers if they weren't found in second DB
if not found_in_chs:
ent["smiles"] = result.canonical_smiles or ent["smiles"]
ent["inchi"] = result.inchi or ent["inchi"]
ent["inchikey"] = result.inchikey or ent["inchikey"]
ent["pch_iupac_name"] = result.iupac_name
ent["pch_cids_by_name"] = "\"{}\"".format(",".join([str(c.cid) for c in results]))
except (BadRequestError, NotFoundError, PubChemHTTPError, ResponseParseError, ServerError, TimeoutError, PubChemPyError):
pass
if (not found_in_pch and not found_in_chs) or (found_in_pch and not found_in_chs):
results = chemspider.search(ent["entity"] or ent["abbreviation"]) if chemspider_token else []
if results:
if len(results) == 1:
found_in_chs = True
result = results[0]
if not found_in_pch:
ent["smiles"] = result.smiles or ent["smiles"]
ent["inchi"] = result.stdinchi or ent["inchi"]
ent["inchikey"] = result.stdinchikey or ent["inchikey"]
ent["chs_common_name"] = result.common_name
ent["chs_cids_by_name"] = "\"{}\"".format(",".join([str(c.csid) for c in results]))
for search_field, col_pch, col_chs in [("smiles", "pch_cids_by_smiles", "chs_cids_by_smiles"),
("inchi", "pch_cids_by_inchi", "chs_cids_by_inchi"),
("formula", "pch_cids_by_formula", "")]:
results_pch = []
results_chs = []
if search_field == "smiles" and "smiles" in ent and ent["smiles"] and "*" not in ent["smiles"]:
if (not found_in_pch and not found_in_chs) or (not found_in_pch and found_in_chs):
try:
results_pch = get_compounds(ent["smiles"], "smiles")
except (BadRequestError, NotFoundError, PubChemHTTPError, ResponseParseError, ServerError, TimeoutError, PubChemPyError):
pass
if (not found_in_pch and not found_in_chs) or (found_in_pch and not found_in_chs):
results_chs = chemspider.search(ent["smiles"]) if chemspider_token else []
elif search_field == "inchi" and "inchi" in ent and ent["inchi"]:
if (not found_in_pch and not found_in_chs) or (not found_in_pch and found_in_chs):
try:
results_pch = get_compounds(ent["inchi"], "inchi")
except (BadRequestError, NotFoundError, PubChemHTTPError, ResponseParseError, ServerError, TimeoutError, PubChemPyError):
pass
if (not found_in_pch and not found_in_chs) or (found_in_pch and not found_in_chs):
results_chs = chemspider.search(ent["inchi"]) if chemspider_token else []
elif search_field == "formula":
if (not found_in_pch and not found_in_chs) or (not found_in_pch and found_in_chs):
try:
results_pch = get_compounds(ent["entity"], "formula")
except (BadRequestError, NotFoundError, PubChemHTTPError, ResponseParseError, ServerError, TimeoutError, PubChemPyError):
pass
# ChemSpider doesn't have search field for 'formula'
if results_pch:
ent[col_pch] = "\"{}\"".format(",".join([str(c.cid) for c in results_pch]))
if results_chs:
ent[col_chs] = "\"{}\"".format(",".join([str(c.csid) for c in results_chs]))
sleep(0.5)
sleep(annotation_sleep)
if not found_in_pch and not found_in_chs:
break
if output_file:
dict_to_csv(to_return["content"], output_file=output_file, csv_delimiter=csv_delimiter, write_header=write_header)
return to_return
@staticmethod
def normalize_text(input_file_path: str = "", text: str = "", output_file_path: str = "",
encoding: str = "utf-8") -> str:
r"""
Normalize the text. Operations:
- remove numbers of entities which point somewhere in the text, e.g. "N-octyl- (2b)" -> "N-octyl-"
- join words hyphenated across line breaks by replacing "-\n" with ""
Parameters
----------
input_file_path : str
text : str
output_file_path : str
encoding : str
Returns
-------
str
Normalized text.
Notes
-----
One of `input_file_path` or `text` parameters must be set.
"""
if not input_file_path and not text:
raise ValueError("One of 'input_file_path' or 'text' must be set.")
if input_file_path:
with open(input_file_path, mode="r", encoding=encoding) as file:
text = file.read()
text = re.sub(re.compile(r"\(?\d+[a-zA-Z]\)?,?"), "", text)
text = text.replace("-\n", "")
if output_file_path:
with open(output_file_path, mode="w", encoding=encoding) as file:
file.write(text)
return text
@staticmethod
def parse_chemspot(file_path: str = "", text: str = "", encoding: str = "utf-8") -> list:
"""
Parse the output from ChemSpot.
Parameters
----------
file_path : str
Path to file.
text : str
Text to normalize.
encoding : str
File encoding.
Returns
-------
list
| List of OrderedDicts. Each dict is one row from the input and contains:
| start position, end position, page, abbreviation, name of entity, type
| Type means a type of detected entity, e.g. SYSTEMATIC, FAMILY etc.
"""
if file_path:
with open(file_path, mode="r", encoding=encoding) as f:
text = f.read()
rows = [row.strip().split("\t") for row in text.strip().split("\n") if row]
# Sometimes newline causes ChemSpot to have bad output like
# 5355 5396 3-(cyclohexylamino)-1-propanesulfonic \n
# acid SYSTEMATIC
# This fixes it.
rows_new = []
rows_enumerator = enumerate(rows)
for i, row in rows_enumerator:
if row[3] == "ABBREVIATION":
abbreviation = row[2]
else:
abbreviation = ""
if len(row) == 4:
rows_new.append(OrderedDict([("start", row[0]), ("end", row[1]), ("page", 1), ("abbreviation", abbreviation),
("entity", row[2]), ("type", row[3])]))
elif len(row) == 5:
rows_new.append(OrderedDict([("start", row[0]), ("end", row[1]), ("page", 1), ("abbreviation", abbreviation),
("entity", row[4]), ("type", row[3])]))
else:
next_row = next(rows_enumerator)[1]
rows_new.append(OrderedDict([("start", row[0]), ("end", row[1]), ("page", 1), ("abbreviation", abbreviation),
("entity", row[2] + " " + next_row[0]), ("type", next_row[1])]))
return rows_new
@staticmethod
def parse_chemspot_iob(file_path: str = "", text: str = "", encoding: str = "utf-8") -> list:
if file_path:
with open(file_path, mode="r", encoding=encoding) as f:
text = f.readlines()
elif text:
text = [x.strip() for x in text.split("\n")]
text = iter(text)
rows = []
next(text) # skip first row containing "###"
for row in text:
row = row.strip().split()
if len(row) == 4:
rows.append(OrderedDict([("string", row[0]), ("start", row[1]), ("end", row[2]), ("page", "1"), ("type", row[3])]))
elif len(row) == 3:
rows.append(OrderedDict([("string", ""), ("start", row[0]), ("end", row[1]), ("page", "1"), ("type", row[2])]))
return rows
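# --- Minimal parsing sketch (editorial addition; the tab-separated sample below
# is made up, not real ChemSpot output). It only calls the static parser defined
# above:
#
#     sample = "0\t6\taspirin\tTRIVIAL\n10\t16\tethanol\tTRIVIAL"
#     for ent in ChemSpot.parse_chemspot(text=sample):
#         print(ent["entity"], ent["type"])   # -> aspirin TRIVIAL / ethanol TRIVIAL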
|
gorgitko/molminer
|
molminer/ChemSpot.py
|
Python
|
mit
| 34,926
|
[
"RDKit"
] |
d922d6d969cb5444581555bfd7623f80873024a8e5e63d7d0090b2bbb49c0e74
|
from __future__ import print_function
import sys
import time
import requests
from numpy import pi, sin, cos
import numpy as np
from bokeh.objects import (Plot, DataRange1d, LinearAxis,
ColumnDataSource, Glyph, PanTool, WheelZoomTool)
from bokeh.glyphs import Line
from bokeh import session
from bokeh import document
document = document.Document()
session = session.Session()
session.use_doc('line_animate')
session.load_document(document)
x = np.linspace(-2*pi, 2*pi, 1000)
x_static = np.linspace(-2*pi, 2*pi, 1000)
y = sin(x)
z = cos(x)
source = ColumnDataSource(
data=dict(
x=x, y=y, z=z, x_static=x_static)
)
xdr = DataRange1d(sources=[source.columns("x")])
xdr_static = DataRange1d(sources=[source.columns("x_static")])
ydr = DataRange1d(sources=[source.columns("y")])
line_glyph = Line(x="x", y="y", line_color="blue")
line_glyph2 = Line(x="x", y="z", line_color="red")
renderer = Glyph(
data_source = source,
xdata_range = xdr,
ydata_range = ydr,
glyph = line_glyph
)
renderer2 = Glyph(
data_source = source,
xdata_range = xdr_static,
ydata_range = ydr,
glyph = line_glyph2
)
plot = Plot(x_range=xdr_static, y_range=ydr, data_sources=[source], min_border=50)
xaxis = LinearAxis(plot=plot, dimension=0, location="bottom")
yaxis = LinearAxis(plot=plot, dimension=1, location="left")
pantool = PanTool(dimensions=["width", "height"])
wheelzoomtool = WheelZoomTool(dimensions=["width", "height"])
plot.renderers.append(renderer)
plot.renderers.append(renderer2)
plot.tools = [pantool, wheelzoomtool]
document.add(plot)
session.store_document(document)
link = session.object_link(document._plotcontext)
print ("please visit %s to see plots" % link)
print ("animating")
while True:
for i in np.linspace(-2*pi, 2*pi, 50):
source.data['x'] = x +i
session.store_objects(source)
time.sleep(0.05)
|
sahat/bokeh
|
examples/glyphs/line_animate.py
|
Python
|
bsd-3-clause
| 1,932
|
[
"VisIt"
] |
cc8d0125a27929f6e120f48ef4fd210ce24ab14102cffd739898071eb59acf6d
|
# Copyright 2017 The 'Scalable Private Learning with PATE' Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for smooth sensitivity analysis for PATE mechanisms.
This library implements functionality for doing smooth sensitivity analysis
for Gaussian Noise Max (GNMax), Threshold with Gaussian noise, and Gaussian
Noise with Smooth Sensitivity (GNSS) mechanisms.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import app
import numpy as np
import scipy
import sympy as sp
import core as pate
################################
# SMOOTH SENSITIVITY FOR GNMAX #
################################
# Global dictionary for storing cached q0 values keyed by (sigma, order).
_logq0_cache = {}
def _compute_logq0(sigma, order):
key = (sigma, order)
if key in _logq0_cache:
return _logq0_cache[key]
logq0 = compute_logq0_gnmax(sigma, order)
_logq0_cache[key] = logq0 # Update the global variable.
return logq0
def _compute_logq1(sigma, order, num_classes):
logq0 = _compute_logq0(sigma, order) # Most likely already cached.
logq1 = math.log(_compute_bl_gnmax(math.exp(logq0), sigma, num_classes))
assert logq1 <= logq0
return logq1
def _compute_mu1_mu2_gnmax(sigma, logq):
# Computes mu1, mu2 according to Proposition 10.
mu2 = sigma * math.sqrt(-logq)
mu1 = mu2 + 1
return mu1, mu2
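# Worked example (editorial note, not in the original file): with sigma = 1 and
# logq = -4, Proposition 10 gives mu2 = sigma * sqrt(-logq) = 2 and mu1 = mu2 + 1 = 3.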
def _compute_data_dep_bound_gnmax(sigma, logq, order):
# Applies Theorem 6 in Appendix without checking that logq satisfies necessary
# constraints. The pre-conditions must be assured by comparing logq against
# logq0 by the caller.
variance = sigma**2
mu1, mu2 = _compute_mu1_mu2_gnmax(sigma, logq)
eps1 = mu1 / variance
eps2 = mu2 / variance
log1q = np.log1p(-math.exp(logq)) # log1q = log(1-q)
log_a = (order - 1) * (
log1q - (np.log1p(-math.exp((logq + eps2) * (1 - 1 / mu2)))))
log_b = (order - 1) * (eps1 - logq / (mu1 - 1))
return np.logaddexp(log1q + log_a, logq + log_b) / (order - 1)
def _compute_rdp_gnmax(sigma, logq, order):
logq0 = _compute_logq0(sigma, order)
if logq >= logq0:
return pate.rdp_data_independent_gaussian(sigma, order)
else:
return _compute_data_dep_bound_gnmax(sigma, logq, order)
def compute_logq0_gnmax(sigma, order):
"""Computes the point where we start using data-independent bounds.
Args:
sigma: std of the Gaussian noise
order: Renyi order lambda
Returns:
logq0: the point above which the data-ind bound overtakes data-dependent
bound.
"""
def _check_validity_conditions(logq):
# Function returns true iff logq is in the range where data-dependent bound
# is valid. (Theorem 6 in Appendix.)
mu1, mu2 = _compute_mu1_mu2_gnmax(sigma, logq)
if mu1 < order:
return False
eps2 = mu2 / sigma**2
# Do computation in the log space. The condition below comes from Lemma 9
# from Appendix.
return (logq <= (mu2 - 1) * eps2 - mu2 * math.log(mu1 / (mu1 - 1) * mu2 /
(mu2 - 1)))
def _compare_dep_vs_ind(logq):
return (_compute_data_dep_bound_gnmax(sigma, logq, order) -
pate.rdp_data_independent_gaussian(sigma, order))
# Natural upper bounds on q0.
logub = min(-(1 + 1. / sigma)**2, -((order - .99) / sigma)**2, -1 / sigma**2)
assert _check_validity_conditions(logub)
# If data-dependent bound is already better, we are done already.
if _compare_dep_vs_ind(logub) < 0:
return logub
# Identifying a reasonable lower bound to bracket logq0.
loglb = 2 * logub # logub is negative, and thus loglb < logub.
while _compare_dep_vs_ind(loglb) > 0:
assert loglb > -10000, "The lower bound on q0 is way too low."
loglb *= 1.5
logq0, r = scipy.optimize.brentq(
_compare_dep_vs_ind, loglb, logub, full_output=True)
assert r.converged, "The root finding procedure failed to converge."
assert _check_validity_conditions(logq0) # just in case.
return logq0
def _compute_bl_gnmax(q, sigma, num_classes):
return ((num_classes - 1) / 2 * scipy.special.erfc(
1 / sigma + scipy.special.erfcinv(2 * q / (num_classes - 1))))
def _compute_bu_gnmax(q, sigma, num_classes):
return min(1, (num_classes - 1) / 2 * scipy.special.erfc(
-1 / sigma + scipy.special.erfcinv(2 * q / (num_classes - 1))))
def _compute_local_sens_gnmax(logq, sigma, num_classes, order):
"""Implements Algorithm 3 (computes an upper bound on local sensitivity).
(See Proposition 13 for proof of correctness.)
"""
logq0 = _compute_logq0(sigma, order)
logq1 = _compute_logq1(sigma, order, num_classes)
if logq1 <= logq <= logq0:
logq = logq1
beta = _compute_rdp_gnmax(sigma, logq, order)
beta_bu_q = _compute_rdp_gnmax(
sigma, math.log(_compute_bu_gnmax(math.exp(logq), sigma, num_classes)),
order)
beta_bl_q = _compute_rdp_gnmax(
sigma, math.log(_compute_bl_gnmax(math.exp(logq), sigma, num_classes)),
order)
return max(beta_bu_q - beta, beta - beta_bl_q)
def compute_local_sensitivity_bounds_gnmax(votes, num_teachers, sigma, order):
"""Computes a list of max-LS-at-distance-d for the GNMax mechanism.
A more efficient implementation of Algorithms 4 and 5 working in time
O(teachers*classes). A naive implementation is O(teachers^2*classes) or worse.
Args:
votes: A numpy array of votes.
num_teachers: Total number of voting teachers.
sigma: Standard deviation of the Gaussian noise.
order: The Renyi order.
Returns:
A numpy array of local sensitivities at distances d, 0 <= d <= num_teachers.
"""
num_classes = len(votes) # Called m in the paper.
logq0 = _compute_logq0(sigma, order)
logq1 = _compute_logq1(sigma, order, num_classes)
logq = pate.compute_logq_gaussian(votes, sigma)
plateau = _compute_local_sens_gnmax(logq1, sigma, num_classes, order)
res = np.full(num_teachers, plateau)
if logq1 <= logq <= logq0:
return res
# Invariant: votes is sorted in the non-increasing order.
votes = sorted(votes, reverse=True)
res[0] = _compute_local_sens_gnmax(logq, sigma, num_classes, order)
curr_d = 0
go_left = logq > logq0 # Otherwise logq < logq1 and we go right.
# Iterate while the following is true:
# 1. If we are going left, logq is still larger than logq0 and we may still
# increase the gap between votes[0] and votes[1].
# 2. If we are going right, logq is still smaller than logq1.
while ((go_left and logq > logq0 and votes[1] > 0) or
(not go_left and logq < logq1)):
curr_d += 1
if go_left: # Try decreasing logq.
votes[0] += 1
votes[1] -= 1
idx = 1
# Restore the invariant. (Can be implemented more efficiently by keeping
# track of the range of indices equal to votes[1]. Does not seem to matter
# for the overall running time.)
while idx < len(votes) - 1 and votes[idx] < votes[idx + 1]:
votes[idx], votes[idx + 1] = votes[idx + 1], votes[idx]
idx += 1
else: # Go right, i.e., try increasing logq.
votes[0] -= 1
votes[1] += 1 # The invariant holds since otherwise logq >= logq1.
logq = pate.compute_logq_gaussian(votes, sigma)
res[curr_d] = _compute_local_sens_gnmax(logq, sigma, num_classes, order)
return res
##################################################
# SMOOTH SENSITIVITY FOR THE THRESHOLD MECHANISM #
##################################################
# A global dictionary of RDPs for various threshold values. Indexed by a 4-tuple
# (num_teachers, threshold, sigma, order).
_rdp_thresholds = {}
def _compute_rdp_list_threshold(num_teachers, threshold, sigma, order):
key = (num_teachers, threshold, sigma, order)
if key in _rdp_thresholds:
return _rdp_thresholds[key]
res = np.zeros(num_teachers + 1)
for v in range(0, num_teachers + 1):
logp = scipy.stats.norm.logsf(threshold - v, scale=sigma)
res[v] = pate.compute_rdp_threshold(logp, sigma, order)
_rdp_thresholds[key] = res
return res
def compute_local_sensitivity_bounds_threshold(counts, num_teachers, threshold,
sigma, order):
"""Computes a list of max-LS-at-distance-d for the threshold mechanism."""
def _compute_ls(v):
ls_step_up, ls_step_down = None, None
if v > 0:
ls_step_down = abs(rdp_list[v - 1] - rdp_list[v])
if v < num_teachers:
ls_step_up = abs(rdp_list[v + 1] - rdp_list[v])
return max(ls_step_down, ls_step_up) # Rely on max(x, None) = x.
cur_max = int(round(max(counts)))
rdp_list = _compute_rdp_list_threshold(num_teachers, threshold, sigma, order)
ls = np.zeros(num_teachers)
for d in range(max(cur_max, num_teachers - cur_max)):
ls_up, ls_down = None, None
if cur_max + d <= num_teachers:
ls_up = _compute_ls(cur_max + d)
if cur_max - d >= 0:
ls_down = _compute_ls(cur_max - d)
ls[d] = max(ls_up, ls_down)
return ls
#############################################
# PROCEDURES FOR SMOOTH SENSITIVITY RELEASE #
#############################################
# A global dictionary of exponentially decaying arrays. Indexed by beta.
dict_beta_discount = {}
def compute_discounted_max(beta, a):
n = len(a)
if beta not in dict_beta_discount or (len(dict_beta_discount[beta]) < n):
dict_beta_discount[beta] = np.exp(-beta * np.arange(n))
return max(a * dict_beta_discount[beta][:n])
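# A minimal, self-contained sketch (an illustration, not part of the original
# module) of the discounted-max step above: the smooth-sensitivity bound is the
# largest local sensitivity after discounting distance d by exp(-beta * d). The
# toy sensitivity profile below is made up purely for demonstration.
def _discounted_max_demo(beta=0.05):
  import numpy as np
  local_sens = np.array([0.5, 0.4, 0.3, 0.2, 0.1])  # hypothetical ls at d = 0..4
  return float(np.max(local_sens * np.exp(-beta * np.arange(len(local_sens)))))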
def compute_smooth_sensitivity_gnmax(beta, counts, num_teachers, sigma, order):
"""Computes smooth sensitivity of a single application of GNMax."""
  ls = compute_local_sensitivity_bounds_gnmax(counts, num_teachers, sigma, order)
return compute_discounted_max(beta, ls)
def compute_rdp_of_smooth_sensitivity_gaussian(beta, sigma, order):
"""Computes the RDP curve for the GNSS mechanism.
Implements Theorem 23 (https://arxiv.org/pdf/1802.08908.pdf).
"""
if beta > 0 and not 1 < order < 1 / (2 * beta):
raise ValueError("Order outside the (1, 1/(2*beta)) range.")
return order * math.exp(2 * beta) / sigma**2 + (
-.5 * math.log(1 - 2 * order * beta) + beta * order) / (
order - 1)
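# Usage note (illustrative values only): compute_rdp_of_smooth_sensitivity_gaussian(
# beta=0.05, sigma=40.0, order=8.0) evaluates the closed form above; the order must
# lie strictly between 1 and 1/(2*beta) = 10, otherwise a ValueError is raised.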
def compute_params_for_ss_release(eps, delta):
"""Computes sigma for additive Gaussian noise scaled by smooth sensitivity.
Presently not used. (We proceed via RDP analysis.)
Compute beta, sigma for applying Lemma 2.6 (full version of Nissim et al.) via
Lemma 2.10.
"""
# Rather than applying Lemma 2.10 directly, which would give suboptimal alpha,
# (see http://www.cse.psu.edu/~ads22/pubs/NRS07/NRS07-full-draft-v1.pdf),
# we extract a sufficient condition on alpha from its proof.
#
# Let a = rho_(delta/2)(Z_1). Then solve for alpha such that
# 2 alpha a + alpha^2 = eps/2.
a = scipy.special.ndtri(1 - delta / 2)
alpha = math.sqrt(a**2 + eps / 2) - a
beta = eps / (2 * scipy.special.chdtri(1, delta / 2))
return alpha, beta
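# A small numeric check (eps and delta values are illustrative assumptions) that the
# alpha above solves the stated quadratic 2*alpha*a + alpha**2 = eps/2, with
# a = ndtri(1 - delta/2):
def _ss_release_params_check(eps=1.0, delta=1e-5):
  a = scipy.special.ndtri(1 - delta / 2)
  alpha = math.sqrt(a**2 + eps / 2) - a
  return abs(2 * alpha * a + alpha**2 - eps / 2)  # ~0 up to floating-point rounding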
#######################################################
# SYMBOLIC-NUMERIC VERIFICATION OF CONDITIONS C5--C6. #
#######################################################
def _construct_symbolic_beta(q, sigma, order):
mu2 = sigma * sp.sqrt(sp.log(1 / q))
mu1 = mu2 + 1
eps1 = mu1 / sigma**2
eps2 = mu2 / sigma**2
a = (1 - q) / (1 - (q * sp.exp(eps2))**(1 - 1 / mu2))
b = sp.exp(eps1) / q**(1 / (mu1 - 1))
s = (1 - q) * a**(order - 1) + q * b**(order - 1)
return (1 / (order - 1)) * sp.log(s)
def _construct_symbolic_bu(q, sigma, m):
return (m - 1) / 2 * sp.erfc(sp.erfcinv(2 * q / (m - 1)) - 1 / sigma)
def _is_non_decreasing(fn, q, bounds):
"""Verifies whether the function is non-decreasing within a range.
Args:
fn: Symbolic function of a single variable.
q: The name of f's variable.
bounds: Pair of (lower_bound, upper_bound) reals.
Returns:
True iff the function is non-decreasing in the range.
"""
diff_fn = sp.diff(fn, q) # Symbolically compute the derivative.
diff_fn_lambdified = sp.lambdify(
q,
diff_fn,
modules=[
"numpy", {
"erfc": scipy.special.erfc,
"erfcinv": scipy.special.erfcinv
}
])
r = scipy.optimize.minimize_scalar(
diff_fn_lambdified, bounds=bounds, method="bounded")
assert r.success, "Minimizer failed to converge."
return r.fun >= 0 # Check whether the derivative is non-negative.
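# A toy, self-contained illustration of the strategy above (numerically minimize the
# symbolic derivative and check its sign); x**3 on (0, 1) is an arbitrary example
# unrelated to the GNMax expressions.
def _is_non_decreasing_demo():
  x = sp.symbols("x", real=True)
  dfn = sp.lambdify(x, sp.diff(x**3, x), modules=["numpy"])
  r = scipy.optimize.minimize_scalar(dfn, bounds=(0.0, 1.0), method="bounded")
  return r.success and r.fun >= 0  # True: the derivative 3*x**2 is non-negative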
def check_conditions(sigma, m, order):
"""Checks conditions C5 and C6 (Section B.4.2 in Appendix)."""
q = sp.symbols("q", positive=True, real=True)
beta = _construct_symbolic_beta(q, sigma, order)
q0 = math.exp(compute_logq0_gnmax(sigma, order))
cond5 = _is_non_decreasing(beta, q, (0, q0))
if cond5:
bl_q0 = _compute_bl_gnmax(q0, sigma, m)
bu = _construct_symbolic_bu(q, sigma, m)
delta_beta = beta.subs(q, bu) - beta
cond6 = _is_non_decreasing(delta_beta, q, (0, bl_q0))
else:
cond6 = False # Skip the check, since Condition 5 is false already.
return (cond5, cond6)
def main(argv):
del argv # Unused.
if __name__ == "__main__":
app.run(main)
|
cshallue/models
|
research/differential_privacy/pate/smooth_sensitivity.py
|
Python
|
apache-2.0
| 13,739
|
[
"Gaussian"
] |
e2956d80bf6b77f4d8b0e349098786d376ad17d12632c14c5a637844484982eb
|
import numpy as np
import copy as cp
import random
from board import Board
from sklearn.neighbors import BallTree
from globalconsts import \
EMPTY, RED, BLACK, BKING, RKING, \
FORWARD_LEFT, FORWARD_RIGHT, BACKWARD_LEFT, BACKWARD_RIGHT, \
AI_COLOR, THRESHOLD, PLAYER_COLOR, \
LOSE, WIN, CONTINUE, TIE, \
WIN_FACTOR, LOSE_FACTOR
class Learner(object):
"""
A class that instantiates the feature space for an individual AI,
chooses moves, and performs learning
"""
def __init__(self, data_points = None, ai_history = None, threshold = THRESHOLD):
self.state_list = []
self.weights_list = []
if data_points is None:
data_points = []
if ai_history is None:
ai_history = []
for state, weights in data_points:
assert(len(state) == 32)
self.state_list.append(state)
self.weights_list.append(weights)
self._threshold = threshold
self._ai_history = cp.deepcopy(ai_history)
#self._featureTransform()
self.X = np.array(self.state_list)
assert(self.X.shape == (len(data_points), 32) or len(data_points) == 0)
#Think about different distance metrics. Manhattan or minkowski? P < 1?
if len(data_points) > 0:
self._tree = BallTree(self.X, metric='manhattan')
else:
self._tree = None
def getNextMove(self, current_board):
# current_board.printBoard()
nn_move = self._getNearestNeighbors(current_board)
if nn_move is not None:
next_move = nn_move
else:
next_move = self._getMinimax(current_board)
self._ai_history.append(next_move)
return next_move
def updateWeights(self, game_history, status):
if status == WIN:
factor = WIN_FACTOR
elif status == LOSE:
factor = LOSE_FACTOR
elif status == TIE:
factor = 1
# old_board = Board()
for _board, _move in game_history:
assert(any(_move == mv[1] for mv in _board.getMoveList(_move.color)))
if _move.color == AI_COLOR:
state = _board.getArray().tolist()
if state in self.state_list:
i = self.state_list.index(state)
# j = self.state_list[i].find(move)
# print zip(*_board.getMoveList(AI_COLOR))[1]
# print list(zip(*_board.getMoveList(AI_COLOR))[1])
j = list(zip(*_board.getMoveList(AI_COLOR))[1]).index(_move)
self.weights_list[i][j] *= factor
else:
self.state_list.append(state)
self.weights_list.append([1] * len(_board.getMoveList(AI_COLOR)))
# print zip(*_board.getMoveList(AI_COLOR))[1]
j = list(zip(*_board.getMoveList(AI_COLOR))[1]).index(_move)
self.weights_list[-1][j] *= factor
elif _move.color == PLAYER_COLOR:
_move = _move.getInverse()
state = _board.getInverse().getArray().tolist()
if state in self.state_list:
i = self.state_list.index(state)
# j = self.state_list[i].find(move)
j = list(zip(*_board.getInverse().getMoveList(AI_COLOR))[1]).index(_move)
self.weights_list[i][j] *= (1.0 / factor)
else:
self.state_list.append(state)
self.weights_list.append([1] * len(_board.getInverse().getMoveList(AI_COLOR)))
j = list(zip(*_board.getInverse().getMoveList(AI_COLOR))[1]).index(_move)
self.weights_list[-1][j] *= (1.0 / factor)
self.X = np.array(self.state_list)
self._tree = BallTree(self.X, metric='manhattan')
def getAiHistory(self):
return cp.deepcopy(self._ai_history)
def _getMinimax(self, current_board):
# return random.choice([bd[1] for bd in current_board.getMoveList(AI_COLOR)])
(bestBoard, bestVal) = minMax2(current_board, 6)
# print("bestVal", bestVal)
# bestBoard[0].printBoard()
return bestBoard[1]
def _getNearestNeighbors(self, current_board):
#dist, ind = self._tree.query(current_board.getArray(), k=3)
if self._tree is None:
return None
ind = self._tree.query_radius(current_board.getArray(), r = self._threshold).tolist()
ind = ind[0].tolist()
if len(ind) > 0:
pass
# print "neighbors found"
#cur_moves = current_board.getMoveList(AI_COLOR)
moves = []
weights = []
# print ind
for i in ind:
_board = Board(new_array = self.state_list[i])
assert(len(_board.getMoveList(AI_COLOR)) == len(self.weights_list[i]))
for j, (board, move) in enumerate(_board.getMoveList(AI_COLOR)):
# move.printMove()
# current_board.printBoard()
if current_board.verifyMove(AI_COLOR, move = move):
# print "move found"
# move.printMove()
if move not in moves:
moves.append(move)
weights.append(self.weights_list[i][j])
else:
weights[moves.index(move)] *= self.weights_list[i][j]
if len(moves) == 0:
# raise Exception()
# print "aborted neighbors"
return None
else:
assert(len(moves) == len(weights))
zipped = zip(moves, weights)
moves = [mv[0] for mv in zipped if mv[1] >= 1]
weights = [mv[1] for mv in zipped if mv[1] >= 1]
if len(moves) < 1: return None
            return np.random.choice(moves, 1, p=np.asarray(weights, dtype=float) / np.sum(weights))[0]
#neighbor_moves = [move for move in neighbor_moves if move in cur_moves]
def _featureTransform(self):
#replace weights with a Gaussian at some point
#or come up with a better feature transform
weights = [1, 2, 3, 4, 4, 3, 2, 1]
transformed_list = []
for state in self.state_list:
assert(len(state) == 32)
new_state = []
for i in range(32):
new_state.append(state[i] * weights[i / 4])
transformed_list.append(new_state)
self.X = np.array(transformed_list)
# -----------------------------------------------------
def minMax2(board, maxDepth):
bestBoard = None
currentDepth = maxDepth
while not bestBoard and currentDepth > 0:
currentDepth -= 1
(bestBoard, bestVal) = maxMove2(board, currentDepth)
return (bestBoard, bestVal)
def maxMove2(maxBoard, currentDepth):
"""
Calculates the best move for RED player (computer) (seeks a board with INF value)
"""
return maxMinBoard(maxBoard, currentDepth-1, float('-inf'))
def minMove2(minBoard, currentDepth):
"""
Calculates the best move from the perspective of BLACK player (seeks board with -INF value)
"""
return maxMinBoard(minBoard, currentDepth-1, float('inf'))
def maxMinBoard(board, currentDepth, bestMove):
"""
Does the actual work of calculating the best move
"""
# Check if we are at an end node
if currentDepth <= 0:
return (board, np.sum(board.getArray()))
# So we are not at an end node, now we need to do minmax
# Set up values for minmax
best_move_value = bestMove
best_board = None
# MaxNode
if bestMove == float('-inf'):
# Create the iterator for the Moves
board_moves = board.getMoveList(AI_COLOR)
for board_move in board_moves:
value = minMove2(board_move[0], currentDepth-1)[1]
if value > best_move_value:
best_move_value = value
best_board = board_move
# MinNode
elif bestMove == float('inf'):
board_moves = board.getMoveList(PLAYER_COLOR)
for board_move in board_moves:
value = maxMove2(board_move[0], currentDepth-1)[1]
# Take the smallest value we can
if value < best_move_value:
best_move_value = value
best_board = board_move
# Things appear to be fine, we should have a board with a good value to move to
return (best_board, best_move_value)
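# A tiny, board-independent sketch of the same depth-limited max/min recursion used
# by maxMinBoard above; the nested-list "game tree" and its leaf values are made up
# for illustration only.
def _toy_minimax(node, depth, maximizing):
    # A node is either a numeric leaf value or a list of child nodes.
    if depth == 0 or not isinstance(node, list):
        return node
    values = [_toy_minimax(child, depth - 1, not maximizing) for child in node]
    return max(values) if maximizing else min(values)
# _toy_minimax([[3, 5], [2, [9, 1]]], depth=3, maximizing=True) evaluates to 3.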
#http://scikit-learn.org/stable/modules/neighbors.html#classification
#http://www.sciencedirect.com/science/article/pii/S0925231210003875
|
zlalvani/checkers-learner
|
learner.py
|
Python
|
mit
| 7,702
|
[
"Gaussian"
] |
7578f79a7e43cdf4c9672d85fa4055e50b12d863103d8c6bd12fdfdd27d2ab23
|
import numpy as np
from ase.calculators.emt import EMT
from ase import Atoms
a = 3.60
b = a / 2
cu = Atoms('Cu',
positions=[(0, 0, 0)],
cell=[(0, b, b),
(b, 0, b),
(b, b, 0)],
pbc=1,
calculator=EMT())
e0 = cu.get_potential_energy()
print e0
cu.set_cell(cu.get_cell() * 1.001, scale_atoms=True)
e1 = cu.get_potential_energy()
V = a**3 / 4
B = 2 * (e1 - e0) / 0.003**2 / V * 160.2
print B
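# Assumed reasoning for the expression above (not stated in the original script):
# scaling the cell by 1.001 gives a volumetric strain of roughly 3 * 0.001 = 0.003,
# so B ~ 2 * (e1 - e0) / (0.003**2 * V), and the factor 160.2 converts eV/Angstrom^3
# to GPa.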
for i in range(4):
x = 0.001 * i
A = np.array([(x, b, b+x),
(b, 0, b),
(b, b, 0)])
cu.set_cell(A, scale_atoms=True)
e = cu.get_potential_energy() - e0
if i == 0:
print i, e
else:
print i, e, e / x**2
A = np.array([(0, b, b),
(b, 0, b),
(6*b, 6*b, 0)])
R = np.zeros((2, 3))
for i in range(1, 2):
R[i] = i * A[2] / 6
print (Atoms('Cu2', positions=R,
pbc=1, cell=A,
calculator=EMT()).get_potential_energy() - 2 * e0) / 2
A = np.array([(0, b, b),
(b, 0, b),
(10*b, 10*b, 0)])
R = np.zeros((3, 3))
for i in range(1, 3):
R[i] = i * A[2] / 10
print (Atoms('Cu3', positions=R,
pbc=1, cell=A,
calculator=EMT()).get_potential_energy() - 3 * e0) / 2
A = np.array([(0, b, b),
(b, 0, b),
(b, b, 0)])
R = np.zeros((3, 3))
for i in range(1, 3):
R[i] = i * A[2]
print (Atoms('Cu3', positions=R,
pbc=(1, 1, 0), cell=A,
calculator=EMT()).get_potential_energy() - 3 * e0) / 2
|
grhawk/ASE
|
tools/ase/test/emt.py
|
Python
|
gpl-2.0
| 1,579
|
[
"ASE"
] |
39c3b156b7859942c11b0659201f5b5228338adaa149a393c55192ca54193cca
|
#!/usr/bin/env python
import roslib
import rospy
import smach
import smach_ros
from smach import StateMachine
import actionlib
import time
import threading
from smach_ros import SimpleActionState
from smach_ros import ActionServerWrapper
from std_msgs.msg import String, Float64, UInt8, Bool
from wm_interpreter.msg import *
from collections import Counter
TIMEOUT_LENGTH = 10
# define state WaitingQuestion
class WaitingQuestion(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['NotUnderstood', 'Question', 'Timeout'],
input_keys=[],
output_keys=['WQ_question_out'])
self.RecoString = []
self.state = "WaitingQuestion"
self.QUESTIONS = []
self.QUESTIONS.append(["What is your name",
"Do a little presentation",
"Who are the inventors of the C programming language",
"Who is the inventor of the Python programming language",
"Which robot was the star in the movie Wall-E",
"Where does the term computer bug come from",
"What is the name of the round robot in the new Star Wars movie",
"How many curry sausages are eaten in Germany each year",
"Who is president of the galaxy in The Hitchhiker Guide to the Galaxy",
"Which robot is the love interest in Wall-E",
"Which company makes ASIMO",
"What company makes Big Dog",
"What is the funny clumsy character of the Star Wars prequels",
"How many people live in the Germany",
"What are the colours of the German flag",
"What city is the capital of the Germany",
"How many arms do you have",
"What is the heaviest element",
"what did Alan Turing create",
"Who is the helicopter pilot in the A-Team",
"What Apollo was the last to land on the moon",
"Who was the last man to step on the moon",
"In which county is the play of Hamlet set",
"What are names of Donald Duck nephews",
"How many metres are in a mile",
"Name a dragon in The Lord of the Rings",
"Who is the Chancellor of Germany",
"Who developed the first industrial robot",
"What's the difference between a cyborg and an android",
"Do you know any cyborg",
"In which city is this year's RoboCup hosted",
"Which city hosted last year's RoboCup",
"In which city will next year's RoboCup be hosted",
"Name the main rivers surrounding Leipzig",
"Where is the zoo of this city located",
"Where did the peaceful revolution of 1989 start",
"Where is the world's oldest trade fair hosted",
"Where is one of the world's largest dark music festivals hosted",
"Where is Europe's oldest continuous coffee shop hosted",
"Name one of the greatest German composers",
"Where is Johann Sebastian Bach buried",
"Do you have dreams",
"Hey what's up",
"There are seven days in a week. True or false",
"There are eleven days in a week. True or false",
"January has 31 days. True or false",
"January has 28 days. True or false",
"February has 28 days. True or false",
"February has 31 days. True or false",
"What city are you from",
"Who used first the word Robot",
"What origin has the word Robot"])
self.QUESTIONS.append([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
self.face_cmd = rospy.Publisher('/face_mode', UInt8, queue_size=1, latch=True)
self.sub = rospy.Subscriber("/recognizer_1/output", String, self.callback, queue_size=1)
def execute(self, userdata):
rospy.loginfo('Executing state WaitingQuestion')
self.face_cmd.publish(3)
timeout = time.time() + TIMEOUT_LENGTH # 10 sec
while True:
if max(self.QUESTIONS[1]) > 70:
userdata.WQ_question_out = self.QUESTIONS[0][self.QUESTIONS[1].index(max(self.QUESTIONS[1]))]
for idx in range(len(self.QUESTIONS[1])):
self.QUESTIONS[1][idx] = 0
return 'Question'
'''else:
if len(self.RecoString) < 2:
return 'NotUnderstood' '''
'''if time.time() > timeout:
return 'Timeout' '''
def callback(self, data):
self.RecoString = data.data.split()
for idx in range(len(self.QUESTIONS[1])):
self.QUESTIONS[1][idx] = 0
for RecoWord in self.RecoString:
for idx in range(len(self.QUESTIONS[1])):
if self.QUESTIONS[0][idx].lower().find(RecoWord) != -1:
self.QUESTIONS[1][idx] += 1
for idx in range(len(self.QUESTIONS[1])):
self.QUESTIONS[1][idx] = self.QUESTIONS[1][idx]*100/len(self.QUESTIONS[0][idx].split())
def SayX(self, ToSay_str):
rospy.loginfo(ToSay_str)
self.tts_pub.publish(ToSay_str)
def request_preempt(self):
"""Overload the preempt request method just to spew an error."""
smach.State.request_preempt(self)
rospy.logwarn("Preempted!")
# define state AnswerQuestion
class AnswerQuestion(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['Done'],
input_keys=['AQ_question_in'])
self.ANSWERS = {"What is your name":"Mon nom est Sara, ce qui signifie Systeme dassistance robotiser autonome",
"Do a little presentation":"Je suis un robot dassistance robotiser autonome. Jai eter concu par le club Walking Machine de ler-cole de technologie superieure specialement pour la comper-tition Robocup at Home.",
"Who are the inventors of the C programming language": "Les inventeur du language de programmation C sont Ken Thompson et Dennis Ritchie",
"Who is the inventor of the Python programming language": "Linventeur du language de programation python est Guido van Rossum",
"Which robot was the star in the movie Wall-E": "Le robot qui est lacteur principale dans le film Wall-E est Wall-E",
"Where does the term computer bug come from": "Le terme bogue informatique vient dun papillon de nuit coince dans un relais",
"What is the name of the round robot in the new Star Wars movie": "Le nom du petit robot rond dans le nouveau film de Star Wars est B B 8",
"How many curry sausages are eaten in Germany each year": "Environ 800 million currywurst par anner",
"Who is president of the galaxy in The Hitchhiker Guide to the Galaxy": "Le president de la galaxie dans le film Le Guide du voyageur galactique est Zaphod Beeblebrox",
"Which robot is the love interest in Wall-E": "Le robot companion de Wall-E est Eve",
"Which company makes ASIMO": "La compagnie qui fabrique ASIMO est Honda",
"What company makes Big Dog": "La compagnie qui fabrique Big Dog est Boston Dynamics",
"What is the funny clumsy character of the Star Wars prequels": "Le personnage drole mais maladroit des prelude de Star Wars est Jar-Jar Binks",
"How many people live in the Germany": "Il y a 80 millions dhabitant en Allemagne ",
"What are the colours of the German flag": "Les couleurs du drapeau de lAllemagne sont rouge, noir et jaune",
"What city is the capital of the Germany": "La capital de lAllemagne est Berlin",
"How many arms do you have": "Jai seulement un bras pour le moment. Veuillez me le redemander lannnee prochain",
"What is the heaviest element": "Lelement le plus lourd est le plutonium lorsquil est mesure par la masse de lelement mais lOsmium est plus dense",
"What did Alan Turing create": "Alan Turing a cree plusieurs choses comme les machines de Turing et le test de Turing",
"Who is the helicopter pilot in the A-Team": "Le pilote dhelicoptere dans A-Team est le capitaine Howling Mad Murdock",
"What Apollo was the last to land on the moon": "Le dernier a avoir atteris sur la lune etait Apollo 17",
"Who was the last man to step on the moon": "Le dernier homme a avoir marcher sur la lune etait Gene Cernan",
"In which county is the play of Hamlet set": "Il etait au Danemark",
"What are names of Donald Duck nephews": "The nom des neveux de Donald Duck etaient Huey Dewey et Louie Duck",
"How many metres are in a mile": "Il y a environ 1609 metres dans un mile",
"Name a dragon in The Lord of the Rings": "Le nom du dragon dans le Seigneur des anneaux etait Smaug",
"Who is the Chancellor of Germany": "La chancelliere de lAllemagne est Angela Merkel",
"Who developed the first industrial robot": "Le premier a developper un robot industriel etait le physicien americain Joseph Engelberg. Il est aussi considere comme le pere de la robotique.",
"What's the difference between a cyborg and an android": "Les cyborgs sont des etres biologiques avec des ameliorations electromecaniques. Les androids sont des robots avec une apparence humaine.",
"Do you know any cyborg": "Le professeur Kevin Warwick. Il a implemente un circuit dans son avant-bras gauche.",
"In which city is this year's RoboCup hosted": "La Robocup 2016 etait a Leipzig en Allemagne",
"Which city hosted last year's RoboCup": "La robocup 2015 etait a Heifei en Chine.",
"In which city will next year's RoboCup be hosted": "Robocup 2017 sera a Nagoya au Japon.",
"Name the main rivers surrounding Leipzig": "La Parthe Pleisse et la White Elster",
"Where is the zoo of this city located": "Le zoo est situe pres de la gare centrale.",
"Where did the peaceful revolution of 1989 start": "La revolution tranquille commenca le 4 septembre 1989 a Leipzig a la leglise Saint Nicholas.",
"Where is the world's oldest trade fair hosted": "La Foire de Leipzig est la plus ancienne du monde",
"Where is one of the world's largest dark music festivals hosted": "La ville de Leipzig accueille lun des plus grand festival de musique gothique du monde",
"Where is Europe's oldest continuous coffee shop hosted": "Le plus ancien cafe deurope ce trouve a Leipzig",
"Name one of the greatest German composers": "Jean Sebastien Bach est le plus grand compositeur dAllemagne",
"Where is Johann Sebastian Bach buried": "La sepulture de Jean Sebastien Bach se trouve a leglise Saint Thomas a Leipzig",
"Do you have dreams": "Je reve de moutons electriques.",
"Hey what's up": "Comment le saurai-je?",
"There are seven days in a week. True or false": "Cest vrais, il y a bel et bien sept jours dans une semaine.",
"There are eleven days in a week. True or false": "Cest faux, il y a plutot sept jours dans une semaine.",
"January has 31 days. True or false": "Cest vrai, le mois de Janvier compte 31 jours.",
"January has 28 days. True or false": "Faux, Janvier contient 31 jours, pas 28",
"February has 28 days. True or false": "Vrai, sauf dans une annee bissextile qui en contient 29",
"February has 31 days. True or false": "Faux, Fevrier a soit 28 jours, ou 29 selon lannee.",
"What city are you from": "Je viens de Mont-rer al",
"Who used first the word Robot": "Le mot robot fut utilise pour la premiere fois par lecrivain tcheque Karel Capek",
"What origin has the word Robot": "Il provient du mot tcheque Robota qui signifie travail force ou esclavage"}
self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
def execute(self, userdata):
rospy.loginfo('-- Executing state WaitingConfirmation --')
self.SayX(self.ANSWERS[userdata.AQ_question_in])
return 'Done'
def SayX(self, ToSay_str):
rospy.loginfo(ToSay_str)
self.tts_pub.publish(ToSay_str)
def request_preempt(self):
"""Overload the preempt request method just to spew an error."""
smach.State.request_preempt(self)
rospy.logwarn("Preempted!")
# define state AskToRepeat
class AskToRepeat(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['Done'])
self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
def execute(self, userdata):
rospy.loginfo('-- Executing state AskRepeat --')
self.SayX("Can you repeat the question please?")
rospy.sleep(5)
return 'Done'
def SayX(self, ToSay_str):
rospy.loginfo(ToSay_str)
self.tts_pub.publish(ToSay_str)
def request_preempt(self):
"""Overload the preempt request method just to spew an error."""
smach.State.request_preempt(self)
rospy.logwarn("Preempted!")
# main
def main():
rospy.init_node('interpreter')
rospy.sleep(5)
tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
neck_pub = rospy.Publisher('neckHead_controller/command', Float64, queue_size=1, latch=True)
neck_cmd = Float64()
neck_cmd.data = 0
neck_pub.publish(neck_cmd)
tts_pub.publish("Bonjour, je suis maintenant prete a repondre a vos questions")
outcomes = ""
# Create a SMACH state machine
sm = smach.StateMachine(outcomes=['success', 'aborted', 'preempted'],
output_keys=[])
with sm:
# Add states to the container
smach.StateMachine.add('WaitingQuestion', WaitingQuestion(),
transitions={'Question': 'AnswerQuestion',
'NotUnderstood': 'AskToRepeat',
'Timeout': 'WaitingQuestion'},
remapping={'WQ_question_out': 'question'})
smach.StateMachine.add('AnswerQuestion', AnswerQuestion(),
transitions={'Done': 'WaitingQuestion'},
remapping={'AQ_question_in': 'question'})
smach.StateMachine.add('AskToRepeat', AskToRepeat(),
transitions={'Done': 'WaitingQuestion'},
)
'''sis = smach_ros.IntrospectionServer('server_name', asw.wrapped_container, '/ASW_ROOT')'''
# Execute SMACH plan
sm.execute()
rospy.spin()
# Request the container to preempt
sm.request_preempt()
if __name__ == '__main__':
main()
|
WalkingMachine/sara_commun
|
wm_robocup2016/src/stage1_speech_recognition_FR.py
|
Python
|
apache-2.0
| 17,022
|
[
"Galaxy"
] |
e42926a1549e1fc74b591796a690d4065f0f669d21077d057c70927e2cfff7bc
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
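# For example, partition(lambda n: n % 2 == 0, [1, 2, 3, 4]) returns
# ([2, 4], [1, 3]): first the elements satisfying the predicate, then the rest.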
class servicecontrolCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'check': ('service_name', 'service_config_id', 'attributes', 'resources', 'flags', ),
'report': ('service_name', 'service_config_id', 'operations', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=servicecontrolCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the servicecontrol client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
|
googleapis/python-service-control
|
scripts/fixup_servicecontrol_v2_keywords.py
|
Python
|
apache-2.0
| 6,089
|
[
"VisIt"
] |
6d090c0e89dc32e70045734b826b3cb755d0354f60b2543b908839a5a987ed20
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create a rendering window and renderer
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.StereoCapableWindowOn()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
reader = vtk.vtkGenericEnSightReader()
# Make sure all algorithms use the composite data pipeline
cdp = vtk.vtkCompositeDataPipeline()
reader.SetDefaultExecutivePrototype(cdp)
reader.SetCaseFileName("" + str(VTK_DATA_ROOT) + "/Data/EnSight/blow3_bin.case")
reader.SetTimeValue(1)
geom = vtk.vtkGeometryFilter()
geom.SetInputConnection(reader.GetOutputPort())
mapper = vtk.vtkHierarchicalPolyDataMapper()
mapper.SetInputConnection(geom.GetOutputPort())
mapper.SetColorModeToMapScalars()
mapper.SetScalarModeToUsePointFieldData()
mapper.ColorByArrayComponent("displacement",0)
mapper.SetScalarRange(0,2.08)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# assign our actor to the renderer
ren1.AddActor(actor)
# enable user interface interactor
iren.Initialize()
ren1.GetActiveCamera().SetPosition(99.3932,17.6571,-22.6071)
ren1.GetActiveCamera().SetFocalPoint(3.5,12,1.5)
ren1.GetActiveCamera().SetViewAngle(30)
ren1.GetActiveCamera().SetViewUp(0.239617,-0.01054,0.97081)
ren1.ResetCameraClippingRange()
renWin.Render()
# prevent the tk window from showing up then start the event loop
reader.SetDefaultExecutivePrototype(None)
# --- end of script --
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/IO/EnSight/Testing/Python/EnSightBlow3Bin.py
|
Python
|
gpl-3.0
| 1,508
|
[
"VTK"
] |
093cbd2eb6aeb22a982318e9be3bbc676f308af099a5e9e427d3a3902b3344bd
|
#!/usr/bin/env python
import cookielib
import sys, os, time, argparse, getpass, re
try:
import mechanize
except ImportError:
print 'python-mechanize module not available.\n'
sys.exit(1)
def webBrowser():
# Browser
br = mechanize.Browser()
# Cookie Jar
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
# Browser options
br.set_handle_equiv(True)
br.set_handle_gzip(False)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-agent', 'Chrome')]
# The site we will navigate into, handling it's session
br.open('https://github.com/login')
# Select the login form (index zero here); the form index varies from site to site,
# so adjust nr accordingly for pages with additional forms (e.g. a search query box).
br.select_form(nr=0)
return br
def authenticatePage(args):
browser = webBrowser()
browser.form['login'] = args.user
browser.form['password'] = args.password
browser.submit()
return browser
def readPage(browser, args):
stats = {}
browser.addheaders = [('User-agent', 'Chrome'), ('Referer', 'https://github.com/' + args.repo + '/graphs/traffic'), ('X-Requested-With', 'XMLHttpRequest')]
# GitHubs Traffic payload is in python dictionary format
# grab the clones, and Visitors
try:
stats['Clones'] = eval(browser.open('https://github.com/' + args.repo + '/graphs/clone-activity-data').read())
stats['Visitors'] = eval(browser.open('https://github.com/' + args.repo + '/graphs/traffic-data').read())
except mechanize.HTTPError as e:
print 'There was an error obtaining traffic for said site.'
if str(e).find('406') != -1:
print '\tError 406: You do not have permission to view statistics. Or you supplied incorrect credentials'
sys.exit(1)
if str(e).find('404') != -1:
print '\tError 404: Page not found'
sys.exit(1)
return stats
def verifyArgs(args):
if args.repo is None or len(args.repo.split('/')) != 2:
print '\nYou must specify a repository you are interested in scraping:\n\t --repo foo/bar\n\nNote: GitHub is case-sensitive, so your arguments must be too'
sys.exit(1)
if args.user is '':
print '\nYou must specify a user to authenticate with'
sys.exit(1)
try:
while args.password is '':
args.password = getpass.getpass('Password for UserID ' + args.user + ' :',)
except KeyboardInterrupt:
print ''
sys.exit(0)
return args
def writeFile(args, stats):
if os.path.isfile(args.write):
log_file = open(args.write, 'r')
file_data = log_file.read()
log_file.close()
# True if the file header contains the same name as --repo
if args.repo == file_data.split('\n')[0]:
old_clones = eval(re.findall(r'(\[.*\])', file_data.split('\n')[1])[0])
old_visitors = eval(re.findall(r'(\[.*\])', file_data.split('\n')[2])[0])
# Remove overlapping list items based on date in our old list. Store this old data in a new list
tmp_clones = old_clones[old_clones.index(stats['clones'][len(stats['clones'])-3]) + 3:]
tmp_visitors = old_visitors[old_visitors.index(stats['visitors'][len(stats['visitors'])-3]) + 3:]
# Insert the new data at index 0 (GitHub reports newest items at the begining of the list) into our old list
tmp_clones[0:0] = stats['clones']
tmp_visitors[0:0] = stats['visitors']
# write the new data into file, overwriting any previous data
log_file = open(args.write, 'w')
log_file.write(args.repo + '\nclones = ' + str(tmp_clones) + '\nvisitors = ' + str(tmp_visitors) + '\n')
log_file.close()
sys.exit(0)
else:
print 'The file you attempted to write to contains stats for another repository (' + file_data.split('\n')[0] + \
')\nwhile you supplied arguments to gather stats for (' + args.repo + \
').\n\n... Or this is probably not the file you wanted to overwrite:\n\t', args.write, '\nExiting just to be safe...\n'
sys.exit(1)
else:
log_file = open(args.write, 'w')
log_file.write(args.repo + '\nclones = ' + str(stats['clones']) + '\nvisitors = ' + str(stats['visitors']) + '\n')
log_file.close()
def parseArgs(args=None):
# Traffic Stats URL: https://github.com/idaholab/moose/graphs/clone-activity-data
parser = argparse.ArgumentParser(description='Scrape GitHub for a webpage requiring authentication')
parser.add_argument('--repo', '-r', nargs='?', help='Repository (example: foo/bar)')
parser.add_argument('--write', '-w', nargs='?', help='Write to a file')
try:
parser.add_argument('--user', '-u', nargs='?', default=os.getenv('USER'), help='Authenticate using specified user. Defaults to: (' + os.getenv('USER') + ')')
except TypeError:
parser.add_argument('--user', '-u', nargs='?', default='', help='Authenticate using specified user')
parser.add_argument('--password', '-p', nargs='?', default='', help='Authenticate using specified password')
return verifyArgs(parser.parse_args(args))
if __name__ == '__main__':
args = parseArgs()
web_page = authenticatePage(args)
payload = readPage(web_page, args)
stats = {'clones' : [],
'visitors' : []}
for point in payload['Clones']['counts']:
stats['clones'].extend([time.strftime("%Y-%b-%d", time.gmtime(point['bucket'])), str(point['total']), str(point['unique'])])
for point in payload['Visitors']['counts']:
stats['visitors'].extend([time.strftime("%Y-%b-%d", time.gmtime(point['bucket'])), str(point['total']), str(point['unique'])])
if args.write:
writeFile(args, stats)
else:
print '\nClones: (date, total, unique)\n', stats['clones']
print '\nVisitors: (date, total, unique)\n', stats['visitors']
|
giopastor/moose
|
scripts/github_traffic.py
|
Python
|
lgpl-2.1
| 5,792
|
[
"MOOSE"
] |
0a9220cba24c1fcbfe7e4ac82aff1b629f2802a449da2c2ceb2da91b4758206b
|
r"""
****************************
Generalised Gaussian Process
****************************
Introduction
^^^^^^^^^^^^
A GP is a statistical distribution :math:`Y_t`, :math:`t\in\mathrm T`, for
which any finite linear combination of samples has a joint Gaussian
distribution `[1]`_ `[2]`_.
An instance of such class of processes is defined via a mean function
:math:`m(\cdot)` and a covariance function :math:`k(\cdot, \cdot)` whose
domains are :math:`\mathrm T` and :math:`\mathrm T\times\mathrm T`,
respectively.
Here we implement an extension of GPs that makes use of exponential-family
likelihoods.
An instance of such process is given by
.. math::
\mathbf y \sim \int \prod_i \text{ExpFam}(y_i ~|~ \mu_i = g(z_i))
\mathcal N(\mathbf z ~|~ \mathbf m, \mathrm K) \mathrm d\mathbf z.
:class:`.ExpFamGP` performs inference over the mean and covariance
parameters via maximum likelihood and Expectation Propagation `[3]`_
approximation.
.. _[1]: https://en.wikipedia.org/wiki/Gaussian_process
.. _[2]: http://www.gaussianprocess.org/gpml/
.. _[3]: http://www.gaussianprocess.org/gpml/chapters/RW3.pdf
Usage
^^^^^
"""
from ._expfam import ExpFamGP
__all__ = ["ExpFamGP"]
|
limix/glimix-core
|
glimix_core/ggp/__init__.py
|
Python
|
mit
| 1,188
|
[
"Gaussian"
] |
4d57fe8cb810937a287602611371e22e8672ce7559789e96ab6bc23439cfb96a
|
class System(object):
def __init__(self):
self.molecules = tuple([])
self.atomtypes = []
self.bondtypes = []
self.angletypes= []
self.dihedraltypes = []
self.impropertypes = []
self.cmaptypes = []
self.interactiontypes = []
self.pairtypes = []
self.constrainttypes = []
self.forcefield= None
self.information = {} # like 'atomtypes': self.atomtypes
class Molecule(object):
def __init__(self):
self.chains = []
self.atoms = []
self.residues = []
self.bonds = []
self.angles = []
self.dihedrals = []
self.impropers = []
self.cmaps = []
self.pairs = []
self.exclusion_numb = None # 0, 1, 2, ..
self.exclusions = []
self.settles = []
self.constraints= []
self.information = {} # like 'atoms': self.atoms
self.name = None
self._anumb_to_atom = {}
def anumb_to_atom(self, anumb):
'''Returns the atom object corresponding to an atom number'''
assert isinstance(anumb, int), "anumb must be integer"
if len(self._anumb_to_atom) == 0: # empty dictionary
if len(self.atoms) != 0:
for atom in self.atoms:
self._anumb_to_atom[atom.number] = atom
return self._anumb_to_atom[anumb]
else:
print("no atoms in the molecule")
return False
else:
if anumb in self._anumb_to_atom:
return self._anumb_to_atom[anumb]
else:
print("no such atom number (%d) in the molecule" % (anumb))
return False
def renumber_atoms(self):
if len(self.atoms) != 0:
# reset the mapping
self._anumb_to_atom = {}
for i,atom in enumerate(self.atoms):
atom.number = i+1 # starting from 1
else:
print("the number of atoms is zero - no renumbering")
class Chain(object):
"""
name = str,
residues= list,
molecule= Molecule
"""
def __init__(self):
self.residues = []
class Residue(object):
"""
name = str,
number = int,
chain = Chain,
chain_name = str,
atoms = list,
"""
def __init__(self):
self.atoms = []
class Atom(object):
"""
name = str,
number = int,
flag = str, # HETATM
coords = list,
residue = Residue,
occup = float,
bfactor = float,
altlocs = list,
atomtype= str,
radius = float,
charge = float,
mass = float,
chain = str,
resname = str,
resnumb = int,
altloc = str, # per atoms
"""
def __init__(self):
self.coords = [] # a list of coordinates (x,y,z) of models
self.altlocs= [] # a list of (altloc_name, (x,y,z), occup, bfactor)
def get_atomtype(self):
if hasattr(self, 'atomtype'):
return self.atomtype
else:
print("atom %s doesn't have atomtype" % self)
return False
class Param:
def convert(self, reqformat):
assert reqformat in ('charmm', 'gromacs')
if reqformat == self.format:
if reqformat == 'charmm':
return self.charmm
elif reqformat == 'gromacs':
return self.gromacs
else:
raise NotImplementedError
if isinstance(self, AtomType):
if reqformat == 'gromacs' and self.format == 'charmm':
self.gromacs['param']['lje'] = abs(self.charmm['param']['lje']) * 4.184
self.gromacs['param']['ljl'] = self.charmm['param']['ljl'] * 2 * 0.1 / (2**(1.0/6.0))
if self.charmm['param']['lje14'] is not None:
self.gromacs['param']['lje14'] = abs(self.charmm['param']['lje14']) * 4.184
self.gromacs['param']['ljl14'] = self.charmm['param']['ljl14'] * 2 * 0.1 / (2**(1.0/6.0))
else:
self.gromacs['param']['lje14'] = None
self.gromacs['param']['ljl14'] = None
else:
raise NotImplementedError
elif isinstance(self, BondType):
if reqformat == 'gromacs' and self.format == 'charmm':
self.gromacs['param']['kb'] = self.charmm['param']['kb'] * 2 * 4.184 * (1.0 / 0.01) # nm^2
self.gromacs['param']['b0'] = self.charmm['param']['b0'] * 0.1
self.gromacs['func'] = 1
else:
raise NotImplementedError
elif isinstance(self, AngleType):
if reqformat == 'gromacs' and self.format == 'charmm':
self.gromacs['param']['ktetha'] = self.charmm['param']['ktetha'] * 2 * 4.184
self.gromacs['param']['tetha0'] = self.charmm['param']['tetha0']
self.gromacs['param']['kub'] = self.charmm['param']['kub'] * 2 * 4.184 * 10 * 10
self.gromacs['param']['s0'] = self.charmm['param']['s0'] * 0.1
self.gromacs['func'] = 5
else:
raise NotImplementedError
elif isinstance(self, DihedralType):
if reqformat == 'gromacs' and self.format == 'charmm':
for dih in self.charmm['param']:
convdih = {}
convdih['kchi'] = dih['kchi'] * 4.184
convdih['n'] = dih['n']
convdih['delta'] = dih['delta']
self.gromacs['param'].append(convdih)
self.gromacs['func'] = 9
else:
raise NotImplementedError
elif isinstance(self, ImproperType):
if reqformat == 'gromacs' and self.format == 'charmm':
for imp in self.charmm['param']:
convimp = {}
convimp['kpsi'] = imp['kpsi'] * 2 * 4.184
convimp['psi0'] = imp['psi0']
if imp.get('n', False):
convimp['n'] = imp['n']
self.gromacs['param'].append(convimp)
self.gromacs['func'] = 2
# self.gromacs['param']['kpsi'] = self.charmm['param']['kpsi'] * 2 * 4.184
# self.gromacs['param']['psi0'] = self.charmm['param']['psi0']
# self.gromacs['func'] = 2
else:
raise NotImplementedError
elif isinstance(self, CMapType):
if reqformat == 'gromacs' and self.format == 'charmm':
self.gromacs['param']= [n*4.184 for n in self.charmm['param']]
self.gromacs['func'] = 1
else:
raise NotImplementedError
elif isinstance(self, InteractionType):
if reqformat == 'gromacs' and self.format == 'charmm':
if self.charmm['param']['lje'] is not None:
self.gromacs['param']['lje'] = abs(self.charmm['param']['lje']) * 4.184
self.gromacs['param']['ljl'] = self.charmm['param']['ljl'] * 0.1 / (2**(1.0/6.0)) # no *2
else:
self.gromacs['param']['lje'] = None
self.gromacs['param']['ljl'] = None
if self.charmm['param']['lje14'] is not None:
self.gromacs['param']['lje14'] = abs(self.charmm['param']['lje14']) * 4.184
self.gromacs['param']['ljl14'] = self.charmm['param']['ljl14'] * 0.1 / (2**(1.0/6.0))
else:
self.gromacs['param']['lje14'] = None
self.gromacs['param']['ljl14'] = None
else:
raise NotImplementedError
else:
raise NotImplementedError
class AtomType(Param):
def __init__(self, format):
assert format in ('charmm', 'gromacs')
self.format = format
self.atype = None
self.mass = None
self.charge = None
self.charmm = {'param': {'lje':None, 'ljl':None, 'lje14':None, 'ljl14':None} }
self.gromacs= {'param': {'lje':None, 'ljl':None, 'lje14':None, 'ljl14':None} }
class BondType(Param):
def __init__(self, format):
assert format in ('charmm', 'gromacs')
self.format = format
self.atom1 = None
self.atom2 = None
self.atype1 = None
self.atype2 = None
self.charmm = {'param': {'kb':None, 'b0':None} }
self.gromacs= {'param': {'kb':None, 'b0':None}, 'func':None}
class AngleType(Param):
def __init__(self, format):
assert format in ('charmm', 'gromacs')
self.format = format
self.atom1 = None
self.atom2 = None
self.atom3 = None
self.atype1 = None
self.atype2 = None
self.atype3 = None
self.charmm = {'param':{'ktetha':None, 'tetha0':None, 'kub':None, 's0':None} }
self.gromacs= {'param':{'ktetha':None, 'tetha0':None, 'kub':None, 's0':None}, 'func':None}
class DihedralType(Param):
def __init__(self, format):
assert format in ('charmm', 'gromacs')
self.format = format
self.atom1 = None
self.atom2 = None
self.atom3 = None
self.atom4 = None
self.atype1 = None
self.atype2 = None
self.atype3 = None
self.atype4 = None
self.charmm = {'param':[]} # {kchi, n, delta}
self.gromacs= {'param':[]}
class ImproperType(Param):
def __init__(self, format):
assert format in ('charmm', 'gromacs')
self.format = format
self.atype1 = None
self.atype2 = None
self.atype3 = None
self.atype4 = None
self.charmm = {'param':[]}
self.gromacs= {'param':[], 'func': None} # {'kpsi': None, 'psi0':None}
class CMapType(Param):
def __init__(self, format):
assert format in ('charmm', 'gromacs')
self.format = format
self.atom1 = None
self.atom2 = None
self.atom3 = None
self.atom4 = None
self.atom5 = None
self.atom6 = None
self.atom7 = None
self.atom8 = None
self.atype1 = None
self.atype2 = None
self.atype3 = None
self.atype4 = None
self.atype5 = None
self.atype6 = None
self.atype7 = None
self.atype8 = None
self.charmm = {'param': []}
self.gromacs= {'param': []}
class InteractionType(Param):
def __init__(self, format):
assert format in ('charmm', 'gromacs')
self.format = format
self.atom1 = None
self.atom2 = None
self.atype1 = None
self.atype2 = None
self.charmm = {'param': {'lje':None, 'ljl':None, 'lje14':None, 'ljl14':None} }
self.gromacs= {'param': {'lje':None, 'ljl':None, 'lje14':None, 'ljl14':None}, 'func':None }
class SettleType(Param):
def __init__(self, format):
assert format in ('gromacs',)
self.atom = None
self.dOH = None
self.dHH = None
class ConstraintType(Param):
def __init__(self, format):
assert format in ('gromacs',)
self.atom1 = None
self.atom2 = None
self.atype1 = None
self.atype2 = None
self.gromacs= {'param': {'b0':None}, 'func':None}
class Exclusion:
def __init__(self):
self.main_atom = None
self.other_atoms = []
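# A minimal sketch of the Lennard-Jones conversion applied in Param.convert above
# (assumed CHARMM -> GROMACS conventions): kcal/mol -> kJ/mol multiplies by 4.184,
# Angstrom -> nm multiplies by 0.1, and CHARMM's Rmin/2 maps to a GROMACS sigma via
# sigma = Rmin / 2**(1/6). The numeric inputs are illustrative only.
def _charmm_lj_to_gromacs(lje_kcal, rmin_half_angstrom):
    lje = abs(lje_kcal) * 4.184
    ljl = rmin_half_angstrom * 2 * 0.1 / (2 ** (1.0 / 6.0))
    return lje, ljl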
|
resal81/PyTopol
|
pytopol/parsers/blocks.py
|
Python
|
gpl-3.0
| 11,684
|
[
"CHARMM",
"Gromacs"
] |
68edeed7a27f4b1649297b617d5e1057569b55190531b3019776fdceef329f00
|
"""Each ElkM1 area will be created as a separate alarm_control_panel."""
from elkm1_lib.const import AlarmState, ArmedStatus, ArmLevel, ArmUpState
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
FORMAT_NUMBER,
AlarmControlPanel,
)
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
ATTR_CODE,
ATTR_ENTITY_ID,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from . import (
DOMAIN,
SERVICE_ALARM_ARM_HOME_INSTANT,
SERVICE_ALARM_ARM_NIGHT_INSTANT,
SERVICE_ALARM_ARM_VACATION,
SERVICE_ALARM_DISPLAY_MESSAGE,
ElkEntity,
create_elk_entities,
)
SIGNAL_ARM_ENTITY = "elkm1_arm"
SIGNAL_DISPLAY_MESSAGE = "elkm1_display_message"
ELK_ALARM_SERVICE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID, default=[]): cv.entity_ids,
vol.Required(ATTR_CODE): vol.All(vol.Coerce(int), vol.Range(0, 999999)),
}
)
DISPLAY_MESSAGE_SERVICE_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID, default=[]): cv.entity_ids,
vol.Optional("clear", default=2): vol.All(vol.Coerce(int), vol.In([0, 1, 2])),
vol.Optional("beep", default=False): cv.boolean,
vol.Optional("timeout", default=0): vol.All(
vol.Coerce(int), vol.Range(min=0, max=65535)
),
vol.Optional("line1", default=""): cv.string,
vol.Optional("line2", default=""): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the ElkM1 alarm platform."""
if discovery_info is None:
return
elk_datas = hass.data[DOMAIN]
entities = []
for elk_data in elk_datas.values():
elk = elk_data["elk"]
entities = create_elk_entities(elk_data, elk.areas, "area", ElkArea, entities)
async_add_entities(entities, True)
def _dispatch(signal, entity_ids, *args):
for entity_id in entity_ids:
async_dispatcher_send(hass, f"{signal}_{entity_id}", *args)
def _arm_service(service):
entity_ids = service.data.get(ATTR_ENTITY_ID, [])
arm_level = _arm_services().get(service.service)
args = (arm_level, service.data.get(ATTR_CODE))
_dispatch(SIGNAL_ARM_ENTITY, entity_ids, *args)
for service in _arm_services():
hass.services.async_register(
DOMAIN, service, _arm_service, ELK_ALARM_SERVICE_SCHEMA
)
def _display_message_service(service):
entity_ids = service.data.get(ATTR_ENTITY_ID, [])
data = service.data
args = (
data["clear"],
data["beep"],
data["timeout"],
data["line1"],
data["line2"],
)
_dispatch(SIGNAL_DISPLAY_MESSAGE, entity_ids, *args)
hass.services.async_register(
DOMAIN,
SERVICE_ALARM_DISPLAY_MESSAGE,
_display_message_service,
DISPLAY_MESSAGE_SERVICE_SCHEMA,
)
def _arm_services():
return {
SERVICE_ALARM_ARM_VACATION: ArmLevel.ARMED_VACATION.value,
SERVICE_ALARM_ARM_HOME_INSTANT: ArmLevel.ARMED_STAY_INSTANT.value,
SERVICE_ALARM_ARM_NIGHT_INSTANT: ArmLevel.ARMED_NIGHT_INSTANT.value,
}
class ElkArea(ElkEntity, AlarmControlPanel):
"""Representation of an Area / Partition within the ElkM1 alarm panel."""
def __init__(self, element, elk, elk_data):
"""Initialize Area as Alarm Control Panel."""
super().__init__(element, elk, elk_data)
self._changed_by_entity_id = ""
self._state = None
async def async_added_to_hass(self):
"""Register callback for ElkM1 changes."""
await super().async_added_to_hass()
for keypad in self._elk.keypads:
keypad.add_callback(self._watch_keypad)
async_dispatcher_connect(
self.hass, f"{SIGNAL_ARM_ENTITY}_{self.entity_id}", self._arm_service
)
async_dispatcher_connect(
self.hass,
f"{SIGNAL_DISPLAY_MESSAGE}_{self.entity_id}",
self._display_message,
)
def _watch_keypad(self, keypad, changeset):
if keypad.area != self._element.index:
return
if changeset.get("last_user") is not None:
self._changed_by_entity_id = self.hass.data[DOMAIN][self._prefix][
"keypads"
].get(keypad.index, "")
self.async_schedule_update_ha_state(True)
@property
def code_format(self):
"""Return the alarm code format."""
return FORMAT_NUMBER
@property
def state(self):
"""Return the state of the element."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_NIGHT
@property
def device_state_attributes(self):
"""Attributes of the area."""
attrs = self.initial_attrs()
elmt = self._element
attrs["is_exit"] = elmt.is_exit
attrs["timer1"] = elmt.timer1
attrs["timer2"] = elmt.timer2
if elmt.armed_status is not None:
attrs["armed_status"] = ArmedStatus(elmt.armed_status).name.lower()
if elmt.arm_up_state is not None:
attrs["arm_up_state"] = ArmUpState(elmt.arm_up_state).name.lower()
if elmt.alarm_state is not None:
attrs["alarm_state"] = AlarmState(elmt.alarm_state).name.lower()
attrs["changed_by_entity_id"] = self._changed_by_entity_id
return attrs
def _element_changed(self, element, changeset):
elk_state_to_hass_state = {
ArmedStatus.DISARMED.value: STATE_ALARM_DISARMED,
ArmedStatus.ARMED_AWAY.value: STATE_ALARM_ARMED_AWAY,
ArmedStatus.ARMED_STAY.value: STATE_ALARM_ARMED_HOME,
ArmedStatus.ARMED_STAY_INSTANT.value: STATE_ALARM_ARMED_HOME,
ArmedStatus.ARMED_TO_NIGHT.value: STATE_ALARM_ARMED_NIGHT,
ArmedStatus.ARMED_TO_NIGHT_INSTANT.value: STATE_ALARM_ARMED_NIGHT,
ArmedStatus.ARMED_TO_VACATION.value: STATE_ALARM_ARMED_AWAY,
}
if self._element.alarm_state is None:
self._state = None
elif self._area_is_in_alarm_state():
self._state = STATE_ALARM_TRIGGERED
elif self._entry_exit_timer_is_running():
self._state = (
STATE_ALARM_ARMING if self._element.is_exit else STATE_ALARM_PENDING
)
else:
self._state = elk_state_to_hass_state[self._element.armed_status]
def _entry_exit_timer_is_running(self):
return self._element.timer1 > 0 or self._element.timer2 > 0
def _area_is_in_alarm_state(self):
return self._element.alarm_state >= AlarmState.FIRE_ALARM.value
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
self._element.disarm(int(code))
async def async_alarm_arm_home(self, code=None):
"""Send arm home command."""
self._element.arm(ArmLevel.ARMED_STAY.value, int(code))
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
self._element.arm(ArmLevel.ARMED_AWAY.value, int(code))
async def async_alarm_arm_night(self, code=None):
"""Send arm night command."""
self._element.arm(ArmLevel.ARMED_NIGHT.value, int(code))
async def _arm_service(self, arm_level, code):
self._element.arm(arm_level, code)
async def _display_message(self, clear, beep, timeout, line1, line2):
"""Display a message on all keypads for the area."""
self._element.display_message(clear, beep, timeout, line1, line2)
|
leppa/home-assistant
|
homeassistant/components/elkm1/alarm_control_panel.py
|
Python
|
apache-2.0
| 8,138
|
[
"Elk"
] |
05b6fc26a14c1ffd17fbf241f47eea29460745566bf499c505aeb504cd3d5c23
|
###AltAnalyze
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
#import pkg_resources
#import distutils
import statistics
import sys, string
import os.path
import unique
import update
import UI
import copy
import export; reload(export)
import ExpressionBuilder; reload(ExpressionBuilder)
import ExonAnalyze_module; reload(ExonAnalyze_module)
import ExonAnnotate_module; reload(ExonAnnotate_module)
import ResultsExport_module
import FeatureAlignment
import GO_Elite
import time
import webbrowser
import random
import traceback
import shutil
try:
import multiprocessing as mlp
except Exception:
mlp=None
print 'Note: Multiprocessing not supported for this version of Python.'
try:
from scipy import stats
except Exception:
pass ### scipy is not required but is used as a faster implementation of Fisher Exact Test when present
try:
from PIL import Image as PIL_Image
try: import ImageTk
except Exception: from PIL import ImageTk
except Exception:
None #print 'Python Imaging Library not installed... using default PNG viewer'
use_Tkinter = 'no'
debug_mode = 'no'
analysis_start_time = time.time()
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
dir_list2 = [] #add in code to prevent folder names from being included
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".csv" or entry[-4:] == ".TXT":
dir_list2.append(entry)
return dir_list2
def eliminate_redundant_dict_values(database):
db1={}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
def makeUnique(item):
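### Return a sorted list of unique entries; unhashable entries (e.g. lists) are keyed as tuples and restored as lists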
db1={}; list1=[]; k=0
for i in item:
try: db1[i]=[]
except TypeError: db1[tuple(i)]=[]; k=1
for i in db1:
if k==0: list1.append(i)
else: list1.append(list(i))
list1.sort()
return list1
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def returnLargeGlobalVars():
### Prints all large global variables retained in memory (taking up space)
all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(globals()[var])>500:
print var, len(globals()[var])
except Exception: null=[]
def clearObjectsFromMemory(db_to_clear):
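### Explicitly delete the entries of a large database (dictionary or list of keys) to free memory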
db_keys={}
try:
for key in db_to_clear: db_keys[key]=[]
except Exception:
for key in db_to_clear: del key ### if key is a list
for key in db_keys:
try: del db_to_clear[key]
except Exception:
try:
for i in key: del i ### For lists of tuples
except Exception: del key ### For plain lists
def importGeneric(filename):
fn=filepath(filename); key_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
key_db[t[0]] = t[1:]
return key_db
def importGenericFiltered(filename,filter_db):
fn=filepath(filename); key_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
key = t[0]
if key in filter_db: key_db[key] = t[1:]
return key_db
def importGenericFilteredDBList(filename,filter_db):
fn=filepath(filename); key_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
try:
null=filter_db[t[0]]
try: key_db[t[0]].append(t[1])
except KeyError: key_db[t[0]] = [t[1]]
except Exception: null=[]
return key_db
def importGenericDBList(filename):
fn=filepath(filename); key_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
try: key_db[t[0]].append(t[1])
except KeyError: key_db[t[0]] = [t[1]]
return key_db
def importExternalDBList(filename):
fn=filepath(filename); key_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
try: key_db[t[0]].append(t[1:])
except Exception: key_db[t[0]] = [t[1:]]
return key_db
def FindDir(dir,term):
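### Return the alphabetically last entry in dir matching term exactly, or containing term if no exact match ('' if none)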
dir_list = unique.read_directory(dir)
dir_list2=[]
dir_list.sort()
for i in dir_list:
if term == i: dir_list2.append(i)
if len(dir_list2)==0:
for i in dir_list:
if term in i: dir_list2.append(i)
dir_list2.sort(); dir_list2.reverse()
if len(dir_list2)>0: return dir_list2[0]
else: return ''
def openFile(file_dir):
if os.name == 'nt':
try: os.startfile('"'+file_dir+'"')
except Exception: os.system('open "'+file_dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+file_dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+file_dir+'"')
def openCytoscape(parent_dir,application_dir,application_name):
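### Locate a local Cytoscape installation under parent_dir and launch it (via the java jar on Linux, otherwise the bundled executable)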
cytoscape_dir = FindDir(parent_dir,application_dir); cytoscape_dir = filepath(parent_dir+'/'+cytoscape_dir)
app_dir = FindDir(cytoscape_dir,application_name)
app_dir = cytoscape_dir+'/'+app_dir
if 'linux' in sys.platform:
app_dir = app_dir
app_dir2 = cytoscape_dir+'/Cytoscape'
try: createCytoscapeDesktop(cytoscape_dir)
except Exception: null=[]
dir_list = unique.read_directory('/usr/bin/') ### Check to see that JAVA is installed
if 'java' not in dir_list: print 'Java not referenced in /usr/bin/. If not installed,\nplease install and re-try opening Cytoscape'
try:
jar_path = cytoscape_dir+'/cytoscape.jar'
main_path = cytoscape_dir+'/cytoscape.CyMain'
plugins_path = cytoscape_dir+'/plugins'
os.system('java -Dswing.aatext=true -Xss5M -Xmx512M -jar '+jar_path+' '+main_path+' -p '+plugins_path+' &')
print 'Cytoscape jar opened:',jar_path
except Exception:
print 'OS command to open Java failed.'
try:
try: openFile(app_dir2); print 'Cytoscape opened:',app_dir2
except Exception:
os.chmod(app_dir,0777)
openFile(app_dir2)
except Exception:
try: openFile(app_dir)
except Exception:
os.chmod(app_dir,0777)
openFile(app_dir)
else:
try: openFile(app_dir)
except Exception:
os.chmod(app_dir,0777)
openFile(app_dir)
def createCytoscapeDesktop(cytoscape_dir):
cyto_ds_output = cytoscape_dir+'/Cytoscape.desktop'
data = export.ExportFile(cyto_ds_output)
cytoscape_desktop = cytoscape_dir+'/Cytoscape'; #cytoscape_desktop = '/hd3/home/nsalomonis/Cytoscape_v2.6.1/Cytoscape'
cytoscape_png = cytoscape_dir+ '/.install4j/Cytoscape.png'; #cytoscape_png = '/hd3/home/nsalomonis/Cytoscape_v2.6.1/.install4j/Cytoscape.png'
data.write('[Desktop Entry]'+'\n')
data.write('Type=Application'+'\n')
data.write('Name=Cytoscape'+'\n')
data.write('Exec=/bin/sh "'+cytoscape_desktop+'"'+'\n')
data.write('Icon='+cytoscape_png+'\n')
data.write('Categories=Application;'+'\n')
data.close()
########### Parse Input Annotations ###########
def ProbesetCalls(array_type,probeset_class,splice_event,constitutive_call,external_exonid):
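### Decide whether a probeset is included in the analysis and whether it is treated as constitutive, based on array type, probeset class and splicing annotations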
include_probeset = 'yes'
if array_type == 'AltMouse':
exonid = splice_event
if filter_probesets_by == 'exon':
if '-' in exonid or '|' in exonid: ###Therefore the probeset represents an exon-exon junction or multi-exon probeset
include_probeset = 'no'
if filter_probesets_by != 'exon':
if '|' in exonid: include_probeset = 'no'
if constitutive_call == 'yes': include_probeset = 'yes'
else:
if avg_all_for_ss == 'yes' and (probeset_class == 'core' or len(external_exonid)>2): constitutive_call = 'yes'
#if len(splice_event)>2 and constitutive_call == 'yes' and avg_all_for_ss == 'no': constitutive_call = 'no'
if constitutive_call == 'no' and len(splice_event)<2 and len(external_exonid)<2: ###otherwise these are interesting probesets to keep
if filter_probesets_by != 'full':
if filter_probesets_by == 'extended':
if probeset_class == 'full': include_probeset = 'no'
elif filter_probesets_by == 'core':
if probeset_class != 'core': include_probeset = 'no'
return include_probeset,constitutive_call
def EvidenceOfAltSplicing(slicing_annot):
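### Return 1 if the splicing annotation string contains evidence of alternative splicing, otherwise 0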
splice_annotations = ["ntron","xon","strangeSplice","Prime","3","5","C-term"]; as_call = 0
splice_annotations2 = ["ntron","assette","strangeSplice","Prime","3","5"]
for annot in splice_annotations:
if annot in slicing_annot: as_call = 1
if as_call == 1:
if "C-term" in slicing_annot and ("N-" in slicing_annot or "Promoter" in slicing_annot):
as_call = 0
for annot in splice_annotations2:
if annot in slicing_annot: as_call = 1
elif "bleed" in slicing_annot and ("N-" in slicing_annot or "Promoter" in slicing_annot):
as_call = 0
for annot in splice_annotations2:
if annot in slicing_annot: as_call = 1
return as_call
########### Begin Analyses ###########
class SplicingAnnotationData:
def ArrayType(self):
self._array_type = array_type
return self._array_type
def Probeset(self): return self._probeset
def setProbeset(self,probeset): self._probeset = probeset
def ExonID(self): return self._exonid
def setDisplayExonID(self,exonid): self._exonid = exonid
def GeneID(self): return self._geneid
def Symbol(self):
symbol = ''
if self.GeneID() in annotate_db:
y = annotate_db[self.GeneID()]
symbol = y.Symbol()
return symbol
def ExternalGeneID(self): return self._external_gene
def ProbesetType(self):
###e.g. Exon, junction, constitutive(gene)
return self._probeset_type
def GeneStructure(self): return self._block_structure
def SecondaryExonID(self): return self._block_exon_ids
def setSecondaryExonID(self,ids): self._block_exon_ids = ids
def setLocationData(self, chromosome, strand, probeset_start, probeset_stop):
self._chromosome = chromosome; self._strand = strand
self._start = probeset_start; self._stop = probeset_stop
def LocationSummary(self):
location = self.Chromosome()+':'+self.ProbeStart()+'-'+self.ProbeStop()+'('+self.Strand()+')'
return location
def Chromosome(self): return self._chromosome
def Strand(self): return self._strand
def ProbeStart(self): return self._start
def ProbeStop(self): return self._stop
def ProbesetClass(self):
###e.g. core, extended, full
return self._probest_class
def ExternalExonIDs(self): return self._external_exonids
def ExternalExonIDList(self):
external_exonid_list = string.split(self.ExternalExonIDs(),'|')
return external_exonid_list
def Constitutive(self): return self._constitutive_status
def setTranscriptCluster(self,secondary_geneid): self._secondary_geneid = secondary_geneid
def setNovelExon(self,novel_exon): self._novel_exon = novel_exon
def NovelExon(self): return self._novel_exon
def SecondaryGeneID(self): return self._secondary_geneid
def setExonRegionID(self,exon_region): self._exon_region = exon_region
def ExonRegionID(self): return self._exon_region
def SplicingEvent(self):
splice_event = self._splicing_event
if len(splice_event)!=0:
if splice_event[0] == '|': splice_event = splice_event[1:]
return splice_event
def SplicingCall(self): return self._splicing_call
def SpliceJunctions(self): return self._splice_junctions
def Delete(self): del self
def Report(self):
output = self.ArrayType() +'|'+ self.ExonID() +'|'+ self.ExternalGeneID()
return output
def __repr__(self): return self.Report()
class AltMouseData(SplicingAnnotationData):
def __init__(self,affygene,exons,ensembl,block_exon_ids,block_structure,probe_type_call):
self._geneid = affygene; self._external_gene = ensembl; self._exonid = exons; self._secondary_geneid = ensembl
self._probeset_type = probe_type_call; self._block_structure = block_structure; self._block_exon_ids = block_exon_ids
self._external_exonids = 'NA';
self._constitutive_status = 'no'
self._splicing_event = ''
self._secondary_geneid = 'NA'
self._exon_region = ''
if self._probeset_type == 'gene': self._constitutive_status = 'yes'
else: self._constitutive_status = 'no'
class AffyExonSTData(SplicingAnnotationData):
def __init__(self,ensembl_gene_id,exon_id,ens_exon_ids, constitutive_call_probeset, exon_region, splicing_event, splice_junctions, splicing_call):
self._geneid = ensembl_gene_id; self._external_gene = ensembl_gene_id; self._exonid = exon_id
self._constitutive_status = constitutive_call_probeset#; self._start = probeset_start; self._stop = probeset_stop
self._external_exonids = ens_exon_ids; #self._secondary_geneid = transcript_cluster_id#; self._chromosome = chromosome; self._strand = strand
self._exon_region=exon_region; self._splicing_event=splicing_event; self._splice_junctions=splice_junctions; self._splicing_call = splicing_call
if self._exonid[0] == 'U': self._probeset_type = 'UTR'
elif self._exonid[0] == 'E': self._probeset_type = 'exonic'
elif self._exonid[0] == 'I': self._probeset_type = 'intronic'
class AffyExonSTDataAbbreviated(SplicingAnnotationData):
def __init__(self,ensembl_gene_id,exon_id,splicing_call):
self._geneid = ensembl_gene_id; self._exonid = exon_id; self._splicing_call = splicing_call
def importSplicingAnnotations(array_type,Species,probeset_type,avg_ss_for_all,root_dir):
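### Load splicing annotations for the current array type; returns exon_db (probeset annotations) and constitutive_probeset_db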
global filter_probesets_by; filter_probesets_by = probeset_type
global species; species = Species; global avg_all_for_ss; avg_all_for_ss = avg_ss_for_all; global exon_db; exon_db={}
global summary_data_db; summary_data_db={}; global remove_intronic_junctions; remove_intronic_junctions = 'no'
if array_type == 'RNASeq':
probeset_annotations_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_junctions.txt'
else: probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
filtered_arrayids={};filter_status='no'
constitutive_probeset_db,exon_db,genes_being_analyzed = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status)
return exon_db, constitutive_probeset_db
def importSplicingAnnotationDatabase(filename,array_type,filtered_arrayids,filter_status):
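### Parse the probeset annotation file into SplicingAnnotationData objects (exon_db) and identify constitutive probesets; with filter_status 'yes' only the supplied filtered_arrayids are returned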
begin_time = time.time()
probesets_included_by_new_evidence = 0; export_exon_regions = 'yes'
if 'fake' in array_type: array_type = string.replace(array_type,'-fake',''); original_arraytype = 'RNASeq'
else: original_arraytype = array_type
if filter_status == 'no': global gene_transcript_cluster_db; gene_transcript_cluster_db={}; gene_transcript_cluster_db2={}; global last_exon_region_db; last_exon_region_db = {}
else: new_exon_db={}
fn=filepath(filename)
last_gene = ' '; last_exon_region = ''
constitutive_probeset_db = {}; constitutive_gene = {}
count = 0; x = 0; constitutive_original = {}
#if filter_status == 'yes': exon_db = {}
if array_type == 'AltMouse':
for line in open(fn,'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
probeset,affygene,exons,transcript_num,transcripts,probe_type_call,ensembl,block_exon_ids,block_structure,comparison_info = string.split(probeset_data,'\t')
###note: currently exclude comparison_info since not applicable for existing analyses
if x == 0: x = 1
else:
if exons[-1] == '|': exons = exons[0:-1]
if affygene[-1] == '|': affygene = affygene[0:-1]; constitutive_gene[affygene]=[]
if probe_type_call == 'gene': constitutive_call = 'yes' #looked through the probe annotations and the gene seems to be the most consistent constitutive feature
else: constitutive_call = 'no'
include_call,constitutive_call = ProbesetCalls(array_type,'',exons,constitutive_call,'')
if include_call == 'yes':
probe_data = AltMouseData(affygene,exons,ensembl,block_exon_ids,block_structure,probe_type_call) #this used to just have affygene,exon in the values (1/17/05)
exon_db[probeset] = probe_data
if filter_status == 'yes': new_exon_db[probeset] = probe_data
if constitutive_call == 'yes': constitutive_probeset_db[probeset] = affygene
genes_being_analyzed = constitutive_gene
else:
for line in open(fn,'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
if x == 0: x = 1
else:
try: probeset_id, exon_id, ensembl_gene_id, transcript_cluster_id, chromosome, strand, probeset_start, probeset_stop, affy_class, constitutive_call_probeset, external_exonid, ens_const_exons, exon_region, exon_region_start, exon_region_stop, splicing_event, splice_junctions = string.split(probeset_data,'\t')
except Exception: print probeset_data;force_error
if affy_class == 'free': affy_class = 'full' ### Don't know what the difference is
include_call,constitutive_call = ProbesetCalls(array_type,affy_class,splicing_event,constitutive_call_probeset,external_exonid)
#if 'ENSG00000163904:E11.5' in probeset_id: print probeset_data
#print array_type,affy_class,splicing_event,constitutive_call_probeset,external_exonid,constitutive_call,include_call;kill
if array_type == 'junction' and '.' not in exon_id: exon_id = string.replace(exon_id,'-','.'); exon_region = string.replace(exon_region,'-','.')
if ensembl_gene_id != last_gene: new_gene = 'yes'
else: new_gene = 'no'
if filter_status == 'no' and new_gene == 'yes':
if '.' in exon_id: ### Exclude junctions
if '-' not in last_exon_region and 'E' in last_exon_region: last_exon_region_db[last_gene] = last_exon_region
else: last_exon_region_db[last_gene] = last_exon_region
last_gene = ensembl_gene_id
if len(exon_region)>1: last_exon_region = exon_region ### some probeset not linked to an exon region
###Record the transcript clusters associated with each gene to annotate the results later on
if constitutive_call_probeset!=constitutive_call: probesets_included_by_new_evidence +=1#; print probeset_id,[splicing_event],[constitutive_call_probeset];kill
proceed = 'no'; as_call = 0
if array_type == 'RNASeq' or array_type == 'junction': include_call = 'yes' ### Constitutive expression is not needed
if remove_intronic_junctions == 'yes':
if 'E' not in exon_id: include_call = 'no' ### Remove junctions that only have splice-sites within an intron or UTR
if include_call == 'yes' or constitutive_call == 'yes':
#if proceed == 'yes':
as_call = EvidenceOfAltSplicing(splicing_event)
if filter_status == 'no':
probe_data = AffyExonSTDataAbbreviated(ensembl_gene_id, exon_id, as_call)
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(transcript_cluster_id)
try:
if export_exon_regions == 'yes':
probe_data.setExonRegionID(exon_region)
except Exception: null=[]
else:
probe_data = AffyExonSTData(ensembl_gene_id, exon_id, external_exonid, constitutive_call, exon_region, splicing_event, splice_junctions, as_call)
probe_data.setLocationData(chromosome, strand, probeset_start, probeset_stop)
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(transcript_cluster_id)
else:
probe_data.setNovelExon(affy_class)
if filter_status == 'yes':
try: ### saves memory
null = filtered_arrayids[probeset_id]
new_exon_db[probeset_id] = probe_data
except KeyError: null = []
else: exon_db[probeset_id] = probe_data
if constitutive_call == 'yes' and filter_status == 'no': ###only perform function when initially running
constitutive_probeset_db[probeset_id] = ensembl_gene_id
try: constitutive_gene[ensembl_gene_id].append(probeset_id)
except Exception: constitutive_gene[ensembl_gene_id] = [probeset_id]
###Only consider transcript clusters that make up the constitutive portion of the gene or that are alternatively regulated
if array_type != 'RNASeq':
try: gene_transcript_cluster_db[ensembl_gene_id].append(transcript_cluster_id)
except KeyError: gene_transcript_cluster_db[ensembl_gene_id] = [transcript_cluster_id]
if constitutive_call_probeset == 'yes' and filter_status == 'no': ###only perform function when initially running
try: constitutive_original[ensembl_gene_id].append(probeset_id)
except KeyError: constitutive_original[ensembl_gene_id] = [probeset_id]
if array_type != 'RNASeq':
try: gene_transcript_cluster_db2[ensembl_gene_id].append(transcript_cluster_id)
except KeyError: gene_transcript_cluster_db2[ensembl_gene_id] = [transcript_cluster_id]
###If a gene has no constitutive probesets left as a result of additional filtering (removing all probesets associated with a splice event), add the original ones back
original_probesets_add = 0; genes_being_analyzed = {}
for gene in constitutive_gene: genes_being_analyzed[gene]=[]
for gene in constitutive_original:
if gene not in constitutive_gene:
genes_being_analyzed[gene] = [gene]
constitutive_gene[gene]=[]
original_probesets_add +=1
gene_transcript_cluster_db[gene] = gene_transcript_cluster_db2[gene]
for probeset in constitutive_original[gene]: constitutive_probeset_db[probeset] = gene
#if array_type == 'junction' or array_type == 'RNASeq':
### Added the below in 1.16!!!
### If no constitutive probesets for a gene assigned, assign all gene probesets
for probeset in exon_db:
gene = exon_db[probeset].GeneID()
proceed = 'no'
exonid = exon_db[probeset].ExonID()
### Rather than add all probesets, still filter based on whether the probeset is in an annotated exon
if 'E' in exonid and 'I' not in exonid and '_' not in exonid: proceed = 'yes'
if proceed == 'yes':
if gene not in constitutive_gene:
constitutive_probeset_db[probeset] = gene
genes_being_analyzed[gene] = [gene]
### DO NOT ADD TO constitutive_gene SINCE WE WANT ALL mRNA ALIGNING EXONS/JUNCTIONS TO BE ADDED!!!!
#constitutive_gene[gene]=[]
gene_transcript_cluster_db = eliminate_redundant_dict_values(gene_transcript_cluster_db)
#if affygene == 'ENSMUSG00000023089': print [abs(fold_change_log)],[log_fold_cutoff];kill
if array_type == 'RNASeq':
import RNASeq
try: last_exon_region_db = RNASeq.importExonAnnotations(species,'distal-exon','')
except Exception: null=[]
constitutive_original=[]; constitutive_gene=[]
#clearObjectsFromMemory(exon_db); constitutive_probeset_db=[];genes_being_analyzed=[] ### used to evaluate how much memory objects are taking up
#print 'remove_intronic_junctions:',remove_intronic_junctions
#print constitutive_gene['ENSMUSG00000031170'];kill ### Determine if avg_ss_for_all is working
if original_arraytype == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print len(exon_db),id_name,'stored as instances of SplicingAnnotationData in memory'
#print len(constitutive_probeset_db),'array IDs stored as constitutive'
#print probesets_included_by_new_evidence, 'array IDs were re-annotated as NOT constitutive based on mRNA evidence'
if array_type != 'AltMouse': print original_probesets_add, 'genes not viewed as constitutive as a result of filtering',id_name,'based on splicing evidence, added back'
end_time = time.time(); time_diff = int(end_time-begin_time)
#print filename,"import finished in %d seconds" % time_diff
if filter_status == 'yes': return new_exon_db
else:
summary_data_db['gene_assayed'] = len(genes_being_analyzed)
try: exportDenominatorGenes(genes_being_analyzed)
except Exception: null=[]
return constitutive_probeset_db,exon_db,genes_being_analyzed
def exportDenominatorGenes(genes_being_analyzed):
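### Write the full set of genes examined (the denominator) to the GO-Elite denominator folder for over-representation analysis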
goelite_output = root_dir+'GO-Elite/denominator/AS.denominator.txt'
goelite_data = export.ExportFile(goelite_output)
systemcode = 'En'
goelite_data.write("GeneID\tSystemCode\n")
for gene in genes_being_analyzed:
if array_type == 'AltMouse':
try: gene = annotate_db[gene].ExternalGeneID()
except KeyError: null = []
goelite_data.write(gene+'\t'+systemcode+'\n')
try: goelite_data.close()
except Exception: null=[]
def performExpressionAnalysis(filename,constitutive_probeset_db,exon_db,annotate_db,dataset_name):
#if analysis_method == 'splicing-index': returnLargeGlobalVars();kill ### used to ensure all large global vars from the reciprocal junction analysis have been cleared from memory
#returnLargeGlobalVars()
"""import list of expression values for arrayids and calculates statistics"""
global fold_dbase; global original_conditions; global normalization_method
stats_dbase = {}; fold_dbase={}; ex_db={}; si_db=[]; bad_row_import = {}; count=0
global array_group_name_db; array_group_name_db = {}
global array_group_db; array_group_db = {};
global array_raw_group_values; array_raw_group_values = {}; global original_array_names; original_array_names=[]
global max_replicates; global equal_replicates; global array_group_list
array_index_list = [] ###Use this list for permutation analysis
fn=filepath(filename); line_num = 1
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line); t = string.split(data,'\t'); probeset = t[0]
if t[0]== '#': null=[] ### Don't import line
elif line_num == 1:
line_num += 1 #makes this value null for the next loop of actual array data
###Below occurs if the data is raw as opposed to precomputed
if ':' in t[1]:
array_group_list = []; x=0 ###gives us an original index value for each entry in the group
for entry in t[1:]:
original_array_names.append(entry)
aa = string.split(entry,':')
try: array_group,array_name = aa
except Exception: array_name = string.join(aa[1:],':'); array_group = aa[0]
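### e.g. a column header of 'control:array1' yields array_group 'control' and array_name 'array1'; additional colons are kept in the array name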
try:
array_group_db[array_group].append(x)
array_group_name_db[array_group].append(array_name)
except KeyError:
array_group_db[array_group] = [x]
array_group_name_db[array_group] = [array_name]
### below only occurs with a new group addition
array_group_list.append(array_group) #use this to generate comparisons in the below linked function
x += 1
else:
#try: print data_type
#except Exception,exception:
#print exception
#print traceback.format_exc()
print_out = 'The AltAnalyze filtered expression file "'+filename+'" is not properly formatted.\n Review formatting requirements if this file was created by another application.\n'
print_out += "\nFirst line\n"+line
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
badExit()
else:
#if probeset in exon_db:
#if exon_db[probeset].GeneID() == 'ENSG00000139970':
###Use the index values from above to assign each expression value to a new database
temp_group_array = {}
line_num+=1
for group in array_group_db:
if count == 0: array_index_list.append(array_group_db[group])
for array_index in array_group_db[group]:
try: exp_val = float(t[array_index+1])
except Exception:
if 'Gene_ID' not in line: bad_row_import[probeset]=line; exp_val = 1
###appended is the numerical expression value for each array in the group (temporary array)
try: temp_group_array[group].append(exp_val) #add 1 since probeset is the first column
except KeyError: temp_group_array[group] = [exp_val]
if count == 0: array_index_list.sort(); count = 1
####store the group database within the probeset database entry
try:
null = exon_db[probeset] ###To conserve memory, don't store any probesets not used for downstream analyses (e.g. not linked to mRNAs)
#if 'ENSG00000139970' in probeset:
#print [max_exp]
#print t[1:];kill
#max_exp = max(map(float, t[1:]))
#if len(array_raw_group_values)>10000: break
#if max_exp>math.log(70,2):
array_raw_group_values[probeset] = temp_group_array
except KeyError:
#print probeset
pass
print len(array_raw_group_values), 'sequence identifiers imported out of', line_num-1
if len(bad_row_import)>0:
print len(bad_row_import), "Rows with an unexplained import error processed and deleted."
print "Example row:"; x=0
for i in bad_row_import:
if x==0: print bad_row_import[i]
try: del array_raw_group_values[i]
except Exception: null=[]
x+=1
### If no gene expression reporting probesets were imported, update constitutive_probeset_db to include all mRNA aligning probesets
cs_genedb={}; missing_genedb={}; addback_genedb={}; rnaseq_cs_gene_db={}
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
#if gene == 'ENSG00000185008': print [probeset]
try:
null=array_raw_group_values[probeset]; cs_genedb[gene]=[]
if gene == probeset: rnaseq_cs_gene_db[gene]=[] ### If RPKM normalization used, use the gene expression values already calculated
except Exception: missing_genedb[gene]=[] ### Collect genes possibly missing from the constitutive database (verify next)
for gene in missing_genedb:
try: null=cs_genedb[gene]
except Exception: addback_genedb[gene]=[]
for probeset in array_raw_group_values:
try:
gene = exon_db[probeset].GeneID()
try:
null=addback_genedb[gene]
if 'I' not in probeset and 'U' not in probeset: ### No intron- or UTR-containing probesets should be used for constitutive expression
null=string.split(probeset,':')
if len(null)<3: ### No trans-gene junctions should be used for constitutive expression
constitutive_probeset_db[probeset]=gene
except Exception: null=[]
except Exception: null=[]
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
#if gene == 'ENSG00000185008': print [[probeset]]
### Only examine values for associated exons when determining RNASeq constitutive expression (when exon data is present)
normalization_method = 'raw'
if array_type == 'RNASeq':
junction_count=0; constitutive_probeset_db2={}
for uid in constitutive_probeset_db:
if '-' in uid: junction_count+=1
if len(rnaseq_cs_gene_db)>0: ### If filtered RPKM gene-level expression data present, use this instead (and only this)
normalization_method = 'RPKM'
constitutive_probeset_db={} ### Re-set this database
for gene in rnaseq_cs_gene_db:
constitutive_probeset_db[gene]=gene
elif junction_count !=0 and len(constitutive_probeset_db) != junction_count:
### occurs when there is a mix of junction and exon IDs
for uid in constitutive_probeset_db:
if '-' not in uid: constitutive_probeset_db2[uid] = constitutive_probeset_db[uid]
constitutive_probeset_db = constitutive_probeset_db2; constitutive_probeset_db2=[]
"""
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
if gene == 'ENSG00000185008': print [probeset]
"""
###Build all putative splicing events
global alt_junction_db; global exon_dbase; global critical_exon_db; critical_exon_db={}
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
### Applies to reciprocal junction analyses only
if array_type == 'AltMouse':
alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db = ExonAnnotate_module.identifyPutativeSpliceEvents(exon_db,constitutive_probeset_db,array_raw_group_values,agglomerate_inclusion_probesets,onlyAnalyzeJunctions)
print 'Number of Genes with Examined Splice Events:',len(alt_junction_db)
elif (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null':
import JunctionArray
alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db = JunctionArray.getPutativeSpliceEvents(species,array_type,exon_db,agglomerate_inclusion_probesets,root_dir)
print 'Number of Genes with Examined Splice Events:',len(alt_junction_db)
#alt_junction_db=[]; critical_exon_db=[]; exon_dbase=[]; exon_inclusion_db=[]
if agglomerate_inclusion_probesets == 'yes':
array_raw_group_values = agglomerateInclusionProbesets(array_raw_group_values,exon_inclusion_db)
exon_inclusion_db=[]
### For datasets with high memory requirements (RNASeq), filter the current and new databases
### Begin this function after agglomeration to ensure agglomerated probesets are considered
reciprocal_probesets = {}
if array_type == 'junction' or array_type == 'RNASeq':
for affygene in alt_junction_db:
for event in alt_junction_db[affygene]:
reciprocal_probesets[event.InclusionProbeset()]=[]
reciprocal_probesets[event.ExclusionProbeset()]=[]
not_evalutated={}
for probeset in array_raw_group_values:
try: null=reciprocal_probesets[probeset]
except Exception:
### Don't remove constitutive probesets
try: null=constitutive_probeset_db[probeset]
except Exception: not_evalutated[probeset]=[]
#print 'Removing',len(not_evalutated),'exon/junction IDs not evaluated for splicing'
for probeset in not_evalutated:
del array_raw_group_values[probeset]
###Check to see if we have precomputed expression data or raw to be analyzed
x=0; y=0; array_raw_group_values2={}; probesets_to_delete=[] ### Record deleted probesets
if len(array_raw_group_values)==0:
print_out = "No genes were considered 'Expressed' based on your input options. Check to make sure that the right species database is indicated and that the right data format has been selected (e.g., non-log versus log expression)."
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()
elif len(array_raw_group_values)>0:
###array_group_list should already be unique and correctly sorted (see above)
for probeset in array_raw_group_values:
data_lists=[]
for group_name in array_group_list:
data_list = array_raw_group_values[probeset][group_name] ###nested database entry access - baseline expression
if global_addition_factor > 0: data_list = addGlobalFudgeFactor(data_list,'log')
data_lists.append(data_list)
if len(array_group_list)==2:
data_list1 = data_lists[0]; data_list2 = data_lists[-1]; avg1 = statistics.avg(data_list1); avg2 = statistics.avg(data_list2)
log_fold = avg2 - avg1
try:
#t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
#t = abs(t); df = round(df) #Excel doesn't recognize fractions in a DF
#p = statistics.t_probability(t,df)
p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
if p == -1:
if len(data_list1)>1 and len(data_list2)>1:
print_out = "The probability statistic selected ("+probability_statistic+") is not compatible with the\nexperimental design. Please consider an alternative statistic or correct the problem.\nExiting AltAnalyze."
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()
else: p = 1
except Exception: p = 1
fold_dbase[probeset] = [0]; fold_dbase[probeset].append(log_fold)
stats_dbase[probeset]=[avg1]; stats_dbase[probeset].append(p)
###replace entries with the two lists for later permutation analysis
if p == -1: ### should be p == 1: Not sure why this filter was here, but it mistakenly removes probesets where there is just one array for each group
del fold_dbase[probeset];del stats_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
if x == 1: print 'Bad data detected...', data_list1, data_list2
elif (avg1 < expression_threshold and avg2 < expression_threshold and p > p_threshold) and array_type != 'RNASeq': ### Inserted a filtering option to exclude small-variance, low-expression probesets
del fold_dbase[probeset];del stats_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
else: array_raw_group_values2[probeset] = [data_list1,data_list2]
else: ###Non-junction analysis can handle more than 2 groups
index=0
for data_list in data_lists:
try: array_raw_group_values2[probeset].append(data_list)
except KeyError: array_raw_group_values2[probeset] = [data_list]
if len(array_group_list)>2: ### Thus, there is some variance for this probeset
### Create a complete stats_dbase containing all fold changes
if index==0:
avg_baseline = statistics.avg(data_list); stats_dbase[probeset] = [avg_baseline]
else:
avg_exp = statistics.avg(data_list)
log_fold = avg_exp - avg_baseline
try: fold_dbase[probeset].append(log_fold)
except KeyError: fold_dbase[probeset] = [0,log_fold]
index+=1
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
array_raw_group_values = array_raw_group_values2; array_raw_group_values2=[]
print x, id_name,"excluded prior to analysis... predicted not detected"
global original_avg_const_exp_db; global original_fold_dbase
global avg_const_exp_db; global permute_lists; global midas_db
if len(array_raw_group_values)>0:
adj_fold_dbase, nonlog_NI_db, conditions, gene_db, constitutive_gene_db, constitutive_fold_change, original_avg_const_exp_db = constitutive_exp_normalization(fold_dbase,stats_dbase,exon_db,constitutive_probeset_db)
stats_dbase=[] ### No longer needed after this point
original_fold_dbase = fold_dbase; avg_const_exp_db = {}; permute_lists = []; y = 0; original_conditions = conditions; max_replicates,equal_replicates = maxReplicates()
gene_expression_diff_db = constitutive_expression_changes(constitutive_fold_change,annotate_db) ###Add in constitutive fold change filter to assess gene expression for ASPIRE
while conditions > y:
avg_const_exp_db = constitutive_exp_normalization_raw(gene_db,constitutive_gene_db,array_raw_group_values,exon_db,y,avg_const_exp_db); y+=1
#print len(avg_const_exp_db),constitutive_gene_db['ENSMUSG00000054850']
###Export Analysis Results for external splicing analysis (e.g. MiDAS format)
if run_MiDAS == 'yes' and normalization_method != 'RPKM': ### RPKM has negative values which will crash MiDAS
status = ResultsExport_module.exportTransitResults(array_group_list,array_raw_group_values,array_group_name_db,avg_const_exp_db,adj_fold_dbase,exon_db,dataset_name,apt_location)
print "Finished exporting input data for MiDAS analysis"
try: midas_db = ResultsExport_module.importMidasOutput(dataset_name)
except Exception: midas_db = {} ### Occurs if there are not enough samples to calculate a MiDAS p-value
else: midas_db = {}
###Provides all pairwise permuted group comparisons
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
permute_lists = statistics.permute_arrays(array_index_list)
### Now remove probesets from the analysis that were used to evaluate gene expression
for probeset in constitutive_probeset_db:
try: null = reciprocal_probesets[probeset]
except Exception:
try: del array_raw_group_values[probeset]
except Exception: null=[]
not_evalutated=[]; reciprocal_probesets=[]
constitutive_probeset_db=[]
### Above, all conditions were examined when more than 2 are present... change this so that only the most extreme are analyzed further
if len(array_group_list)>2 and analysis_method == 'splicing-index' and (array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null'): ### USED FOR MULTIPLE COMPARISONS
print 'Calculating splicing-index values for multiple group comparisons (please be patient)...',
"""
if len(midas_db)==0:
print_out = 'Warning!!! MiDAS failed to run for multiple groups. Please make\nsure there are biological replicates present for your groups.\nAltAnalyze requires replicates for multi-group (more than two) analyses.'
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()"""
if filter_for_AS == 'yes':
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try: del nonlog_NI_db[probeset]
except KeyError: null=[]
if export_NI_values == 'yes':
export_exon_regions = 'yes'
### Currently, we don't deal with raw adjusted expression values, just group, so just export the values for each group
summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
print "Exporting all normalized intensities to:\n"+summary_output
adjoutput = export.ExportFile(summary_output)
title = string.join(['Gene\tExonID\tprobesetID']+original_array_names,'\t')+'\n'; adjoutput.write(title)
### Pick which data lists have the most extreme values using the NI_dbase (adjusted folds for each condition)
original_increment = int(len(nonlog_NI_db)/20); increment = original_increment; interaction = 0
for probeset in nonlog_NI_db:
if interaction == increment: increment+=original_increment; print '*',
interaction +=1
geneid = exon_db[probeset].GeneID(); ed = exon_db[probeset]
index=0; NI_list=[] ### Add the group_name to each adj fold value
for NI in nonlog_NI_db[probeset]:
NI_list.append((NI,index)); index+=1 ### set up to sort for the extreme adj folds and get the associated group_name using the index
raw_exp_vals = array_raw_group_values[probeset]
adj_exp_lists={} ### Store the adjusted expression values for each group
if geneid in avg_const_exp_db:
k=0; gi=0; adj_exp_vals = []
for exp_list in raw_exp_vals:
for exp in exp_list:
adj_exp_val = exp-avg_const_exp_db[geneid][k]
try: adj_exp_lists[gi].append(adj_exp_val)
except Exception: adj_exp_lists[gi] = [adj_exp_val]
if export_NI_values == 'yes': adj_exp_vals.append(str(adj_exp_val))
k+=1
gi+=1
if export_NI_values == 'yes':
#print geneid+'-'+probeset, adj_exp_val, [ed.ExonID()];kill
if export_exon_regions == 'yes':
try: ### This will only work if ExonRegionID is stored in the abbreviated AffyExonSTData object - useful in comparing results between arrays (exon-region centric)
if (array_type == 'exon' or array_type == 'gene') or '-' not in ed.ExonID(): ### only include exon entries not junctions
exon_regions = string.split(ed.ExonRegionID(),'|')
for er in exon_regions:
if len(er)>0: er = er
else:
try: er = ed.ExonID()
except Exception: er = 'NA'
ev = string.join([geneid+'\t'+er+'\t'+probeset]+adj_exp_vals,'\t')+'\n'
if len(filtered_probeset_db)>0:
if probeset in filtered_probeset_db: adjoutput.write(ev) ### This is used when we want to restrict to only probesets known to already be changed
else: adjoutput.write(ev)
except Exception:
ev = string.join([geneid+'\t'+'NA'+'\t'+probeset]+adj_exp_vals,'\t')+'\n'; adjoutput.write(ev)
NI_list.sort()
examine_pairwise_comparisons = 'yes'
if examine_pairwise_comparisons == 'yes':
k1=0; k2=0; filtered_NI_comps = []
NI_list_rev = list(NI_list); NI_list_rev.reverse()
NI1,index1 = NI_list[k1]; NI2,index2 = NI_list_rev[k2]; abs_SI = abs(math.log(NI1/NI2,2))
if abs_SI<alt_exon_logfold_cutoff:
### Indicates that no valid matches were identified - hence, exit loop and return an NI_list with no variance
NI_list = [NI_list[0],NI_list[0]]
else:
### The most extreme pair passes the splicing cutoff - check that the gene-level expression change is below the cutoff
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
#print 'original',abs_SI,k1,k2, ge_fold, constit_exp1, constit_exp2
if abs(ge_fold) < log_fold_cutoff: filtered_NI_comps.append([abs_SI,k1,k2])
else:
for i1 in NI_list:
k2=0
for i2 in NI_list_rev:
NI1,index1 = i1; NI2,index2 = i2; abs_SI = abs(math.log(NI1/NI2,2))
#constit_exp1 = original_avg_const_exp_db[geneid][index1]
#constit_exp2 = original_avg_const_exp_db[geneid][index2]
#ge_fold = constit_exp2-constit_exp1
#if abs(ge_fold) < log_fold_cutoff: filtered_NI_comps.append([abs_SI,k1,k2])
#print k1,k2, i1, i2, abs_SI, abs(ge_fold), log_fold_cutoff, alt_exon_logfold_cutoff
if abs_SI<alt_exon_logfold_cutoff: break
else:
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
if abs(ge_fold) < log_fold_cutoff:
filtered_NI_comps.append([abs_SI,k1,k2])
#if k1 == 49 or k1 == 50 or k1 == 51: print probeset, abs_SI, k1, k2, abs(ge_fold),log_fold_cutoff, index1, index2, NI1, NI2, constit_exp1,constit_exp2
k2+=1
k1+=1
if len(filtered_NI_comps)>0:
#print filtered_NI_comps
#print NI_list_rev
#print probeset,geneid
#print len(filtered_NI_comps)
#print original_avg_const_exp_db[geneid]
filtered_NI_comps.sort()
si,k1,k2 = filtered_NI_comps[-1]
NI_list = [NI_list[k1],NI_list_rev[k2]]
"""
NI1,index1 = NI_list[0]; NI2,index2 = NI_list[-1]
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
print probeset, si, ge_fold, NI_list"""
#print k1,k2;sys.exit()
index1 = NI_list[0][1]; index2 = NI_list[-1][1]
nonlog_NI_db[probeset] = [NI_list[0][0],NI_list[-1][0]] ### Update the values of this dictionary
data_list1 = array_raw_group_values[probeset][index1]; data_list2 = array_raw_group_values[probeset][index2]
avg1 = statistics.avg(data_list1); avg2 = statistics.avg(data_list2); log_fold = avg2 - avg1
group_name1 = array_group_list[index1]; group_name2 = array_group_list[index2]
try:
#t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
#t = abs(t); df = round(df); ttest_exp_p = statistics.t_probability(t,df)
ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
except Exception: ttest_exp_p = 1
fold_dbase[probeset] = [0]; fold_dbase[probeset].append(log_fold)
if ttest_exp_p == -1: del fold_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
elif avg1 < expression_threshold and avg2 < expression_threshold and (ttest_exp_p > p_threshold and ttest_exp_p != 1): ### Inserted a filtering option to exclude small-variance, low-expression probesets
del fold_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
else:
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
normInt1 = (avg1-constit_exp1); normInt2 = (avg2-constit_exp2)
adj_fold = normInt2 - normInt1
splicing_index = -1*adj_fold; abs_splicing_index = abs(splicing_index)
#print probeset, splicing_index, ge_fold, index1, index2
#normIntList1 = adj_exp_lists[index1]; normIntList2 = adj_exp_lists[index2]
all_nI=[]
for g_index in adj_exp_lists: all_nI.append(adj_exp_lists[g_index])
try: normIntensityP = statistics.OneWayANOVA(all_nI) #[normIntList1,normIntList2] ### This stays an ANOVA independent of the algorithm chosen since the number of groups is > 2
except Exception: normIntensityP = 'NA'
if (normInt1*normInt2)<0: opposite_SI_log_mean = 'yes'
else: opposite_SI_log_mean = 'no'
abs_log_ratio = abs(ge_fold)
if probeset in midas_db:
try: midas_p = float(midas_db[probeset])
except ValueError: midas_p = 'NA'
else: midas_p = 'NA'
#if 'ENSG00000059588' in geneid: print probeset, splicing_index, constit_exp1, constit_exp2, ge_fold,group_name2+'_vs_'+group_name1, index1, index2
if abs_splicing_index>alt_exon_logfold_cutoff and (midas_p < p_threshold or midas_p == 'NA'): #and abs_log_ratio>1 and ttest_exp_p<0.05: ###and ge_threshold_count==2
exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
ped = ProbesetExpressionData(avg1, avg2, log_fold, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
sid = ExonData(splicing_index,probeset,critical_exon_list,geneid,normInt1,normInt2,normIntensityP,opposite_SI_log_mean)
sid.setConstitutiveExpression(constit_exp1); sid.setConstitutiveFold(ge_fold); sid.setProbesetExpressionData(ped)
si_db.append((splicing_index,sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(splicing_index,geneid,normIntensityP)
ex_db[probeset] = eed
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print len(si_db),id_name,"with evidence of Alternative expression"
original_fold_dbase = fold_dbase; si_db.sort()
summary_data_db['denominator_exp_events']=len(nonlog_NI_db)
del avg_const_exp_db; del gene_db; del constitutive_gene_db; gene_expression_diff_db={}
if export_NI_values == 'yes': adjoutput.close()
### Above, all conditions were examined when more than 2 are present... change this so that only the most extreme are analyzed further
elif len(array_group_list)>2 and (array_type == 'junction' or array_type == 'RNASeq' or array_type == 'AltMouse'): ### USED FOR MULTIPLE COMPARISONS
excluded_probeset_db={}
group_sizes = []; original_array_indices = permute_lists[0] ###p[0] is the original organization of the group samples prior to permutation
for group in original_array_indices: group_sizes.append(len(group))
if analysis_method == 'linearregres': ### For linear regression, these scores are non-log
original_array_raw_group_values = copy.deepcopy(array_raw_group_values)
for probeset in array_raw_group_values:
ls_concatenated=[]
for group in array_raw_group_values[probeset]: ls_concatenated+=group
ls_concatenated = statistics.log_fold_conversion_fraction(ls_concatenated)
array_raw_group_values[probeset] = ls_concatenated
pos1=0; pos2=0; positions=[]
for group in group_sizes:
if pos1 == 0: pos2 = group; positions.append((pos1,pos2))
else: pos2 = pos1+group; positions.append((pos1,pos2))
pos1 = pos2
if export_NI_values == 'yes':
export_exon_regions = 'yes'
### Currently, we don't deal with raw adjusted expression values, just group, so just export the values for each group
summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
print "Exporting all normalized intensities to:\n"+summary_output
adjoutput = export.ExportFile(summary_output)
title = string.join(['gene\tprobesets\tExonRegion']+original_array_names,'\t')+'\n'; adjoutput.write(title)
events_examined= 0; denominator_events=0; fold_dbase=[]; adj_fold_dbase=[]; scores_examined=0
splice_event_list=[]; splice_event_list_mx=[]; splice_event_list_non_mx=[]; event_mx_temp = []; permute_p_values={}; probeset_comp_db={}#use this to exclude duplicate mx events
for geneid in alt_junction_db:
affygene = geneid
for event in alt_junction_db[geneid]:
if array_type == 'AltMouse':
#event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
#critical_exon_db[affygene,tuple(critical_exons)] = [1,'E'+str(e1a),'E'+str(e2b)] --- affygene,tuple(event) == key, 1 indicates both are either up or down together
event_call = event[0][0] + '-' + event[1][0]
exon_set1 = event[0][1]; exon_set2 = event[1][1]
probeset1 = exon_dbase[affygene,exon_set1]
probeset2 = exon_dbase[affygene,exon_set2]
critical_exon_list = critical_exon_db[affygene,tuple(event)]
if array_type == 'junction' or array_type == 'RNASeq':
event_call = 'ei-ex' ### Below objects from JunctionArrayEnsemblRules - class JunctionInformation
probeset1 = event.InclusionProbeset(); probeset2 = event.ExclusionProbeset()
exon_set1 = event.InclusionJunction(); exon_set2 = event.ExclusionJunction()
try: novel_event = event.NovelEvent()
except Exception: novel_event = 'known'
critical_exon_list = [1,event.CriticalExonSets()]
key,jd = formatJunctionData([probeset1,probeset2],geneid,critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try: jd.setSymbol(annotate_db[geneid].Symbol())
except Exception: null=[]
#if '|' in probeset1: print probeset1, key,jd.InclusionDisplay();kill
probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
dI_scores=[]
if probeset1 in nonlog_NI_db and probeset2 in nonlog_NI_db and probeset1 in array_raw_group_values and probeset2 in array_raw_group_values:
events_examined+=1
if analysis_method == 'ASPIRE':
index1=0; NI_list1=[]; NI_list2=[] ### Add the group_name to each adj fold value
for NI in nonlog_NI_db[probeset1]: NI_list1.append(NI)
for NI in nonlog_NI_db[probeset2]: NI_list2.append(NI)
for NI1_g1 in NI_list1:
NI2_g1 = NI_list2[index1]; index2=0
for NI1_g2 in NI_list1:
try: NI2_g2 = NI_list2[index2]
except Exception: print index1, index2, NI_list1, NI_list2;kill
if index1 != index2:
b1 = NI1_g1; e1 = NI1_g2
b2 = NI2_g1; e2 = NI2_g2
try:
dI = statistics.aspire_stringent(b1,e1,b2,e2); Rin = b1/e1; Rex = b2/e2
if (Rin>1 and Rex<1) or (Rin<1 and Rex>1):
if dI<0: i1,i2 = index2,index1 ### all scores should indicate upregulation
else: i1,i2=index1,index2
dI_scores.append((abs(dI),i1,i2))
except Exception:
#if array_type != 'RNASeq': ### RNASeq has counts of zero and one that can cause the same result between groups and probesets
#print probeset1, probeset2, b1, e1, b2, e2, index1, index2, events_examined;kill
### Exception - Occurs for RNA-Seq but can occur for array data under extremely rare circumstances (Rex=Rin even with different b1,e1 and b2,e2 values)
null=[]
index2+=1
index1+=1
dI_scores.sort()
if analysis_method == 'linearregres':
log_fold,i1,i2 = getAllPossibleLinearRegressionScores(probeset1,probeset2,positions,group_sizes)
dI_scores.append((log_fold,i1,i2))
raw_exp_vals1 = original_array_raw_group_values[probeset1]; raw_exp_vals2 = original_array_raw_group_values[probeset2]
else: raw_exp_vals1 = array_raw_group_values[probeset1]; raw_exp_vals2 = array_raw_group_values[probeset2]
adj_exp_lists1={}; adj_exp_lists2={} ### Store the adjusted expression values for each group
if geneid in avg_const_exp_db:
gi=0; l=0; adj_exp_vals = []; anova_test=[]
for exp_list in raw_exp_vals1:
k=0; anova_group=[]
for exp in exp_list:
adj_exp_val1 = exp-avg_const_exp_db[geneid][l]
try: adj_exp_lists1[gi].append(adj_exp_val1)
except Exception: adj_exp_lists1[gi] = [adj_exp_val1]
adj_exp_val2 = raw_exp_vals2[gi][k]-avg_const_exp_db[geneid][l]
try: adj_exp_lists2[gi].append(adj_exp_val2)
except Exception: adj_exp_lists2[gi] = [adj_exp_val2]
anova_group.append(adj_exp_val2-adj_exp_val1)
if export_NI_values == 'yes':
#if analysis_method == 'ASPIRE':
adj_exp_vals.append(str(adj_exp_val2-adj_exp_val1))
### BELOW CODE PRODUCES THE SAME RESULT!!!!
"""folds1 = statistics.log_fold_conversion_fraction([exp])
folds2 = statistics.log_fold_conversion_fraction([raw_exp_vals2[gi][k]])
lr_score = statistics.convert_to_log_fold(statistics.simpleLinRegress(folds1,folds2))
adj_exp_vals.append(str(lr_score))"""
k+=1; l+=1 ### l indexes samples across all groups, matching the per-sample constitutive averages (parallels k in the multi-group export above)
gi+=1; anova_test.append(anova_group)
if export_NI_values == 'yes':
if export_exon_regions == 'yes':
exon_regions = string.join(critical_exon_list[1],'|')
exon_regions = string.split(exon_regions,'|')
for er in exon_regions:
ev = string.join([geneid+'\t'+probeset1+'-'+probeset2+'\t'+er]+adj_exp_vals,'\t')+'\n'
if len(filtered_probeset_db)>0:
if probeset1 in filtered_probeset_db and probeset2 in filtered_probeset_db:
adjoutput.write(ev) ### This is used when we want to restrict to only probesets known to already be changed
else: adjoutput.write(ev)
try: anovaNIp = statistics.OneWayANOVA(anova_test) ### This stays an ANOVA independent of the algorithm chosen since the number of groups is > 2
except Exception: anovaNIp='NA'
if len(dI_scores)>0 and geneid in avg_const_exp_db:
dI,index1,index2 = dI_scores[-1]; count=0
probesets = [probeset1, probeset2]; index=0
key,jd = formatJunctionData([probeset1,probeset2],affygene,critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try: jd.setSymbol(annotate_db[affygene].Symbol())
except Exception:null=[]
probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
if max_replicates >2 or equal_replicates==2: permute_p_values[(probeset1,probeset2)] = [anovaNIp, 'NA', 'NA', 'NA']
index=0
for probeset in probesets:
if analysis_method == 'linearregres':
data_list1 = original_array_raw_group_values[probeset][index1]; data_list2 = original_array_raw_group_values[probeset][index2]
else: data_list1 = array_raw_group_values[probeset][index1]; data_list2 = array_raw_group_values[probeset][index2]
baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
group_name1 = array_group_list[index1]; group_name2 = array_group_list[index2]
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
except Exception: ttest_exp_p = 'NA'
if ttest_exp_p==1: ttest_exp_p = 'NA'
if index == 0:
try: adj_fold = statistics.avg(adj_exp_lists1[index2]) - statistics.avg(adj_exp_lists1[index1])
except Exception:
print raw_exp_vals1,raw_exp_vals2, avg_const_exp_db[geneid]
print probeset,probesets,adj_exp_lists1,adj_exp_lists2,index1,index2;kill
ped1 = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
else:
adj_fold = statistics.avg(adj_exp_lists2[index2]) - statistics.avg(adj_exp_lists2[index1])
ped2 = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
index+=1
try:
pp1 = statistics.runComparisonStatistic(adj_exp_lists1[index1], adj_exp_lists1[index2],probability_statistic)
pp2 = statistics.runComparisonStatistic(adj_exp_lists2[index1], adj_exp_lists2[index2],probability_statistic)
except Exception: pp1 = 'NA'; pp2 = 'NA'
if analysis_method == 'ASPIRE' and len(dI_scores)>0:
p1 = JunctionExpressionData(adj_exp_lists1[index1], adj_exp_lists1[index2], pp1, ped1)
p2 = JunctionExpressionData(adj_exp_lists2[index1], adj_exp_lists2[index2], pp2, ped2)
### The ANOVA p-value replaces the below p-value
"""try: baseline_scores, exp_scores, pairwiseNIp = calculateAllASPIREScores(p1,p2)
except Exception: baseline_scores = [0]; exp_scores=[dI]; pairwiseNIp = 0 """
#if pairwiseNIp == 'NA': pairwiseNIp = 0 ### probably comment out
if len(dI_scores)>0:
scores_examined+=1
if probeset in midas_db:
try: midas_p = float(midas_db[probeset])
except ValueError: midas_p = 'NA'
else: midas_p = 'NA'
if dI>alt_exon_logfold_cutoff and (anovaNIp < p_threshold or perform_permutation_analysis == 'yes' or anovaNIp == 'NA' or anovaNIp == 1): #and abs_log_ratio>1 and ttest_exp_p<0.05: ###and ge_threshold_count==2
#print [dI, probeset1,probeset2, anovaNIp, alt_exon_logfold_cutoff];kill
ejd = ExonJunctionData(dI,probeset1,probeset2,pp1,pp2,'upregulated',event_call,critical_exon_list,affygene,ped1,ped2)
ejd.setConstitutiveFold(ge_fold); ejd.setConstitutiveExpression(constit_exp1)
if array_type == 'RNASeq':
ejd.setNovelEvent(novel_event)
splice_event_list.append((dI,ejd))
else: excluded_probeset_db[affygene+':'+critical_exon_list[1][0]] = probeset1, affygene, dI, 'NA', anovaNIp
statistics.adjustPermuteStats(permute_p_values)
ex_db = splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db
original_fold_dbase = fold_dbase; original_avg_const_exp_db=[]; nonlog_NI_db = []; fold_dbase=[]
summary_data_db['denominator_exp_events']=events_examined
del avg_const_exp_db; del gene_db; del constitutive_gene_db; gene_expression_diff_db={}
if export_NI_values == 'yes': adjoutput.close()
print len(splice_event_list), 'alternative exons out of %s exon events examined' % events_examined
fold_dbase=[]; original_fold_dbase=[]; exon_db=[]; constitutive_gene_db=[]; addback_genedb=[]
gene_db=[]; missing_genedb=[]
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return conditions,adj_fold_dbase,nonlog_NI_db,dataset_name,gene_expression_diff_db,midas_db,ex_db,si_db
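### Simple container for the per-probeset expression statistics reported for each regulated
### exon/junction: baseline and experimental expression, raw and constitutive-adjusted folds,
### the expression t-test p-value and the comparison annotation.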
class ProbesetExpressionData:
def __init__(self, baseline_exp, experimental_exp, fold_change, adj_fold, ttest_raw_exp, annotation):
self.baseline_exp = baseline_exp; self.experimental_exp = experimental_exp
self.fold_change = fold_change; self.adj_fold = adj_fold
self.ttest_raw_exp = ttest_raw_exp; self.annotation = annotation
def BaselineExp(self): return str(self.baseline_exp)
def ExperimentalExp(self): return str(self.experimental_exp)
def FoldChange(self): return str(self.fold_change)
def AdjFold(self): return str(self.adj_fold)
def ExpPval(self): return str(self.ttest_raw_exp)
def Annotation(self): return self.annotation
def __repr__(self): return self.BaselineExp()+'|'+self.FoldChange()
def agglomerateInclusionProbesets(array_raw_group_values,exon_inclusion_db):
###Combine expression profiles for inclusion probesets that correspond to the same splice event
for excl_probeset in exon_inclusion_db:
inclusion_event_profiles = []
if len(exon_inclusion_db[excl_probeset])>1:
for incl_probeset in exon_inclusion_db[excl_probeset]:
if incl_probeset in array_raw_group_values and excl_probeset in array_raw_group_values:
array_group_values = array_raw_group_values[incl_probeset]
inclusion_event_profiles.append(array_group_values)
#del array_raw_group_values[incl_probeset] ###Remove un-agglomerated original entry
if len(inclusion_event_profiles) > 0: ###Thus, some probesets for this splice event are in the input file
combined_event_profile = combine_profiles(inclusion_event_profiles)
###Combine inclusion probesets into a single ID (identical manner to that in ExonAnalyze_module.identifyPutativeSpliceEvents)
incl_probesets = exon_inclusion_db[excl_probeset]
incl_probesets_str = string.join(incl_probesets,'|')
array_raw_group_values[incl_probesets_str] = combined_event_profile
return array_raw_group_values
def combine_profiles(profile_list):
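### Average the expression profiles element-wise: for each group key, take the mean of the values
### contributed by every profile in profile_list (used to agglomerate inclusion probesets above).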
profile_group_sizes={}
for db in profile_list:
for key in db: profile_group_sizes[key] = len(db[key])
break
new_profile_db={}
for key in profile_group_sizes:
x = profile_group_sizes[key] ###number of elements in list for key
new_val_list=[]; i = 0
while i<x:
temp_val_list=[]
for db in profile_list:
if key in db: val = db[key][i]; temp_val_list.append(val)
i+=1; val_avg = statistics.avg(temp_val_list); new_val_list.append(val_avg)
new_profile_db[key] = new_val_list
return new_profile_db
def constitutive_exp_normalization(fold_db,stats_dbase,exon_db,constitutive_probeset_db):
"""For every expression value, normalize to the expression of the constitutive gene features for that condition,
then store those ratios (probeset_exp/avg_constitutive_exp) and regenerate expression values relative only to the
baseline avg_constitutive_exp, for all conditions, to normalize out gene expression changes"""
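### Illustrative example (hypothetical values, assuming log2-scale data): for a probeset with
### baseline_exp = 8.0 and probe_fold_val = 1.0 in some condition, exp_val = 9.0. If the gene's
### constitutive average is 8.5 in that condition and 8.2 at baseline, the constitutive fold change
### is 0.3, so the GE-adjusted fold for the probeset becomes roughly 1.0 - 0.3 = 0.7, while the
### stored splicing ratio is the non-log equivalent of 9.0 relative to 8.5.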
#print "\nParameters:"
#print "Factor_out_expression_changes:",factor_out_expression_changes
#print "Only_include_constitutive_containing_genes:",only_include_constitutive_containing_genes
#print "\nAdjusting probeset average intensity values to factor out condition specific expression changes for optimal splicing descrimination"
gene_db = {}; constitutive_gene_db = {}
### organize everything by gene
for probeset in fold_db: conditions = len(fold_db[probeset]); break
remove_diff_exp_genes = remove_transcriptional_regulated_genes
if conditions > 2: remove_diff_exp_genes = 'no'
for probeset in exon_db:
affygene = exon_db[probeset].GeneID() #exon_db[probeset] = affygene,exons,ensembl,block_exon_ids,block_structure,comparison_info
if probeset in fold_db:
try: gene_db[affygene].append(probeset)
except KeyError: gene_db[affygene] = [probeset]
if probeset in constitutive_probeset_db and (only_include_constitutive_containing_genes == 'yes' or factor_out_expression_changes == 'no'):
#the second conditional is used to exclude constitutive data if we wish to use all probesets for
#background normalization rather than just the designated 'gene' probesets.
if probeset in stats_dbase:
try: constitutive_gene_db[affygene].append(probeset)
except KeyError: constitutive_gene_db[affygene] = [probeset]
if len(constitutive_gene_db)>0:
###This is blank when there are no constitutive probesets and the above condition is implemented
gene_db2 = constitutive_gene_db
else: gene_db2 = gene_db
avg_const_exp_db = {}
for affygene in gene_db2:
probeset_list = gene_db2[affygene]
x = 0
while x < conditions:
### average all exp values for constitutive probesets for each condition
exp_list=[]
for probeset in probeset_list:
probe_fold_val = fold_db[probeset][x]
baseline_exp = stats_dbase[probeset][0]
exp_val = probe_fold_val + baseline_exp
exp_list.append(exp_val)
avg_const_exp = statistics.avg(exp_list)
try: avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError: avg_const_exp_db[affygene] = [avg_const_exp]
x += 1
adj_fold_dbase={}; nonlog_NI_db={}; constitutive_fold_change={}
for affygene in avg_const_exp_db: ###If we only wish to include proper constitutive probes, this will ensure we only examine those genes and probesets that are constitutive
probeset_list = gene_db[affygene]
x = 0
while x < conditions:
exp_list=[]
for probeset in probeset_list:
expr_to_subtract = avg_const_exp_db[affygene][x]
baseline_const_exp = avg_const_exp_db[affygene][0]
probe_fold_val = fold_db[probeset][x]
baseline_exp = stats_dbase[probeset][0]
exp_val = probe_fold_val + baseline_exp
exp_val_non_log = statistics.log_fold_conversion_fraction(exp_val)
expr_to_subtract_non_log = statistics.log_fold_conversion_fraction(expr_to_subtract)
baseline_const_exp_non_log = statistics.log_fold_conversion_fraction(baseline_const_exp)
if factor_out_expression_changes == 'yes':
exp_splice_valff = exp_val_non_log/expr_to_subtract_non_log
else: #if no, then we just normalize to the baseline constitutive expression in order to keep gene expression effects (useful if you don't trust constitutive feature expression levels)
exp_splice_valff = exp_val_non_log/baseline_const_exp_non_log
constitutive_fold_diff = expr_to_subtract_non_log/baseline_const_exp_non_log
###To calculate the adjusted expression, we need the fold change in the constitutive avg (expr_to_subtract/baseline_const_exp)
###and then divide the experimental expression by this fold change.
ge_adj_exp_non_log = exp_val_non_log/constitutive_fold_diff #gives a GE adjusted expression
try: ge_adj_exp = math.log(ge_adj_exp_non_log,2)
except ValueError: print probeset,ge_adj_exp_non_log,constitutive_fold_diff,exp_val_non_log,exp_val,baseline_exp, probe_fold_val, dog
adj_probe_fold_val = ge_adj_exp - baseline_exp
### Here we normalize probeset expression to avg-constitutive expression by dividing the probe signal by the avg constitutive probe signal (should be < 1)
### referred to as steady-state normalization
if array_type != 'AltMouse' or (probeset not in constitutive_probeset_db):
"""Can't use constitutive gene features since these have no variance for pearson analysis
Python will approximate numbers to a small decimal point range. If the first fold value is
zero, often, zero will be close to but not exactly zero. Correct below """
try:
adj_fold_dbase[probeset].append(adj_probe_fold_val)
except KeyError:
if abs(adj_probe_fold_val - 0) < 0.0000001: #make zero == exactly to zero
adj_probe_fold_val = 0
adj_fold_dbase[probeset] = [adj_probe_fold_val]
try: nonlog_NI_db[probeset].append(exp_splice_valff) ###ratio of junction exp relative to gene expression at that time-point
except KeyError: nonlog_NI_db[probeset] = [exp_splice_valff]
n = 0
#if expr_to_subtract_non_log != baseline_const_exp_non_log: ###otherwise this is the first value in the expression array
if x!=0: ###previous expression can produce errors when multiple group averages have identical values
fold_change = expr_to_subtract_non_log/baseline_const_exp_non_log
fold_change_log = math.log(fold_change,2)
constitutive_fold_change[affygene] = fold_change_log
### If we want to remove any genes from the analysis with large transcriptional changes
### that may lead to false positive splicing calls (different probeset kinetics)
if remove_diff_exp_genes == 'yes':
if abs(fold_change_log) > log_fold_cutoff:
del constitutive_fold_change[affygene]
try: del adj_fold_dbase[probeset]
except KeyError: n = 1
try: del nonlog_NI_db[probeset]
except KeyError: n = 1
"""elif expr_to_subtract_non_log == baseline_const_exp_non_log: ###This doesn't make sense, since n can't equal 1 if the conditional is false (check this code again later 11/23/07)
if n == 1:
del adj_fold_dbase[probeset]
del nonlog_NI_db[probeset]"""
x += 1
print "Intensity normalization complete..."
if factor_out_expression_changes == 'no':
adj_fold_dbase = fold_db #don't change expression values
print len(constitutive_fold_change), "genes undergoing analysis for alternative splicing/transcription"
summary_data_db['denominator_exp_genes']=len(constitutive_fold_change)
"""
mir_gene_count = 0
for gene in constitutive_fold_change:
if gene in gene_microRNA_denom: mir_gene_count+=1
print mir_gene_count, "Genes with predicted microRNA binding sites undergoing analysis for alternative splicing/transcription"
"""
global gene_analyzed; gene_analyzed = len(constitutive_gene_db)
return adj_fold_dbase, nonlog_NI_db, conditions, gene_db, constitutive_gene_db,constitutive_fold_change, avg_const_exp_db
class TranscriptionData:
def __init__(self, constitutive_fold, rna_processing_annotation):
self._constitutive_fold = constitutive_fold; self._rna_processing_annotation = rna_processing_annotation
def ConstitutiveFold(self): return self._constitutive_fold
def ConstitutiveFoldStr(self): return str(self._constitutive_fold)
def RNAProcessing(self): return self._rna_processing_annotation
def __repr__(self): return self.ConstitutiveFoldStr()+'|'+self.RNAProcessing()
def constitutive_expression_changes(constitutive_fold_change,annotate_db):
###Add in constitutive fold change filter to assess gene expression for ASPIRE
gene_expression_diff_db = {}
for affygene in constitutive_fold_change:
constitutive_fold = constitutive_fold_change[affygene]; rna_processing_annotation=''
if affygene in annotate_db:
if len(annotate_db[affygene].RNAProcessing()) > 4: rna_processing_annotation = annotate_db[affygene].RNAProcessing()
###Add in evaluation of RNA-processing/binding factor
td = TranscriptionData(constitutive_fold,rna_processing_annotation)
gene_expression_diff_db[affygene] = td
return gene_expression_diff_db
def constitutive_exp_normalization_raw(gene_db,constitutive_gene_db,array_raw_group_values,exon_db,y,avg_const_exp_db):
"""normalize expression for raw expression data (only for non-baseline data)"""
#avg_true_const_exp_db[affygene] = [avg_const_exp]
temp_avg_const_exp_db={}
for probeset in array_raw_group_values:
conditions = len(array_raw_group_values[probeset][y]); break #number of raw expression values to normalize
for affygene in gene_db:
###This is blank when there are no constitutive probesets or the above condition is implemented
if affygene in constitutive_gene_db:
probeset_list = constitutive_gene_db[affygene]
z = 1
else: ###so we can analyze splicing independent of gene expression even if no 'gene' feature is present
probeset_list = gene_db[affygene]
z = 0
x = 0
while x < conditions:
### average all exp values for constitutive probesets for each condition
exp_list=[]
for probeset in probeset_list:
try: exp_val = array_raw_group_values[probeset][y][x] ### try statement is used for constitutive probes that were deleted due to filtering in performExpressionAnalysis
except KeyError: continue
exp_list.append(exp_val)
try: avg_const_exp = statistics.avg(exp_list)
except Exception: avg_const_exp = 'null'
if only_include_constitutive_containing_genes == 'yes' and avg_const_exp != 'null':
if z == 1:
try: avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError: avg_const_exp_db[affygene] = [avg_const_exp]
try: temp_avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError: temp_avg_const_exp_db[affygene] = [avg_const_exp]
elif avg_const_exp != 'null': ###***
try: avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError: avg_const_exp_db[affygene] = [avg_const_exp]
try: temp_avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError: temp_avg_const_exp_db[affygene] = [avg_const_exp]
x += 1
if analysis_method == 'ANOVA':
global normalized_raw_exp_ratios; normalized_raw_exp_ratios = {}
for affygene in gene_db:
probeset_list = gene_db[affygene]
for probeset in probeset_list:
x = 0 ### reset the condition index for each probeset (otherwise the loop below never executes)
while x < group_size:
new_ratios = [] ### Calculate expression ratios relative to constitutive expression
exp_val = array_raw_group_values[probeset][y][x]
const_exp_val = temp_avg_const_exp_db[affygene][x]
###Since the above dictionary is agglomerating all constitutive expression values for permutation,
###we need an unbiased way to grab just those relevant const. exp. vals. (hence the temp dictionary)
#non_log_exp_val = statistics.log_fold_conversion_fraction(exp_val)
#non_log_const_exp_val = statistics.log_fold_conversion_fraction(const_exp_val)
#non_log_exp_ratio = non_log_exp_val/non_log_const_exp_val
log_exp_ratio = exp_val - const_exp_val
try: normalized_raw_exp_ratios[probeset].append(log_exp_ratio)
except KeyError: normalized_raw_exp_ratios[probeset] = [log_exp_ratio]
x+=1
return avg_const_exp_db
######### Z Score Analyses #######
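### The classes and functions below score over-representation of protein domains/features and
### predicted microRNA binding sites among alternatively regulated genes, using a z-score against
### the measured gene set with optional permutation-based and Fisher's exact p-values.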
class ZScoreData:
def __init__(self,element,changed,measured,zscore,null_z,gene_symbols):
self._element = element; self._changed = changed; self._measured = measured
self._zscore = zscore; self._null_z = null_z; self._gene_symbols = gene_symbols
def ElementID(self): return self._element
def Changed(self): return str(self._changed)
def Measured(self): return str(self._measured)
def AssociatedWithElement(self): return str(self._gene_symbols)
def ZScore(self): return str(self._zscore)
def SetP(self,p): self._permute_p = p
def PermuteP(self): return str(self._permute_p)
def SetAdjP(self,adjp): self._adj_p = adjp
def AdjP(self): return str(self._adj_p)
def PercentChanged(self):
try: pc = float(self.Changed())/float(self.Measured())*100
except Exception: pc = 0
return str(pc)
def NullZ(self): return self._null_z
def Report(self):
output = self.ElementID()
return output
def __repr__(self): return self.Report()
class FDRStats(ZScoreData):
def __init__(self,p): self._permute_p = p
def AdjP(self): return str(self._adj_p)
def countGenesForElement(permute_input_list,probeset_to_gene,probeset_element_db):
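### For one (permuted) list of probesets, map each domain/miRBS element to the number of unique
### genes it hits; used to build the null distribution for the element permutation analysis.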
element_gene_db={}
for probeset in permute_input_list:
try:
element_list = probeset_element_db[probeset]
gene = probeset_to_gene[probeset]
for element in element_list:
try: element_gene_db[element].append(gene)
except KeyError: element_gene_db[element] = [gene]
except KeyError: null=[]
### Count the number of unique genes per element
for element in element_gene_db:
t = {}
for i in element_gene_db[element]: t[i]=[]
element_gene_db[element] = len(t)
return element_gene_db
def formatGeneSymbolHits(geneid_list):
symbol_list=[]
for geneid in geneid_list:
symbol = ''
if geneid in annotate_db: symbol = annotate_db[geneid].Symbol()
if len(symbol)<1: symbol = geneid
symbol_list.append(symbol)
symbol_str = string.join(symbol_list,', ')
return symbol_str
def zscore(r,n,N,R):
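### Normal approximation to the hypergeometric distribution (with finite-population correction):
### r = changed genes linked to the element, n = measured genes linked to the element,
### R = all changed genes, N = all measured genes.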
z = (r - n*(R/N))/math.sqrt(n*(R/N)*(1-(R/N))*(1-((n-1)/(N-1)))) #z = statistics.zscore(r,n,N,R)
return z
def calculateZScores(hit_count_db,denom_count_db,total_gene_denom_count,total_gene_hit_count,element_type):
N = float(total_gene_denom_count) ###Genes examined
R = float(total_gene_hit_count) ###AS genes
for element in denom_count_db:
element_denom_gene_count = denom_count_db[element]
n = float(element_denom_gene_count) ###all genes associated with element
if element in hit_count_db:
element_hit_gene_count = len(hit_count_db[element])
gene_symbols = formatGeneSymbolHits(hit_count_db[element])
r = float(element_hit_gene_count) ###regulated genes associated with element
else: r = 0; gene_symbols = ''
try: z = zscore(r,n,N,R)
except Exception: z = 0; #print 'error:',element,r,n,N,R; kill
try: null_z = zscore(0,n,N,R)
except Exception: null_z = 0; #print 'error:',element,r,n,N,R; kill
zsd = ZScoreData(element,r,n,z,null_z,gene_symbols)
if element_type == 'domain': original_domain_z_score_data[element] = zsd
elif element_type == 'microRNA': original_microRNA_z_score_data[element] = zsd
permuted_z_scores[element] = [z]
if perform_element_permutation_analysis == 'no':
### The below is an alternative to the permutation statistic that is more efficient
p = FishersExactTest(r,n,R,N)
zsd.SetP(p)
return N,R
######### Begin Permutation Analysis #######
def calculatePermuteZScores(permute_element_inputs,element_denominator_gene_count,N,R):
###Make this code as efficient as possible
for element_input_gene_count in permute_element_inputs:
for element in element_input_gene_count:
r = element_input_gene_count[element]
n = element_denominator_gene_count[element]
try: z = statistics.zscore(r,n,N,R)
except Exception: z = 0
permuted_z_scores[element].append(abs(z))
#if element == '0005488':
#a.append(r)
def calculatePermuteStats(original_element_z_score_data):
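### Derive an empirical p-value for each element by comparing the observed |z| (the first entry
### in permuted_z_scores) against the distribution of |z| scores from the permuted inputs.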
for element in original_element_z_score_data:
zsd = original_element_z_score_data[element]
z = abs(permuted_z_scores[element][0])
permute_scores = permuted_z_scores[element][1:] ###Exclude the true value
nullz = zsd.NullZ()
if abs(nullz) == z: ###Only add the nullz values if they can count towards the p-value (if equal to the original z)
null_z_to_add = permutations - len(permute_scores)
permute_scores+=[abs(nullz)]*null_z_to_add ###Add null_z's in proportion to the number of permutations in which no genes were found for that element
if len(permute_scores)>0:
p = permute_p(permute_scores,z)
else: p = 1
#if p>1: p=1
zsd.SetP(p)
def FishersExactTest(r,n,R,N):
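### 2x2 contingency table: [changed & in element, measured-but-not-changed & in element],
### [changed & not in element, not-changed & not in element].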
a = r; b = n-r; c=R-r; d=N-R-b
table = [[int(a),int(b)], [int(c),int(d)]]
try: ### SciPy version - cuts the runtime down by roughly a third
oddsratio, pvalue = stats.fisher_exact(table)
return pvalue
except Exception:
ft = fishers_exact_test.FishersExactTest(table)
return ft.two_tail_p()
def adjustPermuteStats(original_element_z_score_data):
#1. Sort the original input p-value vector ascending. Call this spval. Keep the original indices so you can sort back.
#2. Define a new vector called tmp. tmp= spval. tmp will contain the BH p values.
#3. m is the length of tmp (also spval)
#4. i=m-1
#5 tmp[ i ]=min(tmp[i+1], min((m/i)*spval[ i ],1)) - second to last, last, last/second to last
#6. i=m-2
#7 tmp[ i ]=min(tmp[i+1], min((m/i)*spval[ i ],1))
#8 repeat step 7 for m-3, m-4,... until i=1
#9. sort tmp back to the original order of the input p values.
spval=[]
for element in original_element_z_score_data:
zsd = original_element_z_score_data[element]
p = float(zsd.PermuteP())
spval.append([p,element])
spval.sort(); tmp = spval; m = len(spval); i=m-2; x=0 ###Step 1-4
while i > -1:
tmp[i]=min(tmp[i+1][0], min((float(m)/(i+1))*spval[i][0],1)),tmp[i][1]; i -= 1
for (adjp,element) in tmp:
zsd = original_element_z_score_data[element]
zsd.SetAdjP(adjp)
spval=[]
def permute_p(null_list,true_value):
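### Empirical permutation p-value: the fraction of permuted scores at least as extreme as the observed score.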
y = 0; z = 0; x = permutations
for value in null_list:
if value >= true_value: y += 1
#if true_value > 8: global a; a = null_list; print true_value,y,x;kill
return (float(y)/float(x)) ###Multiply probability x2?
######### End Permutation Analysis #######
def exportZScoreData(original_element_z_score_data,element_type):
element_output = root_dir+'AltResults/AlternativeOutput/' + dataset_name + analysis_method+'-'+element_type+'-zscores.txt'
data = export.ExportFile(element_output)
headers = [element_type+'-Name','Number Changed','Number Measured','Percent Changed', 'Zscore','PermuteP','AdjP','Changed GeneSymbols']
headers = string.join(headers,'\t')+'\n'
data.write(headers); sort_results=[]
#print "Results for",len(original_element_z_score_data),"elements exported to",element_output
for element in original_element_z_score_data:
zsd=original_element_z_score_data[element]
try: results = [zsd.Changed(), zsd.Measured(), zsd.PercentChanged(), zsd.ZScore(), zsd.PermuteP(), zsd.AdjP(), zsd.AssociatedWithElement()]
except AttributeError: print element,len(permuted_z_scores[element]);kill
results = [element] + results
results = string.join(results,'\t') + '\n'
sort_results.append([float(zsd.PermuteP()),-1/float(zsd.Measured()),results])
sort_results.sort()
for values in sort_results:
results = values[2]
data.write(results)
data.close()
def getInputsForPermutationAnalysis(exon_db):
### Filter fold_dbase, which is the proper denominator
probeset_to_gene = {}; denominator_list = []
for probeset in exon_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else: proceed = 'yes'
if proceed == 'yes':
gene = exon_db[probeset].GeneID()
probeset_to_gene[probeset] = gene
denominator_list.append(probeset)
return probeset_to_gene,denominator_list
def getJunctionSplicingAnnotations(regulated_exon_junction_db):
filter_status = 'yes'
########### Import critical exon annotations for junctions, built through the exon array analysis pipeline - link back to probesets
filtered_arrayids={}; critical_probeset_annotation_db={}
if array_type == 'RNASeq' and explicit_data_type == 'null':
critical_exon_annotation_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_exons.txt'
elif array_type == 'RNASeq' and explicit_data_type != 'null':
critical_exon_annotation_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_junctions.txt'
else:
critical_exon_annotation_file = "AltDatabase/"+species+"/"+array_type+"/"+species+"_Ensembl_"+array_type+"_probesets.txt"
critical_exon_annotation_file = getFilteredFilename(critical_exon_annotation_file)
for uid in regulated_exon_junction_db:
gene = regulated_exon_junction_db[uid].GeneID()
critical_exons = regulated_exon_junction_db[uid].CriticalExons()
"""### It appears that each critical exon for junction arrays can be a concatenation of multiple exons, making this unnecessary
if len(critical_exons)>1 and array_type == 'junction':
critical_exons_joined = string.join(critical_exons,'|')
filtered_arrayids[gene+':'+critical_exon].append(uid)"""
for critical_exon in critical_exons:
try:
try: filtered_arrayids[gene+':'+critical_exon].append(uid)
except TypeError: print gene, critical_exon, uid;kill
except KeyError: filtered_arrayids[gene+':'+critical_exon]=[uid]
critical_exon_annotation_db = importSplicingAnnotationDatabase(critical_exon_annotation_file,'exon-fake',filtered_arrayids,filter_status);null=[] ###The file is in exon centric format, so designate array_type as exon
for key in critical_exon_annotation_db:
ced = critical_exon_annotation_db[key]
for junction_probesets in filtered_arrayids[key]:
try: critical_probeset_annotation_db[junction_probesets].append(ced) ###use for splicing and Exon annotations
except KeyError: critical_probeset_annotation_db[junction_probesets] = [ced]
for junction_probesets in critical_probeset_annotation_db:
if len(critical_probeset_annotation_db[junction_probesets])>1: ###Thus multiple exons associated, must combine annotations
exon_ids=[]; external_exonids=[]; exon_regions=[]; splicing_events=[]
for ed in critical_probeset_annotation_db[junction_probesets]:
ensembl_gene_id = ed.GeneID(); transcript_cluster_id = ed.ExternalGeneID()
exon_ids.append(ed.ExonID()); external_exonids.append(ed.ExternalExonIDs()); exon_regions.append(ed.ExonRegionID()); se = string.split(ed.SplicingEvent(),'|')
for i in se: splicing_events.append(i)
splicing_events = unique.unique(splicing_events) ###remove duplicate entries
exon_id = string.join(exon_ids,'|'); external_exonid = string.join(external_exonids,'|'); exon_region = string.join(exon_regions,'|'); splicing_event = string.join(splicing_events,'|')
probe_data = AffyExonSTData(ensembl_gene_id, exon_id, external_exonid, '', exon_region, splicing_event, '','')
if array_type != 'RNASeq': probe_data.setTranscriptCluster(transcript_cluster_id)
critical_probeset_annotation_db[junction_probesets] = probe_data
else:
critical_probeset_annotation_db[junction_probesets] = critical_probeset_annotation_db[junction_probesets][0]
return critical_probeset_annotation_db
def determineExternalType(external_probeset_db):
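### Distinguish JETTA-style input (a transcript-cluster 'TC' header with PS/MADS columns) from a
### generic probeset score/p-value file, and return a uniform {probeset: (score, p-value)} database.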
external_probeset_db2={}
if 'TC' in external_probeset_db:
temp_index={}; i=0; type = 'JETTA'
for name in external_probeset_db['TC'][0]: temp_index[name]=i; i+=1
if 'PS:norm_expr_fold_change' in temp_index: NI_fold_index = temp_index['PS:norm_expr_fold_change']
if 'MADS:pv_1over2' in temp_index: MADS_p1_index = temp_index['MADS:pv_1over2']
if 'MADS:pv_2over1' in temp_index: MADS_p2_index = temp_index['MADS:pv_2over1']
if 'TC:expr_fold_change' in temp_index: TC_fold_index = temp_index['TC:expr_fold_change'] ### gene-level fold column (not used below)
if 'PsId' in temp_index: ps_index = temp_index['PsId']
for tc in external_probeset_db:
for list in external_probeset_db[tc]:
try: NI_fold = float(list[NI_fold_index])
except Exception: NI_fold = 1
try: MADSp1 = float(list[MADS_p1_index])
except Exception: MADSp1 = 1
try: MADSp2 = float(list[MADS_p2_index])
except Exception: MADSp2 = 1
if MADSp1<MADSp2: pval = MADSp1
else: pval = MADSp2
probeset = list[ps_index]
external_probeset_db2[probeset] = NI_fold,pval
else:
type = 'generic'
a = []; b = []
for id in external_probeset_db:
#print external_probeset_db[id]
try: a.append(abs(float(external_probeset_db[id][0][0])))
except Exception: null=[]
try: b.append(abs(float(external_probeset_db[id][0][1])))
except Exception: null=[]
a.sort(); b.sort(); pval_index = None; score_index = None
if len(a)>0:
if max(a) > 1: score_index = 0
else: pval_index = 0
if len(b)>0:
if max(b) > 1: score_index = 1
else: pval_index = 1
for id in external_probeset_db:
if score_index != None: score = external_probeset_db[id][0][score_index]
else: score = 1
if pval_index != None: pval = external_probeset_db[id][0][pval_index]
else: pval = 1
external_probeset_db2[id] = score,pval
return external_probeset_db2, type
def importExternalProbesetData(dataset_dir):
excluded_probeset_db={}; splice_event_list=[]; p_value_call={}; permute_p_values={}; gene_expression_diff_db={}
analyzed_probeset_db = {}
external_probeset_db = importExternalDBList(dataset_dir)
external_probeset_db, ext_type = determineExternalType(external_probeset_db)
for probeset in exon_db: analyzed_probeset_db[probeset] = []
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db)>0:
temp_db={}
for probeset in analyzed_probeset_db: temp_db[probeset]=[]
for probeset in temp_db:
try: filtered_probeset_db[probeset]
except KeyError: del analyzed_probeset_db[probeset]
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing annotation)
if filter_for_AS == 'yes':
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try: del analyzed_probeset_db[probeset]
except KeyError: null=[]
for probeset in analyzed_probeset_db:
ed = exon_db[probeset]; geneid = ed.GeneID()
td = TranscriptionData('',''); gene_expression_diff_db[geneid] = td
if probeset in external_probeset_db:
exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
splicing_index,normIntensityP = external_probeset_db[probeset]
group1_ratios=[]; group2_ratios=[]; exp_log_ratio=''; ttest_exp_p=''; opposite_SI_log_mean='' ### keep normIntensityP imported above rather than resetting it
sid = ExonData(splicing_index,probeset,critical_exon_list,geneid,group1_ratios,group2_ratios,normIntensityP,opposite_SI_log_mean)
splice_event_list.append((splicing_index,sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(0,geneid,'NA')
excluded_probeset_db[probeset] = eed
print len(splice_event_list), 'pre-filtered external results imported...\n'
return splice_event_list, p_value_call, permute_p_values, excluded_probeset_db, gene_expression_diff_db
def splicingAnalysisAlgorithms(nonlog_NI_db,fold_dbase,dataset_name,gene_expression_diff_db,exon_db,ex_db,si_db,dataset_dir):
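### Run the selected splicing algorithm (or import external results), then perform the domain/miRBS
### over-representation and permutation analyses and export the exon/junction-level result files.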
protein_exon_feature_db={}; global regulated_exon_junction_db; global critical_exon_annotation_db; global probeset_comp_db; probeset_comp_db={}
if original_conditions == 2: print "Beginning to run", analysis_method, "algorithm on",dataset_name[0:-1],"data"
if run_from_scratch == 'Annotate External Results':
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db, gene_expression_diff_db = importExternalProbesetData(dataset_dir)
elif analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
original_exon_db = exon_db
if original_conditions > 2:
splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db = ex_db
splice_event_list, p_value_call, permute_p_values, exon_db, regulated_exon_junction_db = furtherProcessJunctionScores(splice_event_list, probeset_comp_db, permute_p_values)
else:
splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db = analyzeJunctionSplicing(nonlog_NI_db)
splice_event_list, p_value_call, permute_p_values, exon_db, regulated_exon_junction_db = furtherProcessJunctionScores(splice_event_list, probeset_comp_db, permute_p_values)
elif analysis_method == 'splicing-index':
regulated_exon_junction_db = {}
if original_conditions > 2:
excluded_probeset_db = ex_db; splice_event_list = si_db;
clearObjectsFromMemory(ex_db); clearObjectsFromMemory(si_db)
ex_db=[]; si_db=[]; permute_p_values={}; p_value_call=''
else: splice_event_list, p_value_call, permute_p_values, excluded_probeset_db = analyzeSplicingIndex(fold_dbase)
elif analysis_method == 'FIRMA':
regulated_exon_junction_db = {}
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db = FIRMAanalysis(fold_dbase)
global permuted_z_scores; permuted_z_scores={}; global original_domain_z_score_data; original_domain_z_score_data={}
global original_microRNA_z_score_data; original_microRNA_z_score_data={}
nonlog_NI_db=[] ### Clear memory of this large dictionary
try: clearObjectsFromMemory(original_avg_const_exp_db); clearObjectsFromMemory(array_raw_group_values)
except Exception: null=[]
try: clearObjectsFromMemory(avg_const_exp_db)
except Exception: null=[]
try: clearObjectsFromMemory(alt_junction_db)
except Exception: null=[]
try: clearObjectsFromMemory(fold_dbase); fold_dbase=[]
except Exception: null=[]
microRNA_full_exon_db,microRNA_count_db,gene_microRNA_denom = ExonAnalyze_module.importmicroRNADataExon(species,array_type,exon_db,microRNA_prediction_method,explicit_data_type,root_dir)
#print "MicroRNA data imported"
if use_direct_domain_alignments_only == 'yes':
protein_ft_db_len,domain_associated_genes = importProbesetAligningDomains(exon_db,'gene')
else: protein_ft_db_len,domain_associated_genes = importProbesetProteinCompDomains(exon_db,'gene','exoncomp')
if perform_element_permutation_analysis == 'yes':
probeset_to_gene,denominator_list = getInputsForPermutationAnalysis(exon_db)
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
exon_gene_array_translation_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_'+array_type+'-exon_probesets.txt'
try: exon_array_translation_db = importGeneric(exon_gene_array_translation_file)
except Exception: exon_array_translation_db={} ### Not present for all species
exon_hits={}; clearObjectsFromMemory(probeset_comp_db); probeset_comp_db=[]
###Run analyses in ExonAnalyze_module to assess functional changes
for (score,ed) in splice_event_list:
geneid = ed.GeneID()
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
pl = string.split(ed.Probeset1(),'|'); probeset1 = pl[0] ### When agglomerated, this is important
uid = (probeset1,ed.Probeset2())
else: uid = ed.Probeset1()
gene_exon = geneid,uid; exon_hits[gene_exon] = ed
#print probeset1,ed.Probeset1(),ed.Probeset2(),gene_exon,ed.CriticalExons()
dataset_name_original = analysis_method+'-'+dataset_name[8:-1]
global functional_attribute_db; global protein_features
### Possibly Block-out code for DomainGraph export
########### Re-import the exon_db for significant entries with full annotations
exon_db={}; filtered_arrayids={}; filter_status='yes' ###Use this as a means to save memory (import multiple times - only storing the relevant type of information each time)
for (score,entry) in splice_event_list:
try: probeset = original_exon_db[entry.Probeset1()].Probeset()
except Exception: probeset = entry.Probeset1()
pl = string.split(probeset,'|'); probeset = pl[0]; filtered_arrayids[probeset] = [] ### When agglomerated, this is important
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
try: probeset = entry.Probeset2(); filtered_arrayids[probeset] = []
except AttributeError: null =[] ###occurs when running Splicing
exon_db = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status);null=[] ###replace existing exon_db (probeset_annotations_file should be a global)
###domain_gene_changed_count_db is the number of genes for each domain that are found for regulated probesets
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if use_direct_domain_alignments_only == 'yes':
protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetAligningDomains(regulated_exon_junction_db,'probeset')
else: protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetProteinCompDomains(regulated_exon_junction_db,'probeset','exoncomp')
else:
if use_direct_domain_alignments_only == 'yes':
protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetAligningDomains(exon_db,'probeset')
else: protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetProteinCompDomains(exon_db,'probeset','exoncomp')
filtered_microRNA_exon_db = ExonAnalyze_module.filterMicroRNAProbesetAssociations(microRNA_full_exon_db,exon_hits)
microRNA_full_exon_db=[]
###add microRNA data to functional_attribute_db
microRNA_hit_gene_count_db = {}; all_microRNA_gene_hits={}; microRNA_attribute_db={}; probeset_mirBS_db={}
for (affygene,uid) in filtered_microRNA_exon_db: ###example ('G7091354', 'E20|') [('hsa-miR-130a', 'Pbxip1'), ('hsa-miR-130a', 'Pbxip1'
###3-1-08
miR_list = []
microRNA_symbol_list = filtered_microRNA_exon_db[(affygene,uid)]
for mir_key in microRNA_symbol_list:
microRNA,gene_symbol,miR_seq, miR_sources = mir_key
#if 'ENS' in microRNA: print microRNA; kill ### bug in some miRNA annotations introduced in the build process
specific_microRNA_tuple = (microRNA,'~')
try: microRNA_hit_gene_count_db[microRNA].append(affygene)
except KeyError: microRNA_hit_gene_count_db[microRNA] = [affygene]
###Create a database with the same structure as "protein_exon_feature_db"(below) for over-representation analysis (direction specific), after linking up splice direction data
try: microRNA_attribute_db[(affygene,uid)].append(specific_microRNA_tuple)
except KeyError: microRNA_attribute_db[(affygene,uid)] = [specific_microRNA_tuple]
miR_data = microRNA+':'+miR_sources
miR_list.append(miR_data) ###Add miR information to the record
function_type = ('miR-sequence: ' +'('+miR_data+')'+miR_seq,'~') ###Add miR sequence information to the sequence field of the report
try: functional_attribute_db[(affygene,uid)].append(function_type)
except KeyError: functional_attribute_db[(affygene,uid)]=[function_type]
#print (affygene,uid), [function_type];kill
if perform_element_permutation_analysis == 'yes':
try: probeset_mirBS_db[uid].append(microRNA)
except KeyError: probeset_mirBS_db[uid] = [microRNA]
miR_str = string.join(miR_list,','); miR_str = '('+miR_str+')'
function_type = ('microRNA-target'+miR_str,'~')
try: functional_attribute_db[(affygene,uid)].append(function_type)
except KeyError: functional_attribute_db[(affygene,uid)]=[function_type]
all_microRNA_gene_hits[affygene] = []
###Replace the gene list for each microRNA hit with count data
microRNA_hit_gene_count_db = eliminate_redundant_dict_values(microRNA_hit_gene_count_db)
###Combines any additional feature alignment info identified from 'ExonAnalyze_module.characterizeProteinLevelExonChanges' (e.g. from Ensembl or junction-based queries rather than exon-specific)
###with the database of (Gene,Exon)=[(functional element 1,'~'),(functional element 2,'~')] used for downstream result file annotations
domain_hit_gene_count_db = {}; all_domain_gene_hits = {}; probeset_domain_db={}
for entry in protein_features:
gene,uid = entry
for data_tuple in protein_features[entry]:
domain,call = data_tuple
try: protein_exon_feature_db[entry].append(data_tuple)
except KeyError: protein_exon_feature_db[entry] = [data_tuple]
try: domain_hit_gene_count_db[domain].append(gene)
except KeyError: domain_hit_gene_count_db[domain] = [gene]
all_domain_gene_hits[gene]=[]
if perform_element_permutation_analysis == 'yes':
try: probeset_domain_db[uid].append(domain)
except KeyError: probeset_domain_db[uid] = [domain]
protein_features=[]; domain_gene_changed_count_db=[]
###Replace the gene list for each domain hit with count data
domain_hit_gene_count_db = eliminate_redundant_dict_values(domain_hit_gene_count_db)
############ Perform Element Over-Representation Analysis ############
"""Domain/FT Fishers-Exact test: with "protein_exon_feature_db" (transformed to "domain_hit_gene_count_db") we can analyze over-representation of domain/features WITHOUT taking into account exon-inclusion or exclusion
Do this using: "domain_associated_genes", which contains domain tuple ('Tyr_pkinase', 'IPR001245') as a key and count in unique genes as the value in addition to
Number of genes linked to splice events "regulated" (SI and Midas p<0.05), number of genes with constitutive probesets
MicroRNA Fishers-Exact test: "filtered_microRNA_exon_db" contains gene/exon to microRNA data. For each microRNA, count the representation in spliced genes microRNA (unique gene count - make this from the mentioned file)
Do this using: "microRNA_count_db" """
domain_gene_counts = {} ### Get unique gene counts for each domain
for domain in domain_associated_genes:
domain_gene_counts[domain] = len(domain_associated_genes[domain])
total_microRNA_gene_hit_count = len(all_microRNA_gene_hits)
total_microRNA_gene_denom_count = len(gene_microRNA_denom)
Nm,Rm = calculateZScores(microRNA_hit_gene_count_db,microRNA_count_db,total_microRNA_gene_denom_count,total_microRNA_gene_hit_count,'microRNA')
gene_microRNA_denom =[]
summary_data_db['miRNA_gene_denom'] = total_microRNA_gene_denom_count
summary_data_db['miRNA_gene_hits'] = total_microRNA_gene_hit_count
summary_data_db['alt_events']=len(splice_event_list)
total_domain_gene_hit_count = len(all_domain_gene_hits)
total_domain_gene_denom_count = protein_ft_db_len ###genes connected to domain annotations
Nd,Rd = calculateZScores(domain_hit_gene_count_db,domain_gene_counts,total_domain_gene_denom_count,total_domain_gene_hit_count,'domain')
microRNA_hit_gene_counts={}; gene_to_miR_db={} ### Get unique gene counts for each miR and the converse
for microRNA in microRNA_hit_gene_count_db:
microRNA_hit_gene_counts[microRNA] = len(microRNA_hit_gene_count_db[microRNA])
for gene in microRNA_hit_gene_count_db[microRNA]:
try: gene_to_miR_db[gene].append(microRNA)
except KeyError: gene_to_miR_db[gene] = [microRNA]
gene_to_miR_db = eliminate_redundant_dict_values(gene_to_miR_db)
if perform_element_permutation_analysis == 'yes':
###Begin Domain/microRNA Permute Analysis
input_count = len(splice_event_list) ### Number of probesets or probeset pairs (junction array) alternatively regulated
original_increment = int(permutations/20); increment = original_increment
start_time = time.time(); print 'Permuting the Domain/miRBS analysis %d times' % permutations
x=0; permute_domain_inputs=[]; permute_miR_inputs=[]
while x<permutations:
if x == increment: increment+=original_increment; print '*',
permute_input_list = random.sample(denominator_list,input_count); x+=1
permute_domain_input_gene_counts = countGenesForElement(permute_input_list,probeset_to_gene,probeset_domain_db)
permute_domain_inputs.append(permute_domain_input_gene_counts)
permute_miR_input_gene_counts = countGenesForElement(permute_input_list,probeset_to_gene,probeset_mirBS_db)
permute_miR_inputs.append(permute_miR_input_gene_counts)
calculatePermuteZScores(permute_domain_inputs,domain_gene_counts,Nd,Rd)
calculatePermuteZScores(permute_miR_inputs,microRNA_hit_gene_counts,Nm,Rm)
calculatePermuteStats(original_domain_z_score_data)
calculatePermuteStats(original_microRNA_z_score_data)
adjustPermuteStats(original_domain_z_score_data)
adjustPermuteStats(original_microRNA_z_score_data)
exportZScoreData(original_domain_z_score_data,'ft-domain')
exportZScoreData(original_microRNA_z_score_data,'microRNA')
end_time = time.time(); time_diff = int(end_time-start_time)
print "Enrichment p-values for Domains/miRBS calculated in %d seconds" % time_diff
denominator_list=[]
try: clearObjectsFromMemory(original_microRNA_z_score_data)
except Exception: null=[]
microRNA_hit_gene_count_db={}; microRNA_hit_gene_counts={};
clearObjectsFromMemory(permuted_z_scores); permuted_z_scores=[]; original_domain_z_score_data=[]
if (array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null')) and analysis_method != 'splicing-index':
critical_probeset_annotation_db = getJunctionSplicingAnnotations(regulated_exon_junction_db)
probeset_aligning_db = importProbesetAligningDomains(regulated_exon_junction_db,'perfect_match')
else: probeset_aligning_db = importProbesetAligningDomains(exon_db,'perfect_match')
############ Export exon/junction level results ############
splice_event_db={}; protein_length_list=[]; aspire_gene_results={}
critical_gene_exons={}; unique_exon_event_db={}; comparison_count={}; direct_domain_gene_alignments={}
functional_attribute_db2={}; protein_exon_feature_db2={}; microRNA_exon_feature_db2={}
external_exon_annot={}; gene_exon_region={}; gene_smallest_p={}; gene_splice_event_score={}; alternatively_reg_tc={}
aspire_output = root_dir+'AltResults/AlternativeOutput/' + dataset_name + analysis_method+'-exon-inclusion-results.txt'
data = export.ExportFile(aspire_output)
goelite_output = root_dir+'GO-Elite/AltExon/AS.'+ dataset_name + analysis_method+'.txt'
goelite_data = export.ExportFile(goelite_output); gcn=0
#print 'LENGTH OF THE GENE ANNOTATION DATABASE',len(annotate_db)
if array_type != 'AltMouse':
DG_output = root_dir+'AltResults/DomainGraph/' + dataset_name + analysis_method+'-DomainGraph.txt'
DG_data = export.ExportFile(DG_output)
### Write out only the inclusion hits to a subdir
SRFinder_inclusion = root_dir+'GO-Elite/exon/' + dataset_name + analysis_method+'-inclusion.txt'
SRFinder_in_data = export.ExportFile(SRFinder_inclusion)
SRFinder_in_data.write('probeset\tSystemCode\tdeltaI\tp-value\n')
### Write out only the exclusion hits to a subdir
SRFinder_exclusion = root_dir+'GO-Elite/exon/' + dataset_name + analysis_method+'-exclusion.txt'
SRFinder_ex_data = export.ExportFile(SRFinder_exclusion)
SRFinder_ex_data.write('probeset\tSystemCode\tdeltaI\tp-value\n')
### Write out only the denominator set to a subdir
SRFinder_denom = root_dir+'GO-Elite/exon_denominator/' + species+'-'+array_type+'.txt'
SRFinder_denom_data = export.ExportFile(SRFinder_denom)
SRFinder_denom_data.write('probeset\tSystemCode\n')
ens_version = unique.getCurrentGeneDatabaseVersion()
ProcessedSpliceData_output = string.replace(DG_output,'DomainGraph','ProcessedSpliceData') ### This is the same as the DG export but without converting the probeset IDs for non-exon arrays
ProcessedSpliceData_data = export.ExportFile(ProcessedSpliceData_output)
if ens_version == '':
try:
elite_db_versions = UI.returnDirectoriesNoReplace('/AltDatabase')
if len(elite_db_versions)>0: ens_version = elite_db_versions[0]
except Exception: null=[]
ens_version = string.replace(ens_version,'EnsMart','ENS_')
DG_data.write(ens_version+"\n")
DG_data.write("Probeset\tGeneID\tRegulation call\tSI\tSI p-value\tMiDAS p-value\n")
ProcessedSpliceData_data.write("ExonID(s)\tGeneID\tRegulation call\t"+analysis_method+"\t"+analysis_method+" p-value\tMiDAS p-value\n")
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
if perform_permutation_analysis == 'yes': p_value_type = 'permutation-values'
else: p_value_type = 'FDR-'+p_value_call
if array_type == 'AltMouse': gene_name = 'AffyGene'; extra_transcript_annotation = 'block_structure'; extra_exon_annotation = 'splice_event_description'
if array_type == 'junction' or array_type == 'RNASeq':
gene_name = 'Ensembl'; extra_transcript_annotation = 'transcript cluster ID'; extra_exon_annotation = 'distal exon-region-ID'
goelite_data.write("GeneID\tSystemCode\tscore\tp-value\tSymbol\tExonIDs\n")
if array_type == 'RNASeq':
id1='junctionID-1'; id2='junctionID-2'; loc_column='exon/junction locations'
extra_transcript_annotation = 'Known/Novel Feature'
else: id1='probeset1'; id2='probeset2'; loc_column='probeset locations'
title = [gene_name,analysis_method,'symbol','description','exons1','exons2','regulation_call','event_call',id1,'norm-p1',id2,'norm-p2','fold1','fold2']
title +=['adj-fold1' ,'adj-fold2' ,extra_transcript_annotation,'critical_up_exons','critical_down_exons','functional_prediction','uniprot-ens_feature_predictions']
title +=['peptide_predictions','exp1','exp2','ens_overlapping_domains','constitutive_baseline_exp',p_value_call,p_value_type,'permutation-false-positives']
title +=['gene-expression-change', extra_exon_annotation ,'ExternalExonIDs','ExonRegionID','SplicingEvent','ExonAnnotationScore','large_splicing_diff',loc_column]
else:
goelite_data.write("GeneID\tSystemCode\tSI\tSI p-value\tMiDAS p-value\tSymbol\tExonID\n")
if analysis_method == 'splicing-index':
NIpval = 'SI_rawp'; splicing_score = 'Splicing-Index'; lowestp = 'lowest_p (MIDAS or SI)'; AdjPcolumn = 'Deviation-Value'; #AdjPcolumn = 'SI_adjp'
else:
NIpval = 'FIRMA_rawp'; splicing_score = 'FIRMA_fold'; lowestp = 'lowest_p (MIDAS or FIRMA)'; AdjPcolumn = 'Deviation-Value'; #AdjPcolumn = 'FIRMA_adjp'
if array_type == 'RNASeq':
id1='junctionID'; pval_column='junction p-value'; loc_column='junction location'
else: id1='probeset'; pval_column='probeset p-value'; loc_column='probeset location'
if array_type == 'RNASeq': secondary_ID_title = 'Known/Novel Feature'
else: secondary_ID_title = 'alternative gene ID'
title= ['Ensembl',splicing_score,'symbol','description','exons','regulation_call',id1,pval_column,lowestp,'midas p-value','fold','adjfold']
title+=['up_exons','down_exons','functional_prediction','uniprot-ens_feature_predictions','peptide_predictions','ens_overlapping_domains','baseline_probeset_exp']
title+=['constitutive_baseline_exp',NIpval,AdjPcolumn,'gene-expression-change']
title+=[secondary_ID_title, 'ensembl exons', 'constitutive exon', 'exon-region-ID', 'exon annotations','distal exon-region-ID',loc_column]
title = string.join(title,'\t') + '\n'
try:
if original_conditions>2: title = string.replace(title,'regulation_call','conditions_compared')
except Exception: null=[]
data.write(title)
### Calculate adjusted normalized intensity p-values
fdr_exon_stats={}
if analysis_method != 'ASPIRE' and 'linearregres' not in analysis_method:
for (score,entry) in splice_event_list: ### These are all "significant entries"
fds = FDRStats(entry.TTestNormalizedRatios())
fdr_exon_stats[entry.Probeset1()] = fds
for probeset in excluded_probeset_db: ### These are all "non-significant entries"
fds = FDRStats(excluded_probeset_db[probeset].TTestNormalizedRatios())
fdr_exon_stats[probeset] = fds
try: adjustPermuteStats(fdr_exon_stats)
except Exception: null=[]
### Calculate the score average and stdev for each gene to later derive a Deviation Value
gene_deviation_db={}
for (score,entry) in splice_event_list:
dI = entry.Score(); geneID = entry.GeneID()
try: gene_deviation_db[geneID].append(dI)
except Exception: gene_deviation_db[geneID] = [dI]
for i in excluded_probeset_db:
entry = excluded_probeset_db[i]
try: dI = entry.Score(); geneID = entry.GeneID()
except Exception: geneID = entry[1]; dI = entry[-1]
try: gene_deviation_db[geneID].append(dI)
except Exception: None ### Don't include genes with no hits
for geneID in gene_deviation_db:
try:
avg_dI=statistics.avg(gene_deviation_db[geneID])
stdev_dI=statistics.stdev(gene_deviation_db[geneID])
gene_deviation_db[geneID] = avg_dI,stdev_dI
except Exception:
gene_deviation_db[geneID] = 'NA','NA'
event_count = 0
for (score,entry) in splice_event_list:
event_count += 1
dI = entry.Score(); probeset1 = entry.Probeset1(); regulation_call = entry.RegulationCall(); event_call = entry.EventCall();critical_exon_list = entry.CriticalExonTuple()
probeset1_display = probeset1; selected_probeset = probeset1
if agglomerate_inclusion_probesets == 'yes':
if array_type == 'AltMouse':
exons1 = original_exon_db[probeset1].ExonID()
try: probeset1 = original_exon_db[probeset1].Probeset()
except Exception: null=[]
else:
probeset1 = probeset1; exons1 = original_exon_db[probeset1].ExonID()
try: selected_probeset = original_exon_db[probeset1].Probeset()
except Exception: selected_probeset = probeset1
else:
try: exons1 = exon_db[probeset1].ExonID()
except Exception:
print probeset1, len(exon_db)
for i in exon_db: print i; break
kill
critical_probeset_list = [selected_probeset]
affygene = entry.GeneID()
### Calculate deviation value for each exon
avg_dI,stdev_dI = gene_deviation_db[affygene]
try: DV = deviation(dI,avg_dI,stdev_dI) ### Note: the dI values are always in log2 space, independent of platform
except Exception: DV = 'NA'
if affygene in annotate_db: description = annotate_db[affygene].Description(); symbol = annotate_db[affygene].Symbol()
else: description = ''; symbol = ''
ped1 = entry.ProbesetExprData1(); adjfold1 = ped1.AdjFold(); exp1 = ped1.BaselineExp(); fold1 = ped1.FoldChange(); rawp1 = ped1.ExpPval()
### Get Constitutive expression values
baseline_const_exp = entry.ConstitutiveExpression() ### For multiple group comparisons
#if affygene in gene_expression_diff_db: mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
try: mean_fold_change = str(entry.ConstitutiveFold()) ### For multi-condition analyses, the gene expression is dependent on the conditions compared
except Exception: mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
probeset2 = entry.Probeset2(); exons2 = exon_db[probeset2].ExonID(); rawp1 = str(entry.TTestNormalizedRatios()); rawp2 = str(entry.TTestNormalizedRatios2()); critical_probeset_list.append(probeset2)
ped2 = entry.ProbesetExprData2(); adjfold2 = ped2.AdjFold(); exp2 = ped2.BaselineExp(); fold2 = ped2.FoldChange()
try: location_summary=original_exon_db[selected_probeset].LocationSummary()+'|'+original_exon_db[probeset2].LocationSummary()
except Exception:
try: location_summary=exon_db[selected_probeset].LocationSummary()+'|'+exon_db[probeset2].LocationSummary()
except Exception: location_summary=''
if array_type == 'AltMouse':
extra_transcript_annotation = exon_db[probeset1].GeneStructure()
else:
try: extra_exon_annotation = last_exon_region_db[affygene]
except KeyError: extra_exon_annotation = ''
try:
tc1 = original_exon_db[probeset1].SecondaryGeneID()
tc2 = original_exon_db[probeset2].SecondaryGeneID() ### Transcript Cluster
probeset_tc = makeUnique([tc1,tc2])
extra_transcript_annotation = string.join(probeset_tc,'|')
try: alternatively_reg_tc[affygene] += probeset_tc
except KeyError: alternatively_reg_tc[affygene] = probeset_tc
except Exception: extra_transcript_annotation=''
if array_type == 'RNASeq':
try: extra_transcript_annotation = entry.NovelEvent() ### Instead of secondary gene ID, list known vs. novel reciprocal junction annotation
except Exception: None
exp_list = [float(exp1),float(exp2),float(exp1)+float(fold1),float(exp2)+float(fold2)]; exp_list.sort(); exp_list.reverse()
probeset_tuple = (probeset1,probeset2)
else:
try: exp_list = [float(exp1),float(exp1)+float(fold1)]; exp_list.sort(); exp_list.reverse()
except Exception: exp_list = ['']
probeset_tuple = (probeset1)
highest_exp = exp_list[0]
###Use the permuted p-value or the lowest expression junction p-value depending on the situation
###This p-value is used to filter out aspire events for further analyses
if len(p_value_call)>0:
if probeset_tuple in permute_p_values:
lowest_raw_p, pos_permute, total_permute, false_pos = permute_p_values[probeset_tuple]
else: lowest_raw_p = "NA"; pos_permute = "NA"; total_permute = "NA"; false_pos = "NA"
else:
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method: raw_p_list = [entry.TTestNormalizedRatios(),entry.TTestNormalizedRatios2()] #raw_p_list = [float(rawp1),float(rawp2)]; raw_p_list.sort()
else:
try: raw_p_list = [float(entry.TTestNormalizedRatios())] ###Could also be rawp1, but this is more appropriate
except Exception: raw_p_list = [1] ### Occurs when p='NA'
raw_p_list.sort()
lowest_raw_p = raw_p_list[0]; pos_permute = "NA"; total_permute = "NA"; false_pos = "NA"
if perform_permutation_analysis == 'yes':
p_value_extra = str(pos_permute)+' out of '+str(total_permute)
else: p_value_extra = str(pos_permute)
up_exons = ''; down_exons = ''; up_exon_list = []; down_exon_list = []; gene_exon_list=[]
exon_data = critical_exon_list
variable = exon_data[0]
if variable == 1 and regulation_call == 'upregulated':
for exon in exon_data[1]:
up_exons = up_exons + exon + ',';up_exon_list.append(exon)
key = affygene,exon+'|'; gene_exon_list.append(key)
elif variable == 1 and regulation_call == 'downregulated':
for exon in exon_data[1]:
down_exons = down_exons + exon + ',';down_exon_list.append(exon)
key = affygene,exon+'|';gene_exon_list.append(key)
else:
try: exon1 = exon_data[1][0]; exon2 = exon_data[1][1]
except Exception: print exon_data;kill
if adjfold1 > 0:
up_exons = up_exons + exon1 + ',';down_exons = down_exons + exon2 + ','
up_exon_list.append(exon1); down_exon_list.append(exon2)
key = affygene,exon1+'|'; gene_exon_list.append(key);key = affygene,exon2+'|'; gene_exon_list.append(key)
else:
up_exons = up_exons + exon2 + ',';down_exons = down_exons + exon1 + ','
up_exon_list.append(exon2); down_exon_list.append(exon1)
key = affygene,exon1+'|'; gene_exon_list.append(key); key = affygene,exon2+'|'; gene_exon_list.append(key)
up_exons = up_exons[0:-1];down_exons = down_exons[0:-1]
try: ### Get comparisons group annotation data for multigroup comparison analyses
if original_conditions>2:
try: regulation_call = ped1.Annotation()
except Exception: null=[]
except Exception: null=[]
###Format functional results based on exon level fold change
null = []
#global a; a = exon_hits; global b; b=microRNA_attribute_db; kill
"""if 'G7100684@J934332_RC@j_at' in critical_probeset_list:
print probeset1, probeset2, gene, critical_probeset_list, 'blah'
if ('G7100684', ('G7100684@J934333_RC@j_at', 'G7100684@J934332_RC@j_at')) in functional_attribute_db:
print functional_attribute_db[('G7100684', ('G7100684@J934333_RC@j_at', 'G7100684@J934332_RC@j_at'))];blah
blah"""
new_functional_attribute_str, functional_attribute_list2, seq_attribute_str,protein_length_list = format_exon_functional_attributes(affygene,critical_probeset_list,functional_attribute_db,up_exon_list,down_exon_list,protein_length_list)
new_uniprot_exon_feature_str, uniprot_exon_feature_list, null, null = format_exon_functional_attributes(affygene,critical_probeset_list,protein_exon_feature_db,up_exon_list,down_exon_list,null)
null, microRNA_exon_feature_list, null, null = format_exon_functional_attributes(affygene,critical_probeset_list,microRNA_attribute_db,up_exon_list,down_exon_list,null)
if len(new_functional_attribute_str) == 0: new_functional_attribute_str = ' '
if len(new_uniprot_exon_feature_str) == 0: new_uniprot_exon_feature_str = ' '
if len(seq_attribute_str) > 12000: seq_attribute_str = 'The sequence is too long to report for spreadsheet analysis'
### Add entries to a database to quantify the number of reciprocal isoforms regulated
reciprocal_isoform_data = [len(critical_exon_list[1]),critical_exon_list[1],event_call,regulation_call]
try: float((lowest_raw_p))
except ValueError: lowest_raw_p=0
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: unique_exon_event_db[affygene].append(reciprocal_isoform_data)
except KeyError: unique_exon_event_db[affygene] = [reciprocal_isoform_data]
### Add functional attribute information to a new database
for item in uniprot_exon_feature_list:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: protein_exon_feature_db2[affygene,attribute].append(exon)
except KeyError: protein_exon_feature_db2[affygene,attribute]=[exon]
### Add functional attribute information to a new database
"""Database not used for exon/junction data export but for over-representation analysis (direction specific)"""
for item in microRNA_exon_feature_list:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: microRNA_exon_feature_db2[affygene,attribute].append(exon)
except KeyError: microRNA_exon_feature_db2[affygene,attribute]=[exon]
### Add functional attribute information to a new database
for item in functional_attribute_list2:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: functional_attribute_db2[affygene,attribute].append(exon)
except KeyError: functional_attribute_db2[affygene,attribute]=[exon]
try:
abs_fold = abs(float(mean_fold_change)); fold_direction = 'down'; fold1_direction = 'down'; fold2_direction = 'down'
large_splicing_diff1 = 0; large_splicing_diff2 = 0; large_splicing_diff = 'null'; opposite_splicing_pattern = 'no'
if float(mean_fold_change)>0: fold_direction = 'up'
if float(fold1)>0: fold1_direction = 'up'
if fold1_direction != fold_direction:
if float(fold1)>float(mean_fold_change): large_splicing_diff1 = float(fold1)-float(mean_fold_change)
except Exception:
fold_direction = ''; large_splicing_diff = ''; opposite_splicing_pattern = ''
if analysis_method != 'ASPIRE' and 'linearregres' not in analysis_method: ed = exon_db[probeset1]
else:
try: ed = critical_probeset_annotation_db[selected_probeset,probeset2]
except KeyError:
try: ed = exon_db[selected_probeset] ###not useful data here, but the objects need to exist
except Exception: ed = original_exon_db[probeset1] ### a missing key (not an IOError) triggers this fallback
ucsc_splice_annotations = ["retainedIntron","cassetteExon","strangeSplice","altFivePrime","altThreePrime","altPromoter","bleedingExon"]
custom_annotations = ["alt-3'","alt-5'","alt-C-term","alt-N-term","cassette-exon","cassette-exon","exon-region-exclusion","intron-retention","mutually-exclusive-exon","trans-splicing"]
custom_exon_annotations_found='no'; ucsc_annotations_found = 'no'; exon_annot_score=0
if len(ed.SplicingEvent())>0:
for annotation in ucsc_splice_annotations:
if annotation in ed.SplicingEvent(): ucsc_annotations_found = 'yes'
for annotation in custom_annotations:
if annotation in ed.SplicingEvent(): custom_exon_annotations_found = 'yes'
if custom_exon_annotations_found == 'yes' and ucsc_annotations_found == 'no': exon_annot_score = 3
elif ucsc_annotations_found == 'yes' and custom_exon_annotations_found == 'no': exon_annot_score = 4
elif ucsc_annotations_found == 'yes' and custom_exon_annotations_found == 'yes': exon_annot_score = 5
else: exon_annot_score = 2
try: gene_splice_event_score[affygene].append(exon_annot_score) ###store for gene level results
except KeyError: gene_splice_event_score[affygene] = [exon_annot_score]
try: gene_exon_region[affygene].append(ed.ExonRegionID()) ###store for gene level results
except KeyError: gene_exon_region[affygene] = [ed.ExonRegionID()]
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
if float(fold2)>0: fold2_direction = 'up'
if fold2_direction != fold_direction:
if float(fold2)>float(mean_fold_change):
large_splicing_diff2 = float(fold2)-float(mean_fold_change)
if abs(large_splicing_diff2) > large_splicing_diff1: large_splicing_diff = str(large_splicing_diff2)
else: large_splicing_diff = str(large_splicing_diff1)
if fold1_direction != fold2_direction and abs(float(fold1))>0.4 and abs(float(fold2))>0.4 and abs(float(mean_fold_change))< max([float(fold2),float(fold1)]):
opposite_splicing_pattern = 'yes'
### Annotate splicing events based on exon structure data
if array_type == 'AltMouse':
extra_exon_annotation = ExonAnnotate_module.annotate_splice_event(exons1,exons2,extra_transcript_annotation)
try: splice_event_db[extra_exon_annotation] += 1
except KeyError: splice_event_db[extra_exon_annotation] = 1
try:
direct_domain_alignments = probeset_aligning_db[selected_probeset,probeset2]
try: direct_domain_gene_alignments[affygene]+=', '+direct_domain_alignments
except KeyError: direct_domain_gene_alignments[affygene]=direct_domain_alignments
except KeyError: direct_domain_alignments = ' '
splicing_event = ed.SplicingEvent()
if array_type == 'RNASeq':
splicing_event = checkForTransSplicing(probeset1_display,splicing_event)
splicing_event = checkForTransSplicing(probeset2,splicing_event)
exp1 = covertLogExpressionToNonLog(exp1)
exp2 = covertLogExpressionToNonLog(exp2)
baseline_const_exp = covertLogExpressionToNonLog(baseline_const_exp)
fold1 = covertLogFoldToNonLog(fold1)
fold2 = covertLogFoldToNonLog(fold2)
adjfold1 = covertLogFoldToNonLog(adjfold1)
adjfold2 = covertLogFoldToNonLog(adjfold2)
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
### Annotate splicing events based on pre-computed and existing annotations
values= [affygene,dI,symbol,fs(description),exons1,exons2,regulation_call,event_call,probeset1_display,rawp1,probeset2,rawp2,fold1,fold2,adjfold1,adjfold2]
values+=[extra_transcript_annotation,up_exons,down_exons,fs(new_functional_attribute_str),fs(new_uniprot_exon_feature_str),fs(seq_attribute_str),exp1,exp2,fs(direct_domain_alignments)]
values+=[str(baseline_const_exp),str(lowest_raw_p),p_value_extra,str(false_pos),mean_fold_change,extra_exon_annotation]
values+=[ed.ExternalExonIDs(),ed.ExonRegionID(),splicing_event,str(exon_annot_score),large_splicing_diff,location_summary]
exon_sets = abs(float(dI)),regulation_call,event_call,exons1,exons2,''
### Export significant reciprocal junction pairs and scores
values_ps = [probeset1+'|'+probeset2,affygene,'changed',dI,'NA',str(lowest_raw_p)]; values_ps = string.join(values_ps,'\t')+'\n'
try: ProcessedSpliceData_data.write(values_ps)
except Exception: None
values_ge = [affygene,'En',dI,str(lowest_raw_p),symbol,probeset1_display+' | '+probeset2]; values_ge = string.join(values_ge,'\t')+'\n'
if array_type == 'junction' or array_type == 'RNASeq': ### Only applies to reciprocal junction sensitive platforms (but not currently AltMouse)
goelite_data.write(values_ge)
if array_type == 'junction' or array_type == 'RNASeq': ### Only applies to reciprocal junction sensitive platforms (but not currently AltMouse)
try: exon_probeset = exon_array_translation_db[affygene+':'+exon_data[1][0]][0]; probeset1 = exon_probeset; gcn+=1
except Exception: probeset1 = None #probeset1 = affygene+':'+exon_data[1][0]
try:
null = int(probeset1) ### Must be an int to work in DomainGraph
values_dg = [probeset1,affygene,'changed',dI,'NA',str(lowest_raw_p)]; values_dg = string.join(values_dg,'\t')+'\n'
if array_type == 'junction' or array_type == 'RNASeq':
DG_data.write(values_dg)
values_srf = string.join([probeset1,'Ae',dI,str(lowest_raw_p)],'\t')+'\n'
if float(dI)>0:
SRFinder_ex_data.write(values_srf)
elif float(dI)<0:
SRFinder_in_data.write(values_srf)
except Exception: null=[]
else:
si_pvalue = lowest_raw_p
if si_pvalue == 1: si_pvalue = 'NA'
if probeset1 in midas_db:
midas_p = str(midas_db[probeset1])
if float(midas_p)<lowest_raw_p: lowest_raw_p = float(midas_p) ###This is the lowest and SI-pvalue
else: midas_p = ''
###Determine what type of exon-annotations are present to assign a confidence score
if affygene in annotate_db: ###Determine the transcript clusters used to comprise a splice event (genes and exon specific)
try:
gene_tc = annotate_db[affygene].TranscriptClusterIDs()
try: probeset_tc = [ed.SecondaryGeneID()]
except Exception: probeset_tc = [affygene]
for transcript_cluster in gene_tc: probeset_tc.append(transcript_cluster)
probeset_tc = makeUnique(probeset_tc)
except Exception: probeset_tc = ''; gene_tc=''
else:
try:
try: probeset_tc = [ed.SecondaryGeneID()]
except Exception: probeset_tc = [affygene]
probeset_tc = makeUnique(probeset_tc)
except Exception: probeset_tc = ''; gene_tc=''
cluster_number = len(probeset_tc)
try: alternatively_reg_tc[affygene] += probeset_tc
except KeyError: alternatively_reg_tc[affygene] = probeset_tc
try: last_exon_region = last_exon_region_db[affygene]
except KeyError: last_exon_region = ''
if cluster_number>1: exon_annot_score = 1
direct_domain_alignments = ' '
if array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null':
try:
direct_domain_alignments = probeset_aligning_db[probeset1]
try: direct_domain_gene_alignments[affygene]+=', '+direct_domain_alignments
except KeyError: direct_domain_gene_alignments[affygene]=direct_domain_alignments
except KeyError: direct_domain_alignments = ' '
else:
try: direct_domain_alignments = probeset_aligning_db[affygene+':'+exons1]
except KeyError: direct_domain_alignments = ''
if array_type == 'RNASeq':
exp1 = covertLogExpressionToNonLog(exp1)
baseline_const_exp = covertLogExpressionToNonLog(baseline_const_exp)
fold1 = covertLogFoldToNonLog(fold1)
adjfold1 = covertLogFoldToNonLog(adjfold1)
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
try: adj_SIp=fdr_exon_stats[probeset1].AdjP()
except Exception: adj_SIp = 'NA'
try: secondary_geneid = ed.SecondaryGeneID()
except Exception: secondary_geneid = affygene
if array_type == 'RNASeq':
secondary_geneid = ed.NovelExon()
### Write Splicing Index results
values= [affygene,dI,symbol,fs(description),exons1,regulation_call,probeset1,rawp1,str(lowest_raw_p),midas_p,fold1,adjfold1]
values+=[up_exons,down_exons,fs(new_functional_attribute_str),fs(new_uniprot_exon_feature_str),fs(seq_attribute_str),fs(direct_domain_alignments),exp1]
values+=[str(baseline_const_exp),str(si_pvalue),DV,mean_fold_change,secondary_geneid, ed.ExternalExonIDs()]
values+=[ed.Constitutive(),ed.ExonRegionID(),ed.SplicingEvent(),last_exon_region,ed.LocationSummary()] #str(exon_annot_score)
if probeset1 in filtered_probeset_db: values += filtered_probeset_db[probeset1]
exon_sets = abs(float(dI)),regulation_call,event_call,exons1,exons1,midas_p
probeset = probeset1 ### store original ID (gets converted below)
### Write DomainGraph results
try: midas_p = str(midas_db[probeset1])
except KeyError: midas_p = 'NA'
### Export significant exon/junction IDs and scores
values_ps = [probeset1,affygene,'changed',dI,'NA',str(lowest_raw_p)]; values_ps = string.join(values_ps,'\t')+'\n'
try: ProcessedSpliceData_data.write(values_ps)
except Exception: None
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null':
try: exon_probeset = exon_array_translation_db[affygene+':'+exon_data[1][0]][0]; probeset1 = exon_probeset; gcn+=1
except Exception: probeset1 = None ### don't write out a line
else:
try: exon_probeset = exon_array_translation_db[probeset1][0]; probeset1 = exon_probeset; gcn+=1
except Exception: probeset1=None; #null=[]; #print gcn, probeset1;kill - force an error - new in version 2.0.8
try:
null = int(probeset1)
values_dg = [probeset1,affygene,'changed',dI,str(si_pvalue),midas_p]; values_dg = string.join(values_dg,'\t')+'\n'
DG_data.write(values_dg)
values_srf = string.join([probeset1,'Ae',dI,str(lowest_raw_p)],'\t')+'\n'
if float(dI)>0:
SRFinder_ex_data.write(values_srf)
elif float(dI)<0:
SRFinder_in_data.write(values_srf)
except Exception: null=[]
values_ge = [affygene,'En',dI,str(si_pvalue),midas_p,symbol,probeset]; values_ge = string.join(values_ge,'\t')+'\n'
goelite_data.write(values_ge)
if len(ed.SplicingEvent())>2:
try: external_exon_annot[affygene].append(ed.SplicingEvent())
except KeyError: external_exon_annot[affygene] = [ed.SplicingEvent()]
try: values = string.join(values,'\t')+'\n'
except Exception: print values;kill
data.write(values)
###Process data for gene level reports
if float((lowest_raw_p))<=p_threshold or false_pos < 2 or lowest_raw_p == 1:
try: comparison_count[affygene] += 1
except KeyError: comparison_count[affygene] = 1
try: aspire_gene_results[affygene].append(exon_sets)
except KeyError: aspire_gene_results[affygene] = [exon_sets]
for exon in up_exon_list:
exon_info = exon,'upregulated'
try: critical_gene_exons[affygene].append(exon_info)
except KeyError: critical_gene_exons[affygene] = [exon_info]
for exon in down_exon_list:
exon_info = exon,'downregulated'
try: critical_gene_exons[affygene].append(exon_info)
except KeyError: critical_gene_exons[affygene] = [exon_info]
data.close()
print event_count, analysis_method, "results written to:", aspire_output,'\n'
try: clearObjectsFromMemory(original_exon_db)
except Exception: null=[]
exon_array_translation_db=[]; original_exon_db=[]; probeset_to_gene=[]
### Finish writing the DomainGraph export file with non-significant probesets
if array_type != 'AltMouse':
for probeset in excluded_probeset_db:
eed = excluded_probeset_db[probeset]
try: midas_p = str(midas_db[probeset])
except KeyError: midas_p = 'NA'
### Export significant exon/junction IDs and scores
try: values_ps = [probeset,eed.GeneID(),'UC',eed.Score(),str(eed.TTestNormalizedRatios()),midas_p]
except Exception: excl_probeset, geneid, score, rawp, pvalue = eed; values_ps = [probeset,geneid,'UC', str(score), str(rawp), str(pvalue)]
values_ps = string.join(values_ps,'\t')+'\n'; ProcessedSpliceData_data.write(values_ps)
### Write DomainGraph results
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
try: exon_probeset = exon_array_translation_db[probeset][0]; probeset = exon_probeset; gcn+=1
except Exception: probeset=None; # null=[] - force an error - new in version 2.0.8
try: values_dg = [probeset,eed.GeneID(),'UC',eed.Score(),str(eed.TTestNormalizedRatios()),midas_p]
except Exception:
try:
excl_probeset, geneid, score, rawp, pvalue = eed
if ':' in probeset: probeset = excl_probeset ### Example: ENSMUSG00000029213:E2.1, make this just the numeric exclusion probeset - Not sure if DG handles non-numeric
values_dg = [probeset,geneid,'UC', str(score), str(rawp), str(pvalue)]
except Exception: None
try:
null=int(probeset)
values_dg = string.join(values_dg,'\t')+'\n'; DG_data.write(values_dg)
except Exception: null=[]
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
for id in exon_array_translation_db:
SRFinder_denom_data.write(exon_array_translation_db[id]+'\tAe\n')
else:
for probeset in original_exon_db:
SRFinder_denom_data.write(probeset+'\tAe\n')
DG_data.close()
SRFinder_in_data.close()
SRFinder_ex_data.close()
SRFinder_denom_data.close()
for affygene in direct_domain_gene_alignments:
domains = string.split(direct_domain_gene_alignments[affygene],', ')
domains = unique.unique(domains); domains = string.join(domains,', ')
direct_domain_gene_alignments[affygene] = domains
### functional_attribute_db2 will be reorganized below, so first save the current database under another name and use that copy
functional_attribute_db = functional_attribute_db2
functional_attribute_db2 = reorganize_attribute_entries(functional_attribute_db2,'no')
external_exon_annot = eliminate_redundant_dict_values(external_exon_annot)
protein_exon_feature_db = protein_exon_feature_db2
protein_exon_feature_db2 = reorganize_attribute_entries(protein_exon_feature_db2,'no')
############ Export Gene Data ############
up_splice_val_genes = 0; down_dI_genes = 0; diff_exp_spliced_genes = 0; diff_spliced_rna_factor = 0
ddI = 0; udI = 0
summary_data_db['direct_domain_genes']=len(direct_domain_gene_alignments)
summary_data_db['alt_genes']=len(aspire_gene_results)
critical_gene_exons = eliminate_redundant_dict_values(critical_gene_exons)
aspire_output_gene = root_dir+'AltResults/AlternativeOutput/' + dataset_name + analysis_method + '-exon-inclusion-GENE-results.txt'
data = export.ExportFile(aspire_output_gene)
if array_type == 'AltMouse': goelite_data.write("GeneID\tSystemCode\n")
title = ['AffyGene','max_dI','midas-p (corresponding)','symbol','external gene ID','description','regulation_call','event_call']
title +=['number_of_comparisons','num_effected_exons','up_exons','down_exons','functional_attribute','uniprot-ens_exon_features','direct_domain_alignments']
title +=['pathways','mean_fold_change','exon-annotations','exon-region IDs','alternative gene ID','splice-annotation score']
title = string.join(title,'\t')+'\n'
data.write(title)
for affygene in aspire_gene_results:
if affygene in annotate_db:
description = annotate_db[affygene].Description()
symbol = annotate_db[affygene].Symbol()
ensembl = annotate_db[affygene].ExternalGeneID()
if array_type != 'AltMouse' and array_type != 'RNASeq': transcript_clusters = alternatively_reg_tc[affygene]; transcript_clusters = makeUnique(transcript_clusters); transcript_clusters = string.join(transcript_clusters,'|')
else: transcript_clusters = affygene
rna_processing_factor = annotate_db[affygene].RNAProcessing()
else: description='';symbol='';ensembl=affygene;rna_processing_factor=''; transcript_clusters=''
if ensembl in go_annotations: wpgo = go_annotations[ensembl]; goa = wpgo.Combined()
else: goa = ''
if array_type == 'AltMouse':
if len(ensembl) >0: goelite_data.write(ensembl+'\tL\n')
try: gene_splice_event_score[affygene].sort(); top_se_score = str(gene_splice_event_score[affygene][-1])
except KeyError: top_se_score = 'NA'
try: gene_regions = gene_exon_region[affygene]; gene_regions = makeUnique(gene_regions); gene_regions = string.join(gene_regions,'|')
except KeyError: gene_regions = 'NA'
if analysis_method == 'ASPIRE' or analysis_method == 'linearregres': number_of_comparisons = str(comparison_count[affygene])
else: number_of_comparisons = 'NA'
results_list = aspire_gene_results[affygene]
results_list.sort(); results_list.reverse()
max_dI = str(results_list[0][0])
regulation_call = results_list[0][1]
event_call = results_list[0][2]
midas_p = results_list[0][-1]
num_critical_exons = str(len(critical_gene_exons[affygene]))
try: direct_domain_annots = direct_domain_gene_alignments[affygene]
except KeyError: direct_domain_annots = ' '
down_exons = ''; up_exons = ''; down_list=[]; up_list=[]
for exon_info in critical_gene_exons[affygene]:
exon = exon_info[0]; call = exon_info[1]
if call == 'downregulated':
down_exons = down_exons + exon + ','
down_list.append(exon)
ddI += 1
if call == 'upregulated':
up_exons = up_exons + exon + ','
up_list.append(exon)
udI += 1
down_exons = down_exons[0:-1]
up_exons = up_exons[0:-1]
up_exons = add_a_space(up_exons); down_exons = add_a_space(down_exons)
functional_annotation =''
if affygene in functional_attribute_db2:
number_of_functional_attributes = str(len(functional_attribute_db2[affygene]))
attribute_list = functional_attribute_db2[affygene]
attribute_list.sort()
for attribute_exon_info in attribute_list:
exon_attribute = attribute_exon_info[0]
exon_list = attribute_exon_info[1]
functional_annotation = functional_annotation + exon_attribute
exons = '('
for exon in exon_list: exons = exons + exon + ','
exons = exons[0:-1] + '),'
if add_exons_to_annotations == 'yes': functional_annotation = functional_annotation + exons
else: functional_annotation = functional_annotation + ','
functional_annotation = functional_annotation[0:-1]
uniprot_exon_annotation = ''
if affygene in protein_exon_feature_db2:
number_of_functional_attributes = str(len(protein_exon_feature_db2[affygene]))
attribute_list = protein_exon_feature_db2[affygene]; attribute_list.sort()
for attribute_exon_info in attribute_list:
exon_attribute = attribute_exon_info[0]
exon_list = attribute_exon_info[1]
uniprot_exon_annotation = uniprot_exon_annotation + exon_attribute
exons = '('
for exon in exon_list: exons = exons + exon + ','
exons = exons[0:-1] + '),'
if add_exons_to_annotations == 'yes': uniprot_exon_annotation = uniprot_exon_annotation + exons
else: uniprot_exon_annotation = uniprot_exon_annotation + ','
uniprot_exon_annotation = uniprot_exon_annotation[0:-1]
if len(uniprot_exon_annotation) == 0: uniprot_exon_annotation = ' '
if len(functional_annotation) == 0: functional_annotation = ' '
if affygene in gene_expression_diff_db:
mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
try:
if abs(float(mean_fold_change)) > log_fold_cutoff: diff_exp_spliced_genes += 1
except Exception: diff_exp_spliced_genes = diff_exp_spliced_genes
else: mean_fold_change = 'NC'
if len(rna_processing_factor) > 2: diff_spliced_rna_factor +=1
###Add annotations for where in the gene structure these exons are (according to Ensembl)
if affygene in external_exon_annot: external_gene_annot = string.join(external_exon_annot[affygene],', ')
else: external_gene_annot = ''
if array_type == 'RNASeq':
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
values =[affygene,max_dI,midas_p,symbol,ensembl,fs(description),regulation_call,event_call,number_of_comparisons]
values+=[num_critical_exons,up_exons,down_exons,functional_annotation]
values+=[fs(uniprot_exon_annotation),fs(direct_domain_annots),fs(goa),mean_fold_change,external_gene_annot,gene_regions,transcript_clusters,top_se_score]
values = string.join(values,'\t')+'\n'
data.write(values)
### Use results for summary statistics
if len(up_list)>len(down_list): up_splice_val_genes +=1
else: down_dI_genes +=1
data.close()
print "Gene-level results written"
###'yes' here indicates that although the truncation events will initially be filtered out, they will later be added
###back in without the non-truncation annotations if no second database is supplied (in this case functional_attribute_db again)
###IF WE WANT TO FILTER OUT NON-NMD ENTRIES WHEN NMD IS PRESENT (FOR A GENE), functional_attribute_db MUST BE INCLUDED AS THE SECOND VARIABLE!!!!
###Currently, 'yes' does nothing
functional_annotation_db, null = grab_summary_dataset_annotations(functional_attribute_db,'','yes')
upregulated_genes = 0; downregulated_genes = 0
###Calculate the number of upregulated and downregulated genes
for affygene in gene_expression_diff_db:
fold_val = gene_expression_diff_db[affygene].ConstitutiveFold()
try:
if float(fold_val) > log_fold_cutoff: upregulated_genes += 1
elif abs(float(fold_val)) > log_fold_cutoff: downregulated_genes += 1
except Exception: null=[]
upregulated_rna_factor = 0; downregulated_rna_factor = 0
###Calculate the total number of putative RNA-processing/binding factors differentially regulated
for affygene in gene_expression_diff_db:
gene_fold = gene_expression_diff_db[affygene].ConstitutiveFold()
rna_processing_factor = gene_expression_diff_db[affygene].RNAProcessing()
if len(rna_processing_factor) > 1:
if gene_fold>log_fold_cutoff: upregulated_rna_factor += 1
elif abs(gene_fold)>log_fold_cutoff: downregulated_rna_factor += 1
###Generate three files for downstream functional summary
### functional_annotation_db2 is output to the same function as functional_annotation_db, ranked_uniprot_list_all to get all ranked uniprot annotations,
### and ranked_uniprot_list_coding_only to get only coding ranked uniprot annotations
functional_annotation_db2, ranked_uniprot_list_all = grab_summary_dataset_annotations(protein_exon_feature_db,'','') #functional_attribute_db
null, ranked_uniprot_list_coding_only = grab_summary_dataset_annotations(protein_exon_feature_db,functional_attribute_db,'') #functional_attribute_db
functional_attribute_db=[]; protein_exon_feature_db=[]
###Summarize changes in avg protein length for each splice event
up_protein_list=[];down_protein_list=[]; protein_length_fold_diff=[]
for [down_protein,up_protein] in protein_length_list:
up_protein = float(up_protein); down_protein = float(down_protein)
down_protein_list.append(down_protein); up_protein_list.append(up_protein)
if up_protein > 10 and down_protein > 10:
fold_change = up_protein/down_protein; protein_length_fold_diff.append(fold_change)
median_fold_diff = statistics.median(protein_length_fold_diff)
try: down_avg=int(statistics.avg(down_protein_list)); up_avg=int(statistics.avg(up_protein_list))
except Exception: down_avg=0; up_avg=0
try:
try:
down_std=int(statistics.stdev(down_protein_list)); up_std=int(statistics.stdev(up_protein_list))
except ValueError: ###If 'null' is returned from stdev
down_std = 0;up_std = 0
except Exception:
down_std = 0;up_std = 0
if len(down_protein_list)>1 and len(up_protein_list)>1:
try:
#t,df,tails = statistics.ttest(down_protein_list,up_protein_list,2,3)
#t = abs(t);df = round(df)
#print 'ttest t:',t,'df:',df
#p = str(statistics.t_probability(t,df))
p = str(statistics.runComparisonStatistic(down_protein_list,up_protein_list,probability_statistic))
#print dataset_name,p
except Exception: p = 'NA'
if p == '1' or p == '1.0': p = 'NA' ### p was converted to a string above, so compare as a string
else: p = 'NA'
###Calculate unique reciprocal isoforms for exon-inclusion, exclusion and mutual-exclusive events
unique_exon_inclusion_count=0;unique_exon_exclusion_count=0;unique_mutual_exclusive_count=0;
unique_exon_event_db = eliminate_redundant_dict_values(unique_exon_event_db)
for affygene in unique_exon_event_db:
isoform_entries = unique_exon_event_db[affygene]
possibly_redundant=[]; non_redundant=[]; check_for_redundant=[]
for entry in isoform_entries:
if entry[0] == 1: ### If there is only one regulated exon
possibly_redundant.append(entry)
else:
non_redundant.append(entry)
critical_exon_list = entry[1]
for exon in critical_exon_list:
check_for_redundant.append(exon)
for entry in possibly_redundant:
exon = entry[1][0]
if exon not in check_for_redundant:
non_redundant.append(entry)
for entry in non_redundant:
if entry[2] == 'ei-ex':
if entry[3] == 'upregulated': unique_exon_inclusion_count += 1
else: unique_exon_exclusion_count += 1
else: unique_mutual_exclusive_count += 1
udI = unique_exon_inclusion_count; ddI = unique_exon_exclusion_count; mx = unique_mutual_exclusive_count
###Add splice event information to the functional_annotation_db
for splice_event in splice_event_db:count = splice_event_db[splice_event]; functional_annotation_db.append((splice_event,count))
if analysis_method == 'splicing-index' or analysis_method == 'FIRMA': udI='NA'; ddI='NA'
summary_results_db[dataset_name[0:-1]] = udI,ddI,mx,up_splice_val_genes,down_dI_genes,(up_splice_val_genes + down_dI_genes),upregulated_genes, downregulated_genes, diff_exp_spliced_genes, upregulated_rna_factor,downregulated_rna_factor,diff_spliced_rna_factor,down_avg,down_std,up_avg,up_std,p,median_fold_diff,functional_annotation_db
result_list = exportComparisonSummary(dataset_name,summary_data_db,'log')
###Re-set this variable (useful for testing purposes)
clearObjectsFromMemory(gene_expression_diff_db)
clearObjectsFromMemory(splice_event_list); clearObjectsFromMemory(si_db); si_db=[]
clearObjectsFromMemory(fdr_exon_stats)
try: clearObjectsFromMemory(excluded_probeset_db); clearObjectsFromMemory(ex_db); ex_db=[]
except Exception: ex_db=[]
clearObjectsFromMemory(exon_db)
#clearObjectsFromMemory(annotate_db)
critical_probeset_annotation_db=[]; gene_expression_diff_db=[]; domain_associated_genes=[]; permute_p_values=[]
permute_miR_inputs=[]; seq_attribute_str=[]; microRNA_count_db=[]; excluded_probeset_db=[]; fdr_exon_stats=[]
splice_event_list=[]; critical_exon_db_len=len(critical_exon_db)#; critical_exon_db=[] deleting here will cause a global instance problem
all_domain_gene_hits=[]; gene_splice_event_score=[]; unique_exon_event_db=[]; probeset_aligning_db=[]; ranked_uniprot_list_all=[];
filtered_microRNA_exon_db=[]; permute_domain_inputs=[]; functional_annotation_db2=[]; functional_attribute_db2=[]; protein_length_list=[];
ranked_uniprot_list_coding_only=[]; miR_str=[]; permute_input_list=[]; microRNA_exon_feature_db2=[]; alternatively_reg_tc=[];
direct_domain_gene_alignments=[]; aspire_gene_results=[]; domain_gene_counts=[]; functional_annotation=[]; protein_exon_feature_db2=[];
microRNA_attribute_db=[]; probeset_mirBS_db=[]; exon_hits=[]; critical_gene_exons=[]; gene_exon_region=[]; exon_db=[]; external_exon_annot=[];
values=[]; down_protein_list=[]; functional_annotation_db=[]; protein_length_fold_diff=[]; comparison_count=[]; filtered_arrayids=[];
domain_hit_gene_count_db=[]; up_protein_list=[]; probeset_domain_db=[]
try: goelite_data.close()
except Exception: null=[]
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return summary_results_db, summary_results_db2, aspire_output, aspire_output_gene, critical_exon_db_len
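### Illustrative sketch (hypothetical helper, not used above): approximates the significance filter applied
### repeatedly in the export blocks above, which retain an event when the lowest raw p-value passes the
### threshold, fewer than two permutation false positives were observed, or no usable p-value exists. The
### handling of non-numeric values here only approximates the surrounding code's behaviour.
def _example_passes_significance_filter(lowest_raw_p, false_pos, p_threshold):
    if lowest_raw_p == 'NA' or lowest_raw_p == 1: return True ### no usable p-value - retain the event
    try:
        if float(lowest_raw_p) <= p_threshold: return True
    except ValueError: return True ### non-numeric p-value - retain the event
    try: return int(false_pos) < 2
    except Exception: return False
### Example usage: _example_passes_significance_filter(0.01, 'NA', 0.05) returns True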
def deviation(dI,avg_dI,stdev_dI):
dI = covertLogFoldToNonLogFloat(dI)
avg_dI = covertLogFoldToNonLogFloat(avg_dI)
stdev_dI = covertLogFoldToNonLogFloat(stdev_dI)
return str(abs((dI-avg_dI)/stdev_dI))
def covertLogExpressionToNonLog(log_val):
if normalization_method == 'RPKM':
nonlog_val = (math.pow(2,float(log_val)))
else:
nonlog_val = (math.pow(2,float(log_val)))-1
return str(nonlog_val)
def covertLogFoldToNonLog(log_val):
try:
if float(log_val)<0: nonlog_val = (-1/math.pow(2,(float(log_val))))
else: nonlog_val = (math.pow(2,float(log_val)))
except Exception: nonlog_val = log_val
return str(nonlog_val)
def covertLogFoldToNonLogFloat(log_val):
if float(log_val)<0: nonlog_val = (-1/math.pow(2,(float(log_val))))
else: nonlog_val = (math.pow(2,float(log_val)))
return nonlog_val
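### Illustrative sketch (hypothetical helper, not called anywhere in this module): shows the same signed
### non-log conversion used by covertLogFoldToNonLog above for a list of log2 folds, e.g. a log2 fold of
### 1.0 maps to 2.0 and -1.0 maps to -2.0 (the reciprocal reported with a negative sign).
def _example_nonlog_folds(log_folds):
    import math
    nonlog = []
    for log_val in log_folds:
        if float(log_val) < 0: nonlog.append(-1/math.pow(2,float(log_val))) ### down-regulation reported as a negative ratio
        else: nonlog.append(math.pow(2,float(log_val)))
    return nonlog
### Example usage: _example_nonlog_folds([1.0, -1.0, 0.0]) returns [2.0, -2.0, 1.0]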
def checkForTransSplicing(uid,splicing_event):
pl = string.split(uid,':')
if len(pl)>2:
if pl[0] not in pl[1]: ### Two different genes
if len(splicing_event)>0: splicing_event+= '|trans-splicing'
else: splicing_event = '|trans-splicing'
return splicing_event
def fs(text):
### Formats a text entry to prevent delimiting a comma
return '"'+text+'"'
def analyzeSplicingIndex(fold_dbase):
"""The Splicing Index (SI) represents the log ratio of the exon intensities between the two tissues after normalization
to the gene intensities in each sample: SIi = log2((e1i/g1j)/(e2i/g2j)), for the i-th exon of the j-th gene in tissue
type 1 or 2. The splicing indices are then subjected to a t-test to probe for differential inclusion of the exon into the gene.
In order to determine if the change in isoform expression was statistically significant, a simple two-tailed t-test was carried
out on the isoform ratios by grouping the 10 samples from either "tumor" or "normal" tissue.
The method ultimately producing the highest proportion of true positives was to retain only: a) exons with a DABG p-value < 0.05,
b) genes with a signal > 70, c) exons with a log ratio between tissues (i.e., the gene-level normalized fold change) > 0.5,
d) Splicing Index p-values < 0.005 and e) Core exons.
Gardina PJ, Clark TA, Shimada B, Staples MK, Yang Q, Veitch J, Schweitzer A, Awad T, Sugnet C, Dee S, Davies C, Williams A, Turpaz Y.
Alternative splicing and differential gene expression in colon cancer detected by a whole genome exon array.
BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196
"""
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db)>0:
temp_db={}
for probeset in fold_dbase: temp_db[probeset]=[]
for probeset in temp_db:
try: filtered_probeset_db[probeset]
except KeyError: del fold_dbase[probeset]
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing annotation)
if filter_for_AS == 'yes':
proceed = 0
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try: del fold_dbase[probeset]
except KeyError: null=[]
### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
data = export.ExportFile(summary_output)
title = string.join(['gene\tExonID\tprobesets']+original_array_names,'\t')+'\n'; data.write(title)
print 'Calculating splicing-index values (please be patient)...',
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print len(fold_dbase),id_name,'being examined'
###original_avg_const_exp_db contains constitutive mean expression values per group: G6953871 [7.71, 7.66]
###array_raw_group_values: Raw expression values in list of groups: G7072464@J935416_RC@j_at ([1.79, 2.16, 2.22], [1.68, 2.24, 1.97, 1.92, 2.12])
###avg_const_exp_db contains the raw constitutive expression values in a single list
splicing_index_hash=[]; excluded_probeset_db={}; denominator_probesets=0; interaction = 0
original_increment = int(len(exon_db)/20); increment = original_increment
for probeset in exon_db:
ed = exon_db[probeset]
#include_probeset = ed.IncludeProbeset()
if interaction == increment: increment+=original_increment; print '*',
interaction +=1
include_probeset = 'yes' ###Moved this filter to import of the probeset relationship file
###Examines user input parameters for inclusion of probeset types in the analysis
if include_probeset == 'yes':
geneid = ed.GeneID()
if probeset in fold_dbase and geneid in original_avg_const_exp_db: ###used to search for array_raw_group_values, but when filtered by expression changes, need to filter by adj_fold_dbase
denominator_probesets+=1
###Includes probesets with a calculated constitutive expression value for each gene and expression data for that probeset
group_index = 0; si_interim_group_db={}; si_interim_group_str_db={}; ge_threshold_count=0; value_count = 0
for group_values in array_raw_group_values[probeset]:
"""gene_expression_value = math.pow(2,original_avg_const_exp_db[geneid][group_index])
###Check to see if gene expression is > threshold for both conditions
if gene_expression_value>gene_expression_threshold:ge_threshold_count+=1"""
value_index = 0; ratio_hash=[]; ratio_str_hash=[]
for value in group_values: ###Calculate normalized ratios for each condition and save raw values for later permutation
#exp_val = math.pow(2,value);ge_val = math.pow(2,avg_const_exp_db[geneid][value_count]) ###To calculate a ttest we need the raw constitutive expression values, these are not in group list form but are all in a single list so keep count.
exp_val = value;ge_val = avg_const_exp_db[geneid][value_count]
exp_ratio = exp_val-ge_val; ratio_hash.append(exp_ratio); ratio_str_hash.append(str(exp_ratio))
value_index +=1; value_count +=1
si_interim_group_db[group_index] = ratio_hash
si_interim_group_str_db[group_index] = ratio_str_hash
group_index+=1
group1_ratios = si_interim_group_db[0]; group2_ratios = si_interim_group_db[1]
group1_mean_ratio = statistics.avg(group1_ratios); group2_mean_ratio = statistics.avg(group2_ratios)
if export_NI_values == 'yes':
try: er = ed.ExonID()
except Exception: er = 'NA'
ev = string.join([geneid+'\t'+er+'\t'+probeset]+si_interim_group_str_db[0]+si_interim_group_str_db[1],'\t')+'\n'; data.write(ev)
#if ((math.log(group1_mean_ratio,2))*(math.log(group2_mean_ratio,2)))<0: opposite_SI_log_mean = 'yes'
if (group1_mean_ratio*group2_mean_ratio)<0: opposite_SI_log_mean = 'yes'
else: opposite_SI_log_mean = 'no'
try:
if calculate_normIntensity_p == 'yes':
try:
normIntensityP = statistics.runComparisonStatistic(group1_ratios,group2_ratios,probability_statistic)
except Exception: normIntensityP = 'NA' ### Occurs when analyzing two groups with no variance
else: normIntensityP = 'NA' ### Set to an always significant value
if normIntensityP == 1: normIntensityP = 'NA'
splicing_index = group1_mean_ratio-group2_mean_ratio; abs_splicing_index = abs(splicing_index)
#if probeset == '3061323': print abs_splicing_index,normIntensityP,ed.ExonID(),group1_mean_ratio,group2_mean_ratio,math.log(group1_mean_ratio,2),math.log(group2_mean_ratio,2),((math.log(group1_mean_ratio,2))*(math.log(group2_mean_ratio,2))),opposite_SI_log_mean; kill
if probeset in midas_db:
try: midas_p = float(midas_db[probeset])
except ValueError:
midas_p = 0
#if abs_splicing_index>1 and normIntensityP < 0.05: print probeset,normIntensityP, abs_splicing_index;kill
else: midas_p = 0
#print ed.GeneID(),ed.ExonID(),probeset,splicing_index,normIntensityP,midas_p,group1_ratios,group2_ratios
if abs_splicing_index>alt_exon_logfold_cutoff and (normIntensityP < p_threshold or normIntensityP == 'NA' or normIntensityP == 1) and midas_p < p_threshold:
exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
constit_exp1 = original_avg_const_exp_db[geneid][0]
constit_exp2 = original_avg_const_exp_db[geneid][1]
ge_fold=constit_exp2-constit_exp1
### Re-define all of the pairwise values now that the two Splicing-Index groups to report have been determined
data_list1 = array_raw_group_values[probeset][0]; data_list2 = array_raw_group_values[probeset][1]
baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
except Exception: ttest_exp_p = 1
normInt1 = (baseline_exp-constit_exp1); normInt2 = (experimental_exp-constit_exp2); adj_fold = normInt2 - normInt1
ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, '')
sid = ExonData(splicing_index,probeset,critical_exon_list,geneid,group1_ratios,group2_ratios,normIntensityP,opposite_SI_log_mean)
sid.setConstitutiveExpression(constit_exp1); sid.setConstitutiveFold(ge_fold); sid.setProbesetExpressionData(ped)
splicing_index_hash.append((splicing_index,sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(splicing_index,geneid,normIntensityP)
excluded_probeset_db[probeset] = eed
except Exception:
null = [] ###If this occurs, then most likely, the exon and constitutive probeset are the same
print 'Splicing Index analysis complete'
if export_NI_values == 'yes': data.close()
splicing_index_hash.sort(); splicing_index_hash.reverse()
print len(splicing_index_hash),id_name,"with evidence of Alternative expression"
p_value_call=''; permute_p_values = {}; summary_data_db['denominator_exp_events']=denominator_probesets
return splicing_index_hash,p_value_call,permute_p_values, excluded_probeset_db
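### Illustrative sketch (hypothetical helper, not used by analyzeSplicingIndex): computes the splicing-index
### score for one probeset from log2 exon and log2 constitutive (gene) intensities of two groups, mirroring
### SI = log2((e1/g1)/(e2/g2)) from the docstring above as the difference of the group means of the
### gene-normalized exon ratios.
def _example_splicing_index(exon_logs1, gene_logs1, exon_logs2, gene_logs2):
    ratios1 = [e - g for e, g in zip(exon_logs1, gene_logs1)] ### log2(exon/gene) per sample, group 1
    ratios2 = [e - g for e, g in zip(exon_logs2, gene_logs2)] ### log2(exon/gene) per sample, group 2
    mean1 = sum(ratios1)/len(ratios1); mean2 = sum(ratios2)/len(ratios2)
    return mean1 - mean2 ### positive values indicate a relatively higher gene-normalized exon signal in group 1
### Example usage: _example_splicing_index([8.0,8.2],[7.0,7.1],[7.1,7.0],[7.0,7.0]) returns ~1.0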
def importResiduals(filename,probe_probeset_db):
fn=filepath(filename); key_db = {}; x=0; prior_uid = ''; uid_gene_db={}
for line in open(fn,'rU').xreadlines():
if x == 0 and line[0] == '#': null=[]
elif x == 0: x+=1
else:
data = cleanUpLine(line)
t = string.split(data,'\t')
uid = t[0]; uid,probe = string.split(uid,'-')
try:
probeset = probe_probeset_db[probe]; residuals = t[1:]
if uid == prior_uid:
try: uid_gene_db[probeset].append(residuals) ### Don't need to keep track of the probe ID
except KeyError: uid_gene_db[probeset] = [residuals]
else: ### Hence, we have finished storing all residual data for that gene
if len(uid_gene_db)>0: calculateFIRMAScores(uid_gene_db); uid_gene_db={}
try: uid_gene_db[probeset].append(residuals) ### Don't need to keep track of the probe ID
except KeyError: uid_gene_db[probeset] = [residuals]
prior_uid = uid
except Exception: null=[]
### For the last gene imported
if len(uid_gene_db)>0: calculateFIRMAScores(uid_gene_db)
def calculateFIRMAScores(uid_gene_db):
probeset_residuals={}; all_gene_residuals=[]; total_probes=0
for probeset in uid_gene_db:
residuals_list = uid_gene_db[probeset]; sample_db={}; total_probes+=len(residuals_list)
### For all probes in a probeset, calculate the median residual for each sample
for residuals in residuals_list:
index=0
for residual in residuals:
try: sample_db[index].append(float(residual))
except KeyError: sample_db[index] = [float(residual)]
all_gene_residuals.append(float(residual))
index+=1
for index in sample_db:
median_residual = statistics.median(sample_db[index])
sample_db[index] = median_residual
probeset_residuals[probeset] = sample_db
### Calculate the Median absolute deviation
"""http://en.wikipedia.org/wiki/Absolute_deviation
The median absolute deviation (also MAD) is the median of the absolute deviations from the median. It is a robust estimator of dispersion.
For the example {2, 2, 3, 4, 14}: 3 is the median, so the absolute deviations from the median are {1, 1, 0, 1, 11} (or reordered as
{0, 1, 1, 1, 11}) with a median absolute deviation of 1, in this case unaffected by the value of the outlier 14.
Here, the global gene median will be expressed as res_gene_median.
"""
res_gene_median = statistics.median(all_gene_residuals); subtracted_residuals=[]
for residual in all_gene_residuals: subtracted_residuals.append(abs(res_gene_median-residual))
gene_MAD = statistics.median(subtracted_residuals)
#if '3263614' in probeset_residuals: print len(all_gene_residuals),all_gene_residuals
for probeset in probeset_residuals:
sample_db = probeset_residuals[probeset]
for index in sample_db:
median_residual = sample_db[index]
try:
firma_score = median_residual/gene_MAD
sample_db[index] = firma_score
except Exception: null=[]
#if probeset == '3263614': print index, median_residual, firma_score, gene_MAD
firma_scores[probeset] = sample_db
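### Illustrative sketch (hypothetical helper, not called by calculateFIRMAScores): the FIRMA score for one
### probeset in one sample is the median of that probeset's probe residuals for the sample divided by the
### gene-level median absolute deviation (MAD) of all residuals, as described in the docstring above.
def _example_firma_score(probeset_sample_residuals, all_gene_residuals):
    def _median(values):
        values = sorted(values); n = len(values)
        if n % 2: return values[n/2]
        return (values[n/2 - 1] + values[n/2]) / 2.0
    gene_median = _median(all_gene_residuals)
    gene_MAD = _median([abs(gene_median - r) for r in all_gene_residuals]) ### median absolute deviation from the median
    return _median(probeset_sample_residuals) / float(gene_MAD)
### Example usage: with all_gene_residuals = [2,2,3,4,14] the MAD is 1 (as in the docstring), so a
### probeset/sample median residual of 3 gives a FIRMA score of 3.0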
def importProbeToProbesets(fold_dbase):
#print "Importing probe-to-probeset annotations (please be patient)..."
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_probeset-probes.txt'
probeset_to_include={}
gene2examine={}
### Although we want to restrict the analysis to probesets in fold_dbase, we don't want to affect the FIRMA model - filter later
for probeset in fold_dbase:
try: ed = exon_db[probeset]; gene2examine[ed.GeneID()]=[]
except Exception: null=[]
for gene in original_avg_const_exp_db: gene2examine[gene]=[]
for probeset in exon_db:
ed = exon_db[probeset]; geneid = ed.GeneID()
if geneid in gene2examine:
gene2examine[geneid].append(probeset) ### Store these so we can break things up
probeset_to_include[probeset]=[]
probeset_probe_db = importGenericFilteredDBList(filename,probeset_to_include)
### Get the residuals filename and verify its presence
#print "Importing comparison residuals..."
filename_objects = string.split(dataset_name[:-1],'.p'); filename = filename_objects[0]+'.txt'
if len(array_group_list)==2:
filename = import_dir = root_dir+'AltExpression/FIRMA/residuals/'+array_type+'/'+species+'/'+filename
else: filename = import_dir = root_dir+'AltExpression/FIRMA/FullDatasets/'+array_type+'/'+species+'/'+filename
status = verifyFile(filename)
if status != 'found':
print_out = 'The residual file: '; print_out+= filename
print_out+= ' was not found in the default location.\nPlease re-run the analysis from the beginning.'
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out
print traceback.format_exc(); badExit()
print "Calculating FIRMA scores..."
input_count = len(gene2examine) ### Number of genes whose probesets will be examined
original_increment = int(input_count/20); increment = original_increment
start_time = time.time(); x=0
probe_probeset_db={}; gene_count=0; total_gene_count = 0; max_gene_count=3000; round = 1
for gene in gene2examine:
gene_count+=1; total_gene_count+=1; x+=1
#if x == increment: increment+=original_increment; print '*',
for probeset in gene2examine[gene]:
for probe in probeset_probe_db[probeset]: probe_probeset_db[probe] = probeset
if gene_count == max_gene_count:
### Import residuals and calculate primary sample/probeset FIRMA scores
importResiduals(filename,probe_probeset_db)
#print max_gene_count*round,"genes"
print '*',
gene_count=0; probe_probeset_db={}; round+=1 ### Reset these variables and re-run
probeset_probe_db={}
### Analyze residuals for the remaining probesets (< max_gene_count)
importResiduals(filename,probe_probeset_db)
end_time = time.time(); time_diff = int(end_time-start_time)
print "FIRMA scores calculted for",total_gene_count, "genes in %d seconds" % time_diff
def FIRMAanalysis(fold_dbase):
"""The FIRMA method calculates a score for each probeset and for each samples within a group of arrays, independent
of group membership. However, in AltAnalyze, these analyses are performed dependent on group. The FIRMA score is calculated
by obtaining residual values (residuals is a variable for each probe that can't be explained by the GC content or intensity
of that probe) from APT, for all probes corresponding to a metaprobeset (Ensembl gene in AltAnalyze). These probe residuals
are imported and the ratio of the median residual per probeset per sample divided by the absolute standard deviation of the
median of all probes for all samples for that gene."""
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db)>0:
temp_db={}
for probeset in fold_dbase: temp_db[probeset]=[]
for probeset in temp_db:
try: filtered_probeset_db[probeset]
except KeyError: del fold_dbase[probeset]
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing annotation)
if filter_for_AS == 'yes':
proceed = 0
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try: del fold_dbase[probeset]
except KeyError: null=[]
#print 'Beginning FIRMA analysis (please be patient)...'
### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
sample_names_ordered = [] ### note: Can't use original_array_names since the order is potentially different (FIRMA stores sample data as indices within dictionary keys)
for group_name in array_group_list: ### THIS LIST IS USED TO MAINTAIN CONSISTENT GROUP ORDERING DURING ANALYSIS
for sample_name in array_group_name_db[group_name]: sample_names_ordered.append(sample_name)
summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
data = export.ExportFile(summary_output)
title = string.join(['gene-probesets']+sample_names_ordered,'\t')+'\n'; data.write(title)
### Import probes for probesets to be analyzed
global firma_scores; firma_scores = {}
importProbeToProbesets(fold_dbase)
print 'FIRMA scores obtained for',len(firma_scores),'probesets.'
### Group sample scores for each probeset and calculate statistics
firma_hash=[]; excluded_probeset_db={}; denominator_probesets=0; interaction = 0
original_increment = int(len(firma_scores)/20); increment = original_increment
for probeset in firma_scores:
if probeset in fold_dbase: ### Filter based on expression
ed = exon_db[probeset]; geneid = ed.GeneID()
if interaction == increment: increment+=original_increment; print '*',
interaction +=1; denominator_probesets+=1
sample_db = firma_scores[probeset]
###Use the index values from performExpressionAnalysis to assign each expression value to a new database
firma_group_array = {}
for group_name in array_group_db:
for array_index in array_group_db[group_name]:
firma_score = sample_db[array_index]
try: firma_group_array[group_name].append(firma_score)
except KeyError: firma_group_array[group_name] = [firma_score]
###array_group_list should already be unique and correctly sorted (see above)
firma_lists=[]; index=0
for group_name in array_group_list:
firma_list = firma_group_array[group_name]
if len(array_group_list)>2: firma_list = statistics.avg(firma_list), firma_list, index
firma_lists.append(firma_list); index+=1
if export_NI_values == 'yes': ### DO THIS HERE SINCE firma_lists IS SORTED BELOW!!!!
try: er = ed.ExonID()
except Exception: er = 'NA'
export_list = [geneid+'\t'+er+'\t'+probeset]; export_list2=[]
for firma_ls in firma_lists:
if len(array_group_list)>2: firma_ls = firma_ls[1] ### See above modification of firma_list object for multiple group analysis
export_list+=firma_ls
for i in export_list: export_list2.append(str(i))
ev = string.join(export_list2,'\t')+'\n'; data.write(ev)
if len(array_group_list)==2:
firma_list1 = firma_lists[0]; firma_list2 = firma_lists[-1]; firma_avg1 = statistics.avg(firma_list1); firma_avg2 = statistics.avg(firma_list2)
index1=0; index2=1 ### Only two groups, thus only two indices
else: ### The below code deals with identifying the comparisons which yield the greatest FIRMA difference
firma_lists.sort(); index1=firma_lists[0][-1]; index2 = firma_lists[-1][-1]
firma_list1 = firma_lists[0][1]; firma_list2 = firma_lists[-1][1]; firma_avg1 = firma_lists[0][0]; firma_avg2 = firma_lists[-1][0]
if calculate_normIntensity_p == 'yes':
try:
normIntensityP = statistics.runComparisonStatistic(firma_list1,firma_list2,probability_statistic)
except Exception: normIntensityP = 'NA' ### Occurs when analyzing two groups with no variance
else: normIntensityP = 'NA'
if normIntensityP == 1: normIntensityP = 'NA'
firma_fold_change = firma_avg2 - firma_avg1
firma_fold_change = -1*firma_fold_change ### Make this equivalent to Splicing Index fold which is also relative to experimental not control
if (firma_avg2*firma_avg1)<0: opposite_FIRMA_scores = 'yes'
else: opposite_FIRMA_scores = 'no'
if probeset in midas_db:
try: midas_p = float(midas_db[probeset])
except ValueError: midas_p = 0
else: midas_p = 0
#if probeset == '3263614': print firma_fold_change, normIntensityP, midas_p,'\n',firma_list1, firma_list2, [p_threshold];kill
if abs(firma_fold_change)>alt_exon_logfold_cutoff and (normIntensityP < p_threshold or normIntensityP == 'NA') and midas_p < p_threshold:
exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
#gene_expression_values = original_avg_const_exp_db[geneid]
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
### Re-define all of the pairwise values now that the two FIRMA groups to report have been determined
data_list1 = array_raw_group_values[probeset][index1]; data_list2 = array_raw_group_values[probeset][index2]
baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
group_name1 = array_group_list[index1]; group_name2 = array_group_list[index2]
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
except Exception: ttest_exp_p = 1
normInt1 = (baseline_exp-constit_exp1); normInt2 = (experimental_exp-constit_exp2); adj_fold = normInt2 - normInt1
ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
fid = ExonData(firma_fold_change,probeset,critical_exon_list,geneid,data_list1,data_list2,normIntensityP,opposite_FIRMA_scores)
fid.setConstitutiveExpression(constit_exp1); fid.setConstitutiveFold(ge_fold); fid.setProbesetExpressionData(ped)
firma_hash.append((firma_fold_change,fid))
#print [[[probeset,firma_fold_change,normIntensityP,p_threshold]]]
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(firma_fold_change,geneid,normIntensityP)
excluded_probeset_db[probeset] = eed
print 'FIRMA analysis complete'
if export_NI_values == 'yes': data.close()
firma_hash.sort(); firma_hash.reverse()
print len(firma_hash),"Probesets with evidence of Alternative expression out of",len(excluded_probeset_db)+len(firma_hash)
p_value_call=''; permute_p_values = {}; summary_data_db['denominator_exp_events']=denominator_probesets
return firma_hash,p_value_call,permute_p_values, excluded_probeset_db
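### Illustrative sketch (hypothetical helper, not used by FIRMAanalysis): with more than two groups, the
### code above reports the comparison between the groups with the lowest and highest mean FIRMA score;
### this mirrors that selection for a simple {group_name: list of FIRMA scores} dictionary.
def _example_most_divergent_groups(group_scores):
    ranked = []
    for group_name in group_scores:
        scores = group_scores[group_name]
        ranked.append((sum(scores)/float(len(scores)), group_name)) ### (mean FIRMA score, group name)
    ranked.sort()
    low_group = ranked[0][1]; high_group = ranked[-1][1]
    return low_group, high_group, ranked[-1][0] - ranked[0][0] ### the two most divergent groups and their mean difference
### Example usage: _example_most_divergent_groups({'a':[0.1,0.2],'b':[1.4,1.6],'c':[0.6,0.5]}) returns ('a','b',1.35)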
def getFilteredFilename(filename):
if array_type == 'junction':
filename = string.replace(filename,'.txt','-filtered.txt')
return filename
def getExonVersionFilename(filename):
original_filename = filename
if array_type == 'junction' or array_type == 'RNASeq':
if explicit_data_type != 'null':
filename = string.replace(filename,array_type,array_type+'/'+explicit_data_type)
### Make sure the file exists, otherwise, use the original
file_status = verifyFile(filename)
#print [[filename,file_status]]
if file_status != 'found': filename = original_filename
return filename
def importProbesetAligningDomains(exon_db,report_type):
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_domain_aligning_probesets.txt'
filename=getFilteredFilename(filename)
probeset_aligning_db = importGenericDBList(filename)
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_indirect_domain_aligning_probesets.txt'
filename=getFilteredFilename(filename)
probeset_indirect_aligning_db = importGenericDBList(filename)
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
new_exon_db={}; splicing_call_db={}
for probeset_pair in exon_db:
### For junction analyses exon_db is really regulated_exon_junction_db, containing the inclusion,exclusion probeset tuple and an object as values
ed = exon_db[probeset_pair]; geneid = ed.GeneID(); critical_exons = ed.CriticalExons()
for exon in critical_exons:
new_key = geneid+':'+exon
try: new_exon_db[new_key].append(probeset_pair)
except KeyError: new_exon_db[new_key] = [probeset_pair]
try: splicing_call_db[new_key].append(ed.SplicingCall())
except KeyError: splicing_call_db[new_key] = [ed.SplicingCall()]
for key in new_exon_db:
probeset_pairs = new_exon_db[key]; probeset_pair = probeset_pairs[0] ### grab one of the probeset pairs
ed = exon_db[probeset_pair]; geneid = ed.GeneID()
jd = SimpleJunctionData(geneid,'','','',probeset_pairs) ### use only those necessary fields for this function (probeset pairs will be called as CriticalExons)
splicing_call_db[key].sort(); splicing_call = splicing_call_db[key][-1]; jd.setSplicingCall(splicing_call) ### Bug from 1.15 to have key be new_key?
new_exon_db[key] = jd
exon_db = new_exon_db
gene_protein_ft_db={};domain_gene_count_db={};protein_functional_attribute_db={}; probeset_aligning_db2={}
splicing_call_db=[]; new_exon_db=[] ### Clear memory
for probeset in exon_db:
#if probeset == '107650':
#if probeset in probeset_aligning_db: print probeset_aligning_db[probeset];kill
if probeset in probeset_aligning_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else: proceed = 'yes'
gene = exon_db[probeset].GeneID()
new_domain_list=[]; new_domain_list2=[]
if report_type == 'gene' and proceed == 'yes':
for domain in probeset_aligning_db[probeset]:
try: domain_gene_count_db[domain].append(gene)
except KeyError: domain_gene_count_db[domain] = [gene]
try: gene_protein_ft_db[gene].append(domain)
except KeyError: gene_protein_ft_db[gene]=[domain]
elif proceed == 'yes':
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
probeset_list = exon_db[probeset].CriticalExons()
else: probeset_list = [probeset]
for id in probeset_list:
for domain in probeset_aligning_db[probeset]:
new_domain_list.append('(direct)'+domain)
new_domain_list2.append((domain,'+'))
new_domain_list = unique.unique(new_domain_list)
new_domain_list_str = string.join(new_domain_list,', ')
gene_protein_ft_db[gene,id] = new_domain_list2
probeset_aligning_db2[id] = new_domain_list_str
#print exon_db['107650']
for probeset in exon_db:
if probeset in probeset_indirect_aligning_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else: proceed = 'yes'
gene = exon_db[probeset].GeneID()
new_domain_list=[]; new_domain_list2=[]
if report_type == 'gene' and proceed == 'yes':
for domain in probeset_indirect_aligning_db[probeset]:
try: domain_gene_count_db[domain].append(gene)
except KeyError: domain_gene_count_db[domain] = [gene]
try: gene_protein_ft_db[gene].append(domain)
except KeyError: gene_protein_ft_db[gene]=[domain]
elif proceed == 'yes':
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
probeset_list = exon_db[probeset].CriticalExons()
else: probeset_list = [probeset]
for id in probeset_list:
for domain in probeset_indirect_aligning_db[probeset]:
new_domain_list.append('(indirect)'+domain)
new_domain_list2.append((domain,'-'))
new_domain_list = unique.unique(new_domain_list)
new_domain_list_str = string.join(new_domain_list,', ')
gene_protein_ft_db[gene,id] = new_domain_list2
probeset_aligning_db2[id] = new_domain_list_str
domain_gene_count_db = eliminate_redundant_dict_values(domain_gene_count_db)
gene_protein_ft_db = eliminate_redundant_dict_values(gene_protein_ft_db)
if analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
clearObjectsFromMemory(exon_db);exon_db=[]
try: clearObjectsFromMemory(new_exon_db)
except Exception: null=[]
probeset_indirect_aligning_db=[]; probeset_aligning_db=[]
if report_type == 'perfect_match':
gene_protein_ft_db=[];domain_gene_count_db=[];protein_functional_attribute_db=[]
return probeset_aligning_db2
elif report_type == 'probeset':
probeset_aligning_db2=[]
return gene_protein_ft_db,domain_gene_count_db,protein_functional_attribute_db
else:
        probeset_aligning_db2=[]; protein_functional_attribute_db=[]
len_gene_protein_ft_db = len(gene_protein_ft_db); gene_protein_ft_db=[]
return len_gene_protein_ft_db,domain_gene_count_db
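# Note on the return modes of the function above: report_type 'perfect_match' returns only
# the probeset -> domain-string lookup (probeset_aligning_db2); 'probeset' returns the
# per-(gene, probeset) feature database, the domain-to-gene count database and the protein
# functional attribute database; any other report_type returns just the number of genes
# with features plus the domain-to-gene count database.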
def importProbesetProteinCompDomains(exon_db,report_type,comp_type):
filename = 'AltDatabase/'+species+'/'+array_type+'/probeset-domain-annotations-'+comp_type+'.txt'
if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type != 'null': filename=getFilteredFilename(filename)
filename=getExonVersionFilename(filename)
probeset_aligning_db = importGeneric(filename)
filename = 'AltDatabase/'+species+'/'+array_type+'/probeset-protein-annotations-'+comp_type+'.txt'
if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type != 'null': filename=getFilteredFilename(filename)
filename=getExonVersionFilename(filename)
gene_protein_ft_db={};domain_gene_count_db={}
for probeset in exon_db:
initial_proceed = 'no'; original_probeset = probeset
if probeset in probeset_aligning_db: initial_proceed = 'yes'
elif array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
### For junction analyses exon_db is really regulated_exon_junction_db, containing the inclusion,exclusion probeset tuple and an object as values
if '|' in probeset[0]: probeset1 = string.split(probeset[0],'|')[0]; probeset = probeset1,probeset[1]
try: alternate_probeset_id = exon_db[probeset].InclusionLookup(); probeset = alternate_probeset_id,probeset[1]
except Exception: null=[]
probeset_joined = string.join(probeset,'|')
#print [probeset_joined],[probeset]
if probeset_joined in probeset_aligning_db: initial_proceed = 'yes'; probeset = probeset_joined
elif probeset[0] in probeset_aligning_db: initial_proceed = 'yes'; probeset = probeset[0]
elif probeset[1] in probeset_aligning_db: initial_proceed = 'yes'; probeset = probeset[1]
#else: for i in probeset_aligning_db: print [i];kill
if initial_proceed == 'yes':
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[original_probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else: proceed = 'yes'
new_domain_list = []
gene = exon_db[original_probeset].GeneID()
if report_type == 'gene' and proceed == 'yes':
for domain_data in probeset_aligning_db[probeset]:
try:
domain,call = string.split(domain_data,'|')
except Exception:
values = string.split(domain_data,'|')
domain = values[0]; call = values[-1] ### occurs when a | exists in the annotations from UniProt
try: domain_gene_count_db[domain].append(gene)
except KeyError: domain_gene_count_db[domain] = [gene]
try: gene_protein_ft_db[gene].append(domain)
except KeyError: gene_protein_ft_db[gene]=[domain]
elif proceed == 'yes':
for domain_data in probeset_aligning_db[probeset]:
domain,call = string.split(domain_data,'|')
new_domain_list.append((domain,call))
#new_domain_list = string.join(new_domain_list,', ')
gene_protein_ft_db[gene,original_probeset] = new_domain_list
domain_gene_count_db = eliminate_redundant_dict_values(domain_gene_count_db)
probeset_aligning_db=[] ### Clear memory
probeset_aligning_protein_db = importGeneric(filename)
probeset_pairs={} ### Store all possible probeset pairs as single probesets for protein-protein associations
for probeset in exon_db:
if len(probeset)==2:
for p in probeset: probeset_pairs[p] = probeset
if report_type == 'probeset':
### Below code was re-written to be more memory efficient by not storing all data in probeset-domain-annotations-*comp*.txt via generic import
protein_functional_attribute_db={}; probeset_protein_associations={}; protein_db={}
for probeset in exon_db:
initial_proceed = 'no'; original_probeset = probeset
if probeset in probeset_aligning_protein_db: initial_proceed = 'yes'
elif array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if '|' in probeset[0]: probeset1 = string.split(probeset[0],'|')[0]; probeset = probeset1,probeset[1]
try: alternate_probeset_id = exon_db[probeset].InclusionLookup(); probeset = alternate_probeset_id,probeset[1]
except Exception: null=[]
probeset_joined = string.join(probeset,'|')
#print [probeset_joined],[probeset]
if probeset_joined in probeset_aligning_protein_db: initial_proceed = 'yes'; probeset = probeset_joined
elif probeset[0] in probeset_aligning_protein_db: initial_proceed = 'yes'; probeset = probeset[0]
elif probeset[1] in probeset_aligning_protein_db: initial_proceed = 'yes'; probeset = probeset[1]
#else: for i in probeset_aligning_db: print [i];kill
if initial_proceed == 'yes':
protein_data_list=probeset_aligning_protein_db[probeset]
new_protein_list = []
gene = exon_db[original_probeset].GeneID()
for protein_data in protein_data_list:
protein_info,call = string.split(protein_data,'|')
if 'AA:' in protein_info:
protein_info_r = string.replace(protein_info,')','*')
protein_info_r = string.replace(protein_info_r,'(','*')
protein_info_r = string.split(protein_info_r,'*')
null_protein = protein_info_r[1]; hit_protein = protein_info_r[3]
probeset_protein_associations[original_probeset] = null_protein,hit_protein,call
protein_db[null_protein] = []; protein_db[hit_protein] = []
new_protein_list.append((protein_info,call))
#new_protein_list = string.join(new_domain_list,', ')
protein_functional_attribute_db[gene,original_probeset] = new_protein_list
filename = 'AltDatabase/'+species+'/'+array_type+'/SEQUENCE-protein-dbase_'+comp_type+'.txt'
filename=getExonVersionFilename(filename)
protein_seq_db = importGenericFiltered(filename,protein_db)
for key in protein_functional_attribute_db:
gene,probeset = key
try:
null_protein,hit_protein,call = probeset_protein_associations[probeset]
null_seq = protein_seq_db[null_protein][0]; hit_seq = protein_seq_db[hit_protein][0]
seq_attr = 'sequence: ' +'('+null_protein+')'+null_seq +' -> '+'('+hit_protein+')'+hit_seq
protein_functional_attribute_db[key].append((seq_attr,call))
except KeyError: null=[]
protein_seq_db=[]; probeset_aligning_protein_db=[]
return gene_protein_ft_db,domain_gene_count_db,protein_functional_attribute_db
else:
probeset_aligning_protein_db=[]; len_gene_protein_ft_db = len(gene_protein_ft_db); gene_protein_ft_db=[]
return len_gene_protein_ft_db,domain_gene_count_db
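# Note on the annotation format parsed above: each entry in the probeset domain/protein
# annotation files is expected to be a 'feature|call' string (for instance a hypothetical
# 'Pkinase|+'); when the feature text itself contains a '|' (which can occur with UniProt
# annotations), the last '|'-delimited field is treated as the call and the first as the
# feature name.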
class SimpleJunctionData:
def __init__(self, geneid, probeset1, probeset2, probeset1_display, critical_exon_list):
self._geneid = geneid; self._probeset1 = probeset1; self._probeset2 = probeset2
self._probeset1_display = probeset1_display; self._critical_exon_list = critical_exon_list
def GeneID(self): return self._geneid
def Probeset1(self): return self._probeset1
def Probeset2(self): return self._probeset2
def InclusionDisplay(self): return self._probeset1_display
def CriticalExons(self): return self._critical_exon_list
def setSplicingCall(self,splicing_call):
#self._splicing_call = EvidenceOfAltSplicing(slicing_annot)
self._splicing_call = splicing_call
def setSymbol(self,symbol): self.symbol = symbol
def Symbol(self): return self.symbol
def SplicingCall(self): return self._splicing_call
def setInclusionLookup(self,incl_junction_probeset): self.incl_junction_probeset = incl_junction_probeset
def InclusionLookup(self): return self.incl_junction_probeset
def formatJunctionData(probesets,affygene,critical_exon_list):
if '|' in probesets[0]: ### Only return the first inclusion probeset (agglomerated probesets)
incl_list = string.split(probesets[0],'|')
incl_probeset = incl_list[0]; excl_probeset = probesets[1]
else: incl_probeset = probesets[0]; excl_probeset = probesets[1]
jd = SimpleJunctionData(affygene,incl_probeset,excl_probeset,probesets[0],critical_exon_list)
key = incl_probeset,excl_probeset
return key,jd
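# Illustrative sketch (hypothetical IDs, not part of the pipeline): for agglomerated
# inclusion probesets, formatJunctionData(('PSA|PSB','PSC'),'GENE1',['E2.1']) returns the
# key ('PSA','PSC') together with a SimpleJunctionData object whose InclusionDisplay() is
# still 'PSA|PSB' and whose CriticalExons() is ['E2.1'].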
class JunctionExpressionData:
def __init__(self, baseline_norm_exp, exper_norm_exp, pval, ped):
self.baseline_norm_exp = baseline_norm_exp; self.exper_norm_exp = exper_norm_exp; self.pval = pval; self.ped = ped
def ConNI(self):
ls=[]
for i in self.logConNI():
ls.append(math.pow(2,i))
return ls
def ExpNI(self):
ls=[]
for i in self.logExpNI():
ls.append(math.pow(2,i))
return ls
def ConNIAvg(self): return math.pow(2,statistics.avg(self.logConNI()))
def ExpNIAvg(self): return math.pow(2,statistics.avg(self.logExpNI()))
def logConNI(self): return self.baseline_norm_exp
def logExpNI(self): return self.exper_norm_exp
def Pval(self): return self.pval
def ProbesetExprData(self): return self.ped
    def __repr__(self): return str(self.ConNI())+'|'+str(self.ExpNI())
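# Note: logConNI()/logExpNI() hold the per-sample log2 normalized-intensity ratios, while
# ConNI()/ExpNI() return their linear-scale equivalents (2**value for each sample) and the
# *Avg() methods exponentiate the mean of the log values. For example, log ratios [1.0, 3.0]
# correspond to ConNI() of [2.0, 8.0] and a ConNIAvg() of 2**2.0 = 4.0.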
def calculateAllASPIREScores(p1,p2):
b1o = p1.ConNIAvg(); b2o = p2.ConNIAvg()
e1o = p1.ExpNIAvg(); e2o = p2.ExpNIAvg(); original_score = statistics.aspire_stringent(b1o,e1o,b2o,e2o)
index=0; baseline_scores=[] ### Loop through each control ratio and compare to control ratio mean
for b1 in p1.ConNI():
b2 = p2.ConNI()[index]
score = statistics.aspire_stringent(b2,e2o,b1,e1o); index+=1
baseline_scores.append(score)
index=0; exp_scores=[] ### Loop through each experimental ratio and compare to control ratio mean
for e1 in p1.ExpNI():
e2 = p2.ExpNI()[index]
score = statistics.aspire_stringent(b1o,e1,b2o,e2); index+=1
exp_scores.append(score)
try:
aspireP = statistics.runComparisonStatistic(baseline_scores,exp_scores,probability_statistic)
except Exception: aspireP = 'NA' ### Occurs when analyzing two groups with no variance
if aspireP == 1: aspireP = 'NA'
"""
if aspireP<0.05 and oscore>0.2 and statistics.avg(exp_scores)<0:
index=0
for e1 in p1.ExpNI():
e2 = p2.ExpNI()[index]
score = statistics.aspire_stringent(b1,e1,b2,e2)
print p1.ExpNI(), p2.ExpNI(); print e1, e2
print e1o,e2o; print b1, b2; print score, original_score
print exp_scores, statistics.avg(exp_scores); kill"""
return baseline_scores, exp_scores, aspireP
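# Sketch of the replicate-level strategy above: the ASPIRE score is recomputed once per
# control replicate (each control normalized-intensity ratio combined with the experimental
# group means) and once per experimental replicate (combined with the control group means),
# and the two resulting score distributions are compared with the configured
# probability_statistic to yield aspireP; an aspireP of 'NA' means the comparison could not
# be computed (e.g. no variance within a group).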
def stringListConvert(ls):
ls2=[]
for i in ls: ls2.append(str(i))
return ls2
def analyzeJunctionSplicing(nonlog_NI_db):
group_sizes = []; original_array_indices = permute_lists[0] ###p[0] is the original organization of the group samples prior to permutation
for group in original_array_indices: group_sizes.append(len(group))
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db)>0:
temp_db={}
for probeset in nonlog_NI_db: temp_db[probeset]=[]
for probeset in temp_db:
try: filtered_probeset_db[probeset]
except KeyError: del nonlog_NI_db[probeset]
    ### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
global NIdata_export
summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
NIdata_export = export.ExportFile(summary_output)
title = string.join(['inclusion-probeset','exclusion-probeset']+original_array_names,'\t')+'\n'; NIdata_export.write(title)
### Calculate a probeset p-value adjusted for constitutive expression levels (taken from splicing index method)
xl=0
probeset_normIntensity_db={}
for probeset in array_raw_group_values:
ed = exon_db[probeset]; geneid = ed.GeneID(); xl+=1
#if geneid in alt_junction_db and geneid in original_avg_const_exp_db: ### Don't want this filter since it causes problems for Trans-splicing
group_index = 0; si_interim_group_db={}; ge_threshold_count=0; value_count = 0
        ### Prepare normalized expression lists for reciprocal-junction algorithms
if geneid in avg_const_exp_db:
for group_values in array_raw_group_values[probeset]:
value_index = 0; ratio_hash=[]
                for value in group_values: ###Calculate normalized ratios for each condition and save raw values for later permutation
exp_val = value;ge_val = avg_const_exp_db[geneid][value_count]; exp_ratio = exp_val-ge_val
ratio_hash.append(exp_ratio); value_index +=1; value_count +=1
si_interim_group_db[group_index] = ratio_hash
group_index+=1
group1_ratios = si_interim_group_db[0]; group2_ratios = si_interim_group_db[1]
### Calculate and store simple expression summary stats
data_list1 = array_raw_group_values[probeset][0]; data_list2 = array_raw_group_values[probeset][1]
baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
#group_name1 = array_group_list[0]; group_name2 = array_group_list[1]
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
except Exception: ttest_exp_p = 'NA'
if ttest_exp_p == 1: ttest_exp_p = 'NA'
adj_fold = statistics.avg(group2_ratios) - statistics.avg(group1_ratios)
ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, '')
try:
try:
normIntensityP = statistics.runComparisonStatistic(group1_ratios,group2_ratios,probability_statistic)
except Exception:
#print group1_ratios,group2_ratios,array_raw_group_values[probeset],avg_const_exp_db[geneid];kill
normIntensityP = 'NA' ###occurs for constitutive probesets
except Exception: normIntensityP = 0
if normIntensityP == 1: normIntensityP = 'NA'
ji = JunctionExpressionData(group1_ratios, group2_ratios, normIntensityP, ped)
probeset_normIntensity_db[probeset]=ji ### store and access this below
#if probeset == 'G6899622@J916374@j_at': print normIntensityP,group1_ratios,group2_ratios;kill
###Concatenate the two raw expression groups into a single list for permutation analysis
ls_concatenated = []
for group in array_raw_group_values[probeset]:
for entry in group: ls_concatenated.append(entry)
if analysis_method == 'linearregres': ###Convert out of log space
ls_concatenated = statistics.log_fold_conversion_fraction(ls_concatenated)
array_raw_group_values[probeset] = ls_concatenated
s = 0; t = 0; y = ''; denominator_events=0; excluded_probeset_db = {}
splice_event_list=[]; splice_event_list_mx=[]; splice_event_list_non_mx=[]; event_mx_temp = []; permute_p_values={} #use this to exclude duplicate mx events
for affygene in alt_junction_db:
if affygene in original_avg_const_exp_db:
constit_exp1 = original_avg_const_exp_db[affygene][0]
constit_exp2 = original_avg_const_exp_db[affygene][1]
ge_fold=constit_exp2-constit_exp1
for event in alt_junction_db[affygene]:
if array_type == 'AltMouse':
#event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
#critical_exon_db[affygene,tuple(critical_exons)] = [1,'E'+str(e1a),'E'+str(e2b)] --- affygene,tuple(event) == key, 1 indicates both are either up or down together
event_call = event[0][0] + '-' + event[1][0]
exon_set1 = event[0][1]; exon_set2 = event[1][1]
probeset1 = exon_dbase[affygene,exon_set1]
probeset2 = exon_dbase[affygene,exon_set2]
critical_exon_list = critical_exon_db[affygene,tuple(event)]
if array_type == 'junction' or array_type == 'RNASeq':
event_call = 'ei-ex' ### Below objects from JunctionArrayEnsemblRules - class JunctionInformation
probeset1 = event.InclusionProbeset(); probeset2 = event.ExclusionProbeset()
exon_set1 = event.InclusionJunction(); exon_set2 = event.ExclusionJunction()
try: novel_event = event.NovelEvent()
except Exception: novel_event = 'known'
critical_exon_list = [1,event.CriticalExonSets()]
key,jd = formatJunctionData([probeset1,probeset2],affygene,critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try: jd.setSymbol(annotate_db[affygene].Symbol())
except Exception:null=[]
#if '|' in probeset1: print probeset1, key,jd.InclusionDisplay();kill
probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
#print probeset1,probeset2, critical_exon_list,event_call,exon_set1,exon_set2;kill
if probeset1 in nonlog_NI_db and probeset2 in nonlog_NI_db:
denominator_events+=1
try: p1 = probeset_normIntensity_db[probeset1]; p2 = probeset_normIntensity_db[probeset2]
except Exception:
print probeset1, probeset2
p1 = probeset_normIntensity_db[probeset1]
p2 = probeset_normIntensity_db[probeset2]
#if '|' in probeset1: print
pp1 = p1.Pval(); pp2 = p2.Pval()
baseline_ratio1 = p1.ConNIAvg()
experimental_ratio1 = p1.ExpNIAvg()
baseline_ratio2 = p2.ConNIAvg()
experimental_ratio2 = p2.ExpNIAvg()
ped1 = p1.ProbesetExprData()
ped2 = p2.ProbesetExprData()
Rin = ''; Rex = ''
                    r = 0 ###Variable used to determine if we should take the absolute value of dI for mutually exclusive events
if event_call == 'ei-ex': #means probeset1 is an exon inclusion and probeset2 is an exon exclusion
Rin = baseline_ratio1/experimental_ratio1 # Rin=A/C
                        Rex = baseline_ratio2/experimental_ratio2 # Rex=B/D
I1=baseline_ratio1/(baseline_ratio1+baseline_ratio2)
I2=experimental_ratio1/(experimental_ratio1+experimental_ratio2)
###When Rex is larger, the exp_ratio for exclusion is decreased in comparison to baseline.
###Thus, increased inclusion (when Rin is small, inclusion is big)
if (Rin>1 and Rex<1): y = 'downregulated'
elif (Rin<1 and Rex>1): y = 'upregulated'
elif (Rex<Rin): y = 'downregulated'
else: y = 'upregulated'
temp_list = []
if event_call == 'mx-mx':
temp_list.append(exon_set1); temp_list.append(exon_set2);temp_list.sort()
if (affygene,temp_list) not in event_mx_temp: #use this logic to prevent mx entries being added more than once
event_mx_temp.append((affygene,temp_list))
                            ###Arbitrarily choose which exon-set will be Rin or Rex; this doesn't matter for mutually exclusive events
                            Rin = baseline_ratio1/experimental_ratio1 # Rin=A/C
                            Rex = baseline_ratio2/experimental_ratio2 # Rex=B/D
I1=baseline_ratio1/(baseline_ratio1+baseline_ratio2)
I2=experimental_ratio1/(experimental_ratio1+experimental_ratio2)
y = 'mutually-exclusive'; r = 1
if analysis_method == 'ASPIRE' and Rex != '':
#if affygene == 'ENSMUSG00000000126': print Rin, Rex, probeset1, probeset2
if (Rin>1 and Rex<1) or (Rin<1 and Rex>1):
s +=1
in1=((Rex-1.0)*Rin)/(Rex-Rin); in2=(Rex-1.0)/(Rex-Rin)
                            dI = ((in2-in1)+(I2-I1))/2.0 #modified to give proper exon inclusion
dI = dI*(-1) ### Reverse the fold to make equivalent to splicing-index and FIRMA scores
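                            # Sketch of the quantities above: I1/I2 are the baseline and experimental inclusion
                            # fractions, in1/in2 are the corresponding ASPIRE model estimates derived from the
                            # reciprocal ratios Rin and Rex, and dI is their averaged difference, sign-flipped so
                            # that positive scores follow the splicing-index/FIRMA direction convention.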
try: baseline_scores, exp_scores, aspireP = calculateAllASPIREScores(p1,p2)
except Exception: baseline_scores = [0]; exp_scores=[dI]; aspireP = 0
if export_NI_values == 'yes':
baseline_scores = stringListConvert(baseline_scores); exp_scores = stringListConvert(exp_scores)
ev = string.join([probeset1,probeset2]+baseline_scores+exp_scores,'\t')+'\n'; NIdata_export.write(ev)
if max_replicates >2 or equal_replicates==2:
permute_p_values[(probeset1,probeset2)] = [aspireP, 'NA', 'NA', 'NA']
if r == 1: dI = abs(dI) ###Occurs when event is mutually exclusive
#if abs(dI)>alt_exon_logfold_cutoff: print [dI],pp1,pp2,aspireP;kill
#print [affygene,dI,pp1,pp2,aspireP,event.CriticalExonSets(),probeset1,probeset2,alt_exon_logfold_cutoff,p_threshold]
if ((pp1<p_threshold or pp2<p_threshold) or pp1==1 or pp1=='NA') and abs(dI) > alt_exon_logfold_cutoff: ###Require that the splice event have a constitutive corrected p less than the user defined threshold
ejd = ExonJunctionData(dI,probeset1,probeset2,pp1,pp2,y,event_call,critical_exon_list,affygene,ped1,ped2)
"""if probeset1 == 'ENSMUSG00000033335:E16.1-E17.1' and probeset2 == 'ENSMUSG00000033335:E16.1-E19.1':
print [dI,pp1,pp2,p_threshold,alt_exon_logfold_cutoff]
print baseline_scores, exp_scores, [aspireP]#;sys.exit()"""
ejd.setConstitutiveExpression(constit_exp1); ejd.setConstitutiveFold(ge_fold)
if perform_permutation_analysis == 'yes': splice_event_list.append((dI,ejd))
elif aspireP < permute_p_threshold or aspireP=='NA': splice_event_list.append((dI,ejd))
#if abs(dI)>.2: print probeset1, probeset2, critical_exon_list, [exon_set1], [exon_set2]
#if dI>.2 and aspireP<0.05: print baseline_scores,exp_scores,aspireP, statistics.avg(exp_scores), dI
elif array_type == 'junction' or array_type == 'RNASeq':
excluded_probeset_db[affygene+':'+event.CriticalExonSets()[0]] = probeset1, affygene, dI, 'NA', aspireP
if array_type == 'RNASeq':
try: ejd.setNovelEvent(novel_event)
except Exception: None
if analysis_method == 'linearregres' and Rex != '':
s+=1
log_fold,linregressP,rsqrd_status = getLinearRegressionScores(probeset1,probeset2,group_sizes)
log_fold = log_fold ### Reverse the fold to make equivalent to splicing-index and FIRMA scores
if max_replicates >2 or equal_replicates==2: permute_p_values[(probeset1,probeset2)] = [linregressP, 'NA', 'NA', 'NA']
if rsqrd_status == 'proceed':
if ((pp1<p_threshold or pp2<p_threshold) or pp1==1 or pp1=='NA') and abs(log_fold) > alt_exon_logfold_cutoff: ###Require that the splice event have a constitutive corrected p less than the user defined threshold
ejd = ExonJunctionData(log_fold,probeset1,probeset2,pp1,pp2,y,event_call,critical_exon_list,affygene,ped1,ped2)
ejd.setConstitutiveExpression(constit_exp1); ejd.setConstitutiveFold(ge_fold)
if perform_permutation_analysis == 'yes': splice_event_list.append((log_fold,ejd))
elif linregressP < permute_p_threshold: splice_event_list.append((log_fold,ejd))
#if probeset1 == 'G6990053@762121_762232_at' and probeset2 == 'G6990053@J926254@j_at':
#print event_call, critical_exon_list,affygene, Rin, Rex, y, temp_list;kill
elif array_type == 'junction' or array_type == 'RNASeq':
excluded_probeset_db[affygene+':'+event.CriticalExonSets()[0]] = probeset1, affygene, log_fold, 'NA', linregressP
if array_type == 'RNASeq':
try: ejd.setNovelEvent(novel_event)
except Exception: None
else: t +=1
clearObjectsFromMemory(probeset_normIntensity_db)
probeset_normIntensity_db={}; ### Potentially large memory object containing summary stats for all probesets
statistics.adjustPermuteStats(permute_p_values)
summary_data_db['denominator_exp_events']=denominator_events
print "Number of exon-events analyzed:", s
print "Number of exon-events excluded:", t
return splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db
def maxReplicates():
replicates=0; greater_than_two=0; greater_than_one=0; group_sizes=[]
for probeset in array_raw_group_values:
for group_values in array_raw_group_values[probeset]:
try:
replicates+=len(group_values); group_sizes.append(len(group_values))
if len(group_values)>2: greater_than_two+=1
elif len(group_values)>1: greater_than_one+=1
except Exception: replicates+=len(array_raw_group_values[probeset]); break
break
group_sizes = unique.unique(group_sizes)
if len(group_sizes) == 1: equal_replicates = group_sizes[0]
else: equal_replicates = 0
max_replicates = replicates/float(original_conditions)
if max_replicates<2.01:
if greater_than_two>0 and greater_than_one>0: max_replicates=3
return max_replicates, equal_replicates
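# Worked example for maxReplicates() (assuming the module-level globals original_conditions
# and array_raw_group_values are set up for a standard two-group run): with two groups of
# three samples each, replicates sums to 6 for the first probeset, group_sizes reduces to
# [3], so equal_replicates is 3 and max_replicates is 6/float(original_conditions), i.e. 3.0
# when original_conditions is 2.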
def furtherProcessJunctionScores(splice_event_list, probeset_comp_db, permute_p_values):
splice_event_list.sort(); splice_event_list.reverse()
print "filtered %s scores:" % analysis_method, len(splice_event_list)
if perform_permutation_analysis == 'yes':
###*********BEGIN PERMUTATION ANALYSIS*********
if max_replicates >2 or equal_replicates==2:
splice_event_list, p_value_call, permute_p_values = permuteSplicingScores(splice_event_list)
else:
print "WARNING...Not enough replicates to perform permutation analysis."
p_value_call=''; permute_p_values = {}
else:
if max_replicates >2 or equal_replicates==2:
if probability_statistic == 'unpaired t-test':
p_value_call=analysis_method+'-OneWayAnova'
else:
p_value_call=analysis_method+'-'+probability_statistic
else:
if probability_statistic == 'unpaired t-test':
p_value_call='OneWayAnova'; permute_p_values = {}
else:
p_value_call=probability_statistic; permute_p_values = {}
print len(splice_event_list), 'alternative events after subsequent filtering (optional)'
    ### Get ExonJunction annotations
junction_splicing_annot_db = getJunctionSplicingAnnotations(probeset_comp_db)
regulated_exon_junction_db={}; new_splice_event_list=[]
if filter_for_AS == 'yes': print "Filtering for evidence of Alternative Splicing"
for (fold,ejd) in splice_event_list:
proceed = 'no'
if filter_for_AS == 'yes':
try:
ja = junction_splicing_annot_db[ejd.Probeset1(),ejd.Probeset2()]; splicing_call = ja.SplicingCall()
if splicing_call == 1: proceed = 'yes'
except KeyError: proceed = 'no'
else: proceed = 'yes'
if proceed == 'yes':
key,jd = formatJunctionData([ejd.Probeset1(),ejd.Probeset2()],ejd.GeneID(),ejd.CriticalExons())
regulated_exon_junction_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
new_splice_event_list.append((fold,ejd))
### Add junction probeset lookup for reciprocal junctions composed of an exonid (not in protein database currently)
if array_type == 'RNASeq' and '-' not in key[0]: ### Thus, it is an exon compared to a junction
events = alt_junction_db[ejd.GeneID()]
for ji in events:
if (ji.InclusionProbeset(),ji.ExclusionProbeset()) == key:
                        jd.setInclusionLookup(ji.InclusionLookup()) ### This is the source junction from which the exon ID comes
probeset_comp_db[ji.InclusionLookup(),ji.ExclusionProbeset()]=jd
#print ji.InclusionProbeset(),ji.ExclusionProbeset(),' ',ji.InclusionLookup()
if filter_for_AS == 'yes': print len(new_splice_event_list), "remaining after filtering for evidence of Alternative splicing"
filtered_exon_db = {}
for junctions in probeset_comp_db:
rj = probeset_comp_db[junctions] ### Add splicing annotations to the AltMouse junction DBs (needed for permutation analysis statistics and filtering)
try: ja = junction_splicing_annot_db[junctions]; splicing_call = ja.SplicingCall(); rj.setSplicingCall(ja.SplicingCall())
except KeyError: rj.setSplicingCall(0)
if filter_for_AS == 'yes': filtered_exon_db[junctions] = rj
for junctions in regulated_exon_junction_db:
rj = regulated_exon_junction_db[junctions]
try: ja = junction_splicing_annot_db[junctions]; rj.setSplicingCall(ja.SplicingCall())
except KeyError: rj.setSplicingCall(0)
if filter_for_AS == 'yes': probeset_comp_db = filtered_exon_db
try: clearObjectsFromMemory(alt_junction_db)
except Exception: null=[]
return new_splice_event_list, p_value_call, permute_p_values, probeset_comp_db, regulated_exon_junction_db
class SplicingScoreData:
def Method(self):
###e.g. ASPIRE
return self._method
def Score(self): return str(self._score)
def Probeset1(self): return self._probeset1
def Probeset2(self): return self._probeset2
def RegulationCall(self): return self._regulation_call
def GeneID(self): return self._geneid
def CriticalExons(self): return self._critical_exon_list[1]
def CriticalExonTuple(self): return self._critical_exon_list
def TTestNormalizedRatios(self): return self._normIntensityP
def TTestNormalizedRatios2(self): return self._normIntensityP2
def setConstitutiveFold(self,exp_log_ratio): self._exp_log_ratio = exp_log_ratio
def ConstitutiveFold(self): return str(self._exp_log_ratio)
def setConstitutiveExpression(self,const_baseline): self.const_baseline = const_baseline
def ConstitutiveExpression(self): return str(self.const_baseline)
def setProbesetExpressionData(self,ped): self.ped1 = ped
def ProbesetExprData1(self): return self.ped1
def ProbesetExprData2(self): return self.ped2
def setNovelEvent(self,novel_event): self._novel_event = novel_event
def NovelEvent(self): return self._novel_event
def EventCall(self):
###e.g. Exon inclusion (ei) Exon exclusion (ex), ei-ex, reported in that direction
return self._event_call
def Report(self):
output = self.Method() +'|'+ self.GeneID() +'|'+ string.join(self.CriticalExons(),'|')
return output
def __repr__(self): return self.Report()
class ExonJunctionData(SplicingScoreData):
def __init__(self,score,probeset1,probeset2,probeset1_p,probeset2_p,regulation_call,event_call,critical_exon_list,affygene,ped1,ped2):
self._score = score; self._probeset1 = probeset1; self._probeset2 = probeset2; self._regulation_call = regulation_call
self._event_call = event_call; self._critical_exon_list = critical_exon_list; self._geneid = affygene
self._method = analysis_method; self._normIntensityP = probeset1_p; self._normIntensityP2 = probeset2_p
self.ped1 = ped1; self.ped2=ped2
class ExonData(SplicingScoreData):
def __init__(self,splicing_index,probeset,critical_exon_list,geneid,group1_ratios,group2_ratios,normIntensityP,opposite_SI_log_mean):
self._score = splicing_index; self._probeset1 = probeset; self._opposite_SI_log_mean = opposite_SI_log_mean
self._critical_exon_list = critical_exon_list; self._geneid = geneid
self._baseline_ratio1 = group1_ratios; self._experimental_ratio1 = group2_ratios
self._normIntensityP = normIntensityP
self._method = analysis_method; self._event_call = 'exon-inclusion'
if splicing_index > 0: regulation_call = 'downregulated' ###Since baseline is the numerator ratio
else: regulation_call = 'upregulated'
self._regulation_call = regulation_call
def OppositeSIRatios(self): return self._opposite_SI_log_mean
class ExcludedExonData(ExonData):
def __init__(self,splicing_index,geneid,normIntensityP):
self._score = splicing_index; self._geneid = geneid; self._normIntensityP = normIntensityP
def getAllPossibleLinearRegressionScores(probeset1,probeset2,positions,group_sizes):
    ### Get raw expression values for the two probesets
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
all_possible_scores=[]; index1=0 ### Perform all possible pairwise comparisons between groups (not sure how this will work for 10+ groups)
for (pos1a,pos2a) in positions:
index2=0
for (pos1b,pos2b) in positions:
if pos1a != pos1b:
p1_g1 = p1_exp[pos1a:pos2a]; p1_g2 = p1_exp[pos1b:pos2b]
p2_g1 = p2_exp[pos1a:pos2a]; p2_g2 = p2_exp[pos1b:pos2b]
#log_fold, linregressP, rsqrd = getAllLinearRegressionScores(probeset1,probeset2,p1_g1,p2_g1,p1_g2,p2_g2,len(group_sizes)) ### Used to calculate a pairwise group pvalue
log_fold, rsqrd = performLinearRegression(p1_g1,p2_g1,p1_g2,p2_g2)
if log_fold<0: i1,i2 = index2,index1 ### all scores should indicate upregulation
else: i1,i2=index1,index2
all_possible_scores.append((abs(log_fold),i1,i2))
index2+=1
index1+=1
all_possible_scores.sort()
try: log_fold,index1,index2 = all_possible_scores[-1]
except Exception: log_fold=0; index1=0; index2=0
return log_fold, index1, index2
def getLinearRegressionScores(probeset1,probeset2,group_sizes):
    ### Get raw expression values for the two probesets
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
try:
p1_g1 = p1_exp[:group_sizes[0]]; p1_g2 = p1_exp[group_sizes[0]:]
p2_g1 = p2_exp[:group_sizes[0]]; p2_g2 = p2_exp[group_sizes[0]:]
except Exception:
print probeset1,probeset2
print p1_exp
print p2_exp
print group_sizes
force_kill
log_fold, linregressP, rsqrd = getAllLinearRegressionScores(probeset1,probeset2,p1_g1,p2_g1,p1_g2,p2_g2,2)
return log_fold, linregressP, rsqrd
def getAllLinearRegressionScores(probeset1,probeset2,p1_g1,p2_g1,p1_g2,p2_g2,groups):
log_fold, rsqrd = performLinearRegression(p1_g1,p2_g1,p1_g2,p2_g2)
try:
### Repeat for each sample versus baselines to calculate a p-value
index=0; group1_scores=[]
for p1_g1_sample in p1_g1:
p2_g1_sample = p2_g1[index]
log_f, rs = performLinearRegression(p1_g1,p2_g1,[p1_g1_sample],[p2_g1_sample])
group1_scores.append(log_f); index+=1
index=0; group2_scores=[]
for p1_g2_sample in p1_g2:
p2_g2_sample = p2_g2[index]
log_f, rs = performLinearRegression(p1_g1,p2_g1,[p1_g2_sample],[p2_g2_sample])
group2_scores.append(log_f); index+=1
try:
linregressP = statistics.runComparisonStatistic(group1_scores,group2_scores,probability_statistic)
except Exception:
linregressP = 0; group1_scores = [0]; group2_scores = [log_fold]
if linregressP == 1: linregressP = 0
except Exception:
linregressP = 0; group1_scores = [0]; group2_scores = [log_fold]
if export_NI_values == 'yes' and groups==2:
group1_scores = stringListConvert(group1_scores)
group2_scores = stringListConvert(group2_scores)
ev = string.join([probeset1,probeset2]+group1_scores+group2_scores,'\t')+'\n'; NIdata_export.write(ev)
return log_fold, linregressP, rsqrd
def performLinearRegression(p1_g1,p2_g1,p1_g2,p2_g2):
return_rsqrd = 'no'
if use_R == 'yes': ###Uses the RLM algorithm
#print "Performing Linear Regression analysis using rlm."
g1_slope = statistics.LinearRegression(p1_g1,p2_g1,return_rsqrd)
g2_slope = statistics.LinearRegression(p1_g2,p2_g2,return_rsqrd)
else: ###Uses a basic least squared method
#print "Performing Linear Regression analysis using python specific methods."
g1_slope = statistics.simpleLinRegress(p1_g1,p2_g1)
g2_slope = statistics.simpleLinRegress(p1_g2,p2_g2)
log_fold = statistics.convert_to_log_fold(g2_slope/g1_slope)
rsqrd = 'proceed'
#if g1_rsqrd > 0 and g2_rsqrd > 0: rsqrd = 'proceed'
    #else: rsqrd = 'halt'
return log_fold, rsqrd
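# A minimal standalone sketch of the slope-ratio idea above, assuming for illustration a
# least-squares slope through the origin (the exact fit used by statistics.simpleLinRegress
# or the R-based statistics.LinearRegression is defined elsewhere):
#
#   def _toy_slope(x, y):
#       # least-squares slope of y ~ x with no intercept term
#       return sum(xi * yi for xi, yi in zip(x, y)) / float(sum(xi * xi for xi in x))
#
#   g1_slope = _toy_slope([4, 5, 6], [4, 5, 6])     # 1.0: probeset2 tracks probeset1
#   g2_slope = _toy_slope([4, 5, 6], [8, 10, 12])   # 2.0: probeset2 is ~2-fold higher
#   # log_fold is then the log-scale version of g2_slope/g1_slope (a two-fold change here)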
########### Permutation Analysis Functions ###########
def permuteLinearRegression(probeset1,probeset2,p):
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
p1_g1, p1_g2 = permute_samples(p1_exp,p)
p2_g1, p2_g2 = permute_samples(p2_exp,p)
return_rsqrd = 'no'
if use_R == 'yes': ###Uses the RLM algorithm
g1_slope = statistics.LinearRegression(p1_g1,p2_g1,return_rsqrd)
g2_slope = statistics.LinearRegression(p1_g2,p2_g2,return_rsqrd)
else: ###Uses a basic least squared method
g1_slope = statistics.simpleLinRegress(p1_g1,p2_g1)
g2_slope = statistics.simpleLinRegress(p1_g2,p2_g2)
log_fold = statistics.convert_to_log_fold(g2_slope/g1_slope)
return log_fold
def permuteSplicingScores(splice_event_list):
p_value_call = 'lowest_raw_p'
permute_p_values = {}; splice_event_list2=[]
if len(permute_lists) > 0:
#tuple_data in splice_event_list = dI,probeset1,probeset2,y,event_call,critical_exon_list
all_samples = []; a = 0
for (score,x) in splice_event_list:
            ###NOTE: This reference dI differs slightly from the one calculated below, since the values are calculated from raw relative ratios rather than the averages
###Solution: Use the first calculated dI as the reference
score = score*(-1) ### Reverse the score to make equivalent to splicing-index and FIRMA scores
ref_splice_val = score; probeset1 = x.Probeset1(); probeset2 = x.Probeset2(); affygene = x.GeneID()
y = 0; p_splice_val_dist = []; count = 0; return_rsqrd = 'no'
for p in permute_lists: ###There are two lists in each entry
count += 1
permute = 'yes'
if analysis_method == 'ASPIRE':
p_splice_val = permute_ASPIRE_filtered(affygene, probeset1,probeset2,p,y,ref_splice_val,x)
elif analysis_method == 'linearregres':
slope_ratio = permuteLinearRegression(probeset1,probeset2,p)
p_splice_val = slope_ratio
if p_splice_val != 'null': p_splice_val_dist.append(p_splice_val)
y+=1
p_splice_val_dist.sort()
new_ref_splice_val = str(abs(ref_splice_val)); new_ref_splice_val = float(new_ref_splice_val[0:8]) #otherwise won't match up the scores correctly
if analysis_method == 'linearregres':
if ref_splice_val<0:
p_splice_val_dist2=[]
for val in p_splice_val_dist: p_splice_val_dist2.append(-1*val)
p_splice_val_dist=p_splice_val_dist2; p_splice_val_dist.reverse()
p_val, pos_permute, total_permute, greater_than_true_permute = statistics.permute_p(p_splice_val_dist,new_ref_splice_val,len(permute_lists))
#print p_val,ref_splice_val, pos_permute, total_permute, greater_than_true_permute,p_splice_val_dist[-3:];kill
###When two groups are of equal size, there will be 2 pos_permutes rather than 1
if len(permute_lists[0][0]) == len(permute_lists[0][1]): greater_than_true_permute = (pos_permute/2) - 1 #size of the two groups are equal
else:greater_than_true_permute = (pos_permute) - 1
if analysis_method == 'linearregres': greater_than_true_permute = (pos_permute) - 1 ###since this is a one sided test, unlike ASPIRE
###Below equation is fine if the population is large
permute_p_values[(probeset1,probeset2)] = [p_val, pos_permute, total_permute, greater_than_true_permute]
###Remove non-significant linear regression results
if analysis_method == 'linearregres':
if p_val <= permute_p_threshold or greater_than_true_permute < 2: splice_event_list2.append((score,x)) ###<= since many p=0.05
print "Number of permutation p filtered splice event:",len(splice_event_list2)
if len(permute_p_values)>0: p_value_call = 'permuted_aspire_p-value'
if analysis_method == 'linearregres': splice_event_list = splice_event_list2
return splice_event_list, p_value_call, permute_p_values
def permute_ASPIRE_filtered(affygene,probeset1,probeset2,p,y,ref_splice_val,x):
### Get raw expression values for each permuted group for the two probesets
b1,e1 = permute_dI(array_raw_group_values[probeset1],p)
try: b2,e2 = permute_dI(array_raw_group_values[probeset2],p)
except IndexError: print probeset2, array_raw_group_values[probeset2],p; kill
### Get the average constitutive expression values (averaged per-sample across probesets) for each permuted group
try: bc,ec = permute_dI(avg_const_exp_db[affygene],p)
except IndexError: print affygene, avg_const_exp_db[affygene],p; kill
if factor_out_expression_changes == 'no':
ec = bc
    ### Analyze the averaged ratios of junction expression relative to permuted constitutive expression
    try: p_splice_val = abs(statistics.aspire_stringent(b1/bc,e1/ec,b2/bc,e2/ec)) ### This is the permuted ASPIRE score
except Exception: p_splice_val = 0
#print p_splice_val, ref_splice_val, probeset1, probeset2, affygene; dog
if y == 0: ###The first permutation is always the real one
### Grab the absolute number with small number of decimal places
try:
new_ref_splice_val = str(p_splice_val); new_ref_splice_val = float(new_ref_splice_val[0:8])
ref_splice_val = str(abs(ref_splice_val)); ref_splice_val = float(ref_splice_val[0:8]); y += 1
except ValueError:
###Only get this error if your ref_splice_val is a null
print y, probeset1, probeset2; print ref_splice_val, new_ref_splice_val, p
print b1/bc,e1/ec,b2/bc,e2/ec; print (b1/bc)/(e1/ec), (b2/bc)/(e2/ec)
print x[7],x[8],x[9],x[10]; kill
return p_splice_val
def permute_samples(a,p):
baseline = []; experimental = []
for p_index in p[0]:
baseline.append(a[p_index]) ###Append expression values for each permuted list
for p_index in p[1]:
experimental.append(a[p_index])
return baseline, experimental
def permute_dI(all_samples,p):
baseline, experimental = permute_samples(all_samples,p)
#if get_non_log_avg == 'no':
gb = statistics.avg(baseline); ge = statistics.avg(experimental) ###Group avg baseline, group avg experimental value
gb = statistics.log_fold_conversion_fraction(gb); ge = statistics.log_fold_conversion_fraction(ge)
#else:
#baseline = statistics.log_fold_conversion_fraction(baseline); experimental = statistics.log_fold_conversion_fraction(experimental)
#gb = statistics.avg(baseline); ge = statistics.avg(experimental) ###Group avg baseline, group avg experimental value
return gb,ge
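# Example of the permutation helpers above (hypothetical indices): with
# all_samples = [10.0, 11.0, 12.0, 13.0] and p = ([0, 2], [1, 3]), permute_samples() returns
# ([10.0, 12.0], [11.0, 13.0]); permute_dI() then averages each permuted group in log space
# (11.0 and 12.0 here) and converts those averages out of log space with
# statistics.log_fold_conversion_fraction before returning them.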
def format_exon_functional_attributes(affygene,critical_probeset_list,functional_attribute_db,up_exon_list,down_exon_list,protein_length_list):
### Add functional attributes
functional_attribute_list2=[]
new_functional_attribute_str=''
new_seq_attribute_str=''
new_functional_attribute_list=[]
if array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null': critical_probesets = critical_probeset_list[0]
else: critical_probesets = tuple(critical_probeset_list)
key = affygene,critical_probesets
if key in functional_attribute_db:
###Grab exon IDs corresponding to the critical probesets
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
try: critical_exons = regulated_exon_junction_db[critical_probesets].CriticalExons() ###For junction arrays
except Exception: print key, functional_attribute_db[key];kill
else: critical_exons = [exon_db[critical_probesets].ExonID()] ###For exon arrays
for exon in critical_exons:
for entry in functional_attribute_db[key]:
x = 0
functional_attribute = entry[0]
call = entry[1] # +, -, or ~
if ('AA:' in functional_attribute) or ('ref' in functional_attribute):
x = 1
if exon in up_exon_list:
### design logic to determine whether up or down regulation promotes the functional change (e.g. NMD)
if 'ref' in functional_attribute:
new_functional_attribute = '(~)'+functional_attribute
data_tuple = new_functional_attribute,exon
elif call == '+' or call == '~':
new_functional_attribute = '(+)'+functional_attribute
data_tuple = new_functional_attribute,exon
elif call == '-':
new_functional_attribute = '(-)'+functional_attribute
data_tuple = new_functional_attribute,exon
if 'AA:' in functional_attribute and '?' not in functional_attribute:
functional_attribute_temp = functional_attribute[3:]
if call == '+' or call == '~':
val1,val2 = string.split(functional_attribute_temp,'->')
else:
val2,val1 = string.split(functional_attribute_temp,'->')
val1,null = string.split(val1,'(')
val2,null = string.split(val2,'(')
protein_length_list.append([val1,val2])
elif exon in down_exon_list:
if 'ref' in functional_attribute:
new_functional_attribute = '(~)'+functional_attribute
data_tuple = new_functional_attribute,exon
elif call == '+' or call == '~':
new_functional_attribute = '(-)'+functional_attribute
data_tuple = new_functional_attribute,exon
elif call == '-':
new_functional_attribute = '(+)'+functional_attribute
data_tuple = new_functional_attribute,exon
if 'AA:' in functional_attribute and '?' not in functional_attribute:
functional_attribute_temp = functional_attribute[3:]
if call == '+' or call == '~':
val2,val1 = string.split(functional_attribute_temp,'->')
else:
val1,val2 = string.split(functional_attribute_temp,'->')
val1,null = string.split(val1,'(')
val2,null = string.split(val2,'(')
protein_length_list.append([val1,val2])
if x == 0 or (exclude_protein_details != 'yes'):
try: new_functional_attribute_list.append(new_functional_attribute)
except UnboundLocalError:
print entry
print up_exon_list,down_exon_list
print exon, critical_exons
print critical_probesets, (key, affygene,critical_probesets)
for i in functional_attribute_db:
print i, functional_attribute_db[i]; kill
###remove protein sequence prediction_data
if 'sequence' not in data_tuple[0]:
if x == 0 or exclude_protein_details == 'no':
functional_attribute_list2.append(data_tuple)
###Get rid of duplicates, but maintain non-alphabetical order
new_functional_attribute_list2=[]
for entry in new_functional_attribute_list:
if entry not in new_functional_attribute_list2:
new_functional_attribute_list2.append(entry)
new_functional_attribute_list = new_functional_attribute_list2
#new_functional_attribute_list = unique.unique(new_functional_attribute_list)
#new_functional_attribute_list.sort()
for entry in new_functional_attribute_list:
if 'sequence' in entry: new_seq_attribute_str = new_seq_attribute_str + entry + ','
else: new_functional_attribute_str = new_functional_attribute_str + entry + ','
new_seq_attribute_str = new_seq_attribute_str[0:-1]
new_functional_attribute_str = new_functional_attribute_str[0:-1]
return new_functional_attribute_str, functional_attribute_list2, new_seq_attribute_str,protein_length_list
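# Direction convention in format_exon_functional_attributes(): for exons in up_exon_list a
# '+' or '~' call is reported as '(+)feature' and a '-' call as '(-)feature'; for exons in
# down_exon_list the signs are swapped (a '+'/'~' call becomes '(-)feature'). 'ref'
# attributes are always reported as '(~)feature', and AA: protein-length attributes have
# their before/after lengths ordered accordingly for protein_length_list.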
def grab_summary_dataset_annotations(functional_attribute_db,comparison_db,include_truncation_results_specifically):
    ###If a second filtering database is present, filter the first database based on protein length changes
fa_db={}; cp_db={} ###index the geneids for efficient recall in the next segment of code
for (affygene,annotation) in functional_attribute_db:
try: fa_db[affygene].append(annotation)
except KeyError: fa_db[affygene]= [annotation]
for (affygene,annotation) in comparison_db:
try: cp_db[affygene].append(annotation)
except KeyError: cp_db[affygene]= [annotation]
functional_attribute_db_exclude = {}
for affygene in fa_db:
if affygene in cp_db:
for annotation2 in cp_db[affygene]:
if ('trunc' in annotation2) or ('frag' in annotation2) or ('NMDs' in annotation2):
try: functional_attribute_db_exclude[affygene].append(annotation2)
except KeyError: functional_attribute_db_exclude[affygene] = [annotation2]
functional_annotation_db = {}
for (affygene,annotation) in functional_attribute_db:
### if we wish to filter the 1st database based on protein length changes
if affygene not in functional_attribute_db_exclude:
try: functional_annotation_db[annotation] += 1
except KeyError: functional_annotation_db[annotation] = 1
elif include_truncation_results_specifically == 'yes':
for annotation_val in functional_attribute_db_exclude[affygene]:
try: functional_annotation_db[annotation_val] += 1
except KeyError: functional_annotation_db[annotation_val] = 1
annotation_list = []
annotation_list_ranked = []
for annotation in functional_annotation_db:
if 'micro' not in annotation:
count = functional_annotation_db[annotation]
annotation_list.append((annotation,count))
annotation_list_ranked.append((count,annotation))
annotation_list_ranked.sort(); annotation_list_ranked.reverse()
return annotation_list, annotation_list_ranked
def reorganize_attribute_entries(attribute_db1,build_attribute_direction_databases):
attribute_db2 = {}; inclusion_attributes_hit_count={}; exclusion_attributes_hit_count={}
genes_with_inclusion_attributes={}; genes_with_exclusion_attributes={};
###This database has unique gene, attribute information. No attribute will now be represented more than once per gene
for key in attribute_db1:
###Make gene the key and attribute (functional elements or protein information), along with the associated exons the values
affygene = key[0];exon_attribute = key[1];exon_list = attribute_db1[key]
exon_list = unique.unique(exon_list);exon_list.sort()
attribute_exon_info = exon_attribute,exon_list #e.g. 5'UTR, [E1,E2,E3]
try: attribute_db2[affygene].append(attribute_exon_info)
except KeyError: attribute_db2[affygene] = [attribute_exon_info]
###Separate out attribute data by direction for over-representation analysis
if build_attribute_direction_databases == 'yes':
direction=exon_attribute[1:2];unique_gene_attribute=exon_attribute[3:]
if direction == '+':
try: inclusion_attributes_hit_count[unique_gene_attribute].append(affygene)
except KeyError: inclusion_attributes_hit_count[unique_gene_attribute] = [affygene]
genes_with_inclusion_attributes[affygene]=[]
if direction == '-':
try: exclusion_attributes_hit_count[unique_gene_attribute].append(affygene)
except KeyError: exclusion_attributes_hit_count[unique_gene_attribute] = [affygene]
genes_with_exclusion_attributes[affygene]=[]
inclusion_attributes_hit_count = eliminate_redundant_dict_values(inclusion_attributes_hit_count)
exclusion_attributes_hit_count = eliminate_redundant_dict_values(exclusion_attributes_hit_count)
"""for key in inclusion_attributes_hit_count:
inclusion_attributes_hit_count[key] = len(inclusion_attributes_hit_count[key])
for key in exclusion_attributes_hit_count:
exclusion_attributes_hit_count[key] = len(exclusion_attributes_hit_count[key])"""
if build_attribute_direction_databases == 'yes': return attribute_db2,inclusion_attributes_hit_count,genes_with_inclusion_attributes,exclusion_attributes_hit_count,genes_with_exclusion_attributes
else: return attribute_db2
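# Parsing convention above: exon_attribute values are expected to look like '(+)Pkinase' or
# '(-)Pkinase' (direction prefix plus attribute name), so exon_attribute[1:2] yields the
# direction character and exon_attribute[3:] the attribute name used for the inclusion and
# exclusion hit-count databases (e.g. '+' and 'Pkinase' for the hypothetical '(+)Pkinase').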
########### Misc. Functions ###########
def eliminate_redundant_dict_values(database):
db1={}
for key in database:
        values = unique.unique(database[key])
        values.sort()
        db1[key] = values
return db1
def add_a_space(string):
if len(string)<1:
string = ' '
return string
def convertToLog2(data_list):
return map(lambda x: math.log(float(x), 2), data_list)
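# e.g. convertToLog2(['2','8','16']) returns [1.0, 3.0, 4.0].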
def addGlobalFudgeFactor(data_list,data_type):
new_list = []
if data_type == 'log':
for item in data_list:
new_item = statistics.log_fold_conversion_fraction(item)
new_list.append(float(new_item) + global_addition_factor)
new_list = convertToLog2(new_list)
else:
for item in data_list: new_list.append(float(item) + global_addition_factor)
return new_list
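# Sketch of addGlobalFudgeFactor(): for 'log' data each value is first converted out of log
# space (statistics.log_fold_conversion_fraction), the module-level global_addition_factor
# (assumed to be set elsewhere in this module) is added on the linear scale, and the list is
# re-log2-transformed via convertToLog2; for non-log data the factor is simply added to each
# value as-is.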
def copyDirectoryPDFs(root_dir,AS='AS'):
directories = ['AltResults/AlternativeOutputDirectoryDescription.pdf',
'AltResultsDirectoryDescription.pdf',
'ClusteringDirectoryDescription.pdf',
'ExpressionInputDirectoryDescription.pdf',
'ExpressionOutputDirectoryDescription.pdf',
'GO-Elite/GO-Elite_resultsDirectoryDescription.pdf',
'GO-EliteDirectoryDescription.pdf',
'RootDirectoryDescription.pdf']
import shutil
for dir in directories:
file = string.split(dir,'/')[-1]
proceed=True
if 'AltResult' in dir and AS!='AS': proceed=False
if proceed:
try: shutil.copyfile(filepath('Documentation/DirectoryDescription/'+file), filepath(root_dir+dir))
except Exception: pass
def restrictProbesets(dataset_name):
### Take a file with probesets and only perform the splicing-analysis on these (e.g. those already identified from a previous run with a specific pattern)
    ### Allows for a proper denominator when calculating z-scores for microRNA and protein-domain ORA
probeset_list_filename = import_dir = '/AltDatabaseNoVersion/filtering'; filtered_probeset_db={}
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
try:
dir_list = read_directory(import_dir)
fn_dir = filepath(import_dir[1:])
except Exception: dir_list=[]; fn_dir=''
if len(dir_list)>0:
for file in dir_list:
if file[:-4] in dataset_name:
fn = fn_dir+'/'+file; fn = string.replace(fn,'AltDatabase','AltDatabaseNoVersion')
filtered_probeset_db = importGeneric(fn)
print len(filtered_probeset_db), id_name,"will be used to restrict analysis..."
return filtered_probeset_db
def RunAltAnalyze():
#print altanalyze_files
#print '!!!!!starting to run alt-exon analysis'
#returnLargeGlobalVars()
global annotate_db; annotate_db={}; global splice_event_list; splice_event_list=[]; residuals_dirlist=[]
global dataset_name; global constitutive_probeset_db; global exon_db; dir_list2=[]; import_dir2=''
if array_type == 'AltMouse': import_dir = root_dir+'AltExpression/'+array_type
elif array_type == 'exon':
import_dir = root_dir+'AltExpression/ExonArray/'+species+'/'
elif array_type == 'gene':
import_dir = root_dir+'AltExpression/GeneArray/'+species+'/'
elif array_type == 'junction':
import_dir = root_dir+'AltExpression/JunctionArray/'+species+'/'
else:
import_dir = root_dir+'AltExpression/'+array_type+'/'+species+'/'
#if analysis_method == 'ASPIRE' or analysis_method == 'linearregres' or analysis_method == 'splicing-index':
if array_type != 'AltMouse': gene_annotation_file = "AltDatabase/ensembl/"+species+"/"+species+"_Ensembl-annotations.txt"
else: gene_annotation_file = "AltDatabase/"+species+"/"+array_type+"/"+array_type+"_gene_annotations.txt"
annotate_db = ExonAnalyze_module.import_annotations(gene_annotation_file,array_type)
###Import probe-level associations
exon_db={}; filtered_arrayids={};filter_status='no'
try: constitutive_probeset_db,exon_db,genes_being_analyzed = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status)
except IOError:
print_out = 'The annotation database: \n'+probeset_annotations_file+'\nwas not found. Ensure this file was not deleted and that the correct species has been selected.'
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
print traceback.format_exc()
badExit()
run=0
### Occurs when analyzing multiple conditions rather than performing a simple pair-wise comparison
if run_from_scratch == 'Annotate External Results': import_dir = root_dir
elif analyze_all_conditions == 'all groups':
import_dir = string.replace(import_dir,'AltExpression','AltExpression/FullDatasets')
if array_type == 'AltMouse':
import_dir = string.replace(import_dir,'FullDatasets/AltMouse','FullDatasets/AltMouse/Mm')
elif analyze_all_conditions == 'both':
import_dir2 = string.replace(import_dir,'AltExpression','AltExpression/FullDatasets')
if array_type == 'AltMouse':
import_dir2 = string.replace(import_dir2,'FullDatasets/AltMouse','FullDatasets/AltMouse/Mm')
try: dir_list2 = read_directory(import_dir2) #send a sub_directory to a function to identify all files in a directory
except Exception:
try:
if array_type == 'exon': array_type_dir = 'ExonArray'
elif array_type == 'gene': array_type_dir = 'GeneArray'
                elif array_type == 'junction': array_type_dir = 'JunctionArray'
else: array_type_dir = array_type
import_dir2 = string.replace(import_dir2,'AltExpression/'+array_type_dir+'/'+species+'/','')
import_dir2 = string.replace(import_dir2,'AltExpression/'+array_type_dir+'/','');
dir_list2 = read_directory(import_dir2)
except Exception:
print_out = 'The expression files were not found. Please make\nsure you selected the correct species and array type.\n\nselected species: '+species+'\nselected array type: '+array_type+'\nselected directory:'+import_dir2
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
print traceback.format_exc()
badExit()
try: dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
except Exception:
try:
if array_type == 'exon': array_type_dir = 'ExonArray'
elif array_type == 'gene': array_type_dir = 'GeneArray'
elif array_type == 'junction': array_type_dir = 'JunctionArray'
else: array_type_dir = array_type
import_dir = string.replace(import_dir,'AltExpression/'+array_type_dir+'/'+species+'/','')
import_dir = string.replace(import_dir,'AltExpression/'+array_type_dir+'/','');
try: dir_list = read_directory(import_dir)
except Exception:
import_dir = root_dir
dir_list = read_directory(root_dir) ### Occurs when reading in an AltAnalyze filtered file under certain conditions
except Exception:
print_out = 'The expression files were not found. Please make\nsure you selected the correct species and array type.\n\nselected species: '+species+'\nselected array type: '+array_type+'\nselected directory:'+import_dir
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out
print traceback.format_exc()
badExit()
dir_list+=dir_list2
### Capture the corresponding files in the residual dir to make sure these files exist for all comparisons - won't if FIRMA was run on some files
if analysis_method == 'FIRMA':
try:
residual_dir = root_dir+'AltExpression/FIRMA/residuals/'+array_type+'/'+species+'/'
residuals_dirlist = read_directory(residual_dir)
except Exception: null=[]
try:
residual_dir = root_dir+'AltExpression/FIRMA/FullDatasets/'+array_type+'/'+species+'/'
residuals_dirlist += read_directory(residual_dir)
except Exception: null=[]
dir_list_verified=[]
for file in residuals_dirlist:
for filename in dir_list:
if file[:-4] in filename: dir_list_verified.append(filename)
dir_list = unique.unique(dir_list_verified)
junction_biotype = 'no'
if array_type == 'RNASeq':
### Check to see if user data includes junctions or just exons
for probeset in exon_db:
if '-' in probeset: junction_biotype = 'yes'; break
if junction_biotype == 'no' and analysis_method != 'splicing-index' and array_type == 'RNASeq':
dir_list=[] ### DON'T RUN ALTANALYZE WHEN JUST ANALYZING EXON DATA
print 'No junction data to summarize... proceeding with exon analysis\n'
elif len(dir_list)==0:
print_out = 'No expression files available in the input directory:\n'+root_dir
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
badExit()
dir_list = filterAltExpressionFiles(dir_list,altanalyze_files) ### Looks to see if the AltExpression files are for this run or from an older run
for altanalyze_input in dir_list: #loop through each file in the directory to output results
###Import probe-level associations
if 'cel_files' in altanalyze_input:
print_out = 'The AltExpression directory containing the necessary import file(s) is missing. Please verify the correct parameters and input directory were selected. If this error persists, contact us.'
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
badExit()
if run>0: ### Only re-set these databases after the run when batch analysing multiple files
            exon_db={}; filtered_arrayids={};filter_status='no' ###Use this as a means to save memory (import multiple times - only storing the relevant types of information)
constitutive_probeset_db,exon_db,genes_being_analyzed = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status)
if altanalyze_input in dir_list2: dataset_dir = import_dir2 +'/'+ altanalyze_input ### Then not a pairwise comparison
else: dataset_dir = import_dir +'/'+ altanalyze_input
dataset_name = altanalyze_input[:-4] + '-'
print "Beginning to process",dataset_name[0:-1]
        ### If the user wants to restrict the analysis to preselected probesets (e.g., limma or FIRMA analysis selected)
global filtered_probeset_db; filtered_probeset_db={}
try: filtered_probeset_db = restrictProbesets(dataset_name)
except Exception: null=[]
if run_from_scratch != 'Annotate External Results':
###Import expression data and stats and filter the expression data based on fold and p-value OR expression threshold
try: conditions,adj_fold_dbase,nonlog_NI_db,dataset_name,gene_expression_diff_db,midas_db,ex_db,si_db = performExpressionAnalysis(dataset_dir,constitutive_probeset_db,exon_db,annotate_db,dataset_name)
except IOError:
#except Exception,exception:
#print exception
print traceback.format_exc()
                print_out = 'The AltAnalyze filtered expression file "'+dataset_name+'" is not properly formatted. Review formatting requirements if this file was created by another application.'
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
badExit()
else:
conditions = 0; adj_fold_dbase={}; nonlog_NI_db={}; gene_expression_diff_db={}; ex_db={}; si_db={}
defineEmptyExpressionVars(exon_db); adj_fold_dbase = original_fold_dbase
###Run Analysis
summary_results_db, summary_results_db2, aspire_output, aspire_output_gene, number_events_analyzed = splicingAnalysisAlgorithms(nonlog_NI_db,adj_fold_dbase,dataset_name,gene_expression_diff_db,exon_db,ex_db,si_db,dataset_dir)
aspire_output_list.append(aspire_output); aspire_output_gene_list.append(aspire_output_gene)
try: clearObjectsFromMemory(exon_db); clearObjectsFromMemory(constitutive_probeset_db); constitutive_probeset_db=[]
except Exception: null=[]
try: clearObjectsFromMemory(last_exon_region_db);last_exon_region_db=[]
except Exception: null=[]
try: clearObjectsFromMemory(adj_fold_dbase);adj_fold_dbase=[]; clearObjectsFromMemory(nonlog_NI_db);nonlog_NI_db=[]
except Exception: null=[]
try: clearObjectsFromMemory(gene_expression_diff_db);gene_expression_diff_db=[]; clearObjectsFromMemory(midas_db);midas_db=[]
except Exception: null=[]
try: clearObjectsFromMemory(ex_db);ex_db=[]; clearObjectsFromMemory(si_db);si_db=[]
except Exception: null=[]
try: run+=1
except Exception: run = 1
if run>0: ###run = 0 if no filtered expression data present
try: return summary_results_db, aspire_output_gene_list, number_events_analyzed
except Exception:
            print_out = 'AltAnalyze was unable to find an expression dataset to analyze in:\n'+import_dir+'\nor\n'+import_dir2+'\nPlease re-run and select a valid input directory.'
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
badExit()
else:
try: clearObjectsFromMemory(exon_db); clearObjectsFromMemory(constitutive_probeset_db); constitutive_probeset_db=[]
except Exception: null=[]
try: clearObjectsFromMemory(last_exon_region_db);last_exon_region_db=[]
except Exception: null=[]
return None
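### Restrict the AltExpression file list to the files generated in the current run (current_files);
### if no filenames were supplied, all files in the directory are analyzed.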
def filterAltExpressionFiles(dir_list,current_files):
dir_list2=[]
try:
if len(current_files) == 0: current_files = dir_list ###if no filenames input
for altanalzye_input in dir_list: #loop through each file in the directory to output results
if altanalzye_input in current_files:
dir_list2.append(altanalzye_input)
dir_list = dir_list2
except Exception: dir_list = dir_list
return dir_list
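### Initialize empty global expression structures; used when annotating external results,
### where no AltAnalyze-filtered expression data is imported.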
def defineEmptyExpressionVars(exon_db):
global fold_dbase; fold_dbase={}; global original_fold_dbase; global critical_exon_db; critical_exon_db={}
global midas_db; midas_db = {}; global max_replicates; global equal_replicates; max_replicates=0; equal_replicates=0
for probeset in exon_db: fold_dbase[probeset]='',''
original_fold_dbase = fold_dbase
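### Write status messages to the AltAnalyze log file and/or the console, depending on whether
### AltAnalyze is running in command-line mode. Illustrative usage:
###     universalPrintFunction(['Step 1 complete','Step 2 complete'])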
def universalPrintFunction(print_items):
log_report = open(log_file,'a')
for item in print_items:
            if commandLineMode == 'no': ### Command-line mode has its own log file write method (Logger)
log_report.write(item+'\n')
else: print item
log_report.close()
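### Tkinter/Pmw window that displays run-time status messages. sys.stdout is redirected to a
### StringVarFile instance (defined further below) so that print statements update the window and
### the log file, and AltAnalyzeMain is then launched for the selected dataset(s).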
class StatusWindow:
def __init__(self,root,expr_var,alt_var,goelite_var,additional_var,exp_file_location_db):
root.title('AltAnalyze version 2.1.0')
statusVar = StringVar() ### Class method for Tkinter. Description: "Value holder for strings variables."
self.root = root
height = 450; width = 500
if os.name != 'nt': height = 500; width = 600
self.sf = PmwFreeze.ScrolledFrame(root,
labelpos = 'n', label_text = 'Results Status Window',
usehullsize = 1, hull_width = width, hull_height = height)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Output')
group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
Label(group.interior(),width=190,height=552,justify=LEFT, bg='black', fg = 'white',anchor=NW,padx = 5,pady = 5, textvariable=statusVar).pack(fill=X,expand=Y)
status = StringVarFile(statusVar,root) ### Likely captures the stdout
sys.stdout = status
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]; fl.setSTDOUT(sys.stdout)
root.after(100, AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db, root))
try:
root.protocol("WM_DELETE_WINDOW", self.deleteWindow)
root.mainloop()
except Exception: pass
def deleteWindow(self):
try: self.root.destroy()
except Exception: pass
def quit(self):
try:
self.root.quit()
self.root.destroy()
except Exception: pass
sys.exit()
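### Format the per-comparison summary counts (genes/probesets or junctions examined, alternatively
### regulated events, domain and microRNA overlaps) for the log file or for display in the GUI;
### 'probeset' is relabeled as 'exon' or 'junction' depending on the platform.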
def exportComparisonSummary(dataset_name,summary_data_dbase,return_type):
log_report = open(log_file,'a')
result_list=[]
for key in summary_data_dbase:
if key != 'QC': ### The value is a list of strings
summary_data_dbase[key] = str(summary_data_dbase[key])
d = 'Dataset name: '+ dataset_name[:-1]; result_list.append(d+'\n')
d = summary_data_dbase['gene_assayed']+':\tAll genes examined'; result_list.append(d)
d = summary_data_dbase['denominator_exp_genes']+':\tExpressed genes examined for AS'; result_list.append(d)
if explicit_data_type == 'exon-only':
d = summary_data_dbase['alt_events']+':\tAlternatively regulated probesets'; result_list.append(d)
d = summary_data_dbase['denominator_exp_events']+':\tExpressed probesets examined'; result_list.append(d)
elif (array_type == 'AltMouse' or array_type == 'junction' or array_type == 'RNASeq') and (explicit_data_type == 'null' or return_type == 'print'):
d = summary_data_dbase['alt_events']+':\tAlternatively regulated junction-pairs'; result_list.append(d)
d = summary_data_dbase['denominator_exp_events']+':\tExpressed junction-pairs examined'; result_list.append(d)
else:
d = summary_data_dbase['alt_events']+':\tAlternatively regulated probesets'; result_list.append(d)
d = summary_data_dbase['denominator_exp_events']+':\tExpressed probesets examined'; result_list.append(d)
d = summary_data_dbase['alt_genes']+':\tAlternatively regulated genes (ARGs)'; result_list.append(d)
    d = summary_data_dbase['direct_domain_genes']+':\tARGs - overlapping with domain/motifs'; result_list.append(d)
    d = summary_data_dbase['miRNA_gene_hits']+':\tARGs - overlapping with microRNA binding sites'; result_list.append(d)
result_list2=[]
for d in result_list:
if explicit_data_type == 'exon-only': d = string.replace(d,'probeset','exon')
elif array_type == 'RNASeq': d = string.replace(d,'probeset','junction')
result_list2.append(d)
result_list = result_list2
if return_type == 'log':
for d in result_list: log_report.write(d+'\n')
log_report.write('\n')
log_report.close()
return result_list
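### Results summary window shown after an analysis completes: lists output locations, provides
### clickable links to QC/clustering plots and result folders, and offers to launch Cytoscape
### with DomainGraph for alternative-exon (AS) analyses.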
class SummaryResultsWindow:
def __init__(self,tl,analysis_type,output_dir,dataset_name,output_type,summary_data_dbase):
def showLink(event):
try:
idx = int(event.widget.tag_names(CURRENT)[1]) ### This is just the index provided below (e.g., str(0))
#print [self.LINKS[idx]]
if 'http://' in self.LINKS[idx]:
webbrowser.open(self.LINKS[idx])
elif self.LINKS[idx][-1] == '/':
self.openSuppliedDirectory(self.LINKS[idx])
else:
### Instead of using this option to open a hyperlink (which is what it should do), we can open another Tk window
try: self.viewPNGFile(self.LINKS[idx]) ### ImageTK PNG viewer
except Exception:
                        try: self.ShowImageMPL(self.LINKS[idx]) ### MatPlotLib based display
except Exception:
self.openPNGImage(self.LINKS[idx]) ### Native OS PNG viewer
                    #self.DisplayPlots(self.LINKS[idx]) ### GIF based display
except Exception:
null=[] ### anomalous error
self.emergency_exit = False
self.LINKS = []
self.tl = tl
self.tl.title('AltAnalyze version 2.1.0')
self.analysis_type = analysis_type
filename = 'Config/icon.gif'
fn=filepath(filename); img = PhotoImage(file=fn)
can = Canvas(tl); can.pack(side='top'); can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
use_scroll = 'yes'
try: runGOElite = run_GOElite
except Exception: runGOElite='decide_later'
if 'QC' in summary_data_dbase:
graphic_links = summary_data_dbase['QC'] ### contains hyperlinks to QC and Clustering plots
if len(graphic_links)==0: del summary_data_dbase['QC'] ### This can be added if an analysis fails
else:
graphic_links = []
label_text_str = 'AltAnalyze Result Summary'; height = 150; width = 500
if analysis_type == 'AS' or 'QC' in summary_data_dbase: height = 330
if analysis_type == 'AS' and 'QC' in summary_data_dbase: height = 330
self.sf = PmwFreeze.ScrolledFrame(tl,
labelpos = 'n', label_text = label_text_str,
usehullsize = 1, hull_width = width, hull_height = height)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
txt=Text(self.frame,bg='gray',width=150, height=80)
txt.pack(expand=True, fill="both")
#txt.insert(END, 'Primary Analysis Finished....\n')
txt.insert(END, 'Results saved to:\n'+output_dir+'\n')
f = Font(family="System", size=12, weight="bold")
txt.tag_config("font", font=f)
i=0
copyDirectoryPDFs(output_dir,AS=analysis_type)
if analysis_type == 'AS':
txt.insert(END, '\n')
result_list = exportComparisonSummary(dataset_name,summary_data_dbase,'print')
for d in result_list: txt.insert(END, d+'\n')
if 'QC' in summary_data_dbase and len(graphic_links)>0:
txt.insert(END, '\nQC and Expression Clustering Plots',"font")
txt.insert(END, '\n\n 1) ')
for (name,file_dir) in graphic_links:
txt.insert(END, name, ('link', str(i)))
if len(graphic_links) > (i+1):
txt.insert(END, '\n %s) ' % str(i+2))
self.LINKS.append(file_dir)
i+=1
txt.insert(END, '\n\nView all primary plots in the folder ')
txt.insert(END, 'DataPlots',('link', str(i))); i+=1
self.LINKS.append(output_dir+'DataPlots/')
else:
url = 'http://code.google.com/p/altanalyze/'
self.LINKS=(url,'')
txt.insert(END, '\nFor more information see the ')
txt.insert(END, "AltAnalyze Online Help", ('link', str(0)))
txt.insert(END, '\n\n')
if runGOElite == 'run-immediately':
txt.insert(END, '\n\nView all pathway enrichment results in the folder ')
txt.insert(END, 'GO-Elite',('link', str(i))); i+=1
self.LINKS.append(output_dir+'GO-Elite/')
if analysis_type == 'AS':
txt.insert(END, '\n\nView all splicing plots in the folder ')
txt.insert(END, 'ExonPlots',('link', str(i))); i+=1
try: self.LINKS.append(output_dir+'ExonPlots/')
except Exception: pass
txt.tag_config('link', foreground="blue", underline = 1)
txt.tag_bind('link', '<Button-1>', showLink)
txt.insert(END, '\n\n')
open_results_folder = Button(tl, text = 'Results Folder', command = self.openDirectory)
open_results_folder.pack(side = 'left', padx = 5, pady = 5);
if analysis_type == 'AS':
#self.dg_url = 'http://www.altanalyze.org/domaingraph.htm'
self.dg_url = 'http://www.altanalyze.org/domaingraph.htm'
dg_pdf_file = 'Documentation/domain_graph.pdf'; dg_pdf_file = filepath(dg_pdf_file); self.dg_pdf_file = dg_pdf_file
text_button = Button(tl, text='Start DomainGraph in Cytoscape', command=self.SelectCytoscapeTopLevel)
text_button.pack(side = 'right', padx = 5, pady = 5)
self.output_dir = output_dir + "AltResults"
self.whatNext_url = 'http://code.google.com/p/altanalyze/wiki/AnalyzingASResults' #http://www.altanalyze.org/what_next_altexon.htm'
whatNext_pdf = 'Documentation/what_next_alt_exon.pdf'; whatNext_pdf = filepath(whatNext_pdf); self.whatNext_pdf = whatNext_pdf
if output_type == 'parent': self.output_dir = output_dir ###Used for fake datasets
else:
if pathway_permutations == 'NA':
self.output_dir = output_dir + "ExpressionOutput"
else: self.output_dir = output_dir
self.whatNext_url = 'http://code.google.com/p/altanalyze/wiki/AnalyzingGEResults' #'http://www.altanalyze.org/what_next_expression.htm'
whatNext_pdf = 'Documentation/what_next_GE.pdf'; whatNext_pdf = filepath(whatNext_pdf); self.whatNext_pdf = whatNext_pdf
what_next = Button(tl, text='What Next?', command=self.whatNextlinkout)
what_next.pack(side = 'right', padx = 5, pady = 5)
quit_buttonTL = Button(tl,text='Close View', command=self.close)
quit_buttonTL.pack(side = 'right', padx = 5, pady = 5)
continue_to_next_win = Button(text = 'Continue', command = self.continue_win)
continue_to_next_win.pack(side = 'right', padx = 10, pady = 10)
quit_button = Button(root,text='Quit', command=self.quit)
quit_button.pack(side = 'right', padx = 5, pady = 5)
button_text = 'Help'; help_url = 'http://www.altanalyze.org/help_main.htm'; self.help_url = filepath(help_url)
pdf_help_file = 'Documentation/AltAnalyze-Manual.pdf'; pdf_help_file = filepath(pdf_help_file); self.pdf_help_file = pdf_help_file
help_button = Button(root, text=button_text, command=self.Helplinkout)
help_button.pack(side = 'left', padx = 5, pady = 5)
if self.emergency_exit == False:
self.tl.protocol("WM_DELETE_WINDOW", self.tldeleteWindow)
self.tl.mainloop() ###Needed to show graphic
else:
""" This shouldn't have to be called, but is when the topLevel window isn't closed first
specifically if a PNG file is opened. the sys.exitfunc() should work but doesn't.
work on this more later """
#AltAnalyzeSetup('no')
try: self._tls.quit(); self._tls.destroy()
except Exception: None
try: self._tlx.quit(); self._tlx.destroy()
except Exception: None
try: self.tl.quit(); self.tl.destroy()
except Exception: None
try: root.quit(); root.destroy()
except Exception: None
UI.getUpdatedParameters(array_type,species,'Process Expression file',output_dir)
sys.exit() ### required when opening PNG files on Windows to continue (not sure why)
#sys.exitfunc()
def tldeleteWindow(self):
try: self.tl.quit(); self.tl.destroy()
except Exception: self.tl.destroy()
def deleteTLWindow(self):
self.emergency_exit = True
try: self._tls.quit(); self._tls.destroy()
except Exception: None
try: self._tlx.quit(); self._tlx.destroy()
except Exception: None
self.tl.quit()
self.tl.destroy()
sys.exitfunc()
def deleteWindow(self):
self.emergency_exit = True
try: self._tls.quit(); self._tls.destroy()
except Exception: None
try: self._tlx.quit(); self._tlx.destroy()
except Exception: None
try:
self.tl.quit()
self.tl.destroy()
except Exception: None
sys.exitfunc()
def continue_win(self):
self.emergency_exit = True
try: self._tls.quit(); self._tls.destroy()
except Exception: None
try: self._tlx.quit(); self._tlx.destroy()
except Exception: None
try: self.tl.quit(); self.tl.destroy()
except Exception: pass
root.quit()
root.destroy()
try: self.tl.grid_forget()
except Exception: None
try: root.grid_forget()
except Exception: None
sys.exitfunc()
def openDirectory(self):
if os.name == 'nt':
try: os.startfile('"'+self.output_dir+'"')
except Exception: os.system('open "'+self.output_dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+self.output_dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+self.output_dir+'/"')
def openSuppliedDirectory(self,dir):
if os.name == 'nt':
            try: os.startfile('"'+dir+'"')
except Exception: os.system('open "'+dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+dir+'/"')
def DGlinkout(self):
try:
altanalyze_path = filepath('') ### Find AltAnalye's path
altanalyze_path = altanalyze_path[:-1]
except Exception: null=[]
if os.name == 'nt':
parent_dir = 'C:/Program Files'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform:
parent_dir = '/Applications'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.app'
elif 'linux' in sys.platform:
parent_dir = '/opt'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape'
try: openCytoscape(altanalyze_path,application_dir,application_name)
except Exception: null=[]
try: self._tls.destroy()
except Exception: None
try: ###Remove this cytoscape as the default
file_location_defaults = UI.importDefaultFileLocations()
del file_location_defaults['CytoscapeDir']
UI.exportDefaultFileLocations(file_location_defaults)
except Exception: null=[]
self.GetHelpTopLevel(self.dg_url,self.dg_pdf_file)
def Helplinkout(self): self.GetHelpTopLevel(self.help_url,self.pdf_help_file)
def whatNextlinkout(self): self.GetHelpTopLevel(self.whatNext_url,self.whatNext_pdf)
def ShowImageMPL(self,file_location):
""" Visualization method using MatPlotLib """
try:
import matplotlib
import matplotlib.pyplot as pylab
except Exception:
#print 'Graphical output mode disabled (requires matplotlib, numpy and scipy)'
None
fig = pylab.figure()
pylab.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.00) ### Fill the plot area left to right
ax = fig.add_subplot(111)
ax.set_xticks([]) ### Hides ticks
ax.set_yticks([])
img= pylab.imread(file_location)
imgplot = pylab.imshow(img)
pylab.show()
def viewPNGFile(self,png_file_dir):
""" View PNG file within a PMW Tkinter frame """
import ImageTk
tlx = Toplevel(); self._tlx = tlx
sf = PmwFreeze.ScrolledFrame(tlx, labelpos = 'n', label_text = '',
usehullsize = 1, hull_width = 800, hull_height = 550)
sf.pack(padx = 0, pady = 0, fill = 'both', expand = 1)
frame = sf.interior()
tlx.title(png_file_dir)
img = ImageTk.PhotoImage(file=png_file_dir)
can = Canvas(frame)
can.pack(fill=BOTH, padx = 0, pady = 0)
w = img.width()
        h = img.height()
can.config(width=w, height=h)
can.create_image(2, 2, image=img, anchor=NW)
tlx.mainloop()
def openPNGImage(self,png_file_dir):
if os.name == 'nt':
try: os.startfile('"'+png_file_dir+'"')
except Exception: os.system('open "'+png_file_dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+png_file_dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+png_file_dir+'"')
def DisplayPlots(self,file_location):
""" Native Tkinter method - Displays a gif file in a standard TopLevel window (nothing fancy) """
tls = Toplevel(); self._tls = tls; nulls = '\t\t\t\t'; tls.title('AltAnalyze Plot Visualization')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos = 'n', label_text = '', usehullsize = 1, hull_width = 520, hull_height = 500)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(),tag_text = file_location)
group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
img = PhotoImage(file=filepath(file_location))
can = Canvas(group.interior()); can.pack(side='left',padx = 10, pady = 20); can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
tls.mainloop()
def GetHelpTopLevel(self,url,pdf_file):
try:
config_db = UI.importConfigFile()
ask_for_help = config_db['help'] ### hide_selection_option
except Exception: ask_for_help = 'null'; config_db={}
self.pdf_file = pdf_file; self.url = url
if ask_for_help == 'null':
message = ''; self.message = message; self.online_help = 'Online Documentation'; self.pdf_help = 'Local PDF File'
tls = Toplevel(); self._tls = tls; nulls = '\t\t\t\t'; tls.title('Please select one of the options')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos = 'n', label_text = '', usehullsize = 1, hull_width = 320, hull_height = 200)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Options')
group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
filename = 'Config/icon.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
can = Canvas(group.interior()); can.pack(side='left',padx = 10, pady = 20); can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
text_button2 = Button(group.interior(), text=self.online_help, command=self.openOnlineHelp); text_button2.pack(side = 'top', padx = 5, pady = 5)
try: text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(side = 'top', padx = 5, pady = 5)
except Exception: text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(side = 'top', padx = 5, pady = 5)
text_button3 = Button(group.interior(), text='No Thanks', command=self.skipHelp); text_button3.pack(side = 'top', padx = 5, pady = 5)
c = Checkbutton(group.interior(), text = "Apply these settings each time", command=self.setHelpConfig); c.pack(side = 'bottom', padx = 5, pady = 0)
tls.mainloop()
try: tls.destroy()
except Exception: None
else:
file_location_defaults = UI.importDefaultFileLocations()
try:
help_choice = file_location_defaults['HelpChoice'].Location()
if help_choice == 'PDF': self.openPDFHelp()
elif help_choice == 'http': self.openOnlineHelp()
else: self.skip()
except Exception: self.openPDFHelp() ### Open PDF if there's a problem
def SelectCytoscapeTopLevel(self):
try:
config_db = UI.importConfigFile()
cytoscape_type = config_db['cytoscape'] ### hide_selection_option
except Exception: cytoscape_type = 'null'; config_db={}
if cytoscape_type == 'null':
message = ''; self.message = message
tls = Toplevel(); self._tls = tls; nulls = '\t\t\t\t'; tls.title('Cytoscape Automatic Start Options')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos = 'n', label_text = '', usehullsize = 1, hull_width = 420, hull_height = 200)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Options')
group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
filename = 'Config/cyto-logo-smaller.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
can = Canvas(group.interior()); can.pack(side='left',padx = 10, pady = 5); can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
#"""
self.local_cytoscape = 'AltAnalyze Bundled Version'; self.custom_cytoscape = 'Previously Installed Version'
l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
l3 = Label(group.interior(), text='Select version of Cytoscape to open:'); l3.pack(side = 'top', pady = 5)
"""
self.local_cytoscape = ' No '; self.custom_cytoscape = ' Yes '
l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
l2 = Label(group.interior(), text='Note: Cytoscape can take up-to a minute to initalize', fg="red"); l2.pack(side = 'top', padx = 5, pady = 0)
"""
text_button2 = Button(group.interior(), text=self.local_cytoscape, command=self.DGlinkout); text_button2.pack(padx = 5, pady = 5)
try: text_button = Button(group.interior(), text=self.custom_cytoscape, command=self.getPath); text_button.pack(padx = 5, pady = 5)
except Exception: text_button = Button(group.interior(), text=self.custom_cytoscape, command=self.getPath); text_button.pack(padx = 5, pady = 5)
            l2 = Label(group.interior(), text='Note: Cytoscape can take up to a minute to initialize', fg="blue"); l2.pack(side = 'bottom', padx = 5, pady = 0)
c = Checkbutton(group.interior(), text = "Apply these settings each time and don't show again", command=self.setCytoscapeConfig); c.pack(side = 'bottom', padx = 5, pady = 0)
#c2 = Checkbutton(group.interior(), text = "Open PDF of DomainGraph help rather than online help", command=self.setCytoscapeConfig); c2.pack(side = 'bottom', padx = 5, pady = 0)
tls.mainloop()
try: tls.destroy()
except Exception: None
else:
file_location_defaults = UI.importDefaultFileLocations()
try: cytoscape_app_dir = file_location_defaults['CytoscapeDir'].Location(); openFile(cytoscape_app_dir)
except Exception:
try: altanalyze_path = filepath(''); altanalyze_path = altanalyze_path[:-1]
except Exception: altanalyze_path=''
application_dir = 'Cytoscape_v'
if os.name == 'nt': application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform: application_name = 'Cytoscape.app'
elif 'linux' in sys.platform: application_name = 'Cytoscape'
try: openCytoscape(altanalyze_path,application_dir,application_name)
except Exception: null=[]
def setCytoscapeConfig(self):
config_db={}; config_db['cytoscape'] = 'hide_selection_option'
UI.exportConfigFile(config_db)
def setHelpConfig(self):
config_db={}; config_db['help'] = 'hide_selection_option'
UI.exportConfigFile(config_db)
def getPath(self):
file_location_defaults = UI.importDefaultFileLocations()
if os.name == 'nt': parent_dir = 'C:/Program Files'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform: parent_dir = '/Applications'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.app'
elif 'linux' in sys.platform: parent_dir = '/opt'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape'
try:
self.default_dir = file_location_defaults['CytoscapeDir'].Location()
self.default_dir = string.replace(self.default_dir,'//','/')
self.default_dir = string.replace(self.default_dir,'\\','/')
self.default_dir = string.join(string.split(self.default_dir,'/')[:-1],'/')
except Exception:
dir = FindDir(parent_dir,application_dir); dir = filepath(parent_dir+'/'+dir)
self.default_dir = filepath(parent_dir)
try: dirPath = tkFileDialog.askdirectory(parent=self._tls,initialdir=self.default_dir)
except Exception:
self.default_dir = ''
try: dirPath = tkFileDialog.askdirectory(parent=self._tls,initialdir=self.default_dir)
except Exception:
try: dirPath = tkFileDialog.askdirectory(parent=self._tls)
except Exception: dirPath=''
try:
#print [dirPath],application_name
app_dir = dirPath+'/'+application_name
if 'linux' in sys.platform:
try: createCytoscapeDesktop(cytoscape_dir)
except Exception: null=[]
dir_list = unique.read_directory('/usr/bin/') ### Check to see that JAVA is installed
                if 'java' not in dir_list: print 'Java not found in "/usr/bin/". If not installed,\nplease install and retry opening Cytoscape'
try:
jar_path = dirPath+'/cytoscape.jar'
main_path = dirPath+'/cytoscape.CyMain'
plugins_path = dirPath+'/plugins'
os.system('java -Dswing.aatext=true -Xss5M -Xmx512M -jar '+jar_path+' '+main_path+' -p '+plugins_path+' &')
print 'Cytoscape jar opened:',jar_path
except Exception:
print 'OS command to open Java failed.'
try: openFile(app_dir2); print 'Cytoscape opened:',app_dir2
except Exception: openFile(app_dir)
else: openFile(app_dir)
try: file_location_defaults['CytoscapeDir'].SetLocation(app_dir)
except Exception:
fl = UI.FileLocationData('', app_dir, 'all')
file_location_defaults['CytoscapeDir'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
except Exception: null=[]
try: self._tls.destroy()
except Exception: None
self.GetHelpTopLevel(self.dg_url,self.dg_pdf_file)
def openOnlineHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:file_location_defaults['HelpChoice'].SetLocation('http')
except Exception:
fl = UI.FileLocationData('', 'http', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
webbrowser.open(self.url)
#except Exception: null=[]
try: self._tls.destroy()
except Exception: None
def skipHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try: file_location_defaults['HelpChoice'].SetLocation('skip')
except Exception:
fl = UI.FileLocationData('', 'skip', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
try: self._tls.destroy()
except Exception: None
def openPDFHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:file_location_defaults['HelpChoice'].SetLocation('PDF')
except Exception:
fl = UI.FileLocationData('', 'PDF', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
if os.name == 'nt':
try: os.startfile('"'+self.pdf_file+'"')
except Exception: os.system('open "'+self.pdf_file+'"')
elif 'darwin' in sys.platform: os.system('open "'+self.pdf_file+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+self.pdf_file+'"')
try: self._tls.destroy()
except Exception: None
def quit(self):
root.quit()
root.destroy()
sys.exit()
def close(self):
#self.tl.quit() #### This was causing multiple errors in 2.0.7 - evaluate more!
self.tl.destroy()
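### File-like object assigned to sys.stdout by StatusWindow: each write() appends the text to the
### log file and to the Tkinter StringVar driving the status label, so print output appears in the GUI.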
class StringVarFile:
def __init__(self,stringVar,window):
self.__newline = 0; self.__stringvar = stringVar; self.__window = window
def write(self,s):
try:
log_report = open(log_file,'a')
log_report.write(s); log_report.close() ### Variable to record each print statement
new = self.__stringvar.get()
for c in s:
#if c == '\n': self.__newline = 1
                if c == '\k': self.__newline = 1 ### This should never match, resulting in a continuous feed rather than replacing a single line
else:
if self.__newline: new = ""; self.__newline = 0
new = new+c
self.set(new)
except Exception: pass
def set(self,s):
try: self.__stringvar.set(s); self.__window.update()
except Exception: pass
def get(self):
try:
return self.__stringvar.get()
except Exception: pass
def flush(self):
pass
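### Build a date/time stamp used to name the log file; e.g. a run on Jan 1 2014 at 10:30:45
### would yield the string '20140101-103045' (illustrative example).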
def timestamp():
import datetime
today = str(datetime.date.today()); today = string.split(today,'-'); today = today[0]+''+today[1]+''+today[2]
time_stamp = string.replace(time.ctime(),':','')
    time_stamp = string.replace(time_stamp,'  ',' ') ### collapse the double space ctime() inserts before single-digit days
time_stamp = string.split(time_stamp,' ') ###Use a time-stamp as the output dir (minus the day)
time_stamp = today+'-'+time_stamp[3]
return time_stamp
def callWXPython():
import wx
import AltAnalyzeViewer
app = wx.App(False)
AltAnalyzeViewer.remoteViewer(app)
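### Collect user run parameters (via the Tkinter UI or a remote viewer), set the global output
### directory, APT location and log file, then launch the analysis through the graphical
### StatusWindow (when Tkinter is available and debug mode is off) or directly via AltAnalyzeMain.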
def AltAnalyzeSetup(skip_intro):
global apt_location; global root_dir;global log_file; global summary_data_db; summary_data_db={}; reload(UI)
global probability_statistic; global commandLineMode; commandLineMode = 'no'
if 'remoteViewer' == skip_intro:
if os.name == 'nt':
callWXPython()
elif os.name == 'ntX':
package_path = filepath('python')
win_package_path = string.replace(package_path,'python','AltAnalyzeViewer.exe')
import subprocess
subprocess.call([win_package_path]);sys.exit()
elif os.name == 'posix':
package_path = filepath('python')
#mac_package_path = string.replace(package_path,'python','AltAnalyze.app/Contents/MacOS/python')
#os.system(mac_package_path+' RemoteViewer.py');sys.exit()
mac_package_path = string.replace(package_path,'python','AltAnalyzeViewer.app/Contents/MacOS/AltAnalyzeViewer')
import subprocess
subprocess.call([mac_package_path]);sys.exit()
"""
import threading
import wx
app = wx.PySimpleApp()
t = threading.Thread(target=callWXPython)
t.setDaemon(1)
t.start()
s = 1
queue = mlp.Queue()
proc = mlp.Process(target=callWXPython) ### passing sys.stdout unfortunately doesn't work to pass the Tk string
proc.start()
sys.exit()
"""
reload(UI)
expr_var, alt_var, additional_var, goelite_var, exp_file_location_db = UI.getUserParameters(skip_intro,Multi=mlp)
"""except Exception:
if 'SystemExit' not in str(traceback.format_exc()):
expr_var, alt_var, additional_var, goelite_var, exp_file_location_db = UI.getUserParameters('yes')
else: sys.exit()"""
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
apt_location = fl.APTLocation()
root_dir = fl.RootDir()
try: probability_statistic = fl.ProbabilityStatistic()
except Exception: probability_statistic = 'unpaired t-test'
time_stamp = timestamp()
log_file = filepath(root_dir+'AltAnalyze_report-'+time_stamp+'.log')
log_report = open(log_file,'w'); log_report.close()
if use_Tkinter == 'yes' and debug_mode == 'no':
try:
global root; root = Tk()
StatusWindow(root,expr_var, alt_var, goelite_var, additional_var, exp_file_location_db)
root.destroy()
except Exception, exception:
try:
print traceback.format_exc()
badExit()
except Exception: sys.exit()
else: AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db,'')
def badExit():
print "\n...exiting AltAnalyze due to unexpected error"
try:
time_stamp = timestamp()
print_out = "Unknown error encountered during data processing.\nPlease see logfile in:\n\n"+log_file+"\nand report to altanalyze@gmail.com."
try:
if len(log_file)>0:
if commandLineMode == 'no':
if os.name == 'nt':
try: os.startfile('"'+log_file+'"')
except Exception: os.system('open "'+log_file+'"')
elif 'darwin' in sys.platform: os.system('open "'+log_file+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+log_file+'"')
if commandLineMode == 'no':
try: UI.WarningWindow(print_out,'Error Encountered!'); root.destroy()
except Exception: print print_out
except Exception: sys.exit()
except Exception: sys.exit()
sys.exit()
kill
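### Main analysis driver: unpacks the expression, alternative-exon, GO-Elite and additional parameter
### tuples, summarizes arrays or RNA-Seq reads as needed (APT, Agilent Feature Extraction, BED/BAM or
### Kallisto import), runs ExpressionBuilder and optional GO-Elite enrichment, and finally performs the
### alternative exon analysis for each comparison file.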
def AltAnalyzeMain(expr_var,alt_var,goelite_var,additional_var,exp_file_location_db,root):
### Hard-coded defaults
w = 'Agilent'; x = 'Affymetrix'; y = 'Ensembl'; z = 'any'; data_source = y; constitutive_source = z; manufacturer = x ### Constitutive source, is only really paid attention to if Ensembl, otherwise Affymetrix is used (even if default)
### Get default options for ExpressionBuilder and AltAnalyze
start_time = time.time()
test_goelite = 'no'; test_results_pannel = 'no'
global species; global array_type; global expression_data_format; global use_R; use_R = 'no'
global analysis_method; global p_threshold; global filter_probeset_types
global permute_p_threshold; global perform_permutation_analysis; global export_NI_values
global run_MiDAS; global analyze_functional_attributes; global microRNA_prediction_method
global calculate_normIntensity_p; global pathway_permutations; global avg_all_for_ss; global analyze_all_conditions
global remove_intronic_junctions
global agglomerate_inclusion_probesets; global expression_threshold; global factor_out_expression_changes
global only_include_constitutive_containing_genes; global remove_transcriptional_regulated_genes; global add_exons_to_annotations
global exclude_protein_details; global filter_for_AS; global use_direct_domain_alignments_only; global run_from_scratch
global explicit_data_type; explicit_data_type = 'null'
global altanalyze_files; altanalyze_files = []
species,array_type,manufacturer,constitutive_source,dabg_p,raw_expression_threshold,avg_all_for_ss,expression_data_format,include_raw_data, run_from_scratch, perform_alt_analysis = expr_var
analysis_method,p_threshold,filter_probeset_types,alt_exon_fold_variable,gene_expression_cutoff,remove_intronic_junctions,permute_p_threshold,perform_permutation_analysis, export_NI_values, analyze_all_conditions = alt_var
calculate_normIntensity_p, run_MiDAS, use_direct_domain_alignments_only, microRNA_prediction_method, filter_for_AS, additional_algorithms = additional_var
ge_fold_cutoffs,ge_pvalue_cutoffs,ge_ptype,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,pathway_permutations,mod,returnPathways = goelite_var
original_remove_intronic_junctions = remove_intronic_junctions
if run_from_scratch == 'Annotate External Results': analysis_method = 'external'
if returnPathways == 'no' or returnPathways == 'None':
returnPathways = None
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
try: exon_exp_threshold = fl.ExonExpThreshold()
except Exception: exon_exp_threshold = 'NA'
try: gene_exp_threshold = fl.GeneExpThreshold()
except Exception: gene_exp_threshold = 'NA'
try: exon_rpkm_threshold = fl.ExonRPKMThreshold()
except Exception: exon_rpkm_threshold = 'NA'
try: rpkm_threshold = fl.RPKMThreshold() ### Gene-Level
except Exception: rpkm_threshold = 'NA'
fl.setJunctionExpThreshold(raw_expression_threshold) ### For RNA-Seq, this specifically applies to exon-junctions
try: predictGroups = fl.predictGroups()
except Exception: predictGroups = False
try:
if fl.excludeLowExpressionExons(): excludeLowExpExons = 'yes'
else: excludeLowExpExons = 'no'
except Exception: excludeLowExpExons = 'no'
if test_goelite == 'yes': ### It can be difficult to get error warnings from GO-Elite, unless run here
results_dir = filepath(fl.RootDir())
elite_input_dirs = ['AltExonConfirmed','AltExon','regulated','upregulated','downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir+'GO-Elite/'+elite_dir,results_dir+'GO-Elite/denominator',results_dir+'GO-Elite/'+elite_dir
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,root
GO_Elite.remoteAnalysis(variables,'non-UI',Multi=mlp)
global perform_element_permutation_analysis; global permutations
perform_element_permutation_analysis = 'yes'; permutations = 2000
analyze_functional_attributes = 'yes' ### Do this by default (shouldn't substantially increase runtime)
if run_from_scratch != 'Annotate External Results' and (array_type != "3'array" and array_type!='RNASeq'):
if run_from_scratch !='Process AltAnalyze filtered':
try: raw_expression_threshold = float(raw_expression_threshold)
except Exception: raw_expression_threshold = 1
if raw_expression_threshold<1:
raw_expression_threshold = 1
print "Expression threshold < 1, forcing to be a minimum of 1."
try: dabg_p = float(dabg_p)
except Exception: dabg_p = 0
if dabg_p == 0 or dabg_p > 1:
print "Invalid dabg-p value threshold entered,(",dabg_p,") setting to default of 0.05"
dabg_p = 0.05
if use_direct_domain_alignments_only == 'direct-alignment': use_direct_domain_alignments_only = 'yes'
if run_from_scratch == 'Process CEL files': expression_data_format = 'log'
print "Beginning AltAnalyze Analysis... Format:", expression_data_format
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print_items=[]; #print [permute_p_threshold]; sys.exit()
print_items.append("AltAnalyze version 2.1.0 - Expression Analysis Parameters Being Used...")
print_items.append('\t'+'database'+': '+unique.getCurrentGeneDatabaseVersion())
print_items.append('\t'+'species'+': '+species)
print_items.append('\t'+'method'+': '+array_type)
print_items.append('\t'+'manufacturer'+': '+manufacturer)
print_items.append('\t'+'probability_statistic'+': '+probability_statistic)
print_items.append('\t'+'constitutive_source'+': '+constitutive_source)
print_items.append('\t'+'dabg_p'+': '+str(dabg_p))
if array_type == 'RNASeq':
print_items.append('\t'+'junction expression threshold'+': '+str(raw_expression_threshold))
print_items.append('\t'+'exon_exp_threshold'+': '+str(exon_exp_threshold))
print_items.append('\t'+'gene_exp_threshold'+': '+str(gene_exp_threshold))
print_items.append('\t'+'exon_rpkm_threshold'+': '+str(exon_rpkm_threshold))
print_items.append('\t'+'gene_rpkm_threshold'+': '+str(rpkm_threshold))
print_items.append('\t'+'exclude low expressing exons for RPKM'+': '+excludeLowExpExons)
else:
print_items.append('\t'+'raw_expression_threshold'+': '+str(raw_expression_threshold))
print_items.append('\t'+'avg_all_for_ss'+': '+avg_all_for_ss)
print_items.append('\t'+'expression_data_format'+': '+expression_data_format)
print_items.append('\t'+'include_raw_data'+': '+include_raw_data)
print_items.append('\t'+'run_from_scratch'+': '+run_from_scratch)
print_items.append('\t'+'perform_alt_analysis'+': '+perform_alt_analysis)
if avg_all_for_ss == 'yes': cs_type = 'core'
else: cs_type = 'constitutive'
print_items.append('\t'+'calculate_gene_expression_using'+': '+cs_type)
print_items.append("Alternative Exon Analysis Parameters Being Used..." )
print_items.append('\t'+'analysis_method'+': '+analysis_method)
print_items.append('\t'+'p_threshold'+': '+str(p_threshold))
print_items.append('\t'+'filter_data_types'+': '+filter_probeset_types)
print_items.append('\t'+'alt_exon_fold_variable'+': '+str(alt_exon_fold_variable))
print_items.append('\t'+'gene_expression_cutoff'+': '+str(gene_expression_cutoff))
print_items.append('\t'+'remove_intronic_junctions'+': '+remove_intronic_junctions)
print_items.append('\t'+'avg_all_for_ss'+': '+avg_all_for_ss)
print_items.append('\t'+'permute_p_threshold'+': '+str(permute_p_threshold))
print_items.append('\t'+'perform_permutation_analysis'+': '+perform_permutation_analysis)
print_items.append('\t'+'export_NI_values'+': '+export_NI_values)
print_items.append('\t'+'run_MiDAS'+': '+run_MiDAS)
print_items.append('\t'+'use_direct_domain_alignments_only'+': '+use_direct_domain_alignments_only)
print_items.append('\t'+'microRNA_prediction_method'+': '+microRNA_prediction_method)
print_items.append('\t'+'analyze_all_conditions'+': '+analyze_all_conditions)
print_items.append('\t'+'filter_for_AS'+': '+filter_for_AS)
if pathway_permutations == 'NA': run_GOElite = 'decide_later'
else: run_GOElite = 'run-immediately'
print_items.append('\t'+'run_GOElite'+': '+ run_GOElite)
universalPrintFunction(print_items)
if commandLineMode == 'yes': print 'Running command line mode:',commandLineMode
summary_data_db['gene_assayed'] = 0
summary_data_db['denominator_exp_genes']=0
summary_data_db['alt_events'] = 0
summary_data_db['denominator_exp_events'] = 0
summary_data_db['alt_genes'] = 0
summary_data_db['direct_domain_genes'] = 0
summary_data_db['miRNA_gene_denom'] = 0
summary_data_db['miRNA_gene_hits'] = 0
if test_results_pannel == 'yes': ### It can be difficult to get error warnings from GO-Elite, unless run here
graphic_links = []
graphic_links.append(['test','Config/AltAnalyze_structure-RNASeq.jpg'])
summary_data_db['QC']=graphic_links
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
dataset = 'test'; results_dir=''
print "Analysis Complete\n";
if root !='' and root !=None:
UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'GE',results_dir,dataset,'parent',summary_data_db)
root.destroy(); sys.exit()
global export_go_annotations; global aspire_output_list; global aspire_output_gene_list
global filter_probesets_by; global global_addition_factor; global onlyAnalyzeJunctions
global log_fold_cutoff; global aspire_cutoff; global annotation_system; global alt_exon_logfold_cutoff
"""dabg_p = 0.75; data_type = 'expression' ###used for expression analysis when dealing with AltMouse arrays
a = "3'array"; b = "exon"; c = "AltMouse"; e = "custom"; array_type = c
l = 'log'; n = 'non-log'; expression_data_format = l
hs = 'Hs'; mm = 'Mm'; dr = 'Dr'; rn = 'Rn'; species = mm
include_raw_data = 'yes'; expression_threshold = 70 ### Based on suggestion from BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196, for hu-exon 1.0 st array
avg_all_for_ss = 'no' ###Default is 'no' since we don't want all probes averaged for the exon arrays"""
###### Run ExpressionBuilder ######
"""ExpressionBuilder is used to:
(1) extract out gene expression values, provide gene annotations, and calculate summary gene statistics
(2) filter probesets based DABG p-values and export to pair-wise comparison files
(3) build array annotations files matched to gene structure features (e.g. exons, introns) using chromosomal coordinates
options 1-2 are executed in remoteExpressionBuilder and option 3 is by running ExonArrayEnsembl rules"""
try:
additional_algorithm = additional_algorithms.Algorithm()
additional_score = additional_algorithms.Score()
except Exception: additional_algorithm = 'null'; additional_score = 'null'
if analysis_method == 'FIRMA': analyze_metaprobesets = 'yes'
elif additional_algorithm == 'FIRMA': analyze_metaprobesets = 'yes'
else: analyze_metaprobesets = 'no'
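    ### FIRMA scores are derived from the residuals of a gene-level (metaprobeset) RMA fit, so APT is
    ### first run at the metaprobeset level and then re-run at the probeset level below.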
### Check to see if this is a real or FAKE (used for demonstration purposes) dataset
if run_from_scratch == 'Process CEL files' or 'Feature Extraction' in run_from_scratch:
for dataset in exp_file_location_db:
if run_from_scratch == 'Process CEL files':
fl = exp_file_location_db[dataset]
pgf_file=fl.InputCDFFile()
results_dir = filepath(fl.RootDir())
if '_demo' in pgf_file: ### Thus we are running demo CEL files and want to quit immediately
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
try:
print "Analysis Complete\n";
if root !='' and root !=None:
UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'AS',results_dir,dataset,'parent',summary_data_db)
except Exception: null=[]
skip_intro = 'yes'
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
reload(UI)
UI.getUpdatedParameters(array_type,species,run_from_scratch,results_dir)
try: AltAnalyzeSetup('no')
except Exception: sys.exit()
if 'CEL files' in run_from_scratch:
import APT
try:
try:
APT.probesetSummarize(exp_file_location_db,analyze_metaprobesets,filter_probeset_types,species,root)
if analyze_metaprobesets == 'yes':
analyze_metaprobesets = 'no' ### Re-run the APT analysis to obtain probeset rather than gene-level results (only the residuals are needed from a metaprobeset run)
APT.probesetSummarize(exp_file_location_db,analyze_metaprobesets,filter_probeset_types,species,root)
except Exception:
import platform
print "Trying to change APT binary access privileges"
for dataset in exp_file_location_db: ### Instance of the Class ExpressionFileLocationData
fl = exp_file_location_db[dataset]; apt_dir =fl.APTLocation()
if '/bin' in apt_dir: apt_file = apt_dir +'/apt-probeset-summarize' ### if the user selects an APT directory
elif os.name == 'nt': apt_file = apt_dir + '/PC/'+platform.architecture()[0]+'/apt-probeset-summarize.exe'
elif 'darwin' in sys.platform: apt_file = apt_dir + '/Mac/apt-probeset-summarize'
elif 'linux' in sys.platform:
if '32bit' in platform.architecture(): apt_file = apt_dir + '/Linux/32bit/apt-probeset-summarize'
elif '64bit' in platform.architecture(): apt_file = apt_dir + '/Linux/64bit/apt-probeset-summarize'
apt_file = filepath(apt_file)
os.chmod(apt_file,0777)
midas_dir = string.replace(apt_file,'apt-probeset-summarize','apt-midas')
os.chmod(midas_dir,0777)
APT.probesetSummarize(exp_file_location_db,analysis_method,filter_probeset_types,species,root)
except Exception:
            print_out = 'AltAnalyze encountered an unexpected error while running Affymetrix\n'
print_out += 'Power Tools (APT). Additional information may be found in the directory\n'
print_out += '"ExpressionInput/APT" in the output directory. You may also encounter issues\n'
            print_out += 'if you are logged into an account with restricted privileges.\n\n'
print_out += 'If this issue can not be resolved, contact AltAnalyze help or run RMA outside\n'
print_out += 'of AltAnalyze and import the results using the analysis option "expression file".\n'
print traceback.format_exc()
try:
UI.WarningWindow(print_out,'Exit')
root.destroy(); sys.exit()
except Exception:
print print_out; sys.exit()
elif 'Feature Extraction' in run_from_scratch:
import ProcessAgilentArrays
try: ProcessAgilentArrays.agilentSummarize(exp_file_location_db)
except Exception:
print_out = 'Agilent array import and processing failed... see error log for details...'
print traceback.format_exc()
try:
UI.WarningWindow(print_out,'Exit')
root.destroy(); sys.exit()
except Exception:
print print_out; sys.exit()
reload(ProcessAgilentArrays)
if run_from_scratch == 'Process RNA-seq reads' or run_from_scratch == 'buildExonExportFiles':
import RNASeq; reload(RNASeq); import RNASeq
for dataset in exp_file_location_db: fl = exp_file_location_db[dataset]
### The below function aligns splice-junction coordinates to Ensembl exons from BED Files and
### exports AltAnalyze specific databases that are unique to this dataset to the output directory
try: fastq_folder = fl.RunKallisto()
except Exception: print traceback.format_exc()
if len(fastq_folder)>0:
try:
RNASeq.runKallisto(species,dataset,root_dir,fastq_folder,returnSampleNames=False)
biotypes = 'ran'
except Exception: biotypes='failed'
else:
analyzeBAMs = False; bedFilesPresent = False
dir_list = unique.read_directory(fl.BEDFileDir())
for file in dir_list:
if '.bam' in string.lower(file):
analyzeBAMs=True
if '.bed' in string.lower(file):
bedFilesPresent=True
if analyzeBAMs and bedFilesPresent==False:
import multiBAMtoBED
bam_dir = fl.BEDFileDir()
refExonCoordinateFile = filepath('AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt')
outputExonCoordinateRefBEDfile = bam_dir+'/BedRef/'+species+'_'+string.replace(dataset,'exp.','')
analysisType = ['exon','junction','reference']
#analysisType = ['junction']
multiBAMtoBED.parallelBAMProcessing(bam_dir,refExonCoordinateFile,outputExonCoordinateRefBEDfile,analysisType=analysisType,useMultiProcessing=fl.multiThreading(),MLP=mlp,root=root)
biotypes = RNASeq.alignExonsAndJunctionsToEnsembl(species,exp_file_location_db,dataset,Multi=mlp)
if biotypes == 'failed':
print_out = 'No valid chromosomal positions in the input BED or BioScope files. Exiting AltAnalyze.'
if len(fastq_folder)>0:
if 'FTP' in traceback.format_exc():
                    print_out = 'AltAnalyze was unable to retrieve a transcript fasta sequence file from the Ensembl website. '
print_out += 'Ensure you are connected to the internet and that the website http://ensembl.org is live.'
else:
print_out = 'An unexplained error was encountered with Kallisto analysis:\n'
print_out += traceback.format_exc()
try:
UI.WarningWindow(print_out,'Exit')
root.destroy(); sys.exit()
except Exception:
print print_out; sys.exit()
reload(RNASeq)
if root_dir in biotypes:
print_out = 'Exon-level BED coordinate predictions exported to:\n'+biotypes
print_out+= '\n\nAfter obtaining exon expression estimates, rename exon BED files to\n'
print_out+= 'match the junction name (e.g., Sample1__exon.bed and Sample1__junction.bed)\n'
print_out+= 'and re-run AltAnalyze (see tutorials at http://altanalyze.org for help).'
UI.InfoWindow(print_out,'Export Complete')
try: root.destroy(); sys.exit()
except Exception: sys.exit()
if predictGroups == True:
expFile = fl.ExpFile()
if array_type == 'RNASeq':
exp_threshold=100; rpkm_threshold=10
else:
exp_threshold=200; rpkm_threshold=8
RNASeq.singleCellRNASeqWorkflow(species, array_type, expFile, mlp, exp_threshold=exp_threshold, rpkm_threshold=rpkm_threshold)
goelite_run = False
if run_from_scratch == 'Process Expression file' or run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or 'Feature Extraction' in run_from_scratch:
if (fl.NormMatrix()=='quantile' or fl.NormMatrix()=='group') and 'Feature Extraction' not in run_from_scratch:
import NormalizeDataset
try: NormalizeDataset.normalizeDataset(fl.ExpFile(),normalization=fl.NormMatrix(),platform=array_type)
except Exception: print "Normalization failed for unknown reasons..."
#"""
status = ExpressionBuilder.remoteExpressionBuilder(species,array_type,
dabg_p,raw_expression_threshold,avg_all_for_ss,expression_data_format,
manufacturer,constitutive_source,data_source,include_raw_data,
perform_alt_analysis,ge_fold_cutoffs,ge_pvalue_cutoffs,ge_ptype,
exp_file_location_db,root)
reload(ExpressionBuilder) ### Clears Memory
#"""
graphics=[]
if fl.MarkerFinder() == 'yes':
            ### Identify putative condition-specific marker genes
import markerFinder
fl.setOutputDir(root_dir) ### This needs to be set here
exp_file = fl.ExpFile()
if array_type != "3'array": exp_file = string.replace(exp_file,'.txt','-steady-state.txt')
markerFinder_inputs = [exp_file,fl.DatasetFile()] ### Output a replicate and non-replicate version
markerFinder_inputs = [exp_file] ### Only considers the replicate and not mean analysis (recommended)
for input_exp_file in markerFinder_inputs:
                ### This applies to an ExpressionOutput DATASET file composed of gene expression values (averages already present)
try:
output_dir = markerFinder.getAverageExpressionValues(input_exp_file,array_type) ### Either way, make an average annotated file from the DATASET file
except Exception:
print "Unknown MarkerFinder failure (possible filename issue or data incompatibility)..."
print traceback.format_exc()
continue
if 'DATASET' in input_exp_file:
group_exp_file = string.replace(input_exp_file,'DATASET','AVERAGE')
else:
group_exp_file = (input_exp_file,output_dir) ### still analyze the primary sample
compendiumType = 'protein_coding'
if expression_data_format == 'non-log': logTransform = True
else: logTransform = False
try: markerFinder.analyzeData(group_exp_file,species,array_type,compendiumType,AdditionalParameters=fl,logTransform=logTransform)
except Exception: None
### Generate heatmaps (unclustered - order by markerFinder)
try: graphics = markerFinder.generateMarkerHeatMaps(fl,array_type,graphics=graphics,Species=species)
except Exception: print traceback.format_exc()
remove_intronic_junctions = original_remove_intronic_junctions ### This var gets reset when running FilterDABG
try:
summary_data_db['QC'] = fl.GraphicLinks()+graphics ### provides links for displaying QC and clustering plots
except Exception:
null=[] ### Visualization support through matplotlib either not present or visualization options excluded
#print '!!!!!finished expression builder'
#returnLargeGlobalVars()
expression_data_format = 'log' ### This variable is set from non-log in FilterDABG when present (version 1.16)
try:
parent_dir = fl.RootDir()+'/GO-Elite/regulated/'
dir_list = read_directory(parent_dir)
for file in dir_list:
input_file_dir = parent_dir+'/'+file
inputType = 'IDs'
interactionDirs = ['WikiPathways','KEGG','BioGRID','TFTargets']
output_dir = parent_dir
degrees = 'direct'
input_exp_file = input_file_dir
gsp = UI.GeneSelectionParameters(species,array_type,manufacturer)
gsp.setGeneSet('None Selected')
gsp.setPathwaySelect('')
gsp.setGeneSelection('')
gsp.setOntologyID('')
gsp.setIncludeExpIDs(True)
UI.networkBuilder(input_file_dir,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,'')
except Exception:
print traceback.format_exc()
if status == 'stop':
### See if the array and species are compatible with GO-Elite analysis
system_codes = UI.getSystemInfo()
go_elite_analysis_supported = 'yes'
species_names = UI.getSpeciesInfo()
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]; results_dir = filepath(fl.RootDir())
### Perform GO-Elite Analysis
if pathway_permutations != 'NA':
try:
print '\nBeginning to run GO-Elite analysis on alternative exon results'
elite_input_dirs = ['AltExonConfirmed','AltExon','regulated','upregulated','downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir+'GO-Elite/'+elite_dir,results_dir+'GO-Elite/denominator',results_dir+'GO-Elite/'+elite_dir
input_dir = results_dir+'GO-Elite/'+elite_dir
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,root
try: input_files = read_directory(input_dir) ### Are there any files to analyze?
except Exception: input_files=[]
if len(input_files)>0:
try: GO_Elite.remoteAnalysis(variables,'non-UI',Multi=mlp); goelite_run = True
except Exception,e:
print e
print "GO-Elite analysis failed"
try: GO_Elite.moveMAPPFinderFiles(file_dirs[0])
except Exception: print 'Input GO-Elite files could NOT be moved.'
try: GO_Elite.moveMAPPFinderFiles(file_dirs[1])
except Exception: print 'Input GO-Elite files could NOT be moved.'
except Exception: pass
if goelite_run == False:
print 'No GO-Elite input files to analyze (check your criterion).'
print_out = 'Analysis complete. Gene expression\nsummary exported to "ExpressionOutput".'
try:
if use_Tkinter == 'yes':
print "Analysis Complete\n"; UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'GE',results_dir,dataset,'parent',summary_data_db)
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
if go_elite_analysis_supported == 'yes':
UI.getUpdatedParameters(array_type,species,run_from_scratch,file_dirs)
try: AltAnalyzeSetup('no')
except Exception:
print traceback.format_exc()
sys.exit()
else: print '\n'+print_out; sys.exit()
except Exception:
#print 'Failed to report status through GUI.'
sys.exit()
else: altanalyze_files = status[1] ### These files are the comparison files to analyze
elif run_from_scratch == 'update DBs':
null=[] ###Add link to new module here (possibly)
#updateDBs(species,array_type)
sys.exit()
if perform_alt_analysis != 'expression': ###Thus perform_alt_analysis = 'both' or 'alt' (default when skipping expression summary step)
###### Run AltAnalyze ######
global dataset_name; global summary_results_db; global summary_results_db2
summary_results_db={}; summary_results_db2={}; aspire_output_list=[]; aspire_output_gene_list=[]
onlyAnalyzeJunctions = 'no'; agglomerate_inclusion_probesets = 'no'; filter_probesets_by = 'NA'
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if filter_probeset_types == 'junctions-only': onlyAnalyzeJunctions = 'yes'
elif filter_probeset_types == 'combined-junctions': agglomerate_inclusion_probesets = 'yes'; onlyAnalyzeJunctions = 'yes'
elif filter_probeset_types == 'exons-only': analysis_method = 'splicing-index'; filter_probesets_by = 'exon'
if filter_probeset_types == 'combined-junctions' and array_type == 'junction' or array_type == 'RNASeq': filter_probesets_by = 'all'
else: filter_probesets_by = filter_probeset_types
c = 'Ensembl'; d = 'Entrez Gene'
annotation_system = c
expression_threshold = 0 ###This is different than the raw_expression_threshold (probably shouldn't filter so set to 0)
if analysis_method == 'linearregres-rlm': analysis_method = 'linearregres';use_R = 'yes'
if gene_expression_cutoff<1:
gene_expression_cutoff = 2 ### A number less than one is invalid
print "WARNING!!!! Invalid gene expression fold cutoff entered,\nusing the default value of 2, must be greater than 1."
log_fold_cutoff = math.log(float(gene_expression_cutoff),2)
if analysis_method != 'ASPIRE' and analysis_method != 'none':
if p_threshold <= 0 or p_threshold >1:
p_threshold = 0.05 ### A value outside the range (0,1] is invalid; reset to the default
print "WARNING!!!! Invalid alternative exon p-value threshold entered,\nusing the default value of 0.05."
if alt_exon_fold_variable<1:
alt_exon_fold_variable = 1 ### A number less than one is invalid
print "WARNING!!!! Invalid alternative exon fold cutoff entered,\nusing the default value of 2, must be greater than 1."
try: alt_exon_logfold_cutoff = math.log(float(alt_exon_fold_variable),2)
except Exception: alt_exon_logfold_cutoff = 1
else: alt_exon_logfold_cutoff = float(alt_exon_fold_variable)
global_addition_factor = 0
export_junction_comparisons = 'no' ### No longer accessed in this module - only in update mode through a different module
factor_out_expression_changes = 'yes' ### Use 'no' if data is normalized already or no expression normalization for ASPIRE desired
only_include_constitutive_containing_genes = 'yes'
remove_transcriptional_regulated_genes = 'yes'
add_exons_to_annotations = 'no'
exclude_protein_details = 'no'
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method: annotation_system = d
if 'linear' in analysis_method: analysis_method = 'linearregres'
if 'aspire' in analysis_method: analysis_method = 'ASPIRE'
if array_type == 'AltMouse': species = 'Mm'
#if export_NI_values == 'yes': remove_transcriptional_regulated_genes = 'no'
###Saves run-time while testing the software (global variable stored)
#import_dir = '/AltDatabase/affymetrix/'+species
#dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
### Get Ensembl-GO and pathway annotations from GO-Elite files
universalPrintFunction(["Importing GO-Elite pathway/GO annotations"])
global go_annotations; go_annotations={}
import BuildAffymetrixAssociations
go_annotations = BuildAffymetrixAssociations.getEnsemblAnnotationsFromGOElite(species)
global probeset_annotations_file
if array_type == 'RNASeq': probeset_annotations_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_junctions.txt'
elif array_type == 'AltMouse': probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+'MASTER-probeset-transcript.txt'
else: probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
#"""
if analysis_method != 'none':
analysis_summary = RunAltAnalyze() ### Only run if an analysis method is specified (only available for RNA-Seq and junction analyses)
else: analysis_summary = None
if analysis_summary != None:
summary_results_db, aspire_output_gene_list, number_events_analyzed = analysis_summary
summary_data_db2 = copy.deepcopy(summary_data_db)
for i in summary_data_db2: del summary_data_db[i] ### If we reset the variable it violates its global declaration... do this instead
#universalPrintFunction(['Alternative Exon Results for Junction Comparisons:'])
#for i in summary_data_db: universalPrintFunction([i+' '+ str(summary_data_db[i])])
exportSummaryResults(summary_results_db,analysis_method,aspire_output_list,aspire_output_gene_list,annotate_db,array_type,number_events_analyzed,root_dir)
else:
### Occurs for RNASeq when no junctions are present
summary_data_db2={}
if array_type == 'junction' or array_type == 'RNASeq':
#Reanalyze junction array data separately for individual probesets rather than reciprocal junctions
if array_type == 'junction': explicit_data_type = 'exon'
elif array_type == 'RNASeq': explicit_data_type = 'junction'
else: report_single_probeset_results = 'no'
### Obtain exon analysis defaults
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults('exon',species)
analysis_method, null, filter_probeset_types, null, null, alt_exon_fold_variable, null, null, null, null, null, null, null, calculate_normIntensity_p, null = alt_exon_defaults
filter_probesets_by = filter_probeset_types
if additional_algorithm == 'splicing-index' or additional_algorithm == 'FIRMA':
analysis_method = additional_algorithm
#print [analysis_method], [filter_probeset_types], [p_threshold], [alt_exon_fold_variable]
try: alt_exon_logfold_cutoff = math.log(float(additional_score),2)
except Exception: alt_exon_logfold_cutoff = 1
agglomerate_inclusion_probesets = 'no'
try:
summary_results_db, aspire_output_gene_list, number_events_analyzed = RunAltAnalyze()
exportSummaryResults(summary_results_db,analysis_method,aspire_output_list,aspire_output_gene_list,annotate_db,'exon',number_events_analyzed,root_dir)
if len(summary_data_db2)==0: summary_data_db2 = summary_data_db; explicit_data_type = 'exon-only'
#universalPrintFunction(['Alternative Exon Results for Individual Probeset Analyses:'])
#for i in summary_data_db: universalPrintFunction([i+' '+ str(summary_data_db[i])])
except Exception:
print traceback.format_exc()
None
#"""
### Perform dPSI Analysis
try:
if 'counts.' in fl.CountsFile(): pass
else:
dir_list = read_directory(fl.RootDir()+'ExpressionInput')
for file in dir_list:
if 'exp.' in file and 'steady-state' not in file:
fl.setExpFile(fl.RootDir()+'ExpressionInput/'+file)
#print [fl.RootDir()+'ExpressionInput/'+file]
except Exception:
search_dir = fl.RootDir()+'/ExpressionInput'
files = unique.read_directory(fl.RootDir()+'/ExpressionInput')
for file in files:
if 'exp.' in file and 'steady-state.txt' not in file:
fl.setExpFile(search_dir+'/'+file)
try:
#"""
try:
graphic_links2,cluster_input_file=ExpressionBuilder.unbiasedComparisonSpliceProfiles(fl.RootDir(),
species,array_type,expFile=fl.CountsFile(),min_events=0,med_events=1)
except Exception: pass
#"""
inputpsi = fl.RootDir()+'AltResults/AlternativeOutput/'+species+'_'+array_type+'_top_alt_junctions-PSI-clust.txt'
### Calculate ANOVA p-value stats based on groups
if array_type !='gene' and array_type != 'exon':
matrix,compared_groups,original_data = statistics.matrixImport(inputpsi)
matrix_pvalues=statistics.runANOVA(inputpsi,matrix,compared_groups)
anovaFilteredDir = statistics.returnANOVAFiltered(inputpsi,original_data,matrix_pvalues)
graphic_link1 = ExpressionBuilder.exportHeatmap(anovaFilteredDir)
try: summary_data_db2['QC']+=graphic_link1
except Exception: summary_data_db2['QC']=graphic_link1
except Exception: print traceback.format_exc()
import RNASeq
try:
graphic_link = RNASeq.compareExonAndJunctionResults(species,array_type,summary_results_db,root_dir)
try: summary_data_db2['QC']+=graphic_link
except Exception: summary_data_db2['QC']=graphic_link
except Exception:
print traceback.format_exc()
#"""
### Export the top 15 spliced genes
try:
altresult_dir = fl.RootDir()+'/AltResults/'
splicing_results_root = altresult_dir+'/Clustering/'
dir_list = read_directory(splicing_results_root)
gene_string=''
altanalyze_results_folder = altresult_dir+'/RawSpliceData/'+species
### Lookup the raw expression dir
expression_results_folder = string.replace(altresult_dir,'AltResults','ExpressionInput')
expression_dir = UI.getValidExpFile(expression_results_folder)
show_introns=False
try: altresult_dir = UI.getValidSplicingScoreFile(altanalyze_results_folder)
except Exception,e:
print traceback.format_exc()
analysisType='plot'
for file in dir_list:
if 'AltExonConfirmed' in file:
gene_dir = splicing_results_root+'/'+file
genes = UI.importGeneList(gene_dir,limit=50) ### list of gene IDs or symbols
gene_string = gene_string+','+genes
print 'Imported genes from',file,'\n'
analysisType='plot'
for file in dir_list:
if 'Combined-junction-exon-evidence' in file and 'top' not in file:
gene_dir = splicing_results_root+'/'+file
try: isoform_dir = UI.exportJunctionList(gene_dir,limit=50) ### list of gene IDs or symbols
except Exception: print traceback.format_exc()
UI.altExonViewer(species,array_type,expression_dir, gene_string, show_introns, analysisType, None); print 'completed'
UI.altExonViewer(species,array_type,altresult_dir, gene_string, show_introns, analysisType, None); print 'completed'
except Exception:
print traceback.format_exc()
if array_type != 'exon' and array_type != 'gene':
### SashimiPlot Visualization
try:
top_PSI_junction = inputpsi[:-4]+'-ANOVA.txt'
isoform_dir2 = UI.exportJunctionList(top_PSI_junction,limit=50) ### list of gene IDs or symbols
except Exception:
print traceback.format_exc()
try:
analyzeBAMs = False
dir_list = unique.read_directory(fl.RootDir())
for file in dir_list:
if '.bam' in string.lower(file):
analyzeBAMs=True
if analyzeBAMs:
### Create sashimi plot index
import SashimiIndex
SashimiIndex.remoteIndexing(species,fl)
import SashimiPlot
print 'Exporting Sashimi Plots for the top-predicted splicing events... be patient'
try: SashimiPlot.remoteSashimiPlot(species,fl,fl.RootDir(),isoform_dir) ### assuming the bam files are in the root-dir
except Exception: pass # print traceback.format_exc()
print 'completed'
try: SashimiPlot.remoteSashimiPlot(species,fl,fl.RootDir(),isoform_dir2) ### assuming the bam files are in the root-dir
except Exception: pass #print traceback.format_exc()
print 'completed'
### Try again, in case the symbol conversion failed
SashimiPlot.justConvertFilenames(species,fl.RootDir()+'/SashimiPlots')
else:
print 'No BAM files present in the root directory... skipping SashimiPlot analysis...'
except Exception:
print traceback.format_exc()
try:
clearObjectsFromMemory(exon_db); clearObjectsFromMemory(constitutive_probeset_db)
clearObjectsFromMemory(go_annotations); clearObjectsFromMemory(original_microRNA_z_score_data)
clearObjectsFromMemory(last_exon_region_db)
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
except Exception: null=[]
#print '!!!!!finished'
#returnLargeGlobalVars()
end_time = time.time(); time_diff = int(end_time-start_time)
universalPrintFunction(["Analyses finished in %d seconds" % time_diff])
#universalPrintFunction(["Hit Enter/Return to exit AltAnalyze"])
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]; results_dir = filepath(fl.RootDir())
### Perform GO-Elite Analysis
if pathway_permutations != 'NA':
goelite_run = False
print '\nBeginning to run GO-Elite analysis on alternative exon results'
elite_input_dirs = ['AltExonConfirmed','AltExon','regulated','upregulated','downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir+'GO-Elite/'+elite_dir,results_dir+'GO-Elite/denominator',results_dir+'GO-Elite/'+elite_dir
input_dir = results_dir+'GO-Elite/'+elite_dir
try: input_files = read_directory(input_dir) ### Are there any files to analyze?
except Exception: input_files = []
if len(input_files)>0:
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,root
try: GO_Elite.remoteAnalysis(variables,'non-UI',Multi=mlp); goelite_run = True
except Exception,e:
print e
print "GO-Elite analysis failed"
try: GO_Elite.moveMAPPFinderFiles(file_dirs[0])
except Exception: print 'Input GO-Elite files could NOT be moved.'
try: GO_Elite.moveMAPPFinderFiles(file_dirs[1])
except Exception: print 'Input GO-Elite files could NOT be moved.'
if goelite_run == False:
print 'No GO-Elite input files to analyze (check your criterion).'
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
try:
if root !='' and root !=None:
print "Analysis Complete\n";
UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'AS',results_dir,dataset_name,'specific',summary_data_db2)
except Exception:
print traceback.format_exc()
pass #print 'Failed to open GUI.'
skip_intro = 'yes'
if root !='' and root !=None:
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
try: UI.getUpdatedParameters(array_type,species,run_from_scratch,file_dirs)
except Exception: pass
try: AltAnalyzeSetup('no')
except Exception: sys.exit()
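### exportSummaryResults: write the probeset-level and gene-level alternative exon comparison tables via ResultsExport_module; prints a notice if there are no results to summarize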
def exportSummaryResults(summary_results_db,analysis_method,aspire_output_list,aspire_output_gene_list,annotate_db,array_type,number_events_analyzed,root_dir):
try:
ResultsExport_module.outputSummaryResults(summary_results_db,'',analysis_method,root_dir)
#ResultsExport_module.outputSummaryResults(summary_results_db2,'-uniprot_attributes',analysis_method)
ResultsExport_module.compareAltAnalyzeResults(aspire_output_list,annotate_db,number_events_analyzed,'no',analysis_method,array_type,root_dir)
ResultsExport_module.compareAltAnalyzeResults(aspire_output_gene_list,annotate_db,'','yes',analysis_method,array_type,root_dir)
except UnboundLocalError: print "...No results to summarize" ###Occurs if there is a problem parsing these files
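### checkGOEliteProbesets: return 'yes' if any probeset from the Affymetrix CSV annotation file (fn) is present in the installed GO-Elite Ensembl/Entrez-to-probeset association files, otherwise 'no'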
def checkGOEliteProbesets(fn,species):
### Get all probesets in GO-Elite files
mod_source = 'Ensembl'+'-'+'Affymetrix'
import gene_associations
try: ensembl_to_probeset_id = gene_associations.getGeneToUid(species,mod_source)
except Exception: ensembl_to_probeset_id={}
mod_source = 'EntrezGene'+'-'+'Affymetrix'
try: entrez_to_probeset_id = gene_associations.getGeneToUid(species,mod_source)
except Exception: entrez_to_probeset_id={}
probeset_db={}
for gene in ensembl_to_probeset_id:
for probeset in ensembl_to_probeset_id[gene]: probeset_db[probeset]=[]
for gene in entrez_to_probeset_id:
for probeset in entrez_to_probeset_id[gene]: probeset_db[probeset]=[]
###Import an Affymetrix array annotation file (from http://www.affymetrix.com) and parse out annotations
csv_probesets = {}; x=0; y=0
fn=filepath(fn); status = 'no'
for line in open(fn,'r').readlines():
probeset_data = string.replace(line,'\n','') #remove endline
probeset_data = string.replace(probeset_data,'---','')
affy_data = string.split(probeset_data[1:-1],'","')
if x==0 and line[0]!='#':
x=1; affy_headers = affy_data
for header in affy_headers:
y = 0
while y < len(affy_headers):
if 'Probe Set ID' in affy_headers[y] or 'probeset_id' in affy_headers[y]: ps = y
y+=1
elif x == 1:
try: probeset = affy_data[ps]; csv_probesets[probeset]=[]
except Exception: null=[]
for probeset in csv_probesets:
if probeset in probeset_db: status = 'yes';break
return status
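### SpeciesData: lightweight container for one species record (species code, full name, compatible ID systems and taxonomy ID)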
class SpeciesData:
def __init__(self, abrev, species, systems, taxid):
self._abrev = abrev; self._species = species; self._systems = systems; self._taxid = taxid
def SpeciesCode(self): return self._abrev
def SpeciesName(self): return self._species
def Systems(self): return self._systems
def TaxID(self): return self._taxid
def __repr__(self): return self.SpeciesCode()+'|'+self.SpeciesName()
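### getSpeciesInfo: import the species configuration via UI and return the species_codes records together with a species-code-to-name dictionary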
def getSpeciesInfo():
### Used by AltAnalyze
UI.importSpeciesInfo(); species_names={}
for species_full in species_codes:
sc = species_codes[species_full]; abrev = sc.SpeciesCode()
species_names[abrev] = species_full
return species_codes,species_names
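### importGOEliteSpeciesInfo: parse the tab-delimited Config/goelite_species.txt (code, name, tax ID, compatible MODs) into SpeciesData objects keyed by species name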
def importGOEliteSpeciesInfo():
filename = 'Config/goelite_species.txt'; x=0
fn=filepath(filename); species_codes={}
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
abrev,species,taxid,compatible_mods = string.split(data,'\t')
if x==0: x=1
else:
compatible_mods = string.split(compatible_mods,'|')
sd = SpeciesData(abrev,species,compatible_mods,taxid)
species_codes[species] = sd
return species_codes
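### exportGOEliteSpeciesInfo: re-write Config/goelite_species.txt from the supplied species_codes dictionary, skipping 'other' and 'all-' placeholder entries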
def exportGOEliteSpeciesInfo(species_codes):
fn=filepath('Config/goelite_species.txt'); data = open(fn,'w'); x=0
header = string.join(['species_code','species_name','tax_id','compatible_algorithms'],'\t')+'\n'
data.write(header)
for species in species_codes:
if 'other' not in species and 'all-' not in species:
sd = species_codes[species]
mods = string.join(sd.Systems(),'|')
values = [sd.SpeciesCode(),sd.SpeciesName(),sd.TaxID(),mods]
values = string.join(values,'\t')+'\n'
data.write(values)
data.close()
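### TimeStamp: return the current date as a zero-padded YYYYMMDD string (e.g., '20240131')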
def TimeStamp():
time_stamp = time.localtime()
year = str(time_stamp[0]); month = str(time_stamp[1]); day = str(time_stamp[2])
if len(month)<2: month = '0'+month
if len(day)<2: day = '0'+day
return year+month+day
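### verifyFile: return 'found' if the file exists and contains at least one line, otherwise 'not found'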
def verifyFile(filename):
status = 'not found'
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines(): status = 'found';break
except Exception: status = 'not found'
return status
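### verifyFileLength: count up to the first 10 lines of a file (0 if the file is missing) - a quick test for whether the file exists and is non-empty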
def verifyFileLength(filename):
count = 0
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
count+=1
if count>9: break
except Exception: null=[]
return count
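### verifyGroupFileFormat: return True if any line of the groups file contains exactly three tab-delimited fields (typically sample ID, group number, group name)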
def verifyGroupFileFormat(filename):
correct_format = False
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if len(string.split(data,'\t'))==3:
correct_format = True
break
except Exception: correct_format = False
return correct_format
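### displayHelp: print the bundled Documentation/commandline.txt to the console and exit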
def displayHelp():
fn=filepath('Documentation/commandline.txt')
print '\n################################################\nAltAnalyze Command-Line Help'
for line in open(fn,'rU').readlines():
print cleanUpLine(line)
print '\n################################################ - END HELP'
sys.exit()
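### searchDirectory: return the path (relative to the current gene database version directory) of the first file in 'directory' whose name contains 'var'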
def searchDirectory(directory,var):
directory = unique.filepath(directory)
files = unique.read_directory(directory)
version = unique.getCurrentGeneDatabaseVersion()
for file in files:
if var in file:
location = string.split(directory+'/'+file,version)[1][1:]
return [location]
###### Command Line Functions (AKA Headless Mode) ######
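### commandLineRun: parse the command-line flags with getopt and dispatch the requested headless workflow (database updates, expression/alternative exon analysis, or standalone visualization and accessory analyses)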
def commandLineRun():
print 'Running commandline options'
import getopt
#/hd3/home/nsalomonis/normalization/mir1 - boxer
#python AltAnalyze.py --species Mm --arraytype "3'array" --celdir "C:/CEL" --output "C:/CEL" --expname miR1_column --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --species Hs --arraytype "3'array" --FEdir "C:/FEfiles" --output "C:/FEfiles" --channel_to_extract "green/red ratio" --expname cancer --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --celdir "C:/CEL" --output "C:/CEL" --expname miR1_column
#open ./AltAnalyze.app --celdir "/Users/nsalomonis/Desktop" --output "/Users/nsalomonis/Desktop" --expname test
#python AltAnalyze.py --species Mm --arraytype "3'array" --expdir "C:/CEL/ExpressionInput/exp.miR1_column.txt" --output "C:/CEL" --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --species Mm --platform RNASeq --bedDir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles" --groupdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/ExpressionInput/groups.test.txt" --compdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/ExpressionInput/comps.test.txt" --output "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles" --expname "test"
#python AltAnalyze.py --species Mm --platform RNASeq --filterdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/" --output "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles"
#python AltAnalyze.py --expdir "/Users/nsalomonis/Desktop/Nathan/ExpressionInput/exp.test.txt" --exonMapFile "/Users/nsalomonis/Desktop/Nathan/hgu133_probe.txt" --species Hs --platform "3'array" --output "/Users/nsalomonis/Desktop/Nathan"
#python AltAnalyze.py --species Hs --platform "3'array" --expname test --channelToExtract green --FEdir /Users/saljh8/Downloads/AgllentTest/ --output /Users/saljh8/Downloads/AgllentTest/
global apt_location; global root_dir; global probability_statistic; global log_file; global summary_data_db; summary_data_db={}
###required
marker_finder='no'
manufacturer='Affymetrix'
constitutive_source='Ensembl'
ensembl_version = 'current'
species_code = None
species = None
main_input_folder = None
output_dir = None
array_type = None
input_annotation_file = None
groups_file = None
comps_file = None
input_cdf_file = None
exp_name = None
run_GOElite = 'yes'
visualize_qc_results = 'yes'
run_lineage_profiler = 'yes'
input_exp_file = ''
cel_file_dir = ''
input_stats_file = ''
input_filtered_dir = ''
external_annotation_dir = ''
xhyb_remove = 'no'
update_method = []
update_dbs = 'no'
analyze_all_conditions = 'no'
return_all = 'no'
additional_array_types = []
remove_intronic_junctions = 'no'
ignore_built_species = 'no'
build_exon_bedfile = 'no'
compendiumType = 'protein_coding'
probability_statistic = 'unpaired t-test'
specific_array_type = None
additional_resources = [None]
wpid = None
mod = 'Ensembl'
transpose = False
input_file_dir = None
denom_file_dir = None
image_export = []
selected_species = ['Hs','Mm','Rn'] ### These are the species for which additional array types are currently supported
selected_platforms = ['AltMouse','exon','gene','junction']
returnPathways = 'no'
compendiumPlatform = 'gene'
exonMapFile = None
platformType = None ### This option is used to store the original platform type
perform_alt_analysis = 'no'
mappedExonAnalysis = False ### Map the original IDs to the RNA-Seq exon database (when True)
microRNA_prediction_method = None
pipelineAnalysis = True
OntologyID=''
PathwaySelection=''
GeneSetSelection=''
interactionDirs=[]
inputType='ID list'
Genes=''
degrees='direct'
includeExpIDs=True
update_interactions=False
data_type = 'raw expression'
batch_effects = 'no'
channel_to_extract = None
normalization = False
justShowTheseIDs = ''
display=False
accessoryAnalysis=''
modelSize=None
geneModel=False
run_from_scratch = None
systemToUse = None ### For other IDs
custom_reference = False
multiThreading = True
genesToReport = 60
correlateAll = True
expression_data_format='log'
runICGS=False
IDtype=None
runKallisto = False
original_arguments = sys.argv
arguments=[]
for arg in original_arguments:
arg = string.replace(arg,'\xe2\x80\x9c','') ### These are non-standard forward quotes
arg = string.replace(arg,'\xe2\x80\x9d','') ### These are non-standard reverse quotes
arg = string.replace(arg,'\xe2\x80\x93','-') ### These are non-standard dashes
arg = string.replace(arg,'\x96','-') ### These are non-standard dashes
arg = string.replace(arg,'\x93','') ### These are non-standard forward quotes
arg = string.replace(arg,'\x94','') ### These are non-standard reverse quotes
arguments.append(arg)
print '\nArguments input:',arguments,'\n'
if '--help' in arguments[1:] or '--h' in arguments[1:]:
try: displayHelp() ### Print out a help file and quit
except Exception: print 'See: http://www.altanalyze.org for documentation and command-line help';sys.exit()
if 'AltAnalyze' in arguments[1]:
arguments = arguments[1:] ### Occurs on Ubuntu with the location of AltAnalyze being added to sys.argv (exclude this since no argument provided for this var)
try:
options, remainder = getopt.getopt(arguments[1:],'', ['species=', 'mod=','elitepval=', 'elitepermut=',
'method=','zscore=','pval=','num=',
'runGOElite=','denom=','output=','arraytype=',
'celdir=','expdir=','output=','statdir=',
'filterdir=','cdfdir=','csvdir=','expname=',
'dabgp=','rawexp=','avgallss=','logexp=',
'inclraw=','runalt=','altmethod=','altp=',
'probetype=','altscore=','GEcutoff=',
'exportnormexp=','calcNIp=','runMiDAS=',
'GEcutoff=','GEelitepval=','mirmethod=','ASfilter=',
'vendor=','GEelitefold=','update=','version=',
'analyzeAllGroups=','GEeliteptype=','force=',
'resources_to_analyze=', 'dataToAnalyze=','returnAll=',
'groupdir=','compdir=','annotatedir=','additionalScore=',
'additionalAlgorithm=','noxhyb=','platform=','bedDir=',
'altpermutep=','altpermute=','removeIntronOnlyJunctions=',
'normCounts=','buildExonExportFile=','groupStat=',
'compendiumPlatform=','rpkm=','exonExp=','specificArray=',
'ignoreBuiltSpecies=','ORAstat=','outputQCPlots=',
'runLineageProfiler=','input=','image=', 'wpid=',
'additional=','row_method=','column_method=',
'row_metric=','column_metric=','color_gradient=',
'transpose=','returnPathways=','compendiumType=',
'exonMapFile=','geneExp=','labels=','contrast=',
'plotType=','geneRPKM=','exonRPKM=','runMarkerFinder=',
'update_interactions=','includeExpIDs=','degrees=',
'genes=','inputType=','interactionDirs=','GeneSetSelection=',
'PathwaySelection=','OntologyID=','dataType=','combat=',
'channelToExtract=','showIntrons=','display=','join=',
'uniqueOnly=','accessoryAnalysis=','inputIDType=','outputIDType=',
'FEdir=','channelToExtract=','AltResultsDir=','geneFileDir=',
'AltResultsDir=','modelSize=','geneModel=','reference=',
'multiThreading=','multiProcessing=','genesToReport=',
'correlateAll=','normalization=','justShowTheseIDs=',
'direction=','analysisType=','algorithm=','rho=',
'clusterGOElite=','geneSetName=','runICGS=','IDtype=',
'CountsCutoff=','FoldDiff=','SamplesDiffering=','removeOutliers=',
'featurestoEvaluate=','restrictBy=','ExpressionCutoff=',
'excludeCellCycle=','runKallisto=','fastq_dir=','FDR='])
except Exception:
print traceback.format_exc()
print "There is an error in the supplied command-line arguments (each flag requires an argument)"; sys.exit()
for opt, arg in options:
#print [opt, arg]
if opt == '--species': species=arg
elif opt == '--arraytype':
if array_type != None: additional_array_types.append(arg)
else: array_type=arg; platform = array_type
if specific_array_type == None: specific_array_type = platform
elif opt == '--exonMapFile':
perform_alt_analysis = 'yes' ### Perform alternative exon analysis
exonMapFile = arg
elif opt == '--specificArray': specific_array_type = arg ### e.g., hGlue
elif opt == '--celdir':
arg = verifyPath(arg)
cel_file_dir=arg
elif opt == '--bedDir':
arg = verifyPath(arg)
cel_file_dir=arg
elif opt == '--FEdir':
arg = verifyPath(arg)
cel_file_dir = arg
elif opt == '--expdir':
arg = verifyPath(arg)
input_exp_file=arg
elif opt == '--statdir':
arg = verifyPath(arg)
input_stats_file=arg
elif opt == '--filterdir':
arg = verifyPath(arg)
input_filtered_dir=arg
elif opt == '--groupdir':
arg = verifyPath(arg)
groups_file=arg
elif opt == '--compdir':
arg = verifyPath(arg)
comps_file=arg
elif opt == '--cdfdir':
arg = verifyPath(arg)
input_cdf_file=arg
elif opt == '--csvdir':
arg = verifyPath(arg)
input_annotation_file=arg
elif opt == '--expname': exp_name=arg
elif opt == '--output':
arg = verifyPath(arg)
output_dir=arg
elif opt == '--vendor': manufacturer=arg
elif opt == '--runICGS': runICGS=True
elif opt == '--IDtype': IDtype=arg
elif opt == '--ignoreBuiltSpecies': ignore_built_species=arg
elif opt == '--platform':
if array_type != None: additional_array_types.append(arg)
else: array_type=arg; platform = array_type
if specific_array_type == None: specific_array_type = platform
elif opt == '--update': update_dbs='yes'; update_method.append(arg)
elif opt == '--version': ensembl_version = arg
elif opt == '--compendiumPlatform': compendiumPlatform=arg ### platform for which the LineageProfiler compendium is built on
elif opt == '--force': force=arg
elif opt == '--input':
arg = verifyPath(arg)
input_file_dir=arg; pipelineAnalysis = False ### If this option is entered, only perform the indicated analysis
elif opt == '--image': image_export.append(arg)
elif opt == '--wpid': wpid=arg
elif opt == '--mod': mod=arg
elif opt == '--runKallisto':
if arg == 'yes' or string.lower(arg) == 'true':
runKallisto = True
elif opt == '--fastq_dir':
input_fastq_dir = arg
elif opt == '--additional':
if additional_resources[0] == None:
additional_resources=[]
additional_resources.append(arg)
else:
additional_resources.append(arg)
elif opt == '--transpose':
if arg == 'True': transpose = True
elif opt == '--runLineageProfiler': ###Variable declared here and later (independent analysis here or pipelined with other analyses later)
run_lineage_profiler=arg
elif opt == '--compendiumType': ### protein-coding, ncRNA, or exon
compendiumType=arg
elif opt == '--denom':
denom_file_dir=arg ### Indicates that GO-Elite is run independent from AltAnalyze itself
elif opt == '--accessoryAnalysis':
accessoryAnalysis = arg
elif opt == '--channelToExtract': channel_to_extract=arg
elif opt == '--genesToReport': genesToReport = int(arg)
elif opt == '--correlateAll': correlateAll = True
elif opt == '--direction': direction = arg
elif opt == '--logexp': expression_data_format=arg
elif opt == '--geneRPKM': rpkm_threshold=arg
elif opt == '--multiThreading' or opt == '--multiProcessing':
multiThreading=arg
if multiThreading == 'yes': multiThreading = True
elif 'rue' in multiThreading: multiThreading = True
else: multiThreading = False
if 'other' in manufacturer or 'Other' in manufacturer:
### For other IDs
systemToUse = array_type
if array_type == None:
print 'Please indicate an ID type as --platform when setting vendor equal to "Other IDs"'; sys.exit()
array_type = "3'array"
if array_type == 'RNASeq': manufacturer = array_type
if platformType == None: platformType = array_type
if perform_alt_analysis == 'yes':
if platform == "3'array":
mappedExonAnalysis = True
cel_file_dir = input_exp_file
exp_name = export.findFilename(input_exp_file)
exp_name = string.replace(exp_name,'.txt','')
exp_name = string.replace(exp_name,'exp.','')
input_exp_file = ''
### To perform alternative exon analyses for platforms without a dedicated database, appropriate mapping info or array type data must be supplied
### (will need to perform downstream testing for unsupported Affymetrix exon, gene and junction arrays)
if exonMapFile == None and specific_array_type == None and cel_file_dir == '':
print_out = "\nUnable to run!!! Please designate either a specific platfrom (e.g., --specificArray hgU133_2), select CEL files, or an "
print_out += "exon-level mapping file location (--exonMapFile C:/mapping.txt) to perform alternative exon analyses for this platform."
### Will need to check here to see if the platform is supported (local or online files) OR wait until an error is encountered later
""" Check to see if a database is already installed """
try: current_species_dirs = unique.read_directory('/AltDatabase')
except Exception: current_species_dirs=[]
if len(current_species_dirs)==0 and update_dbs != 'yes':
print "Please install a database before running AltAnalyze. Please note, AltAnalyze may need to install additional files later for RNASeq and LineageProfiler for some species, automatically. Make sure to list your platform as RNASeq if analyzing RNA-Seq data (--platform RNASeq)."
print "Example:\n"
print 'python AltAnalyze.py --species Hs --update Official --version EnsMart72';sys.exit()
######## Perform analyses independent from AltAnalyze database centric analyses that require additional parameters
if len(image_export) > 0 or len(accessoryAnalysis)>0 or runICGS:
if runICGS:
#python AltAnalyze.py --runICGS yes --expdir "/Users/saljh8/Desktop/demo/Myoblast/ExpressionInput/exp.myoblast.txt" --platform "3'array" --species Hs --GeneSetSelection BioMarkers --PathwaySelection Heart --column_method hopach --rho 0.4 --ExpressionCutoff 200 --justShowTheseIDs "NKX2-5 T TBX5" --FoldDiff 10 --SamplesDiffering 3 --excludeCellCycle conservative
try: species = species
except Exception: print 'Please designate a species before continuing (e.g., --species Hs)'
try: array_type = array_type
except Exception: print 'Please designate a platform before continuing (e.g., --platform RNASeq)'
if len(cel_file_dir)>0:
values = species,exp_file_location_db,dataset,mlp_instance
StatusWindow(values,'preProcessRNASeq') ### proceed to run the full discovery analysis here!!!
else:
if len(input_exp_file) > 0: pass
else: print 'Please indicate a source folder or expression file (e.g., --expdir /dataset/singleCells.txt)'
if array_type == 'Other' or 'Other' in array_type:
if ':' in array_type:
array_type, IDtype = string.split(array_type,':')
array_type = "3'array"
if IDtype == None: IDtype = manufacturer
row_method = 'weighted'
column_method = 'average'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'yellow_black_blue'
contrast=3
vendor = manufacturer
GeneSelection = ''
PathwaySelection = ''
GeneSetSelection = 'None Selected'
excludeCellCycle = True
rho_cutoff = 0.4
restrictBy = 'protein_coding'
featurestoEvaluate = 'Genes'
ExpressionCutoff = 1
CountsCutoff = 1
FoldDiff = 2
SamplesDiffering = 3
JustShowTheseIDs=''
removeOutliers = False
PathwaySelection=[]
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--row_method':
row_method=arg
if row_method == 'None': row_method = None
elif opt == '--column_method':
column_method=arg
if column_method == 'None': column_method = None
elif opt == '--row_metric': row_metric=arg
elif opt == '--column_metric': column_metric=arg
elif opt == '--color_gradient': color_gradient=arg
elif opt == '--GeneSetSelection': GeneSetSelection=arg
elif opt == '--PathwaySelection': PathwaySelection.append(arg)
elif opt == '--genes': GeneSelection=arg
elif opt == '--ExpressionCutoff': ExpressionCutoff=arg
elif opt == '--normalization': normalization=arg
elif opt == '--justShowTheseIDs': justShowTheseIDs=arg
elif opt == '--rho': rho_cutoff=float(arg)
elif opt == '--clusterGOElite':clusterGOElite=float(arg)
elif opt == '--CountsCutoff':CountsCutoff=int(float(arg))
elif opt == '--FoldDiff':FoldDiff=int(float(arg))
elif opt == '--SamplesDiffering':SamplesDiffering=int(float(arg))
elif opt == '--removeOutliers':removeOutliers=arg
elif opt == '--featurestoEvaluate':featurestoEvaluate=arg
elif opt == '--restrictBy':restrictBy=arg
elif opt == '--excludeCellCycle':
excludeCellCycle=arg
if excludeCellCycle == 'False' or excludeCellCycle == 'no': excludeCellCycle = False
elif excludeCellCycle == 'True' or excludeCellCycle == 'yes' or excludeCellCycle == 'conservative': excludeCellCycle = True
elif opt == '--contrast':
try: contrast=float(arg)
except Exception: print '--contrast not a valid float';sys.exit()
elif opt == '--vendor': vendor=arg
elif opt == '--display':
if arg=='yes':
display=True
elif arg=='True':
display=True
else:
display=False
if len(PathwaySelection)==0: PathwaySelection=''
if len(GeneSetSelection)>0 or GeneSelection != '':
gsp = UI.GeneSelectionParameters(species,array_type,vendor)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setJustShowTheseIDs(JustShowTheseIDs)
gsp.setNormalize('median')
gsp.setSampleDiscoveryParameters(ExpressionCutoff,CountsCutoff,FoldDiff,SamplesDiffering,
removeOutliers,featurestoEvaluate,restrictBy,excludeCellCycle,column_metric,column_method,rho_cutoff)
import RNASeq
mlp_instance = mlp
if cel_file_dir != '':
expFile = cel_file_dir + '/ExpressionInput/'+ 'exp.'+exp_name+'.txt'
elif input_exp_file !='':
if 'ExpressionInput' in input_exp_file: expFile = input_exp_file
else:
### Copy over expression file to ExpressionInput
expdir2 = string.replace(input_exp_file,'exp.','')
root_dir = export.findParentDir(input_exp_file)
expFile = root_dir+'/ExpressionInput/exp.'+export.findFilename(expdir2)
export.copyFile(input_exp_file, expFile)
global log_file
root_dir = export.findParentDir(expFile)
root_dir = string.replace(root_dir,'/ExpressionInput','')
time_stamp = timestamp()
log_file = filepath(root_dir+'AltAnalyze_report-'+time_stamp+'.log')
log_report = open(log_file,'w'); log_report.close()
sys.stdout = Logger('')
count = verifyFileLength(expFile[:-4]+'-steady-state.txt')
if count>1:
expFile = expFile[:-4]+'-steady-state.txt'
elif array_type=='RNASeq':
### Indicates that the steady-state file doesn't exist. The exp. file may exist but could be junction-only, so it needs to be re-built from the bed files here
values = species,exp_file_location_db,dataset,mlp_instance
StatusWindow(values,'preProcessRNASeq') ### proceed to run the full discovery analysis here!!!
expFile = expFile[:-4]+'-steady-state.txt'
print [excludeCellCycle]
UI.RemotePredictSampleExpGroups(expFile, mlp_instance, gsp,(species,array_type)) ### proceed to run the full discovery analysis here!!!
sys.exit()
if 'WikiPathways' in image_export:
#python AltAnalyze.py --input /Users/test/input/criterion1.txt --image WikiPathways --mod Ensembl --species Hs --wpid WP536
if wpid==None:
print 'Please provide a valid WikiPathways ID (e.g., WP1234)';sys.exit()
if species==None:
print 'Please provide a valid species ID for an installed database (to install: --update Official --species Hs --version EnsMart72Plus)';sys.exit()
if input_file_dir==None:
print 'Please provide a valid file location for your input IDs (also needs to include system code and value column)';sys.exit()
import WikiPathways_webservice
try:
print 'Attempting to output a WikiPathways colored image from user data'
print 'mod:',mod
print 'species_code:',species
print 'wpid:',wpid
print 'input GO-Elite ID file:',input_file_dir
graphic_link = WikiPathways_webservice.visualizePathwayAssociations(input_file_dir,species,mod,wpid)
except Exception,e:
if 'force_no_matching_error' in traceback.format_exc():
print '\nUnable to run!!! None of the input IDs mapped to this pathway\n'
elif 'IndexError' in traceback.format_exc():
print '\nUnable to run!!! Input ID file does not have at least 3 columns, with the second column being system code\n'
elif 'ValueError' in traceback.format_exc():
print '\nUnable to run!!! Input ID file error. Please check that you do not have extra rows with no data\n'
elif 'source_data' in traceback.format_exc():
print '\nUnable to run!!! Input ID file does not contain a valid system code\n'
elif 'goelite' in traceback.format_exc():
print '\nUnable to run!!! A valid species database needs to first be installed. For example, run:'
print 'python AltAnalyze.py --update Official --species Hs --version EnsMart72\n'
else:
print traceback.format_exc()
print '\nError generating the pathway "%s"' % wpid,'\n'
try:
printout = 'Finished exporting visualized pathway to:',graphic_link['WP']
print printout,'\n'
except Exception: None
sys.exit()
if 'MergeFiles' in accessoryAnalysis:
#python AltAnalyze.py --accessoryAnalysis MergeFiles --input "C:\file1.txt" --input "C:\file2.txt" --output "C:\tables"
files_to_merge=[]
join_option='Intersection'
uniqueOnly=False
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--input':
arg = verifyPath(arg)
files_to_merge.append(arg)
if opt == '--join': join_option = arg
if opt == '--uniqueOnly': uniqueOnly = arg
if len(files_to_merge)<2:
print 'Please designate two or more files to merge (--input)';sys.exit()
UI.MergeFiles(files_to_merge, join_option, uniqueOnly, output_dir, None)
sys.exit()
if 'IDTranslation' in accessoryAnalysis:
#python AltAnalyze.py --accessoryAnalysis IDTranslation --inputIDType Symbol --outputIDType RefSeq --input "C:\file1.txt" --species Hs
inputIDType=None
outputIDType=None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--inputIDType': inputIDType = arg
if opt == '--outputIDType': outputIDType = arg
if inputIDType==None or outputIDType==None:
print 'Please designate an input ID type and an output ID type (--inputIDType Ensembl --outputIDType Symbol)'; sys.exit()
if species==None:
print "Please enter a valide species (--species)"; sys.exit()
UI.IDconverter(input_file_dir, species, inputIDType, outputIDType, None)
sys.exit()
if 'hierarchical' in image_export:
#python AltAnalyze.py --input "/Users/test/pluri.txt" --image hierarchical --row_method average --column_method single --row_metric cosine --column_metric euclidean --color_gradient red_white_blue --transpose False --PathwaySelection Apoptosis:WP254 --GeneSetSelection WikiPathways --species Hs --platform exon --display false
if input_file_dir==None:
print 'Please provide a valid file location for your input data matrix (must have an annotation row and an annotation column)';sys.exit()
row_method = 'weighted'
column_method = 'average'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'red_black_sky'
contrast=2.5
vendor = 'Affymetrix'
GeneSelection = ''
PathwaySelection = ''
GeneSetSelection = 'None Selected'
rho = None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--row_method':
row_method=arg
if row_method == 'None': row_method = None
elif opt == '--column_method':
column_method=arg
if column_method == 'None': column_method = None
elif opt == '--row_metric': row_metric=arg
elif opt == '--column_metric': column_metric=arg
elif opt == '--color_gradient': color_gradient=arg
elif opt == '--GeneSetSelection': GeneSetSelection=arg
elif opt == '--PathwaySelection': PathwaySelection=arg
elif opt == '--genes': GeneSelection=arg
elif opt == '--OntologyID': OntologyID=arg
elif opt == '--normalization': normalization=arg
elif opt == '--justShowTheseIDs': justShowTheseIDs=arg
elif opt == '--rho': rho=arg
elif opt == '--clusterGOElite':clusterGOElite=arg
elif opt == '--contrast':
try: contrast=float(arg)
except Exception: print '--contrast not a valid float';sys.exit()
elif opt == '--vendor': vendor=arg
elif opt == '--display':
if arg=='yes':
display=True
elif arg=='True':
display=True
else:
display=False
if len(GeneSetSelection)>0 or GeneSelection != '':
gsp = UI.GeneSelectionParameters(species,array_type,vendor)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setOntologyID(OntologyID)
gsp.setTranspose(transpose)
gsp.setNormalize(normalization)
gsp.setJustShowTheseIDs(justShowTheseIDs)
try: gsp.setClusterGOElite(clusterGOElite)
except Exception: pass
if rho!=None:
try:
float(rho)
gsp.setRhoCutoff(rho)
except Exception: print 'Must enter a valid Pearson correlation cutoff (float)'
transpose = gsp ### this allows methods that don't transmit this object to also work
if row_method == 'no': row_method = None
if column_method == 'no': column_method = None
if len(GeneSetSelection)>0:
if species == None:
print "Please enter a valide species (--species)"; sys.exit()
try:
files = unique.read_directory(input_file_dir+'/')
dir = input_file_dir
for file in files:
filename = dir+'/'+file
UI.createHeatMap(filename, row_method, row_metric, column_method, column_metric, color_gradient, transpose, contrast, None, display=display)
except Exception:
UI.createHeatMap(input_file_dir, row_method, row_metric, column_method, column_metric, color_gradient, transpose, contrast, None, display=display)
#import clustering; clustering.outputClusters([input_file_dir],[])
sys.exit()
if 'PCA' in image_export:
#AltAnalyze.py --input "/Users/nsalomonis/Desktop/folds.txt" --image PCA --plotType 3D --display True --labels yes
#--algorithm "t-SNE"
include_labels = 'yes'
plotType = '2D'
pca_algorithm = 'SVD'
geneSetName = None
zscore = True
colorByGene=None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--labels':
include_labels=arg
if include_labels == 'True' or include_labels == 'yes':
include_labels = 'yes'
else:
include_labels = 'no'
if opt == '--plotType': plotType=arg
if opt == '--algorithm': pca_algorithm=arg
if opt == '--geneSetName': geneSetName=arg
if opt == '--genes': colorByGene=arg
if opt == '--zscore':
if arg=='yes' or arg=='True' or arg == 'true':
zscore=True
else:
zscore=False
if opt == '--display':
if arg=='yes' or arg=='True' or arg == 'true':
display=True
if input_file_dir==None:
print 'Please provide a valid file location for your input data matrix (must have an annotation row and an annotation column)';sys.exit()
UI.performPCA(input_file_dir, include_labels, pca_algorithm, transpose, None,
plotType=plotType, display=display, geneSetName=geneSetName, species=species, zscore=zscore, colorByGene=colorByGene)
sys.exit()
if 'VennDiagram' in image_export:
# AltAnalyze.py --image "VennDiagram" --input "C:\file1.txt" --input "C:\file2.txt" --output "C:\graphs"
files_to_merge=[]
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--input':
arg = verifyPath(arg)
files_to_merge.append(arg)
if opt == '--display':
if arg=='yes' or arg=='True' or arg == 'true':
display=True
if len(files_to_merge)<2:
print 'Please designate two or more files to compare (--input)';sys.exit()
UI.vennDiagram(files_to_merge, output_dir, None, display=display)
sys.exit()
if 'AltExonViewer' in image_export:
#python AltAnalyze.py --image AltExonViewer --AltResultsDir "C:\CP-hESC" --genes "ANXA7 FYN TCF3 NAV2 ETS2 MYLK ATP2A2" --species Hs --platform exon --dataType "splicing-index"
genes=[]
show_introns='no'
geneFileDir=''
altresult_dir=None
analysisType='plot'
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--genes':genes=arg
elif opt == '--dataType': data_type = arg
elif opt == '--showIntrons': show_introns = arg
elif opt == '--AltResultsDir': altresult_dir = arg
elif opt == '--geneFileDir': geneFileDir = arg
elif opt == '--analysisType': analysisType=arg
if altresult_dir == None:
print 'Please include the location of the AltResults directory (--AltResultsDir)'; sys.exit()
if len(genes)==0 and len(geneFileDir)==0:
print "Please indicate the genes (--genes) or gene file location (--geneFileDir) for AltExonViewer";sys.exit()
if species == None:
print "Please enter a valide species (--species)"; sys.exit()
if array_type == None:
print "Please enter a valide platform (--platform)"; sys.exit()
if 'AltResults' not in altresult_dir:
altresult_dir+='/AltResults/'
if 'Sashimi' in analysisType:
#python AltAnalyze.py --image AltExonViewer --AltResultsDir "/Users/saljh8/Desktop/Grimes/GEC14074/AltResults/" --genes "Dgat1 Dgat2 Tcf7l1" --species Mm --platform RNASeq --analysisType SashimiPlot
analysisType = 'Sashimi-Plot'
altresult_dir = string.split(altresult_dir,'AltResults')[0]
if len(geneFileDir)>0: genes = geneFileDir
geneFileDir=''
elif 'raw' in data_type: ### Switch directories if expression
altanalyze_results_folder = string.replace(altresult_dir,'AltResults','ExpressionInput')
altresult_dir = UI.getValidExpFile(altanalyze_results_folder)
if len(altresult_dir)==0:
print 'No valid expression input file (e.g., exp.MyExperiment.txt) found in',altanalyze_results_folder;sys.exit()
else:
altanalyze_results_folder = altresult_dir+'/RawSpliceData/'+species
try: altresult_dir = UI.getValidSplicingScoreFile(altanalyze_results_folder)
except Exception,e:
print "No files found in: "+altanalyze_results_folder; sys.exit()
if len(geneFileDir)>0:
try:
genes = UI.importGeneList(geneFileDir) ### list of gene IDs or symbols
except Exception:
### Can occur if a directory of files is selected
try:
files = unique.read_directory(geneFileDir+'/')
gene_string=''
for file in files:
if '.txt' in file:
filename = geneFileDir+'/'+file
genes = UI.importGeneList(filename) ### list of gene IDs or symbols
gene_string = gene_string+','+genes
print 'Imported genes from',file,'\n'
#print [altresult_dir];sys.exit()
UI.altExonViewer(species,platform,altresult_dir, gene_string, show_introns, analysisType, False)
except Exception: pass
sys.exit()
if len(genes)==0:
print 'Please list one or more genes (--genes "ANXA7 FYN TCF3 NAV2 ETS2 MYLK ATP2A2")'; sys.exit()
try: UI.altExonViewer(species,platform,altresult_dir, genes, show_introns, analysisType, False)
except Exception:
print traceback.format_exc()
sys.exit()
if 'network' in image_export:
#AltAnalyze.py --image network --species Hs --output "C:\GSE9440_RAW" --PathwaySelection Apoptosis:WP254 --GeneSetSelection WikiPathways
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--update_interactions': update_interactions=arg
elif opt == '--includeExpIDs': includeExpIDs=arg
elif opt == '--degrees': degrees=arg
elif opt == '--genes':
Genes=arg
inputType = 'IDs'
elif opt == '--inputType': inputType=arg
elif opt == '--interactionDirs': interactionDirs.append(arg)
elif opt == '--GeneSetSelection': GeneSetSelection=arg
elif opt == '--PathwaySelection': PathwaySelection=arg
elif opt == '--OntologyID': OntologyID=arg
elif opt == '--display': display=arg
if update_interactions == 'yes': update_interactions = True
else: update_interactions = False
if input_file_dir == None: pass
elif len(input_file_dir) == 0: input_file_dir = None
if len(input_exp_file) == 0: input_exp_file = None
if len(interactionDirs) == 0: interactionDirs=['WikiPathways']
if interactionDirs == ['all']:
interactionDirs = ['WikiPathways','KEGG','BioGRID','TFTargets','common-microRNATargets','all-microRNATargets','common-DrugBank','all-DrugBank']
if interactionDirs == ['main']:
interactionDirs = ['WikiPathways','KEGG','BioGRID','TFTargets']
if interactionDirs == ['confident']:
interactionDirs = ['WikiPathways','KEGG','TFTargets']
if len(Genes) == 0: Genes = None
if output_dir == None: pass
elif len(output_dir) == 0: output_dir = None
if GeneSetSelection == 'None Selected': GeneSetSelection = None
if includeExpIDs=='yes': includeExpIDs = True
else: includeExpIDs = False
gsp = UI.GeneSelectionParameters(species,array_type,manufacturer)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(Genes)
gsp.setOntologyID(OntologyID)
gsp.setIncludeExpIDs(includeExpIDs)
root = ''
if species == None:
print 'Please designate a species (--species).'; sys.exit()
if output_dir == None:
print 'Please designate an output directory (--output)'; sys.exit()
if input_file_dir !=None:
if '.txt' in input_file_dir or '.sif' in input_file_dir:
UI.networkBuilder(input_file_dir,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,root)
else:
parent_dir = input_file_dir
dir_list = read_directory(parent_dir)
for file in dir_list:
input_file_dir = parent_dir+'/'+file
try:
UI.networkBuilder(input_file_dir,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,root)
except Exception:
print file, 'failed to produce network'
else:
UI.networkBuilder(None,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,root)
sys.exit()
########## Begin database dependent AltAnalyze workflows
if ensembl_version != 'current' and 'markers' not in update_method:
dbversion = string.replace(ensembl_version,'EnsMart','')
UI.exportDBversion('EnsMart'+dbversion)
gene_database = unique.getCurrentGeneDatabaseVersion()
print 'Current database version:',gene_database
if array_type == None and update_dbs != 'yes' and denom_file_dir == None:
print "Please specify an array or data type (e.g., RNASeq, exon, gene, junction, AltMouse, 3'array)."; sys.exit()
if 'archive' in update_method:
###
print 'Archiving databases', ensembl_version
try: archive_dir = 'ArchiveDBs/EnsMart'+ensembl_version+'/archive'; export.createDirPath(filepath(archive_dir))
except Exception: null = [] ### directory already exists
dirs = unique.read_directory('/ArchiveDBs/EnsMart'+ensembl_version)
print len(dirs), dirs
import shutil
for species_dir in dirs:
try:
#print '/ArchiveDBs/EnsMart'+ensembl_version+'/'+species_dir+'/'+species_dir+'_RNASeq.zip'
src = filepath('ArchiveDBs/EnsMart'+ensembl_version+'/'+species_dir+'/'+species_dir+'_RNASeq.zip')
dstn = filepath('ArchiveDBs/EnsMart'+ensembl_version+'/archive/'+species_dir+'_RNASeq.zip')
#export.copyFile(src, dstn)
shutil.move(src, dstn)
try:
srcj = string.replace(src,'RNASeq.','junction.'); dstnj = string.replace(dstn,'RNASeq.','junction.')
shutil.move(srcj, dstnj)
except Exception: null=[]
try:
src = string.replace(src,'_RNASeq.','.'); dstn = string.replace(dstn,'_RNASeq.','.')
shutil.move(src, dstn)
except Exception: null=[]
except Exception: null=[]
sys.exit()
if update_dbs == 'yes' and 'Official' not in update_method:
if 'cleanup' in update_method:
existing_species_dirs = unique.read_directory('/AltDatabase/ensembl')
print 'Deleting EnsemblSQL directory for all species, ensembl version',ensembl_version
for species in existing_species_dirs:
export.deleteFolder('AltDatabase/ensembl/'+species+'/EnsemblSQL')
existing_species_dirs = unique.read_directory('/AltDatabase')
print 'Deleting SequenceData directory for all species, ensembl version',ensembl_version
for species in existing_species_dirs:
export.deleteFolder('AltDatabase/'+species+'/SequenceData')
print 'Finished...exiting'
sys.exit()
if 'package' not in update_method and 'markers' not in update_method:
### Example:
### python AltAnalyze.py --species all --arraytype all --update all --version 60
### tr -d \\r < AltAnalyze.py > AltAnalyze_new.py
### chmod +x AltAnalyze_new.py
### nohup ./AltAnalyze.py --update all --species Mm --arraytype gene --arraytype exon --version 60 2>&1 > nohup_v60_Mm.txt
if array_type == 'all' and (species == 'Mm' or species == 'all'): array_type = ['AltMouse','exon','gene','junction','RNASeq']
elif array_type == 'all' and (species == 'Hs' or species == 'Rn'): array_type = ['exon','gene','junction','RNASeq']
else: array_type = [array_type]+additional_array_types
if species == 'all' and 'RNASeq' not in array_type: species = selected_species ### just analyze the species for which multiple platforms are supported
if species == 'selected': species = selected_species ### just analyze the species for which multiple platforms are supported
elif species == 'all':
all_supported_names = {}; all_species_names={}
species_names = UI.getSpeciesInfo()
for species in species_names: all_supported_names[species_names[species]]=species
import EnsemblSQL
child_dirs, ensembl_species, ensembl_versions = EnsemblSQL.getCurrentEnsemblSpecies('release-'+ensembl_version)
for ens_species in ensembl_species:
ens_species = string.replace(ens_species,'_',' ')
if ens_species in all_supported_names:
all_species_names[all_supported_names[ens_species]]=[]
del all_species_names['Hs']
del all_species_names['Mm']
del all_species_names['Rn']
"""
del all_species_names['Go']
del all_species_names['Bt']
del all_species_names['Sc']
del all_species_names['Ss']
del all_species_names['Pv']
del all_species_names['Pt']
del all_species_names['La']
del all_species_names['Tt']
del all_species_names['Tr']
del all_species_names['Ts']
del all_species_names['Pb']
del all_species_names['Pc']
del all_species_names['Ec']
del all_species_names['Tb']
del all_species_names['Tg']
del all_species_names['Dn']
del all_species_names['Do']
del all_species_names['Tn']
del all_species_names['Dm']
del all_species_names['Oc']
del all_species_names['Og']
del all_species_names['Fc']
del all_species_names['Dr']
del all_species_names['Me']
del all_species_names['Cp']
del all_species_names['Tt']
del all_species_names['La']
del all_species_names['Tr']
del all_species_names['Ts']
del all_species_names['Et'] ### No alternative isoforms?
del all_species_names['Pc']
del all_species_names['Tb']
del all_species_names['Fc']
del all_species_names['Sc']
del all_species_names['Do']
del all_species_names['Dn']
del all_species_names['Og']
del all_species_names['Ga']
del all_species_names['Me']
del all_species_names['Ml']
del all_species_names['Mi']
del all_species_names['St']
del all_species_names['Sa']
del all_species_names['Cs']
del all_species_names['Vp']
del all_species_names['Ch']
del all_species_names['Ee']
del all_species_names['Ac']"""
sx=[]; all_species_names2=[] ### Ensure that the core selected species are run first
for species in selected_species:
if species in all_species_names: sx.append(species)
for species in all_species_names:
if species not in selected_species: all_species_names2.append(species)
all_species_names = sx+all_species_names2
species = all_species_names
else: species = [species]
update_uniprot='no'; update_ensembl='no'; update_probeset_to_ensembl='no'; update_domain='no'; update_miRs = 'no'; genomic_build = 'new'; update_miR_seq = 'yes'
if 'all' in update_method:
update_uniprot='yes'; update_ensembl='yes'; update_probeset_to_ensembl='yes'; update_domain='yes'; update_miRs = 'yes'
if 'UniProt' in update_method: update_uniprot = 'yes'
if 'Ensembl' in update_method: update_ensembl = 'yes'
if 'Probeset' in update_method or 'ExonAnnotations' in update_method: update_probeset_to_ensembl = 'yes'
if 'Domain' in update_method:
update_domain = 'yes'
try: from Bio import Entrez #test this
except Exception: print 'The dependent module Bio is not installed or not accessible through the default python interpreter. Exiting AltAnalyze.'; sys.exit()
if 'miRBs' in update_method or 'miRBS' in update_method: update_miRs = 'yes'
if 'NewGenomeBuild' in update_method: genomic_build = 'new'
if 'current' in ensembl_version: print "Please specify an Ensembl version number (e.g., 60) before proceeding with the update.";sys.exit()
try: force = force ### Variable is not declared otherwise
except Exception: force = 'yes'; print 'force:',force
existing_species_dirs={}
update_all = 'no' ### We don't pass this as yes, in order to skip certain steps when multiple array types are analyzed (others are specified above)
try: print "Updating AltDatabase the following array_types",string.join(array_type),"for the species",string.join(species)
except Exception: print 'Please designate a valid platform/array_type (e.g., exon) and species code (e.g., Mm).'
for specific_species in species:
for platform_name in array_type:
if platform_name == 'AltMouse' and specific_species == 'Mm': proceed = 'yes'
elif platform_name == 'exon' or platform_name == 'gene':
import ExonArrayEnsemblRules
#### Check to see if the probeset.csv file is present
#try: probeset_transcript_file = ExonArrayEnsemblRules.getDirectoryFiles('/AltDatabase/'+specific_species+'/'+platform_name)
                    #except Exception: print "Affymetrix probeset.csv annotation file is not found. You must save this to",'/AltDatabase/'+specific_species+'/'+platform_name,'before updating (unzipped).'; sys.exit()
proceed = 'yes'
elif platform_name == 'junction' and (specific_species == 'Hs' or specific_species == 'Mm'): proceed = 'yes'
elif platform_name == 'RNASeq': proceed = 'yes'
else: proceed = 'no'
if proceed == 'yes':
print "Analyzing", specific_species, platform_name
if (platform_name != array_type[0]) and len(species)==1:
update_uniprot = 'no'; update_ensembl = 'no'; update_miR_seq = 'no' ### Don't need to do this twice in a row
print 'Skipping ensembl, uniprot and mir-sequence file import updates since already completed for this species',array_type,platform_name
if ignore_built_species == 'yes': ### Useful for when building all species for a new database build
existing_species_dirs = unique.read_directory('/AltDatabase/ensembl') ### call this here to update with every species - if running multiple instances
if specific_array_type != None and specific_array_type != platform_name: platform_name+='|'+specific_array_type ### For the hGlue vs. JAY arrays
if specific_species not in existing_species_dirs: ### Useful when running multiple instances of AltAnalyze to build all species
print 'update_ensembl',update_ensembl
print 'update_uniprot',update_uniprot
print 'update_probeset_to_ensembl',update_probeset_to_ensembl
print 'update_domain',update_domain
print 'update_miRs',update_miRs
update.executeParameters(specific_species,platform_name,force,genomic_build,update_uniprot,update_ensembl,update_probeset_to_ensembl,update_domain,update_miRs,update_all,update_miR_seq,ensembl_version)
else: print 'ignoring',specific_species
sys.exit()
if 'package' in update_method:
### Example: python AltAnalyze.py --update package --species all --platform all --version 65
if ensembl_version == 'current': print '\nPlease specify version of the database to package (e.g., --version 60).'; sys.exit()
ensembl_version = 'EnsMart'+ensembl_version
### Get all possible species
species_names = UI.getSpeciesInfo(); possible_species={}
possible_species = species_names
possible_arrays = ['exon','gene','junction','AltMouse','RNASeq']
try:
if species == 'all': possible_species = possible_species
elif species == 'selected': possible_species = selected_species
else: possible_species = [species]
except Exception: species = possible_species
if array_type == None or array_type == 'all': possible_arrays = possible_arrays
else: possible_arrays = [array_type]+additional_array_types
species_to_package={}
dirs = unique.read_directory('/AltDatabase/'+ensembl_version)
#print possible_arrays, possible_species; sys.exit()
for species_code in dirs:
if species_code in possible_species:
array_types = unique.read_directory('/AltDatabase/'+ensembl_version+'/'+species_code)
for arraytype in array_types:
if arraytype in possible_arrays:
if species_code in possible_species:
array_types = unique.read_directory('/AltDatabase/'+ensembl_version+'/'+species_code)
try: species_to_package[species_code].append(arraytype)
except Exception: species_to_package[species_code] = [arraytype]
species_to_package = eliminate_redundant_dict_values(species_to_package)
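        ### At this point species_to_package maps each species code to the platforms found locally,
        ### e.g. {'Hs': ['exon','junction','RNASeq']} (an illustrative shape inferred from the loop above).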
for species in species_to_package:
files_to_copy =[species+'_Ensembl_domain_aligning_probesets.txt']
files_to_copy+=[species+'_Ensembl_indirect_domain_aligning_probesets.txt']
files_to_copy+=[species+'_Ensembl_probesets.txt']
files_to_copy+=[species+'_Ensembl_exons.txt']
#files_to_copy+=[species+'_Ensembl_junctions.txt']
files_to_copy+=[species+'_exon_core.mps']
files_to_copy+=[species+'_exon_extended.mps']
files_to_copy+=[species+'_exon_full.mps']
files_to_copy+=[species+'_gene_core.mps']
files_to_copy+=[species+'_gene_extended.mps']
files_to_copy+=[species+'_gene_full.mps']
files_to_copy+=[species+'_gene-exon_probesets.txt']
files_to_copy+=[species+'_probes_to_remove.txt']
files_to_copy+=[species+'_probeset-probes.txt']
files_to_copy+=[species+'_probeset_microRNAs_any.txt']
files_to_copy+=[species+'_probeset_microRNAs_multiple.txt']
files_to_copy+=['probeset-domain-annotations-exoncomp.txt']
files_to_copy+=['probeset-protein-annotations-exoncomp.txt']
#files_to_copy+=['probeset-protein-dbase_exoncomp.txt']
files_to_copy+=['SEQUENCE-protein-dbase_exoncomp.txt']
files_to_copy+=[species+'_Ensembl_junction_probesets.txt']
files_to_copy+=[species+'_Ensembl_AltMouse_probesets.txt']
files_to_copy+=[species+'_RNASeq-exon_probesets.txt']
files_to_copy+=[species+'_junction-exon_probesets.txt']
files_to_copy+=[species+'_junction_all.mps']
files_to_copy+=['platform.txt'] ### Indicates the specific platform for an array type (e.g., HJAY for junction or hGlue for junction)
files_to_copy+=[species+'_junction_comps_updated.txt']
files_to_copy+=['MASTER-probeset-transcript.txt']
files_to_copy+=['AltMouse-Ensembl.txt']
files_to_copy+=['AltMouse_junction-comparisons.txt']
files_to_copy+=['AltMouse_gene_annotations.txt']
files_to_copy+=['AltMouse_annotations.txt']
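            ### files_to_copy holds the platform-specific AltDatabase files staged per array type; common_to_copy (below)
            ### holds the species-level Ensembl/UniProt annotation files shared across platforms (descriptive note).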
common_to_copy =['uniprot/'+species+'/custom_annotations.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl-annotations_simple.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl-annotations.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_microRNA-Ensembl.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_transcript-biotypes.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt']
common_to_copy+= searchDirectory("AltDatabase/ensembl/"+species+"/",'Ensembl_Protein')
common_to_copy+= searchDirectory("AltDatabase/ensembl/"+species+"/",'ProteinFeatures')
common_to_copy+= searchDirectory("AltDatabase/ensembl/"+species+"/",'ProteinCoordinates')
common_to_copy+= searchDirectory("AltDatabase/uniprot/"+species+"/",'FeatureCoordinate')
supported_arrays_present = 'no'
for arraytype in selected_platforms:
if arraytype in species_to_package[species]: supported_arrays_present = 'yes' #Hence a non-RNASeq platform is present
if supported_arrays_present == 'yes':
for file in common_to_copy:
ir = 'AltDatabase/'+ensembl_version+'/'
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'
export.copyFile(ir+file, er+file)
if 'RNASeq' in species_to_package[species]:
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_junction.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_exon.txt']
for file in common_to_copy:
ir = 'AltDatabase/'+ensembl_version+'/'
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'
if species in selected_species:
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version+'/' ### This allows us to build the package archive in a separate directory for selected species, so separate but overlapping content can be packaged
export.copyFile(ir+file, er+file)
for array_type in species_to_package[species]:
ir = 'AltDatabase/'+ensembl_version+'/'+species+'/'+array_type+'/'
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'+species+'/'+array_type+'/'
if array_type == 'junction':
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+array_type+'/'
if array_type == 'RNASeq' and species in selected_species:
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version+'/'+species+'/'+array_type+'/'
for file in files_to_copy:
if array_type == 'RNASeq': file=string.replace(file,'_updated.txt','.txt')
filt_file = string.replace(file ,'.txt','-filtered.txt')
try: export.copyFile(ir+filt_file, er+filt_file); export_path = er+filt_file
except Exception:
try: export.copyFile(ir+file, er+file); export_path = er+file
except Exception: null = [] ### File not found in directory
if len(export_path)>0:
if 'AltMouse' in export_path or 'probes_' in export_path:
export.cleanFile(export_path)
if array_type == 'junction':
subdir = '/exon/'
ir = 'AltDatabase/'+ensembl_version+'/'+species+'/'+array_type+subdir
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+array_type+subdir
for file in files_to_copy:
export_path=[]
filt_file = string.replace(file ,'.txt','-filtered.txt')
try: export.copyFile(ir+filt_file, er+filt_file); export_path = er+filt_file
except Exception:
try: export.copyFile(ir+file, er+file); export_path = er+file
except Exception: null = [] ### File not found in directory
if array_type == 'RNASeq':
subdir = '/junction/'
ir = 'AltDatabase/'+ensembl_version+'/'+species+'/'+array_type+subdir
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'+species+'/'+array_type+subdir
if species in selected_species:
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version+'/'+species+'/'+array_type+subdir
for file in files_to_copy:
if 'SEQUENCE-protein-dbase' not in file and 'domain_aligning' not in file: ### This data is now combined into the main file
export_path=[]
filt_file = string.replace(file ,'.txt','-filtered.txt')
try: export.copyFile(ir+filt_file, er+filt_file); export_path = er+filt_file
except Exception:
try: export.copyFile(ir+file, er+file); export_path = er+file
except Exception: null = [] ### File not found in directory
if 'RNASeq' in species_to_package[species]:
src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version
dst = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+species+'_RNASeq.zip'
if species in selected_species:
src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version
update.zipDirectory(src); print 'Zipping',species, array_type, dst
os.rename(src+'.zip', dst)
if supported_arrays_present == 'yes':
src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version
dst = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+species+'.zip'
update.zipDirectory(src); print 'Zipping',species, array_type, dst
os.rename(src+'.zip', dst)
if 'junction' in species_to_package[species]:
src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/junction'
dst = string.replace(src,'junction',species+'_junction.zip')
update.zipDirectory(src); print 'Zipping',species+'_junction'
os.rename(src+'.zip', dst)
sys.exit()
if 'markers' in update_method:
if species == None or platform == None:
print "WARNING! A species and platform (e.g., exon, junction, 3'array or RNASeq) must be defined to identify markers.";sys.exit()
elif input_exp_file == '':
            print "WARNING! An input expression file must be supplied (e.g., ExpressionOutput/DATASET.YourExperimentName.txt) for this analysis.";sys.exit()
else:
#python AltAnalyze.py --update markers --platform gene --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/Mm_Gene-TissueAtlas/ExpressionInput/exp.meta.txt"
#python AltAnalyze.py --update markers --platform gene --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/Mm_Gene-TissueAtlas/AltResults/RawSpliceData/Mm/splicing-index/meta.txt"
#python AltAnalyze.py --update markers --platform "3'array" --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/U133/ExpressionOutput/DATASET-meta.txt"
#python AltAnalyze.py --update markers --compendiumType ncRNA --platform "exon" --expdir "/home/socr/c/users2/salomoni/conklin/nsalomonis/normalization/Hs_Exon-TissueAtlas/ExpressionOutput/DATASET-meta.txt"
#python AltAnalyze.py --update markers --platform RNASeq --species Mm --geneRPKM 1 --expdir /Users/saljh8/Desktop/Grimes/MergedRSEM/DN-Analysis/ExpressionInput/exp.DN.txt --genesToReport 200
"""The markerFinder module:
1) takes an input ExpressionOutput file (DATASET.YourExperimentName.txt)
2) extracts group average expression and saves to AVERAGE.YourExperimentName.txt to the ExpressionOutput directory
3) re-imports AVERAGE.YourExperimentName.txt
4) correlates the average expression of each gene to an idealized profile to derive a Pearson correlation coefficient
5) identifies optimal markers based on these correlations for each tissue
6) exports an expression file with just these marker genes and tissues
            This module can perform these analyses on protein coding or ncRNAs and can segregate the cell/tissue groups into clusters
when a group notation is present in the sample name (e.g., 0~Heart, 0~Brain, 1~Stem Cell)"""
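            ### Illustrative sample headers using the optional cluster notation described above (tissue names are hypothetical):
            ###   UID    0~Heart_rep1    0~Heart_rep2    1~Stem Cell_rep1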
import markerFinder
if 'AltResults' in input_exp_file and 'Clustering' not in input_exp_file:
                ### This applies to a file composed of exon-level normalized intensities (calculate average group expression)
markerFinder.getAverageExonExpression(species,platform,input_exp_file)
if 'Raw' in input_exp_file:
group_exp_file = string.replace(input_exp_file,'Raw','AVERAGE')
else:
group_exp_file = string.replace(input_exp_file,'FullDatasets','AVERAGE-FullDatasets')
altexon_correlation_file = markerFinder.analyzeData(group_exp_file,species,platform,compendiumType,geneToReport=genesToReport,correlateAll=correlateAll,AdditionalParameters=fl)
markerFinder.getExprValsForNICorrelations(platform,altexon_correlation_file,group_exp_file)
else:
                ### This applies to an ExpressionOutput DATASET file composed of gene expression values (averages already present)
import collections
try: test_ordereddict=collections.OrderedDict()
except Exception:
try: import ordereddict
except Exception:
### This is needed to re-order the average file so that the groups are sequentially ordered when analyzing clustered groups (0~)
                        print 'Warning!!!! To run markerFinder correctly, call python version 2.7 or greater (python 3.x is not supported)'
                        print 'Requires the ordereddict module (installable as the library "ordereddict"). To call python 2.7: /usr/bin/python2.7'
sys.exit()
try:
output_dir = markerFinder.getAverageExpressionValues(input_exp_file,platform) ### Either way, make an average annotated file from the DATASET file
if 'DATASET' in input_exp_file:
group_exp_file = string.replace(input_exp_file,'DATASET','AVERAGE')
else:
group_exp_file = (input_exp_file,output_dir) ### still analyze the primary sample
except Exception:
### Work around when performing this analysis on an alternative exon input cluster file
group_exp_file = input_exp_file
fl = UI.ExpressionFileLocationData(input_exp_file,'','',''); fl.setOutputDir(export.findParentDir(export.findParentDir(input_exp_file)[:-1]))
try: fl.setSpecies(species); fl.setVendor(vendor)
except Exception: pass
try:
rpkm_threshold = float(rpkm_threshold) ### If supplied, for any platform, use it
fl.setRPKMThreshold(rpkm_threshold)
except Exception: pass
if platform=='RNASeq':
try: rpkm_threshold = float(rpkm_threshold)
except Exception: rpkm_threshold = 1.0
fl.setRPKMThreshold(rpkm_threshold)
try: correlationDirection = direction ### correlate to a positive or inverse negative in silico artificial pattern
except Exception: correlationDirection = 'up'
fl.setCorrelationDirection(correlationDirection)
if expression_data_format == 'non-log': logTransform = True
else: logTransform = False
if 'topSplice' in input_exp_file:
markerFinder.filterRNASeqSpliceEvents(species,platform,fl,input_exp_file)
sys.exit()
if 'stats.' in input_exp_file:
markerFinder.filterDetectionPvalues(species,platform,fl,input_exp_file)
sys.exit()
else:
markerFinder.analyzeData(group_exp_file,species,platform,compendiumType,geneToReport=genesToReport,correlateAll=correlateAll,AdditionalParameters=fl,logTransform=logTransform)
try: fl.setVendor(manufacturer)
except Exception:
print '--vendor not indicated by user... assuming Affymetrix'
fl.setVendor('Affymetrix')
try: markerFinder.generateMarkerHeatMaps(fl,array_type,convertNonLogToLog=logTransform,Species=species)
except Exception: print traceback.format_exc()
print 'Cell/Tissue marker classification analysis finished';sys.exit()
if 'EnsMart' in ensembl_version:
UI.exportDBversion(ensembl_version)
annotation_found = verifyFile(input_annotation_file)
proceed = 'no'
if 'Official' not in update_method and denom_file_dir == None: ### If running GO-Elite independent of AltAnalyze (see below GO_Elite call)
try:
time_stamp = timestamp()
if len(cel_file_dir)>0:
if output_dir == None:
output_dir = cel_file_dir
print "Setting output directory to the input path:", output_dir
if output_dir == None and input_filtered_dir>0:
output_dir = input_filtered_dir
if '/' == output_dir[-1] or '\\' in output_dir[-2]: null=[]
else: output_dir +='/'
log_file = filepath(output_dir+'AltAnalyze_report-'+time_stamp+'.log')
log_report = open(log_file,'w'); log_report.close()
sys.stdout = Logger('')
except Exception,e:
print e
            print 'Please designate an output directory before proceeding (e.g., --output "C:\RNASeq")';sys.exit()
if mappedExonAnalysis:
array_type = 'RNASeq' ### Although this is not the actual platform, the resulting data will be treated as RNA-Seq with parameters most suitable for arrays
if len(external_annotation_dir)>0:
run_from_scratch = 'Annotate External Results'
if channel_to_extract != None:
run_from_scratch = 'Process Feature Extraction files' ### Agilent Feature Extraction files as input for normalization
manufacturer = 'Agilent'
constitutive_source = 'Agilent'
expression_threshold = 'NA'
perform_alt_analysis = 'NA'
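    ### Note: when a channel is specified, the Agilent Feature Extraction route also forces quantile normalization
    ### later in this function (see the 'Process Feature Extraction files' handling below).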
if len(input_filtered_dir)>0:
run_from_scratch ='Process AltAnalyze filtered'; proceed='yes'
if len(input_exp_file)>0:
run_from_scratch = 'Process Expression file'; proceed='yes'
input_exp_file = string.replace(input_exp_file,'\\','/') ### Windows convention is \ rather than /, but works with /
ief_list = string.split(input_exp_file,'/')
if len(output_dir)>0: parent_dir = output_dir
else: parent_dir = string.join(ief_list[:-1],'/')
exp_name = ief_list[-1]
if len(cel_file_dir)>0 or runKallisto == True:
# python AltAnalyze.py --species Mm --platform RNASeq --runKallisto yes --expname test
if exp_name == None:
            print "No experiment name defined. Please submit a name (e.g., --expname CancerComp) before proceeding."; sys.exit()
else:
dataset_name = 'exp.'+exp_name+'.txt'; exp_file_dir = filepath(output_dir+'/ExpressionInput/'+dataset_name)
if runKallisto:
            run_from_scratch = 'Process RNA-seq reads'
elif run_from_scratch!= 'Process Feature Extraction files':
run_from_scratch = 'Process CEL files'; proceed='yes'
if array_type == 'RNASeq': file_ext = '.BED'
else: file_ext = '.CEL'
try: cel_files,cel_files_fn = UI.identifyCELfiles(cel_file_dir,array_type,manufacturer)
except Exception,e:
print e
if mappedExonAnalysis: pass
else: print "No",file_ext,"files found in the directory:",cel_file_dir;sys.exit()
if array_type != 'RNASeq': cel_file_list_dir = UI.exportCELFileList(cel_files_fn,cel_file_dir)
if groups_file != None and comps_file != None:
try: export.copyFile(groups_file, string.replace(exp_file_dir,'exp.','groups.'))
except Exception: print 'Groups file already present in target location OR bad input path.'
try: export.copyFile(comps_file, string.replace(exp_file_dir,'exp.','comps.'))
except Exception: print 'Comparison file already present in target location OR bad input path.'
groups_file = string.replace(exp_file_dir,'exp.','groups.')
comps_file = string.replace(exp_file_dir,'exp.','comps.')
if verifyGroupFileFormat(groups_file) == False:
print "\nWarning! The format of your groups file is not correct. For details, see:\nhttp://code.google.com/p/altanalyze/wiki/ManualGroupsCompsCreation\n"
sys.exit()
if array_type != 'RNASeq' and manufacturer!= 'Agilent':
"""Determine if Library and Annotations for the array exist, if not, download or prompt for selection"""
try:
            ### For the HGLUE and HJAY arrays, this step is critical in order to have the command-line AltAnalyze download the appropriate junction database (determined from specific_array_type)
specific_array_types,specific_array_type = UI.identifyArrayType(cel_files_fn)
num_array_types = len(specific_array_types)
except Exception:
null=[]; num_array_types=1; specific_array_type=None
if array_type == 'exon':
if species == 'Hs': specific_array_type = 'HuEx-1_0-st-v2'
if species == 'Mm': specific_array_type = 'MoEx-1_0-st-v2'
if species == 'Rn': specific_array_type = 'RaEx-1_0-st-v2'
elif array_type == 'gene':
if species == 'Hs': specific_array_type = 'HuGene-1_0-st-v1'
if species == 'Mm': specific_array_type = 'MoGene-1_0-st-v1'
if species == 'Rn': specific_array_type = 'RaGene-1_0-st-v1'
elif array_type == 'AltMouse': specific_array_type = 'altMouseA'
"""
elif array_type == 'junction':
if species == 'Mm': specific_array_type = 'MJAY'
if species == 'Hs': specific_array_type = 'HJAY'
"""
supproted_array_db = UI.importSupportedArrayInfo()
if specific_array_type in supproted_array_db and input_cdf_file == None and input_annotation_file == None:
sa = supproted_array_db[specific_array_type]; species = sa.Species(); array_type = sa.ArrayType()
input_cdf_file, input_annotation_file, bgp_file, clf_file = UI.getAffyFilesRemote(specific_array_type,array_type,species)
else: array_type = "3'array"
cdf_found = verifyFile(input_cdf_file)
annotation_found = verifyFile(input_annotation_file)
if input_cdf_file == None:
print [specific_array_type], 'not currently supported... Please provide CDF to AltAnalyze (commandline or GUI) or manually add to AltDatabase/affymetrix/LibraryFiles'; sys.exit()
if cdf_found != "found":
### Copy valid Library files to a local AltAnalyze database directory
input_cdf_file_lower = string.lower(input_cdf_file)
if array_type == "3'array":
if '.cdf' in input_cdf_file_lower:
clf_file='';bgp_file=''; assinged = 'yes'
                ###Thus the CDF file was confirmed, so copy it over to AltDatabase
icf_list = string.split(input_cdf_file,'/'); cdf_short = icf_list[-1]
destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
destination_parent = osfilepath(destination_parent+cdf_short)
info_list = input_cdf_file,destination_parent; UI.StatusWindow(info_list,'copy')
else: print "Valid CDF file not found. Exiting program.";sys.exit()
else:
if '.pgf' in input_cdf_file_lower:
###Check to see if the clf and bgp files are present in this directory
icf_list = string.split(input_cdf_file,'/'); parent_dir = string.join(icf_list[:-1],'/'); cdf_short = icf_list[-1]
clf_short = string.replace(cdf_short,'.pgf','.clf')
kil_short = string.replace(cdf_short,'.pgf','.kil') ### Only applies to the Glue array
if array_type == 'exon' or array_type == 'junction': bgp_short = string.replace(cdf_short,'.pgf','.antigenomic.bgp')
else: bgp_short = string.replace(cdf_short,'.pgf','.bgp')
dir_list = read_directory(parent_dir)
if clf_short in dir_list and bgp_short in dir_list:
pgf_file = input_cdf_file
clf_file = string.replace(pgf_file,'.pgf','.clf')
kil_file = string.replace(pgf_file,'.pgf','.kil') ### Only applies to the Glue array
if array_type == 'exon' or array_type == 'junction': bgp_file = string.replace(pgf_file,'.pgf','.antigenomic.bgp')
else: bgp_file = string.replace(pgf_file,'.pgf','.bgp')
assinged = 'yes'
                    ###Thus the PGF library files were confirmed, so copy them over to AltDatabase
destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
info_list = input_cdf_file,osfilepath(destination_parent+cdf_short); UI.StatusWindow(info_list,'copy')
info_list = clf_file,osfilepath(destination_parent+clf_short); UI.StatusWindow(info_list,'copy')
info_list = bgp_file,osfilepath(destination_parent+bgp_short); UI.StatusWindow(info_list,'copy')
if 'Glue' in pgf_file:
info_list = kil_file,osfilepath(destination_parent+kil_short); UI.StatusWindow(info_list,'copy')
if annotation_found != "found" and update_dbs == 'no' and array_type != 'RNASeq' and denom_file_dir == None and manufacturer != 'Agilent':
### Copy valid Annotation files to a local AltAnalyze database directory
try:
input_annotation_lower = string.lower(input_annotation_file)
if '.csv' in input_annotation_lower:
assinged = 'yes'
                ###Thus the annotation CSV file was confirmed, so copy it over to AltDatabase
icf_list = string.split(input_annotation_file,'/'); csv_short = icf_list[-1]
destination_parent = 'AltDatabase/affymetrix/'+species+'/'
info_list = input_annotation_file,filepath(destination_parent+csv_short); UI.StatusWindow(info_list,'copy')
except Exception: print "No Affymetrix annotation file provided. AltAnalyze will use any .csv annotations files in AltDatabase/Affymetrix/"+species
if 'Official' in update_method and species != None:
proceed = 'yes'
elif array_type != None and species != None:
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults(array_type,species)
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, goelite_permutations, mod, returnPathways, NA = goelite_defaults
use_direct_domain_alignments_only,microRNA_prediction_method = functional_analysis_defaults
analysis_method, additional_algorithms, filter_probeset_types, analyze_all_conditions, p_threshold, alt_exon_fold_variable, additional_score, permute_p_threshold, gene_expression_cutoff, remove_intronic_junctions, perform_permutation_analysis, export_NI_values, run_MiDAS, calculate_normIntensity_p, filter_for_AS = alt_exon_defaults
dabg_p, rpkm_threshold, gene_exp_threshold, exon_exp_threshold, exon_rpkm_threshold, expression_threshold, perform_alt_analysis, analyze_as_groups, expression_data_format, normalize_feature_exp, normalize_gene_data, avg_all_for_ss, include_raw_data, probability_statistic, FDR_statistic, batch_effects, marker_finder, visualize_qc_results, run_lineage_profiler, null = expr_defaults
elif denom_file_dir != None and species != None:
proceed = 'yes' ### Only run GO-Elite
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults('RNASeq',species) ### platform not relevant
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, goelite_permutations, mod, returnPathways, NA = goelite_defaults
else:
print 'No species defined. Please include the species code (e.g., "--species Hs") and array type (e.g., "--arraytype exon") before proceeding.'
print '\nAlso check the printed arguments above to see if there are formatting errors, such as bad quotes.'; sys.exit()
array_type_original = array_type
#if array_type == 'gene': array_type = "3'array"
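    ### Second pass over the parsed command-line options: any value supplied here overrides the platform/species
    ### defaults imported above. An illustrative combination of these flags (values are hypothetical) might be:
    ### python AltAnalyze.py ... --runGOElite yes --zscore 1.96 --elitepval 0.05 --num 3 --dabgp 0.05 --rawexp 70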
for opt, arg in options:
if opt == '--runGOElite': run_GOElite=arg
elif opt == '--outputQCPlots': visualize_qc_results=arg
elif opt == '--runLineageProfiler': run_lineage_profiler=arg
elif opt == '--elitepermut': goelite_permutations=arg
elif opt == '--method': filter_method=arg
elif opt == '--zscore': z_threshold=arg
elif opt == '--elitepval': p_val_threshold=arg
elif opt == '--num': change_threshold=arg
elif opt == '--dataToAnalyze': resources_to_analyze=arg
elif opt == '--GEelitepval': ge_pvalue_cutoffs=arg
elif opt == '--GEelitefold': ge_fold_cutoffs=arg
elif opt == '--GEeliteptype': ge_ptype=arg
elif opt == '--ORAstat': ORA_algorithm=arg
elif opt == '--returnPathways': returnPathways=arg
elif opt == '--FDR': FDR_statistic=arg
elif opt == '--dabgp': dabg_p=arg
elif opt == '--rawexp': expression_threshold=arg
elif opt == '--geneRPKM': rpkm_threshold=arg
elif opt == '--exonRPKM': exon_rpkm_threshold=arg
elif opt == '--geneExp': gene_exp_threshold=arg
elif opt == '--exonExp': exon_exp_threshold=arg
elif opt == '--groupStat': probability_statistic=arg
elif opt == '--avgallss': avg_all_for_ss=arg
elif opt == '--logexp': expression_data_format=arg
elif opt == '--inclraw': include_raw_data=arg
elif opt == '--combat': batch_effects=arg
elif opt == '--runalt': perform_alt_analysis=arg
elif opt == '--altmethod': analysis_method=arg
elif opt == '--altp': p_threshold=arg
elif opt == '--probetype': filter_probeset_types=arg
elif opt == '--altscore': alt_exon_fold_variable=arg
elif opt == '--GEcutoff': gene_expression_cutoff=arg
elif opt == '--removeIntronOnlyJunctions': remove_intronic_junctions=arg
elif opt == '--normCounts': normalize_feature_exp=arg
elif opt == '--normMatrix': normalize_gene_data=arg
elif opt == '--altpermutep': permute_p_threshold=arg
elif opt == '--altpermute': perform_permutation_analysis=arg
elif opt == '--exportnormexp': export_NI_values=arg
elif opt == '--buildExonExportFile': build_exon_bedfile = 'yes'
elif opt == '--runMarkerFinder': marker_finder = arg
elif opt == '--calcNIp': calculate_normIntensity_p=arg
elif opt == '--runMiDAS': run_MiDAS=arg
elif opt == '--analyzeAllGroups':
analyze_all_conditions=arg
if analyze_all_conditions == 'yes': analyze_all_conditions = 'all groups'
elif opt == '--GEcutoff': use_direct_domain_alignments_only=arg
elif opt == '--mirmethod': microRNA_prediction_method=arg
elif opt == '--ASfilter': filter_for_AS=arg
elif opt == '--noxhyb': xhyb_remove=arg
elif opt == '--returnAll': return_all=arg
elif opt == '--annotatedir': external_annotation_dir=arg
elif opt == '--additionalScore': additional_score=arg
elif opt == '--additionalAlgorithm': additional_algorithms=arg
elif opt == '--modelSize':
modelSize=arg
try: modelSize = int(modelSize)
except Exception: modelSize = None
elif opt == '--geneModel':
geneModel=arg # file location
if geneModel == 'no' or 'alse' in geneModel:
geneModel = False
elif opt == '--reference':
custom_reference = arg
if run_from_scratch == 'Process Feature Extraction files': ### Agilent Feature Extraction files as input for normalization
normalize_gene_data = 'quantile' ### required for Agilent
proceed = 'yes'
if returnPathways == 'no' or returnPathways == 'None':
returnPathways = None
if pipelineAnalysis == False:
proceed = 'yes'
if proceed == 'yes':
species_codes = UI.remoteSpeciesInfo()
### Update Ensembl Databases
if 'Official' in update_method:
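            ### Official database download, invoked as elsewhere in this file, e.g.: python AltAnalyze.py --species Hs --update Official --version EnsMart72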
file_location_defaults = UI.importDefaultFileLocations()
db_versions_vendors,db_versions = UI.remoteOnlineDatabaseVersions()
array_codes = UI.remoteArrayInfo()
UI.getOnlineDBConfig(file_location_defaults,'')
if len(species)==2:
species_names = UI.getSpeciesInfo()
species_full = species_names[species]
else: species_full = species
print 'Species name to update:',species_full
db_version_list=[]
for version in db_versions: db_version_list.append(version)
db_version_list.sort(); db_version_list.reverse(); select_version = db_version_list[0]
db_versions[select_version].sort()
print 'Ensembl version',ensembl_version
if ensembl_version != 'current':
if len(ensembl_version) < 4: ensembl_version = 'EnsMart'+ensembl_version
if ensembl_version not in db_versions:
try: UI.getOnlineEliteDatabase(file_location_defaults,ensembl_version,[species],'no',''); sys.exit()
except Exception:
                        ### This is only for databases that aren't officially released yet (for prototyping)
print ensembl_version, 'is not a valid version of Ensembl, while',select_version, 'is.'; sys.exit()
else: select_version = ensembl_version
### Export basic species information
sc = species; db_version = ensembl_version
if sc != None:
for ad in db_versions_vendors[db_version]:
if ad.SpeciesCodes() == species_full:
for array_system in array_codes:
ac = array_codes[array_system]
compatible_species = ac.SpeciesCodes()
if ac.Manufacturer() in ad.Manufacturer() and ('expression' in ac.ArrayName() or 'RNASeq' in ac.ArrayName() or 'RNA-seq' in ac.ArrayName()):
if sc not in compatible_species: compatible_species.append(sc)
ac.setSpeciesCodes(compatible_species)
UI.exportArrayInfo(array_codes)
if species_full not in db_versions[select_version]:
print db_versions[select_version]
print species_full, ': This species is not available for this version %s of the Official database.' % select_version
else:
update_goelite_resources = 'no' ### This is handled separately below
UI.getOnlineEliteDatabase(file_location_defaults,ensembl_version,[species],update_goelite_resources,'');
### Attempt to download additional Ontologies and GeneSets
if additional_resources[0] != None: ### Indicates that the user requested the download of addition GO-Elite resources
try:
import GeneSetDownloader
print 'Adding supplemental GeneSet and Ontology Collections'
if 'all' in additional_resources:
additionalResources = UI.importResourceList() ### Get's all additional possible resources
else: additionalResources = additional_resources
GeneSetDownloader.buildAccessoryPathwayDatabases([species],additionalResources,'yes')
print 'Finished adding additional analysis resources.'
except Exception:
print 'Download error encountered for additional Ontologies and GeneSets...\nplease try again later.'
status = UI.verifyLineageProfilerDatabases(species,'command-line')
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
if array_type == 'junction' or array_type == 'RNASeq': ### Download junction databases
try: UI.checkForLocalArraySupport(species,array_type,specific_array_type,'command-line')
except Exception:
print 'Please install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart72';sys.exit()
status = UI.verifyLineageProfilerDatabases(species,'command-line')
print "Finished adding database"
sys.exit()
try:
#print ge_fold_cutoffs,ge_pvalue_cutoffs, change_threshold, resources_to_analyze, goelite_permutations, p_val_threshold, z_threshold
change_threshold = int(change_threshold)-1
goelite_permutations = int(goelite_permutations);change_threshold = change_threshold
p_val_threshold = float(p_val_threshold); z_threshold = float(z_threshold)
if ORA_algorithm == 'Fisher Exact Test':
goelite_permutations = 'FisherExactTest'
except Exception,e:
print e
            print 'One of the GO-Elite input values is inappropriate. Please review and correct.';sys.exit()
        if run_GOElite == None or run_GOElite == 'no': goelite_permutations = 'NA' ### This halts GO-Elite from running
else:
if output_dir == None:
print "\nPlease specify an output directory using the flag --output"; sys.exit()
try: expression_threshold = float(expression_threshold)
except Exception: expression_threshold = 1
try: dabg_p = float(dabg_p)
except Exception: dabg_p = 1 ### Occurs for RNASeq
if microRNA_prediction_method == 'two or more': microRNA_prediction_method = 'multiple'
else: microRNA_prediction_method = 'any'
### Run GO-Elite directly from user supplied input and denominator ID folders (outside of the normal workflows)
if run_GOElite == 'yes' and pipelineAnalysis == False and '--runGOElite' in arguments:# and denom_file_dir != None:
#python AltAnalyze.py --input "/Users/nsalomonis/Desktop/Mm_sample/input_list_small" --runGOElite yes --denom "/Users/nsalomonis/Desktop/Mm_sample/denominator" --mod Ensembl --species Mm
"""if denom_file_dir == None:
print 'Please include a folder containing a valid denominator ID list for the input ID sets.'; sys.exit()"""
try:
if output_dir==None:
### Set output to the same directory or parent if none selected
i = -1 ### 1 directory up
output_dir = string.join(string.split(input_file_dir,'/')[:i],'/')
file_dirs = input_file_dir, denom_file_dir, output_dir
import GO_Elite
if ORA_algorithm == 'Fisher Exact Test':
goelite_permutations = 'FisherExactTest'
goelite_var = species,mod,goelite_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,''
GO_Elite.remoteAnalysis(goelite_var,'non-UI',Multi=mlp)
sys.exit()
except Exception:
print traceback.format_exc()
print "Unexpected error encountered. Please see log file."; sys.exit()
if run_lineage_profiler == 'yes':
status = UI.verifyLineageProfilerDatabases(species,'command-line')
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
if run_lineage_profiler == 'yes' and input_file_dir != None and pipelineAnalysis == False and '--runLineageProfiler' in arguments:
#python AltAnalyze.py --input "/Users/arrays/test.txt" --runLineageProfiler yes --vendor Affymetrix --platform "3'array" --species Mm --output "/Users/nsalomonis/Merrill"
#python AltAnalyze.py --input "/Users/qPCR/samples.txt" --runLineageProfiler yes --geneModel "/Users/qPCR/models.txt"
if array_type==None:
print "Please include a platform name (e.g., --platform RNASeq)";sys.exit()
if species==None:
print "Please include a species name (e.g., --species Hs)";sys.exit()
try:
status = UI.verifyLineageProfilerDatabases(species,'command-line')
except ValueError:
### Occurs due to if int(gene_database[-2:]) < 65: - ValueError: invalid literal for int() with base 10: ''
print '\nPlease install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart72\n';sys.exit()
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...';sys.exit()
try:
fl = UI.ExpressionFileLocationData('','','','')
fl.setSpecies(species)
fl.setVendor(manufacturer)
fl.setPlatformType(array_type)
fl.setCompendiumType('protein_coding')
#fl.setCompendiumType('AltExon')
fl.setCompendiumPlatform(array_type)
try: expr_input_dir
except Exception: expr_input_dir = input_file_dir
UI.remoteLP(fl, expr_input_dir, manufacturer, custom_reference, geneModel, None, modelSize=modelSize)
#graphic_links = ExpressionBuilder.remoteLineageProfiler(fl,input_file_dir,array_type,species,manufacturer)
print_out = 'Lineage profiles and images saved to the folder "DataPlots" in the input file folder.'
print print_out
except Exception:
print traceback.format_exc()
            print_out = 'Analysis error occurred...\nplease see warning printouts.'
print print_out
sys.exit()
if array_type == 'junction' or array_type == 'RNASeq': ### Download junction databases
try: UI.checkForLocalArraySupport(species,array_type,specific_array_type,'command-line')
except Exception:
print 'Please install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart72';sys.exit()
probeset_types = ['full','core','extended']
if return_all == 'yes': ### Perform no alternative exon filtering when annotating existing FIRMA or MADS results
dabg_p = 1; expression_threshold = 1; p_threshold = 1; alt_exon_fold_variable = 1
gene_expression_cutoff = 10000; filter_probeset_types = 'full'; exon_exp_threshold = 1; rpkm_threshold = 0
gene_exp_threshold = 1; exon_rpkm_threshold = 0
if array_type == 'RNASeq':
gene_exp_threshold = 0
else:
if array_type != "3'array":
try:
p_threshold = float(p_threshold); alt_exon_fold_variable = float(alt_exon_fold_variable)
expression_threshold = float(expression_threshold); gene_expression_cutoff = float(gene_expression_cutoff)
dabg_p = float(dabg_p); additional_score = float(additional_score)
gene_expression_cutoff = float(gene_expression_cutoff)
except Exception:
try: gene_expression_cutoff = float(gene_expression_cutoff)
except Exception: gene_expression_cutoff = 0
try: rpkm_threshold = float(rpkm_threshold)
except Exception: rpkm_threshold = -1
try: exon_exp_threshold = float(exon_exp_threshold)
except Exception: exon_exp_threshold = 0
try: gene_exp_threshold = float(gene_exp_threshold)
except Exception: gene_exp_threshold = 0
try: exon_rpkm_threshold = float(exon_rpkm_threshold)
except Exception: exon_rpkm_threshold = 0
if filter_probeset_types not in probeset_types and array_type == 'exon':
print "Invalid probeset-type entered:",filter_probeset_types,'. Must be "full", "extended" or "core"'; sys.exit()
elif array_type == 'gene' and filter_probeset_types == 'NA': filter_probeset_types = 'core'
if dabg_p > 1 or dabg_p <= 0:
print "Invalid DABG p-value entered:",dabg_p,'. Must be > 0 and <= 1'; sys.exit()
if expression_threshold <1:
print "Invalid expression threshold entered:",expression_threshold,'. Must be > 1'; sys.exit()
if p_threshold > 1 or p_threshold <= 0:
print "Invalid alternative exon p-value entered:",p_threshold,'. Must be > 0 and <= 1'; sys.exit()
if alt_exon_fold_variable < 1 and analysis_method != 'ASPIRE':
print "Invalid alternative exon threshold entered:",alt_exon_fold_variable,'. Must be > 1'; sys.exit()
if gene_expression_cutoff < 1:
print "Invalid gene expression threshold entered:",gene_expression_cutoff,'. Must be > 1'; sys.exit()
if additional_score < 1:
print "Invalid additional score threshold entered:",additional_score,'. Must be > 1'; sys.exit()
if array_type == 'RNASeq':
if rpkm_threshold < 0:
print "Invalid gene RPKM threshold entered:",rpkm_threshold,'. Must be >= 0'; sys.exit()
if exon_exp_threshold < 1:
print "Invalid exon expression threshold entered:",exon_exp_threshold,'. Must be > 1'; sys.exit()
if exon_rpkm_threshold < 0:
print "Invalid exon RPKM threshold entered:",exon_rpkm_threshold,'. Must be >= 0'; sys.exit()
if gene_exp_threshold < 1:
print "Invalid gene expression threshold entered:",gene_exp_threshold,'. Must be > 1'; sys.exit()
if 'FIRMA' in additional_algorithms and array_type == 'RNASeq':
print 'FIRMA is not an available option for RNASeq... Changing this to splicing-index.'
additional_algorithms = 'splicing-index'
additional_algorithms = UI.AdditionalAlgorithms(additional_algorithms); additional_algorithms.setScore(additional_score)
if array_type == 'RNASeq':
manufacturer = 'RNASeq'
if 'CEL' in run_from_scratch: run_from_scratch = 'Process RNA-seq reads'
if build_exon_bedfile == 'yes': run_from_scratch = 'buildExonExportFiles'
if run_from_scratch == 'Process AltAnalyze filtered': expression_data_format = 'log' ### This is switched to log no matter what, after initial import and analysis of CEL or BED files
### These variables are modified from the defaults in the module UI as below
excludeNonExpExons = True
if avg_all_for_ss == 'yes': avg_all_for_ss = 'yes'
elif 'all exon aligning' in avg_all_for_ss or 'known exons' in avg_all_for_ss or 'expressed exons' in avg_all_for_ss:
if 'known exons' in avg_all_for_ss and array_type == 'RNASeq': excludeNonExpExons = False
avg_all_for_ss = 'yes'
else: avg_all_for_ss = 'no'
if run_MiDAS == 'NA': run_MiDAS = 'no'
if perform_alt_analysis == 'yes': perform_alt_analysis = 'yes'
elif perform_alt_analysis == 'expression': perform_alt_analysis = 'expression'
elif perform_alt_analysis == 'just expression': perform_alt_analysis = 'expression'
elif perform_alt_analysis == 'no': perform_alt_analysis = 'expression'
elif platform != "3'array": perform_alt_analysis = 'both'
if systemToUse != None: array_type = systemToUse
try: permute_p_threshold = float(permute_p_threshold)
except Exception: permute_p_threshold = permute_p_threshold
### Store variables for AltAnalyzeMain
expr_var = species,array_type,manufacturer,constitutive_source,dabg_p,expression_threshold,avg_all_for_ss,expression_data_format,include_raw_data,run_from_scratch,perform_alt_analysis
alt_var = analysis_method,p_threshold,filter_probeset_types,alt_exon_fold_variable,gene_expression_cutoff,remove_intronic_junctions,permute_p_threshold,perform_permutation_analysis, export_NI_values, analyze_all_conditions
additional_var = calculate_normIntensity_p, run_MiDAS, use_direct_domain_alignments_only, microRNA_prediction_method, filter_for_AS, additional_algorithms
goelite_var = ge_fold_cutoffs,ge_pvalue_cutoffs,ge_ptype,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,goelite_permutations,mod,returnPathways
if run_from_scratch == 'buildExonExportFiles':
fl = UI.ExpressionFileLocationData('','','',''); fl.setExonBedBuildStatus('yes'); fl.setFeatureNormalization('none')
fl.setCELFileDir(cel_file_dir); fl.setArrayType(array_type); fl.setOutputDir(output_dir)
fl.setMultiThreading(multiThreading)
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl; parent_dir = output_dir
perform_alt_analysis = 'expression'
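    ### ExpressionInput naming convention relied on below (inferred from the string replacements):
    ### exp.NAME.txt (expression values), groups.NAME.txt (sample-to-group assignments), comps.NAME.txt (group comparisons)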
if run_from_scratch == 'Process Expression file':
if len(input_exp_file)>0:
if groups_file != None and comps_file != None:
if 'exp.' in input_exp_file: new_exp_file = input_exp_file
else:
new_exp_file = export.findParentDir(input_exp_file)+'exp.'+export.findFilename(input_exp_file)
if 'ExpressionInput' not in new_exp_file:
### This expression file is not currently used (could make it the default after copying to this location)
if output_dir[-1] != '/' and output_dir[-1] != '\\':
output_dir += '/'
new_exp_file = output_dir+'ExpressionInput/'+export.findFilename(new_exp_file)
try: export.copyFile(input_exp_file, new_exp_file)
except Exception: print 'Expression file already present in target location.'
try: export.copyFile(groups_file, string.replace(new_exp_file,'exp.','groups.'))
except Exception: print 'Groups file already present in target location OR bad input path.'
try: export.copyFile(comps_file, string.replace(new_exp_file,'exp.','comps.'))
except Exception: print 'Comparison file already present in target location OR bad input path.'
groups_file = string.replace(new_exp_file,'exp.','groups.')
comps_file = string.replace(new_exp_file,'exp.','comps.')
input_exp_file = new_exp_file
if verifyGroupFileFormat(groups_file) == False:
print "\nWarning! The format of your groups file is not correct. For details, see:\nhttp://code.google.com/p/altanalyze/wiki/ManualGroupsCompsCreation\n"
sys.exit()
try:
cel_files, array_linker_db = ExpressionBuilder.getArrayHeaders(input_exp_file)
if len(input_stats_file)>1: ###Make sure the files have the same arrays and order first
cel_files2, array_linker_db2 = ExpressionBuilder.getArrayHeaders(input_stats_file)
if cel_files2 != cel_files:
print "The probe set p-value file:\n"+input_stats_file+"\ndoes not have the same array order as the\nexpression file. Correct before proceeding."; sys.exit()
except Exception: print '\nWARNING...Expression file not found: "'+input_exp_file+'"\n\n'; sys.exit()
exp_name = string.replace(exp_name,'exp.',''); dataset_name = exp_name; exp_name = string.replace(exp_name,'.txt','')
groups_name = 'ExpressionInput/groups.'+dataset_name; comps_name = 'ExpressionInput/comps.'+dataset_name
groups_file_dir = output_dir+'/'+groups_name; comps_file_dir = output_dir+'/'+comps_name
groups_found = verifyFile(groups_file_dir)
comps_found = verifyFile(comps_file_dir)
if ((groups_found != 'found' or comps_found != 'found') and analyze_all_conditions != 'all groups') or (analyze_all_conditions == 'all groups' and groups_found != 'found'):
files_exported = UI.predictGroupsAndComps(cel_files,output_dir,exp_name)
if files_exported == 'yes': print "AltAnalyze inferred a groups and comps file from the CEL file names."
elif run_lineage_profiler == 'yes' and input_file_dir != None and pipelineAnalysis == False and '--runLineageProfiler' in arguments: pass
else: print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
fl = UI.ExpressionFileLocationData(input_exp_file,input_stats_file,groups_file_dir,comps_file_dir)
dataset_name = exp_name
if analyze_all_conditions == "all groups":
try: array_group_list,group_db = UI.importArrayGroupsSimple(groups_file_dir,cel_files)
except Exception:
print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
print len(group_db), 'groups found'
if len(group_db) == 2: analyze_all_conditions = 'pairwise'
exp_file_location_db={}; exp_file_location_db[exp_name]=fl
elif run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or run_from_scratch == 'Process Feature Extraction files':
if groups_file != None and comps_file != None:
try: shutil.copyfile(groups_file, string.replace(exp_file_dir,'exp.','groups.'))
except Exception: print 'Groups file already present in target location OR bad input path.'
try: shutil.copyfile(comps_file, string.replace(exp_file_dir,'exp.','comps.'))
except Exception: print 'Comparison file already present in target location OR bad input path.'
stats_file_dir = string.replace(exp_file_dir,'exp.','stats.')
groups_file_dir = string.replace(exp_file_dir,'exp.','groups.')
comps_file_dir = string.replace(exp_file_dir,'exp.','comps.')
groups_found = verifyFile(groups_file_dir)
comps_found = verifyFile(comps_file_dir)
if ((groups_found != 'found' or comps_found != 'found') and analyze_all_conditions != 'all groups') or (analyze_all_conditions == 'all groups' and groups_found != 'found'):
if mappedExonAnalysis: pass
else:
files_exported = UI.predictGroupsAndComps(cel_files,output_dir,exp_name)
if files_exported == 'yes': print "AltAnalyze inferred a groups and comps file from the CEL file names."
#else: print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
fl = UI.ExpressionFileLocationData(exp_file_dir,stats_file_dir,groups_file_dir,comps_file_dir)
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl
        parent_dir = output_dir ### interchangeable terms (parent_dir used with expression file import)
if analyze_all_conditions == "all groups":
array_group_list,group_db = UI.importArrayGroupsSimple(groups_file_dir,cel_files)
UI.exportGroups(exp_file_location_db,array_group_list)
print len(group_db), 'groups found'
if len(group_db) == 2: analyze_all_conditions = 'pairwise'
try: fl.setRunKallisto(input_fastq_dir)
except Exception: pass
elif run_from_scratch == 'Process AltAnalyze filtered':
if '.txt' in input_filtered_dir: ### Occurs if the user tries to load a specific file
dirs = string.split(input_filtered_dir,'/')
input_filtered_dir = string.join(dirs[:-1],'/')
fl = UI.ExpressionFileLocationData('','','',''); dataset_name = 'filtered-exp_dir'
dirs = string.split(input_filtered_dir,'AltExpression'); parent_dir = dirs[0]
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset_name]
file_location_defaults = UI.importDefaultFileLocations()
apt_location = UI.getAPTLocations(file_location_defaults,run_from_scratch,run_MiDAS)
fl.setAPTLocation(apt_location)
if run_from_scratch == 'Process CEL files':
if xhyb_remove == 'yes' and (array_type == 'gene' or array_type == 'junction'): xhyb_remove = 'no' ### This is set when the user mistakenly selects exon array, initially
fl.setInputCDFFile(input_cdf_file); fl.setCLFFile(clf_file); fl.setBGPFile(bgp_file); fl.setXHybRemoval(xhyb_remove)
fl.setCELFileDir(cel_file_dir); fl.setArrayType(array_type_original); fl.setOutputDir(output_dir)
elif run_from_scratch == 'Process RNA-seq reads':
fl.setCELFileDir(cel_file_dir); fl.setOutputDir(output_dir)
elif run_from_scratch == 'Process Feature Extraction files':
fl.setCELFileDir(cel_file_dir); fl.setOutputDir(output_dir)
fl = exp_file_location_db[dataset]; fl.setRootDir(parent_dir)
try: apt_location = fl.APTLocation()
except Exception: apt_location = ''
root_dir = fl.RootDir(); fl.setExonBedBuildStatus(build_exon_bedfile)
fl.setMarkerFinder(marker_finder)
fl.setFeatureNormalization(normalize_feature_exp)
fl.setNormMatrix(normalize_gene_data)
fl.setProbabilityStatistic(probability_statistic)
fl.setProducePlots(visualize_qc_results)
fl.setPerformLineageProfiler(run_lineage_profiler)
fl.setCompendiumType(compendiumType)
fl.setCompendiumPlatform(compendiumPlatform)
fl.setVendor(manufacturer)
try: fl.setFDRStatistic(FDR_statistic)
except Exception: pass
fl.setAnalysisMode('commandline')
fl.setBatchEffectRemoval(batch_effects)
fl.setChannelToExtract(channel_to_extract)
fl.setMultiThreading(multiThreading)
try: fl.setExcludeLowExpressionExons(excludeNonExpExons)
except Exception: fl.setExcludeLowExpressionExons(True)
if 'other' in manufacturer or 'Other' in manufacturer:
### For data without a primary array ID key
manufacturer = "other:3'array"
fl.setVendor(manufacturer)
if array_type == 'RNASeq': ### Post version 2.0, add variables in fl rather than below
fl.setRPKMThreshold(rpkm_threshold)
fl.setExonExpThreshold(exon_exp_threshold)
fl.setGeneExpThreshold(gene_exp_threshold)
fl.setExonRPKMThreshold(exon_rpkm_threshold)
fl.setJunctionExpThreshold(expression_threshold)
fl.setExonMapFile(exonMapFile)
fl.setPlatformType(platformType)
### Verify database presence
try: dirs = unique.read_directory('/AltDatabase')
except Exception: dirs=[]
if species not in dirs:
print '\n'+species,'species not yet installed. Please install before proceeding (e.g., "python AltAnalyze.py --update Official --species',species,'--version EnsMart75").'
global commandLineMode; commandLineMode = 'yes'
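    ### expr_var, alt_var, goelite_var and additional_var are the parameter tuples packed above; AltAnalyzeMain is
    ### expected to unpack them in the same order (descriptive note only).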
AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db,None)
else:
print 'Insufficient Flags entered (requires --species and --output)'
def cleanUpCommandArguments():
### Needed on PC
command_args = string.join(sys.argv,' ')
arguments = string.split(command_args,' --')
for argument in arguments:
"""
argument_list = string.split(argument,' ')
if len(argument_list)>2:
filename = string.join(argument_list[1:],' ')
argument = argument_list[0]+' '+string.replace(filename,' ','$$$')
"""
argument_list = string.split(argument,' ')
#argument = string.join(re.findall(r"\w",argument),'')
if ':' in argument: ### Windows OS
z = string.find(argument_list[1],':')
if z!= -1 and z!=1: ### Hence, it is in the argument but not at the second position
                print 'Unexpected ":" found within a command-line argument (possibly an unquoted path). Please re-type these and re-run.'; sys.exit()
def runCommandLineVersion():
### This code had to be moved to a separate function to prevent iterative runs upon AltAnalyze.py re-import
command_args = string.join(sys.argv,' ')
#try: cleanUpCommandArguments()
#except Exception: null=[]
print 3,[sys.argv],
if len(sys.argv[1:])>0 and '--' in command_args:
if '--GUI' in command_args:
### Hard-restart of AltAnalyze while preserving the prior parameters
command_arguments = string.split(command_args,' --')
if len(command_arguments)>2:
command_arguments = map(lambda x: string.split(x,' '),command_arguments)
command_arguments = map(lambda (x,y): (x,string.replace(y,'__',' ')),command_arguments[2:])
selected_parameters = [command_arguments[0][1]]
user_variables={}
for (o,v) in command_arguments: user_variables[o]=v
AltAnalyzeSetup((selected_parameters,user_variables))
else:
AltAnalyzeSetup('no') ### a trick to get back to the main page of the GUI (if AltAnalyze has Tkinter conflict)
try:
commandLineRun()
except Exception:
print traceback.format_exc()
###### Determine Command Line versus GUI Control ######
command_args = string.join(sys.argv,' ')
if len(sys.argv[1:])>1 and '-' in command_args: null=[]
else:
try:
import Tkinter
from Tkinter import *
import PmwFreeze
import tkFileDialog
from tkFont import Font
use_Tkinter = 'yes'
except ImportError: use_Tkinter = 'yes'; print "\nPmw or Tkinter not found... Tkinter print out not available";
def testResultsPanel():
import QC
file = "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/3'Array/Merrill/ExpressionInput/exp.test.txt"
#QC.outputArrayQC(file)
global root; root = Tk()
global pathway_permutations; pathway_permutations = 'NA'
global log_file; log_file = 'null.txt'
global array_type; global explicit_data_type
global run_GOElite; run_GOElite = 'run-immediately'
explicit_data_type = 'exon-only'
array_type = 'RNASeq'
fl = UI.ExpressionFileLocationData('','','','')
graphic_links = []
graphic_links.append(['PCA','PCA.png'])
graphic_links.append(['HC','HC.png'])
graphic_links.append(['PCA1','PCA.png'])
graphic_links.append(['HC1','HC.png'])
graphic_links.append(['PCA2','PCA.png'])
graphic_links.append(['HC2','HC.png'])
graphic_links.append(['PCA3','PCA.png'])
graphic_links.append(['HC3','HC.png'])
graphic_links.append(['PCA4','PCA.png'])
graphic_links.append(['HC4','HC.png'])
summary_db={}
summary_db['QC'] = graphic_links
#summary_db={}
fl.setGraphicLinks(graphic_links)
summary_db['gene_assayed'] = 1
summary_db['denominator_exp_genes'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_genes'] = 1
summary_db['direct_domain_genes'] = 1
summary_db['miRNA_gene_hits'] = 1
#summary_db={}
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
dataset = 'test'; results_dir=''
print "Analysis Complete\n";
if root !='' and root !=None:
UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'GE',results_dir,dataset,'parent',summary_db)
print 'here'
#sys.exit()
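### Logger tees stdout to both the terminal and the AltAnalyze log file; it re-opens the log in append mode on every
### write so that partial output survives a crash. It is installed earlier via sys.stdout = Logger('') once the output
### directory (and therefore log_file) is known. (Descriptive comment added for clarity.)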
class Logger(object):
def __init__(self,null):
self.terminal = sys.stdout
self.log = open(log_file, "w")
def write(self, message):
self.terminal.write(message)
self.log = open(log_file, "a")
self.log.write(message)
self.log.close()
def flush(self): pass
def verifyPath(filename):
### See if the file is in the current working directory
new_filename = filename
try:
cwd = os.getcwd()
files = unique.read_directory(cwd)
if filename in files:
new_filename = cwd+'/'+new_filename
except Exception:
pass
return new_filename
def dependencyCheck():
### Make sure core dependencies for AltAnalyze are met and if not report back
from pkgutil import iter_modules
modules = set(x[1] for x in iter_modules()) ### all installed modules
dependent_modules = ['string','csv','base64','getpass','requests']
dependent_modules += ['warnings','sklearn','os','webbrowser']
dependent_modules += ['scipy','numpy','matplotlib','igraph','pandas','patsy']
dependent_modules += ['ImageTk','PIL','cairo','wx','fastcluster','pysam', 'Tkinter']
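    ### Missing modules can usually be installed with pip, e.g. "pip install numpy scipy matplotlib pandas"
    ### (illustrative; some import names map to different package names: PIL/ImageTk -> Pillow, igraph -> python-igraph,
    ### cairo -> pycairo, wx -> wxPython).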
print ''
count=0
for module in dependent_modules:
if module not in modules:
print 'AltAnalyze dependency not met for:',module
if 'fastcluster' == module:
print '...Faster hierarchical cluster not supported without fastcluster'
if 'pysam' == module:
print '...BAM file access not supported without pysam'
if 'scipy' == module:
print '...Many required statistical routines not supported without scipy'
if 'numpy' == module:
print '...Many required statistical routines not supported without numpy'
if 'matplotlib' == module:
print '...Core graphical outputs not supported without matplotlib'
if 'requests' == module:
print '...Wikipathways visualization not supported without requests'
if 'lxml' == module:
print '...Wikipathways visualization not supported without lxml'
if 'wx' == module:
print '...The AltAnalyze Results Viewer requires wx'
if 'ImageTk' == module or 'PIL' == module:
print '...Some graphical results displays require ImageTk and PIL'
if 'Tkinter' == module:
print '...AltAnalyze graphical user interface mode requires Tkinter'
if 'igraph' == module or 'cairo' == module:
print '...Network visualization requires igraph and cairo'
if 'sklearn' == module:
print '...t-SNE analysis requires sklearn'
if 'pandas' == module or 'patsy' == module:
print '...Combat batch effects correction requires pandas and patsy'
count+=1
if count>0:
print '\nWARNING!!!! Some dependencies are not currently met.'
print "This may impact AltAnalyze's performance\n"
if __name__ == '__main__':
try: mlp.freeze_support()
except Exception: pass
#testResultsPanel()
skip_intro = 'yes'; #sys.exit()
#skip_intro = 'remoteViewer'
runCommandLineVersion()
dependencyCheck()
if use_Tkinter == 'yes': AltAnalyzeSetup(skip_intro)
""" To do list:
1) RNA-Seq and LineageProfiler: threshold based RPKM expression filtering for binary absent present gene and exon calls
3) SQLite for gene-set databases prior to clustering and network visualization
5) (explored - not good) Optional algorithm type of PCA
7) (partially) Integrate splicing factor enrichment analysis (separate module?)
11) Update fields in summary combined alt.exon files (key by probeset)
12) Check field names for junction, exon, RNA-Seq in summary alt.exon report
14) Proper FDR p-value for alt.exon analyses (include all computed p-values)
15) Add all major clustering and LineageProfiler options to UI along with stats filtering by default
17) Support R check (and response that they need it) along with GUI gcrma, agilent array, hopach, combat
18) Probe-level annotations from Ensembl (partial code in place) and probe-level RMA in R (or possibly APT) - google pgf for U133 array
19) Update the software from within the software
Advantages of this tool kit:
0) Easiest to use, hands down
1) Established and novel functionality for transcriptome/proteomics analysis built in
2) Independent and cooperative options for RNA-Seq and array analysis (splicing and gene expression)
3) Superior functional analyses (TF-target, splicing-factor target, lineage markers, WikiPathway visualization)
4) Options for different levels of users with different integration options (multiple statistical method options, optional R support)
5) Built in secondary analysis options for already processed data (graphing, clustering, biomarker discovery, pathway analysis, network visualization)
6) Incorporates highly validated alternative exon identification methods, independent and jointly
Primary Engineer Work:
0) C-library calls and/or multithreading where applicable to improve performance.
1) MySQL or equivalent transition for all large database queries (e.g., HuEx 2.1 on-the-fly coordinate mapping).
3) Isoform-domain network visualization and WP overlays.
4) Webservice calls to in silico protein translation, domain prediction, splicing factor regulation.
### 2.0.9
monocle integration
generic and cell classification machine learning
PCR primer design (gene centric after file selection)
BAM->BED (local SAMTools)
updated APT
"""
|
wuxue/altanalyze
|
AltAnalyze.py
|
Python
|
apache-2.0
| 493,280
|
[
"Cytoscape",
"pysam"
] |
4a0767cbe64ef7020088768d5b6f1d234d93a57240f5a709f279bf1d2dbdfeda
|
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import pymysql
import dbInfo
import optparse
import smtplib
from email.message import EmailMessage
from smtplib import SMTPRecipientsRefused
import time
from datetime import timedelta, datetime
import mailInfo
sys.path.append("galaxyharvester.net")
sys.path.append("html")
import ghNames
import serverBest
import dbShared
def ghConn():
conn = pymysql.connect(host = dbInfo.DB_HOST,
db = dbInfo.DB_NAME,
user = dbInfo.DB_USER,
passwd = dbInfo.DB_PASS)
conn.autocommit(True)
return conn
# Creates alert records for specified alert types
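# alertTypes appears to act as a small flag-style value (interpretation inferred from the branches below):
# odd values create a site alert (type 1), values >= 4 also create a mobile alert (type 4),
# and any value other than 1, 4 or 5 additionally creates and mails an email alert (type 2).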
def addAlert(userID, alertTypes, msgText, link, alertTitle):
msgText = dbShared.dbInsertSafe(msgText)
alertTitle = dbShared.dbInsertSafe(alertTitle)
if len(msgText) + len(alertTitle) + 3 > 1023:
# Truncate the message so it will fit
msgText = msgText[:(1020 - len(alertTitle))]
msgText = msgText[:msgText[:-9].rfind("\n")]
msgText = msgText + "\n more..."
conn = ghConn()
cursor = conn.cursor()
if (alertTypes % 2 == 1):
cursor.execute("".join(("INSERT INTO tAlerts (userID, alertType, alertTime, alertMessage, alertLink, alertStatus) VALUES ('", userID, "', 1, NOW(), '", alertTitle, " - ", msgText, "', '", link, "', 0);")))
homeid = cursor.lastrowid
if (alertTypes >= 4):
cursor.execute("".join(("INSERT INTO tAlerts (userID, alertType, alertTime, alertMessage, alertLink, alertStatus) VALUES ('", userID, "', 4, NOW(), '", alertTitle, " - ", msgText, "', '", link, "', 0);")))
mobileid = cursor.lastrowid
if (alertTypes != 1 and alertTypes != 4 and alertTypes != 5):
cursor.execute("".join(("INSERT INTO tAlerts (userID, alertType, alertTime, alertMessage, alertLink, alertStatus) VALUES ('", userID, "', 2, NOW(), '", alertTitle, " - ", msgText, "', '", link, "', 0);")))
emailid = cursor.lastrowid
cursor.close()
sendAlertMail(conn, userID, msgText, link, emailid, alertTitle)
else:
cursor.close()
def sendAlertMail(conn, userID, msgText, link, alertID, alertTitle):
# Don't try to send mail if we exceeded quota within last hour
lastFailureTime = datetime(2000, 1, 1, 12)
currentTime = datetime.fromtimestamp(time.time())
timeSinceFailure = currentTime - lastFailureTime
try:
f = open("last_email_failure.txt")
lastFailureTime = datetime.strptime(f.read().strip(), "%Y-%m-%d %H:%M:%S")
f.close()
timeSinceFailure = currentTime - lastFailureTime
except IOError as e:
sys.stdout.write("No last failure time\n")
if timeSinceFailure.days < 1 and timeSinceFailure.seconds < 3660:
return 1
# look up the user email
cursor = conn.cursor()
cursor.execute("SELECT emailAddress FROM tUsers WHERE userID='" + userID + "';")
row = cursor.fetchone()
if row == None:
result = "bad username"
else:
email = row[0]
if (email.find("@") > -1):
# send message
message = EmailMessage()
message['From'] = "".join(("\"Galaxy Harvester Alerts\" <", mailInfo.ALERTMAIL_USER, "@galaxyharvester.net>"))
message['To'] = email
message['Subject'] = "".join(("Galaxy Harvester ", alertTitle))
message.set_content("".join(("Hello ", userID, ",\n\n", msgText, "\n\n", link, "\n\n You can manage your alerts at http://galaxyharvester.net/myAlerts.py\n")))
message.add_alternative("".join(("<div><img src='http://galaxyharvester.net/images/ghLogoLarge.png'/></div><p>Hello ", userID, ",</p><br/><p>", msgText.replace("\n", "<br/>"), "</p><p><a style='text-decoration:none;' href='", link, "'><div style='width:170px;font-size:18px;font-weight:600;color:#feffa1;background-color:#003344;padding:8px;margin:4px;border:1px solid black;'>View in Galaxy Harvester</div></a><br/>or copy and paste link: ", link, "</p><br/><p>You can manage your alerts at <a href='http://galaxyharvester.net/myAlerts.py'>http://galaxyharvester.net/myAlerts.py</a></p><p>-Galaxy Harvester Bot</p>")), subtype='html')
mailer = smtplib.SMTP(mailInfo.MAIL_HOST)
mailer.login(mailInfo.ALERTMAIL_USER, mailInfo.MAIL_PASS)
try:
mailer.send_message(message)
result = 'email sent'
except SMTPRecipientsRefused as e:
result = 'email failed'
sys.stderr.write('Email failed - ' + str(e))
trackEmailFailure(datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S"))
mailer.quit()
# update alert status
if ( result == 'email sent' ):
cursor.execute('UPDATE tAlerts SET alertStatus=1, statusChanged=NOW() WHERE alertID=' + str(alertID) + ';')
else:
result = 'Invalid email.'
cursor.close()
def checkSpawnAlerts(conn, spawnName, alertValue, galaxy, enteredBy, stats, galaxyName):
# array of stat titles for making message
statNames = ["CR","CD","DR","FL","HR","MA","PE","OQ","SR","UT","ER"]
# open filters for the type
cursor = conn.cursor()
cursor.execute("SELECT userID, alertTypes, CRmin, CDmin, DRmin, FLmin, HRmin, MAmin, PEmin, OQmin, SRmin, UTmin, ERmin, fltType, fltValue, minQuality FROM tFilters WHERE galaxy=" + str(galaxy) + " AND alertTypes > 0 AND ((fltType = 1 AND fltValue = '" + alertValue + "') OR (fltType = 2 AND '" + alertValue + "' IN (SELECT resourceType FROM tResourceTypeGroup WHERE resourceGroup=fltValue)))")
row = cursor.fetchone()
# check each filter for this resource type/group
while row != None:
sendAlert = True
statStr = ""
alertMessage = ""
if row[15] is not None and row[15] > 0:
# Check resource to see if it hits min quality
qualityTotal = 0.0
for x in range(11):
if row[x+2] > 0 and stats[x] != None:
thisValue = 1.0*stats[x]*(row[x+2]/100.0)
qualityTotal = qualityTotal + thisValue
statStr = statStr + statNames[x] + " " + str(row[x+2]) + "% "
if qualityTotal < row[15]:
sendAlert = False
else:
alertMessage = ' named {0} added to {1} with quality score {2:.0f} for {3}'.format(spawnName, galaxyName, qualityTotal, statStr)
else:
# check to see if min stats hit
for x in range(11):
if (row[x+2]) > 0:
if stats[x] is None or (stats[x] < row[x+2]):
sendAlert = False
else:
statStr = statStr + statNames[x] + ": " + str(stats[x]) + ", "
if len(statStr) > 1:
statStr = statStr[:-2]
if sendAlert:
alertMessage = ' named {0} added to {1} with stats {2}'.format(spawnName, galaxyName, statStr)
# add alert records if stats or quality triggered
if sendAlert:
# Look up the name for the alert value
typeGroup = row[14]
if row[13] == 1:
typeGroup = ghNames.getResourceTypeName(row[14])
else:
typeGroup = ghNames.getResourceGroupName(row[14])
addAlert(row[0], row[1], typeGroup + alertMessage, 'http://galaxyharvester.net/resource.py/' + str(galaxy) + '/' + spawnName, 'Resource Spawn Alert')
row = cursor.fetchone()
cursor.close()
def checkDespawnAlerts(conn, spawnID, spawnName, galaxyName, unavailable, galaxy):
cursor = conn.cursor()
cursor.execute('SELECT userID, despawnAlert FROM tFavorites WHERE itemID={0} AND despawnAlert > 0;'.format(spawnID))
row = cursor.fetchone()
while row != None:
addAlert(row[0], row[1], 'Resource named ' + spawnName + ' on ' + galaxyName + ' despawned at ' + str(unavailable), 'http://galaxyharvester.net/resource.py/' + str(galaxy) + '/' + spawnName, 'Resource Despawn Alert')
row = cursor.fetchone()
cursor.close()
def checkServerBest(conn, spawnID, spawnName, galaxy, galaxyName):
result = serverBest.checkSpawn(spawnID, 'history')
for x in range(len(result[1])):
schematicStr = ''
bestStr = ''
for k, v in result[1][x].items():
quoteSchem = "".join(("'", k, "'"))
schematicStr = ','.join((schematicStr, quoteSchem))
bestStr = '\n'.join((bestStr, '\n'.join(v)))
if len(schematicStr) > 0:
schematicStr = schematicStr[1:]
# open people with favorites for the professions involved
cursor = conn.cursor()
cursor.execute("SELECT tFavorites.userID, defaultAlertTypes, profName FROM tFavorites INNER JOIN tUsers ON tFavorites.userID = tUsers.userID INNER JOIN tProfession ON tFavorites.itemID = tProfession.profID WHERE tFavorites.galaxy={1} AND favType=3 AND itemID={0} GROUP BY tFavorites.userID, defaultAlertTypes, profName;".format(result[0][x], galaxy))
row = cursor.fetchone()
# Add alert for each user watching for profession server bests hit by this spawn
while row != None:
addAlert(row[0], row[1], bestStr, ''.join(('http://galaxyharvester.net/resource.py/', str(galaxy), '/', spawnName)), ''.join((row[2], ' Server best alert for ', galaxyName)))
row = cursor.fetchone()
cursor.close()
# open people with favorites for the schematics involved
cursor = conn.cursor()
cursor.execute("SELECT tFavorites.userID, defaultAlertTypes, schematicID, schematicName FROM tFavorites INNER JOIN tUsers ON tFavorites.userID = tUsers.userID INNER JOIN tSchematic ON tFavorites.favGroup = tSchematic.schematicID WHERE tFavorites.galaxy={1} AND favType=4 AND favGroup IN ({0}) GROUP BY tFavorites.userID, defaultAlertTypes, schematicID, schematicName;".format(schematicStr, galaxy))
row = cursor.fetchone()
# Add alert for each user watching for schematic server bests hit by this spawn
while row != None:
addAlert(row[0], row[1], '\n'.join(result[1][x][row[2]]), ''.join(('http://galaxyharvester.net/resource.py/', str(galaxy), '/', spawnName)), ''.join((row[3], ' Server best alert for ', galaxyName)))
row = cursor.fetchone()
cursor.close()
def checkDespawnReputation(conn, spawnID, spawnName, entered, galaxy):
# open events for this despawned resource
users = {}
lastEventTime = None
alreadyRemovedFlag = False
editedFlag = False
cursor = conn.cursor()
cursor.execute("SELECT galaxy, userID, eventTime, eventType, planetID, eventDetail FROM tResourceEvents WHERE spawnID={0} ORDER BY eventTime DESC;".format(spawnID))
row = cursor.fetchone()
if row != None:
lastEventTime = row[2]
# Summarize reputation bonus for each user involved
while row != None:
if row[1] not in users:
users[row[1]] = 0
if row[3] == 'a':
if editedFlag == False:
users[row[1]] = users[row[1]] + 3
else:
users[row[1]] = users[row[1]] + 1
if row[3] == 'p':
users[row[1]] = users[row[1]] + 1
if row[3] == 'v':
users[row[1]] = users[row[1]] + 2
if row[3] == 'r':
users[row[1]] = users[row[1]] + 1
if row[3] == 'r' and row[4] == 0:
users[row[1]] = users[row[1]] + 2
if row[3] == 'e':
users[row[1]] = users[row[1]] + 2
editedFlag = True
if row[3] == 'w':
users[row[1]] = users[row[1]] + 2
if row[3] == 'n':
users[row[1]] = users[row[1]] + 2
if row[3] == 'g':
users[row[1]] = users[row[1]] + 2
if row[5] == 'previously unavailable':
alreadyRemovedFlag = True
row = cursor.fetchone()
cursor.close()
if lastEventTime != None and alreadyRemovedFlag == False:
timeSinceEntered = lastEventTime - entered
tmpDays = timeSinceEntered.days
# If the resource has not been available for at least a few days, it is being removed prematurely and is not valid for rep awards
if tmpDays > 3:
link = "/resource.py/" + str(galaxy) + "/" + spawnName
message = "You gained reputation for your contribution to tracking resource " + spawnName + "!"
for k, v in users.items():
# Award rep for users contributing at least "4 points" and exclude automated users
if v >= 4 and k != "etas" and k != "default" and k != "c0pp3r":
dbShared.logEvent("INSERT INTO tUserEvents (userID, targetType, targetID, eventType, eventTime) VALUES ('" + k + "', 'r', " + str(spawnID) + ", '+', NOW());", "+", k, galaxy, spawnID)
cursor = conn.cursor()
cursor.execute("INSERT INTO tAlerts (userID, alertType, alertTime, alertMessage, alertLink, alertStatus) VALUES ('" + k + "', 1, NOW(), '" + message + "', '" + link + "', 0);")
cursor.close()
def main():
conn = ghConn()
# First try sending any backed up alert mails
retryPendingMail(conn)
f = None
lastAddedCheckTime = ""
lastRemovedCheckTime = ""
try:
f = open("last_alerts_check_added.txt")
lastAddedCheckTime = f.read().strip()
f.close()
except IOError as e:
sys.stdout.write("No last added check time\n")
try:
f = open("last_alerts_check_removed.txt")
lastRemovedCheckTime = f.read().strip()
f.close()
except IOError as e:
sys.stdout.write("No last removed check time\n")
# Check for despawn alerts
checkRemovedStart = datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S")
if lastRemovedCheckTime == "":
sys.stderr.write("Skipping removed check.\n")
else:
# look up the despawn info
cursor = conn.cursor()
cursor.execute("SELECT spawnName, galaxy, enteredBy, resourceType, CR, CD, DR, FL, HR, MA, PE, OQ, SR, UT, ER, galaxyName, unavailable, spawnID, entered FROM tResources INNER JOIN tGalaxy ON tResources.galaxy = tGalaxy.galaxyID WHERE unavailable >= '" + lastRemovedCheckTime + "';")
row = cursor.fetchone()
while row != None:
spawnName = row[0]
galaxyName = row[15]
unavailable = row[16]
checkDespawnAlerts(conn, row[17], spawnName, galaxyName, unavailable, row[1])
checkDespawnReputation(conn, row[17], row[0], row[18], row[1])
row = cursor.fetchone()
cursor.close()
# Update tracking file
try:
f = open("last_alerts_check_removed.txt", "w")
f.write(checkRemovedStart)
f.close()
except IOError as e:
sys.stderr.write("Could not write removed tracking file")
# Check for spawn and server best alerts
checkAddedStart = datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S")
if lastAddedCheckTime == "":
sys.stderr.write("Skipping added check.\n")
else:
# look up the spawn info
cursor = conn.cursor()
cursor.execute("SELECT spawnName, galaxy, enteredBy, resourceType, CR, CD, DR, FL, HR, MA, PE, OQ, SR, UT, ER, galaxyName, unavailable, spawnID FROM tResources INNER JOIN tGalaxy ON tResources.galaxy = tGalaxy.galaxyID WHERE entered >= '" + lastAddedCheckTime + "' AND galaxyState=1 and unavailable IS NULL ORDER BY entered;")
row = cursor.fetchone()
while row != None:
alertValue = row[3]
galaxy = row[1]
spawnName = row[0]
enteredBy = row[2]
stats = [row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11],row[12],row[13],row[14]]
galaxyName = row[15]
checkSpawnAlerts(conn, spawnName, alertValue, galaxy, enteredBy, stats, galaxyName)
checkServerBest(conn, row[17], spawnName, galaxy, galaxyName)
row = cursor.fetchone()
cursor.close()
conn.close()
# Update tracking file
try:
f = open("last_alerts_check_added.txt", "w")
f.write(checkAddedStart)
f.close()
except IOError as e:
sys.stderr.write("Could not write added tracking file")
def trackEmailFailure(failureTime):
# Update tracking file
try:
f = open("last_email_failure.txt", "w")
f.write(failureTime)
f.close()
except IOError as e:
sys.stderr.write("Could not write email failure tracking file")
def retryPendingMail(conn):
# open email alerts less than 48 hours old that have not yet been successfully sent
minTime = datetime.fromtimestamp(time.time()) - timedelta(days=2)
cursor = conn.cursor()
cursor.execute("SELECT userID, alertTime, alertMessage, alertLink, alertID FROM tAlerts WHERE alertType=2 AND alertStatus=0 and alertTime > '" + minTime.strftime("%Y-%m-%d %H:%M:%S") + "';")
row = cursor.fetchone()
# try to send as long as not exceeding quota
while row != None:
fullText = row[2]
splitPos = fullText.find(" - ")
alertTitle = fullText[:splitPos]
alertBody = fullText[splitPos+3:]
result = sendAlertMail(conn, row[0], alertBody, row[3], row[4], alertTitle)
if result == 1:
sys.stderr.write("Delayed retrying rest of mail since quota reached.\n")
break
row = cursor.fetchone()
cursor.close()
if __name__ == "__main__":
main()
|
pwillworth/galaxyharvester
|
checkAlerts.py
|
Python
|
gpl-3.0
| 16,340
|
[
"Galaxy"
] |
d3c1ba0166eeda10e4a760f469ab6683f71691fbea5e34150d7e0980878bf4cb
|
#!/usr/bin/env python
# AUTHOR: Shane Gordon
# CREATED: 2015-06-16 21:46:32
import mdtraj as md
import numpy as np
def compute_rg(fname, topname, step=1):
rg = []
for chunk in md.iterload(fname, top=topname, stride=step):
rg.append(md.compute_rg(chunk))
rg = np.concatenate(rg)
return rg
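# A minimal hedged usage sketch; the trajectory and topology file names below are hypothetical placeholders.
if __name__ == '__main__':
    # Stream the trajectory in chunks and report the per-frame radius of gyration.
    rg_values = compute_rg('traj.xtc', 'topology.pdb', step=10)
    print("frames: %d, mean Rg: %.3f nm" % (len(rg_values), rg_values.mean()))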
|
s-gordon/MD-TAT
|
mdtat/analysis/rg.py
|
Python
|
mit
| 326
|
[
"MDTraj"
] |
504b4c7f5a3279585c96b9788f9f0d5a6a6b3dc6402108f7d3219dd496abc003
|
#!/usr/bin/python
# (C) 2013, Markus Wildi, markus.wildi@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
"""This modules defines various data objects.
"""
__author__ = 'markus.wildi@bluewin.ch'
import numpy as np
import math
class DataFit(object):
"""Base object
:var plotFn: plot file name
:var ambient: ambient temperature
:var ftName: name of the filter
:var pos: list of focuser positions
:var val: list of values at position
:var errx: list of errors, focuser position
:var erry: list of errors, value
:var nObjs: number of SExtractor objects
:var par: start parameters for the fitted function
"""
def __init__(self, plotFn=None, ambientTemp=None, ftName=None, pos=list(), val=list(), errx=list(), erry=list(), nObjs=None, par=None):
self.plotFn=plotFn
self.ambientTemp=ambientTemp
self.ftName=ftName
self.pos=pos
self.val=val
self.errx=errx
self.erry=erry
self.nObjs=nObjs
self.par=par
class FitFunctionFwhm(object):
def __init__(self):
# not nice, but understandable
# this one is for SymPy
self.fitFuncSS = 'p[0] + p[1] * x + p[2] * (x ** 2)+ p[3] * (x ** 4)'
self.fitFunc = lambda x, p: p[0] + p[1] * x + p[2] * (x ** 2)+ p[3] * (x ** 4) # due to optimize.fminbound
self.recpFunc = None
class DataFitFwhm(DataFit):
"""Data passed to :py:mod:`rts2saf.fitfunction.FitFunction`
:var plotFn: plot file name
:var ambient: ambient temperature
:var ftName: name of the filter
:var pos: list of focuser positions
:var val: list of values at position
:var errx: list of errors, focuser position
:var erry: list of errors, value
:var nObjs: number of SExtractor objects
:var par: start parameters for the fitted function
:var dataSxtr: list of :py:mod:`rts2saf.data.DataSxtr`
"""
def __init__( self, dataSxtr=None, *args, **kw ):
super( DataFitFwhm, self ).__init__( *args, **kw )
self.dataSxtr=dataSxtr
self.pos =np.asarray([x.focPos for x in dataSxtr])
self.val =np.asarray([x.fwhm for x in dataSxtr])
self.errx=np.asarray([x.stdFocPos for x in dataSxtr])
self.erry=np.asarray([x.stdFwhm for x in dataSxtr])
self.nObjs=[len(x.catalog) for x in dataSxtr]
# ToDo must reside outside
self.par= np.array([1., 1., 1., 1.])
fFFWHM = FitFunctionFwhm()
self.fitFunc = fFFWHM.fitFunc
self.recpFunc = fFFWHM.recpFunc
class FitFunctionFlux(object):
def __init__(self):
# not nice, but understandable
# this one is for SymPy
self.fitFuncSS = 'p[3] + p[0]* exp(-(x-p[1])**2/(2*p[2]**2))'
self.fitFunc = lambda x, p: p[3] + p[0]*np.exp(-(x-p[1])**2/(2*p[2]**2))
self.recpFunc = lambda x, p: 1./(p[3] + p[0]*np.exp(-(x-p[1])**2/(2*p[2]**2)))
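# i.e. f(x) = p[3] + p[0] * exp(-(x - p[1])**2 / (2 * p[2]**2)), an offset Gaussian;
# recpFunc is simply its reciprocal 1/f(x).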
class DataFitFlux(DataFit):
"""Data passed to :py:mod:`rts2saf.fitfunction.FitFunction`
:var plotFn: plot file name
:var ambient: ambient temperature
:var ftName: name of the filter
:var pos: list of focuser positions
:var val: list of values at position
:var errx: list of errors, focuser position
:var erry: list of errors, value
:var nObjs: number of SExtractor objects
:var par: start parameters for the fitted function
:var dataSxtr: list of :py:mod:`rts2saf.data.DataSxtr`
:var dataFitFwhm: :py:mod:`rts2saf.data.DataFitFwhm`
:var i_flux: index to field flux in :py:mod:`rts2saf.data.DataSxtr`.catalog
"""
def __init__( self, dataSxtr=None, dataFitFwhm=None, i_flux=None, *args, **kw ):
super( DataFitFlux, self ).__init__( *args, **kw )
self.dataSxtr=dataSxtr
self.i_flux=i_flux
self.dataFitFwhm=dataFitFwhm
self.pos =np.asarray([x.focPos for x in dataSxtr])
self.val =np.asarray([x.flux for x in dataSxtr])
self.errx=np.asarray([x.stdFocPos for x in dataSxtr])
self.erry=np.asarray([x.stdFlux for x in dataSxtr])
self.nObjs=[len(x.catalog) for x in dataSxtr]
self.par= None # see below
fFFlux = FitFunctionFlux()
self.fitFunc = fFFlux.fitFunc
self.recpFunc = fFFlux.recpFunc
# scale the values [a.u.]
mfw=max(self.dataFitFwhm.val)
mfl=max(self.val)
sv = [mfw/mfl * x for x in self.val]
sstd = [mfw/mfl * x for x in self.erry]
self.val=sv
self.erry=sstd
# start values for fit
x=np.array([p for p in self.pos])
y=np.array([v for v in self.val])
wmean= np.average(a=x, weights=y)
xy= zip(x,y)
wstd = np.std(a=xy)
# fit Gaussian
self.par= np.array([ 10., wmean, wstd/40., 2.]) # ToDo !!!!!!!!!!!!!!!!!!!!!!!!!!!!
class ResultMeans(object):
"""Store and calculate various weighted means.
:var dataFit: :py:mod:`rts2saf.data.DataFit`
:var logger: :py:mod:`rts2saf.log`
"""
def __init__(self, dataFit=None, logger=None):
self.dataFit=dataFit
self.logger=logger
self.nmbrObjects=None
self.val=None
self.stdVal=None
self.combined=None
self.nObjsC = self.dataFit.nObjs[:]
self.posC = self.dataFit.pos[:]
self.valC = self.dataFit.val[:]
self.stdValC= self.dataFit.erry[:]
# remove elements with val=0
# while True:
# try:
# valC.index(0.)
# except Exception, e:
# self.logger.warn('ResultMeans: valC.index, error:{0}'.format(e))
# break # ToDo what happens here really
#
# del self.nObjsC[ind] # not strictly necessary
# del self.posC[ind]
# del self.valC[ind]
# del self.stdValC[ind]
def calculate(self, var=None):
"""Calculate weighted means based on
1) number of sextracted objects
2) median FWHM, flux
3) average standard deviation of FWHM, Flux
4) a combination of above variables
"""
#Weighted means based on number of extracted objects (stars)
try:
self.nmbrObjects= np.average(a=self.posC, axis=0, weights=self.nObjsC)
except Exception, e:
self.logger.warn('ResultMeans: can not calculate weightedMeanObjects:\n{0}'.format(e))
try:
self.logger.info('ResultMeans: FOC_DEF: {0:5d} : weighted mean derived from sextracted objects'.format(int(self.nmbrObjects)))
except Exception, e:
self.logger.warn('ResultMeans: can not convert weightedMeanObjects:\n{0}'.format(e))
# Weighted mean based on median FWHM, Flux
if var in 'FWHM':
wght= [ 1./x for x in self.valC ]
else:
wght= [ x for x in self.valC ]
try:
self.val= np.average(a=self.posC, axis=0, weights=wght)
except Exception, e:
self.logger.warn('ResultMeans: can not calculate weightedMean{0}:\n{0}'.format(var,e))
try:
self.logger.info('ResultMeans: FOC_DEF: {0:5d} : weighted mean derived from {1}'.format(int(self.val), var))
except Exception, e:
self.logger.warn('ResultMeans: can not convert weightedMean{0}:\n{1}'.format(var,e))
# Weighted mean based on median std(FWHM, Flux)
try:
self.stdVal= np.average(a=self.posC, axis=0, weights=[ 1./x for x in self.stdValC])
except Exception, e:
self.logger.warn('ResultMeans: can not calculate weightedMeanStd{0}:\n{1}'.format(var,e))
try:
self.logger.info('ResultMeans: FOC_DEF: {0:5d} : weighted mean derived from std({1})'.format(int(self.stdVal), var))
except Exception, e:
self.logger.warn('ResultMeans: can not convert weightedMeanStd{0}:\n{1}'.format(var,e))
# Weighted mean based on a combination of variables
combined=list()
for i, v in enumerate(self.nObjsC):
combined.append( self.nObjsC[i]/(self.stdValC[i] * self.valC[i]))
try:
self.combined= np.average(a=self.posC, axis=0, weights=combined)
except Exception, e:
self.logger.warn('ResultMeans: can not calculate weightedMeanCombined{0}:\n{1}'.format(var,e))
try:
self.logger.info('ResultMeans: FOC_DEF: {0:5d} : weighted mean derived from Combined{1}'.format(int(self.combined),var))
except Exception, e:
self.logger.warn('ResultMeans: can not convert weightedMeanCombined{0}:\n{1}'.format(var, e))
def logWeightedMeans(self, ftw=None, ft=None):
"""Log weighted means to file.
"""
if self.nmbrObjects:
self.logger.info('Focus: {0:5.0f}: weightedMeanObjects, filter wheel:{1}, filter:{2}'.format(self.nmbrObjects, ftw.name, ft.name))
if self.val:
self.logger.info('Focus: {0:5.0f}: weightedMeanFwhm, filter wheel:{1}, filter:{2}'.format(self.val, ftw.name, ft.name))
if self.stdVal:
self.logger.info('Focus: {0:5.0f}: weightedMeanStdFwhm, filter wheel:{1}, filter:{2}'.format(self.stdVal, ftw.name, ft.name))
if self.combined:
self.logger.info('Focus: {0:5.0f}: weightedMeanCombined, filter wheel:{1}, filter:{2}'.format(self.combined, ftw.name, ft.name))
class ResultFit(object):
"""Results calculated in :py:mod:`rts2saf.fitfunction.FitFunction` passed to :py:mod:`rts2saf.fitdisplay.FitDisplay`
:var ambient: ambient temperature
:var ftName: name of the filter
:var extrFitPos: focuser position of the extreme
:var extrFitVal: value of the extreme
:var fitPar: fit parameters from numpy
:var fitFlag: fit flag from numpy
:var color: color of the points
:var ylabel: label text of the y-axis
:var titleResult: title of the plot
"""
def __init__(self, ambientTemp=None, ftName=None, extrFitPos=None, extrFitVal=None, fitPar=None, fitFlag=None, color=None, ylabel=None, titleResult=None):
self.ambientTemp=ambientTemp
self.ftName=ftName
self.extrFitPos=extrFitPos
self.extrFitVal=extrFitVal
self.fitPar=fitPar
self.fitFlag=fitFlag
self.color=color
self.ylabel=ylabel
self.titleResult=titleResult
self.accepted = False
class DataSxtr(object):
"""Main data object holding data of single focus run.
:var date: date from FITS header
:var fitsFn: FITS file name
:var focPos: FOC_DEF from FITS header
:var stdFocPos: error of focus position
:var fwhm: FWHM from :py:mod:`rts2saf.sextractor.Sextractor`
:var stdFwhm: standard deviation from :py:mod:`rts2saf.sextractor.Sextractor`
:var nstars: number of objects :py:mod:`rts2saf.sextractor.Sextractor`
:var ambient: ambient temperature from FITS header
:var catalog: rawCatalog of all sextracted objects from :py:mod:`rts2saf.sextractor.Sextractor`
:var catalog: catalog of sextracted and cleaned objects from :py:mod:`rts2saf.sextractor.Sextractor`
:var fields: SExtractor parameter fields passed to :py:mod:`rts2saf.sextractor.Sextractor`
:var binning: binning from FITS header
:var binningXY: binningXY from FITS header
:var naxis1: from FITS header
:var naxis2: from FITS header
:var ftName: name of the filter
:var ftAName:
:var ftBName:
:var ftCName:
:var assocFn: name of the ASSOC file used by Sextractor
:var logger: :py:mod:`rts2saf.log`
"""
def __init__(self, date=None, fitsFn=None, focPos=None, stdFocPos=None, fwhm=None, stdFwhm=None, flux=None, stdFlux=None, nstars=None, ambientTemp=None, rawCatalog=None, catalog=None, binning=None, binningXY=None, naxis1=None, naxis2=None, fields=None, ftName=None, ftAName=None, ftBName=None, ftCName=None, assocFn=None):
self.date=date
self.fitsFn=fitsFn
self.focPos=focPos
self.stdFocPos=stdFocPos
self.fwhm=fwhm
self.stdFwhm=stdFwhm
self.nstars=nstars
self.ambientTemp=ambientTemp
self.rawCatalog=rawCatalog
self.catalog=catalog
# ToDo ugly
try:
self.nObjs=len(self.catalog)
except Exception, e:
pass
self.fields=fields
self.binning=binning
self.binningXY=binningXY
self.naxis1=naxis1
self.naxis2=naxis2
self.ftName=ftName
self.ftAName=ftAName
self.ftBName=ftBName
self.ftCName=ftCName
# filled in analyze
self.flux=flux # unittest!
self.stdFlux=stdFlux
self.assocFn=assocFn
self.assocCount=0
self.reducedCatalog=list()
def fillFlux(self, i_flux=None, logger=None):
# ToDo create a deepcopy() method,
# if logger is a member variable then error message:
# TypeError: object.__new__(thread.lock) is not safe, use thread.lock.__new__()
# appears.
"""Calculate median of flux as well as its standard deviation (this is not provided by :py:mod:`rts2saf.sextractor.Sextractor`), used by :py:mod:`rts2saf.sextract.Sextract`.sextract.
"""
fluxv = [x[i_flux] for x in self.catalog]
fluxm=np.median(fluxv)
if np.isnan(fluxm):
logger.warn( 'data: focPos: {0:5.0f}, raw objects: {1}, flux is NaN, numpy failed on {2}'.format(self.focPos, self.nstars, self.fitsFn))
else:
self.flux=fluxm
self.stdFlux= np.average([ math.sqrt(x) for x in fluxv]) # ToDo hope that is ok
def fillData(self, i_fwhm=None, i_flux=None):
"""helper method used by :py:mod:`rts2saf.datarun.DataRun`.onAlmostImagesAssoc"""
fwhmv = [x[i_fwhm] for x in self.catalog]
self.fwhm=np.median(fwhmv)
self.stdFwhm= np.std(fwhmv)
if i_flux !=None:
fluxv = [x[i_flux] for x in self.catalog]
self.flux=np.median(fluxv)
self.stdFlux= np.average([ math.sqrt(x) for x in fluxv])
def toReducedCatalog(self):
"""Helper method to copy data for later analysis."""
# http://stackoverflow.com/questions/2612802/how-to-clone-or-copy-a-list-in-python
# self.rawCatalog=list() self.catalog[:]
self.reducedCatalog=[list(x) for x in self.catalog]
self.catalog=list()
|
RTS2/rts2
|
scripts/rts2saf/rts2saf/data.py
|
Python
|
lgpl-3.0
| 15,375
|
[
"Gaussian",
"VisIt"
] |
db4f5f15dff545fa812807c6e021186f89231b33bcc3da40dbb84191ed937507
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This example demonstrates how to get host information from a request object.
To test the script, rename the file to report.rpy, and move it to any directory,
let's say /var/www/html/.
Now, start your Twist web server:
$ twistd -n web --path /var/www/html/
Then visit http://127.0.0.1:8080/report.rpy in your web browser.
"""
from twisted.web.resource import Resource
class ReportResource(Resource):
def render_GET(self, request):
path = request.path
host = request.getHost().host
port = request.getHost().port
url = request.prePathURL()
uri = request.uri
secure = (request.isSecure() and "securely") or "insecurely"
return ("""\
<HTML>
<HEAD><TITLE>Welcome To Twisted Python Reporting</title></head>
<BODY><H1>Welcome To Twisted Python Reporting</H1>
<UL>
<LI>The path to me is %(path)s
<LI>The host I'm on is %(host)s
<LI>The port I'm on is %(port)s
<LI>I was accessed %(secure)s
<LI>A URL to me is %(url)s
<LI>My URI to me is %(uri)s
</UL>
</body>
</html>""" % vars())
resource = ReportResource()
|
tzewangdorje/SIPserv
|
Twisted-13.1.0/doc/web/examples/report.rpy.py
|
Python
|
gpl-3.0
| 1,191
|
[
"VisIt"
] |
28bb925b185609380be2848e9c3e273a39b4008f3948e036e6181782407bd9f3
|
"""
SDSS Images
-----------
This script plots an example quasar, star, and galaxy image for use in
the tutorial.
"""
import os
import urllib2
import pylab as pl
from matplotlib import image
def _fetch(outfile, RA, DEC, scale=0.2, width=400, height=400):
"""Fetch the image at the given RA, DEC from the SDSS server"""
url = ("http://casjobs.sdss.org/ImgCutoutDR7/"
"getjpeg.aspx?ra=%.8f&dec=%.8f&scale=%.2f&width=%i&height=%i"
% (RA, DEC, scale, width, height))
print "downloading %s" % url
print " -> %s" % outfile
fhandle = urllib2.urlopen(url)
open(outfile, 'w').write(fhandle.read())
def fetch_image(object_type):
"""Return the data array for the image of object type"""
if not os.path.exists('downloads'):
os.makedirs('downloads')
filename = os.path.join('downloads', '%s_image.jpg' % object_type)
if not os.path.exists(filename):
RA = image_locations[object_type]['RA']
DEC = image_locations[object_type]['DEC']
_fetch(filename, RA, DEC)
return image.imread(filename)
image_locations = dict(star=dict(RA=180.63040108,
DEC=64.96767375),
galaxy=dict(RA=197.51943983,
DEC=0.94881436),
quasar=dict(RA=226.18451462,
DEC=4.07456639))
# Plot the images
fig = pl.figure(figsize=(9, 3))
# Check that PIL is installed for jpg support
if 'jpg' not in fig.canvas.get_supported_filetypes():
raise ValueError("PIL required to load SDSS jpeg images")
object_types = ['star', 'galaxy', 'quasar']
for i, object_type in enumerate(object_types):
ax = pl.subplot(131 + i, xticks=[], yticks=[])
I = fetch_image(object_type)
ax.imshow(I)
if object_type != 'galaxy':
pl.arrow(0.65, 0.65, -0.1, -0.1, width=0.005, head_width=0.03,
length_includes_head=True,
color='w', transform=ax.transAxes)
pl.text(0.99, 0.01, object_type, fontsize='large', color='w', ha='right',
transform=ax.transAxes)
pl.subplots_adjust(bottom=0.04, top=0.94, left=0.02, right=0.98, wspace=0.04)
pl.show()
|
astroML/sklearn_tutorial
|
examples/plot_sdss_images.py
|
Python
|
bsd-3-clause
| 2,206
|
[
"Galaxy"
] |
63a1d367b4b60eca23812ce17108bbda6302909c44c55403fc860312af685ac4
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from PyQt4 import QtGui
class Menu( object ):
"""A menu is a part of the main menu shown on the main window. Each Menu
contains a list of items the user select. Such a menu item is either a Menu
itself, an Action object or None to insert a separator.
"""
def __init__( self,
verbose_name,
items,
icon=None ):
self.verbose_name = verbose_name
self.icon = icon
self.items = items
def get_verbose_name( self ):
return self.verbose_name
def get_icon( self ):
return self.icon
def get_items( self ):
return self.items
def render( self, gui_context, parent ):
"""
:return: a :class:`QtGui.QMenu` object
"""
menu = QtGui.QMenu( unicode( self.get_verbose_name() ), parent )
for item in self.get_items():
if item == None:
menu.addSeparator()
continue
rendered_item = item.render( gui_context, menu )
if isinstance( rendered_item, QtGui.QMenu ):
menu.addMenu( rendered_item )
elif isinstance( rendered_item, QtGui.QAction ):
menu.addAction( rendered_item )
else:
raise Exception( 'Cannot handle menu items of type %s'%type( rendered_item ) )
return menu
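# A minimal hedged usage sketch (the action objects below are illustrative placeholders;
# items may be Action objects, nested Menu objects, or None for a separator):
#
#     file_menu = Menu(u'File', [new_action, open_action, None, quit_action])
#     main_menu = [file_menu, Menu(u'Relations', [parties_action])]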
|
jeroendierckx/Camelot
|
camelot/admin/menu.py
|
Python
|
gpl-2.0
| 2,452
|
[
"VisIt"
] |
3eaa624eecf58f84390f99d1e55f56108c87f34f096ce006612c5c834706d38c
|
# #############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Matthew Harrigan
# Contributors: Robert T. McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
# #############################################################################
from __future__ import print_function
import re
import ast
import sys
from copy import deepcopy
from collections import namedtuple
from mdtraj.utils.six import PY2
from mdtraj.utils.external.pyparsing import (Word, ParserElement, MatchFirst,
Keyword, opAssoc, quotedString, alphas, alphanums, infixNotation, Group,
Optional, ParseException)
from mdtraj.utils.external.astor import codegen
ParserElement.enablePackrat()
__all__ = ['parse_selection']
# ############################################################################
# Globals
# ############################################################################
NUMS = '.0123456789'
THIS_ATOM = ast.Name(id='atom', ctx=ast.Load(), SINGLETON=True)
RE_MODULE = ast.Name(id='re', ctx=ast.Load(), SINGLETON=True)
SELECTION_GLOBALS = {'re': re}
_ParsedSelection = namedtuple('_ParsedSelection', ['expr', 'source', 'astnode'])
# ############################################################################
# Utils
# ############################################################################
class _RewriteNames(ast.NodeTransformer):
def visit_Name(self, node):
if hasattr(node, 'SINGLETON'):
return node
_safe_names = {'None': None, 'True': True, 'False': False}
if node.id in _safe_names:
if sys.version_info >= (3, 4):
return ast.NameConstant(value=_safe_names[node.id])
return node
# all other bare names are taken to be string literals. Thus something
# like parse_selection('name CA') properly resolves CA as a string
# literal, not a barename to be loaded from the global scope!
return ast.Str(s=node.id)
def _chain(*attrs):
"""This transforms, for example, ('residue', 'is_protein'), into
Attribute(value=Attribute(value=THIS_ATOM,
attr='residue', ctx=Load()), attr='is_protein', ctx=Load())
"""
left = THIS_ATOM
for attr in attrs:
left = ast.Attribute(value=left, attr=attr, ctx=ast.Load())
return left
def _kw(*tuples):
"""Create a many-to-one dictionary.
_kw((['one', '1'], 'one'))
gives {'one': 'one', '1': 'one'}
"""
dic = dict()
for keys, val in tuples:
for key in keys:
dic[key] = val
return dic
def _check_n_tokens(tokens, n_tokens, name):
if not len(tokens) == n_tokens:
err = "{} take {} values. You gave {}"
err = err.format(name, n_tokens, len(tokens))
raise ParseException(err)
class SelectionKeyword(object):
keyword_aliases = _kw(
# Atom.<attribute>
(('all', 'everything'), ast.Name(id='True', ctx=ast.Load())),
(('none', 'nothing'), ast.Name(id='False', ctx=ast.Load())),
(('backbone', 'is_backbone'), _chain('is_backbone')),
(('sidechain', 'is_sidechain'), _chain('is_sidechain')),
# Atom.residue.<attribute>
(('protein', 'is_protein'), _chain('residue', 'is_protein')),
(('code', 'rescode', 'resc'), _chain('residue', 'code')),
# (('nucleic', 'is_nucleic'), _chain('residue', 'is_nucleic')),
(('water', 'waters', 'is_water'), _chain('residue', 'is_water')),
(('name',), _chain('name')),
(('index',), _chain('index')),
(('n_bonds',), _chain('n_bonds')),
(('residue', 'resSeq'), _chain('residue', 'resSeq')),
(('resname', 'resn'), _chain('residue', 'name')),
(('resid', 'resi'), _chain('residue', 'index')),
# Atom.residue.chain.<attribute>
(('chainid',), _chain('residue', 'chain', 'index')),
# Atom.element.<attribute>
(('type', 'element', 'symbol'), _chain('element', 'symbol')),
# (('radius',), _chain('element', 'radius')),
(('mass',), _chain('element', 'mass')),
)
def __init__(self, tokens):
# pyparsing constructs the instance while building the parse tree,
# and gives us the matched tokens. In this case, the tokens are a single matched selection keyword.
self._tokens = tokens
_check_n_tokens(tokens, 1, 'Unary selectors')
assert tokens[0] in self.keyword_aliases
def ast(self):
return self.keyword_aliases[self._tokens[0]]
class Literal(object):
def __init__(self, tokens):
self.token = tokens[0]
_check_n_tokens(tokens, 1, 'literal')
def ast(self):
return ast.parse(self.token, mode='eval').body
class UnaryInfixOperand(object):
n_terms = 1
assoc = 'RIGHT'
keyword_aliases = _kw(
(['not ', '!'], ast.Not()),
)
def __init__(self, tokens):
tokens = tokens[0]
_check_n_tokens(tokens, 2, 'Unary infix operators')
self.op_token, self.value_token = tokens
assert self.op_token in self.keyword_aliases
if isinstance(self.value_token, Literal):
raise ValueError("Cannot use literals as booleans.")
def ast(self):
return ast.UnaryOp(op=self.keyword_aliases[self.op_token],
operand=self.value_token.ast())
class RegexInfixOperand(object):
n_terms = 2
assoc = 'LEFT'
keyword_aliases = {'=~': '=~'}
def __init__(self, tokens):
self.tokens = tokens[0]
_check_n_tokens(self.tokens, 3, 'regex operator')
self.string, op, self.pattern = self.tokens
assert op == '=~'
if isinstance(self.string, Literal):
raise ValueError("Cannot do regex comparison on literal")
def ast(self):
pattern = self.tokens[2].ast()
string = self.tokens[0].ast()
return ast.Compare(
left=ast.Call(func=ast.Attribute(value=RE_MODULE, attr='match',
ctx=ast.Load()),
args=[pattern, string], keywords=[], starargs=None,
kwargs=None),
ops=[ast.IsNot()], comparators=[ast.Name(id='None', ctx=ast.Load())]
)
class BinaryInfixOperand(object):
n_terms = 2
assoc = 'LEFT'
keyword_aliases = _kw(
(['and', '&&'], ast.And()),
(['or', '||'], ast.Or()),
(['<', 'lt'], ast.Lt()),
(['==', 'eq'], ast.Eq()),
(['<=', 'le'], ast.LtE()),
(['!=', 'ne'], ast.NotEq()),
(['>=', 'ge'], ast.GtE()),
(['>', 'gt'], ast.Gt()),
)
def __init__(self, tokens):
tokens = tokens[0]
if len(tokens) % 2 == 1:
self.op_token = tokens[1]
self.comparators = tokens[::2]
else:
err = "Invalid number of infix expressions: {}"
err = err.format(len(tokens))
raise ParseException(err)
assert self.op_token in self.keyword_aliases
# Check for too many literals and not enough keywords
op = self.keyword_aliases[self.op_token]
if isinstance(op, ast.boolop):
if any(isinstance(c, Literal) for c in self.comparators):
raise ValueError("Cannot use literals as truth")
else:
if all(isinstance(c, Literal) for c in self.comparators):
raise ValueError("Cannot compare literals.")
def ast(self):
op = self.keyword_aliases[self.op_token]
if isinstance(op, ast.boolop):
# and and or use one type of AST node
value = ast.BoolOp(op=op, values=[e.ast() for e in self.comparators])
else:
# remaining operators use another
value = ast.Compare(left=self.comparators[0].ast(), ops=[op],
comparators=[e.ast() for e in self.comparators[1:]])
return value
class RangeCondition(object):
def __init__(self, tokens):
tokens = tokens[0]
_check_n_tokens(tokens, 4, 'range condition')
assert tokens[2] == 'to'
self._from, self._center, self._to = tokens[0], tokens[1], tokens[3]
if isinstance(self._from, Literal):
raise ValueError("Can't test literal in range.")
def ast(self):
return ast.Compare(left=self._center.ast(), ops=[ast.LtE(), ast.LtE()],
comparators=[self._from.ast(), self._to.ast()])
class parse_selection(object):
"""Parse an atom selection expression
Parameters
----------
selection_string : str
Selection string, a string in the MDTraj atom selection grammar.
Returns
-------
expr : callable (atom -> bool)
A callable object which accepts an MDTraj.core.topology.Atom object and
returns a boolean value giving whether or not that particular atom
satisfies the selection string.
source : str
Python source code corresponding to the expression ``expr``.
astnode : ast.AST
Python abstract syntax tree node containing the parsed expression
Examples
--------
>>> expr, source, astnode = parse_selection('protein and type CA')
>>> expr
<function __main__.<lambda>>
>>> source
'(atom.residue.is_protein and (atom.element.symbol == CA))'
>>> astnode
<_ast.BoolOp at 0x103969d50>
"""
def __init__(self):
self.is_initialized = False
self.expression = None
def _initialize(self):
def keywords(klass):
kws = sorted(klass.keyword_aliases.keys())
return MatchFirst([Keyword(kw) for kw in kws])
def infix(klass):
kws = sorted(klass.keyword_aliases.keys())
return [(kw, klass.n_terms, getattr(opAssoc, klass.assoc), klass)
for kw in kws]
# literals include words made of alphanumerics, numbers,
# or quoted strings but we exclude any of the logical
# operands (e.g. 'or') from being parsed literals
literal = (
~(keywords(BinaryInfixOperand) | keywords(UnaryInfixOperand)) +
(Word(NUMS) | quotedString | Word(alphas, alphanums))
)
literal.setParseAction(Literal)
# These are the other 'root' expressions,
# the selection keywords (resname, resid, mass, etc)
selection_keyword = keywords(SelectionKeyword)
selection_keyword.setParseAction(SelectionKeyword)
base_expression = MatchFirst([selection_keyword, literal])
# the grammar includes implicit equality comparisons
# between adjacent expressions:
# i.e. 'name CA' --> 'name == CA'
implicit_equality = Group(
base_expression + Optional(Keyword('=='), '==') + base_expression
)
implicit_equality.setParseAction(BinaryInfixOperand)
# range condition matches expressions such as 'mass 1 to 20'
range_condition = Group(
base_expression + literal + Keyword('to') + literal
)
range_condition.setParseAction(RangeCondition)
expression = range_condition | implicit_equality | base_expression
logical_expr = infixNotation(
expression,
infix(UnaryInfixOperand) +
infix(BinaryInfixOperand) +
infix(RegexInfixOperand)
)
self.expression = logical_expr
self.is_initialized = True
self.transformer = _RewriteNames()
def __call__(self, selection):
if not self.is_initialized:
self._initialize()
try:
parse_result = self.expression.parseString(selection, parseAll=True)
except ParseException as e:
msg = str(e)
lines = ["%s: %s" % (msg, selection),
" " * (12 + len("%s: " % msg) + e.loc) + "^^^"]
raise ValueError('\n'.join(lines))
# Change __ATOM__ in function bodies. It must bind to the arg
# name specified below (i.e. 'atom')
astnode = self.transformer.visit(deepcopy(parse_result[0].ast()))
# Special check for a single literal
if isinstance(astnode, ast.Num) or isinstance(astnode, ast.Str):
raise ValueError("Cannot use a single literal as a boolean.")
if PY2:
args = [ast.Name(id='atom', ctx=ast.Param())]
signature = ast.arguments(args=args, vararg=None, kwarg=None,
defaults=[])
else:
args = [ast.arg(arg='atom', annotation=None)]
signature = ast.arguments(args=args, vararg=None, kwarg=None,
kwonlyargs=[], defaults=[],
kw_defaults=[])
func = ast.Expression(body=ast.Lambda(signature, astnode))
source = codegen.to_source(astnode)
expr = eval(
compile(ast.fix_missing_locations(func), '<string>', mode='eval'),
SELECTION_GLOBALS)
return _ParsedSelection(expr, source, astnode)
# Create the callable, and use it to overshadow the class. This way there's
# basically just one global instance of the "function", even though it's
# a callable class.
parse_selection = parse_selection()
if __name__ == '__main__':
import sys
exp = parse_selection(sys.argv[1])
print(exp.source)
print(ast.dump(exp.astnode))
|
hainm/mdtraj
|
mdtraj/core/selection.py
|
Python
|
lgpl-2.1
| 14,061
|
[
"MDTraj",
"VisIt"
] |
949aaf2eebc115ca0de3492f752b2400275ec109cffafc6c54b9168995371c3d
|
# Author: Ahmed Hani
# Package: https://github.com/AhmedHani/Neural-Networks-for-ML/tree/master/Implementations
#
# The package is implemented according to the lectures of Toronto University's Neural Networks for Machine Learning ..
# taught by Geoffrey Hinton.
#
# Course link: https://www.coursera.org/learn/neural-networks
# Lectures Repository: https://github.com/AhmedHani/Neural-Networks-for-ML/tree/master/Lectures
#
# Lecture link: https://www.coursera.org/learn/neural-networks/home/week/2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from data_helpers import *
# Number of epochs that would be used for linear regression learning process (default: 1000)
tf.flags.DEFINE_integer("epochs", 200, "Training number of epochs")
tf.flags.DEFINE_float("learning_rate", 0.1, "Learning rate value for training phase")
# Parsing the arguments
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
# Model log data
print("Model: " + str("Online Linear Regression"))
print("Epochs: " + str(FLAGS.epochs))
print("Learning Rate" + str(FLAGS.learning_rate))
# Get training and testing data
train_features, train_labels, test_features, test_labels = get_data_for_stock_market_for_linear_regression()
# Initialize the linear regression weights by sampling from a Gaussian distribution
weights = np.random.normal(size=2)
# Training loop
for epoch in range(0, FLAGS.epochs):
print("Epoch: " + str(epoch))
for i in range(0, len(train_features)):
# Get the current input from the training data
current_input = train_features[i]
# Calculate the output by multiplying the input vector with the weights
current_output = np.dot(weights.T, current_input)[0]
# Get the desired output of the data
desired_output = train_labels[i]
# Update the weights by gradient descent on the squared error: w(t+1) = w(t) + alpha * (1/m) * (desired - predicted) * x
weights[0] += FLAGS.learning_rate * (1 / (len(train_labels) * 1.0)) * (desired_output - current_output) * current_input
weights[1] += FLAGS.learning_rate * (1 / (len(train_labels) * 1.0)) * (desired_output - current_output) * 1
# Testing loop
test_results = []
for i in range(0, len(test_features)):
current_features = test_features[i]
output = np.dot(weights.T, current_features)
test_results.append(output)
plt.plot(test_features, test_labels, marker='o', color='r', ls='')
plt.plot([weights[0], -weights[0]], [-weights[1], weights[1]], marker='', color='b', ls='--')
plt.show()
|
AhmedHani/Neural-Networks-for-ML
|
Implementations/online_linear_regression.py
|
Python
|
gpl-3.0
| 2,503
|
[
"Gaussian"
] |
51b75b0031af6c5205da005b7545b84e2e0c21c2ade211023b26acbeda9daf2f
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Based on implementation of TGM layer proposed by.
Temporal Gaussian Mixture Layer for Videos
AJ Piergiovanni and Michael S. Ryoo, ICML 2019
https://arxiv.org/abs/1803.06316
and extended iTGM layer in
Evolving Space-Time Neural Architectures for Videos
AJ Piergiovanni, A. Angelova, A. Toshev, and M. S. Ryoo, ICCV 2019
https://arxiv.org/abs/1811.10636
"""
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import slim as contrib_slim
from tensorflow.contrib.slim import initializers as contrib_slim_initializers
from tensorflow.contrib.slim import utils as contrib_slim_utils
add_arg_scope = contrib_framework.add_arg_scope
def n_element_tuple(ary, int_or_tuple):
"""Converts `int_or_tuple` to an n-ary tuple.
This function normalizes the input value by always returning a tuple. If a
single value is provided, the value is broadcast.
Args:
ary: The size of the expected tuple.
int_or_tuple: A list of `ary` ints, a single int or a tf.TensorShape.
Returns:
A tuple with `ary` values.
Raises:
ValueError: If `int_or_tuple` it not well formed.
"""
if not isinstance(ary, int) or ary < 1:
raise ValueError('`ary` must be a positive integer')
if isinstance(int_or_tuple, (list, tuple)):
if len(int_or_tuple) != ary:
raise ValueError(
'Must be a list with %d elements: %s' % (ary, int_or_tuple))
return tuple([int(x) for x in int_or_tuple])
if isinstance(int_or_tuple, int):
return tuple([int(int_or_tuple)] * ary)
if isinstance(int_or_tuple, tf.TensorShape):
if len(int_or_tuple) == ary:
return tuple([x for x in int_or_tuple])
raise ValueError('Must be an int, a list with %d elements or a TensorShape of'
' length %d' % (ary, ary))
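# Illustrative examples of the normalization performed above:
#   n_element_tuple(3, 2)         -> (2, 2, 2)
#   n_element_tuple(3, (1, 3, 3)) -> (1, 3, 3)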
def get_filters(length, num, scope, init=1, dtype=tf.float32):
"""Gets the filters based on gaussian or cauchy distribution.
Gaussian and Cauchy distributions are very similar, we find that cauchy can
converge more quickly.
Args:
length: The temporal length of the filter
num: number of distributions
scope: variable scope
init: std variance
dtype: layer type
Returns:
the filters
"""
with tf.variable_scope(scope):
# create slim variables for the center and std of distribution
center = contrib_slim.model_variable(
'tgm-center',
shape=[num],
initializer=tf.initializers.random_normal(0, 0.5))
gamma = contrib_slim.model_variable(
'tgm-gamma',
shape=[num],
initializer=tf.initializers.random_normal(0, init))
# create gaussians (eqs from paper)
center = tf.cast(tf.tanh(center), dtype)
gamma = tf.cast(tf.tanh(gamma), dtype)
center = tf.expand_dims((length - 1) * (center + 1) / 2, -1)
gamma = tf.expand_dims(
tf.expand_dims(tf.exp(1.5 - 2 * tf.abs(gamma)), -1), -1)
a = tf.expand_dims(tf.cast(tf.zeros(num), dtype), -1)
a += center
b = tf.cast(tf.range(length), dtype)
f = b - tf.expand_dims(a, -1)
f = f / gamma
f = np.pi * gamma * tf.square(f) + 1
f = 1.0 / f
f = f / tf.expand_dims(tf.reduce_sum(f, axis=2) + 1e-6, -1)
return tf.squeeze(f)
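# Illustrative sketch (shape inferred from the code above): get_filters(length=9, num=4, scope='tgm-f')
# yields a [4, 9] tensor whose rows are normalized Cauchy-shaped temporal filters.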
@add_arg_scope
def tgm_3d_conv(
inputs,
num_outputs,
kernel_size,
num,
stride=1,
padding='SAME',
activation_fn=tf.nn.relu,
normalizer_fn=None,
normalizer_params=None,
trainable=True,
scope=None,
weights_regularizer=None,
outputs_collection=None,
weights_initializer=contrib_slim_initializers.xavier_initializer(),
dtype=tf.float32):
"""iTGM inflated 3D convoltuion.
Args:
inputs: input tensor
num_outputs: number of output channels
kernel_size: size of kernel (T, H, W)
num: number of gaussians
stride: stride of layer int or (T,H,W)
padding: SAME or VALID
activation_fn: activation function to apply
normalizer_fn: normalization fn (e.g., batch norm)
normalizer_params: params of normalization fn
trainable: train parameters
scope: variable scope
weights_regularizer: weight regularizer
outputs_collection: graph collection to store outputs
weights_initializer: weight initialization
dtype: dtype of layer
Returns:
output tensor after iTGM conv.
"""
with tf.variable_scope(scope, 'Conv3d', [inputs]) as sc:
num_filters_in = contrib_slim_utils.last_dimension(
inputs.get_shape(), min_rank=5)
length, kernel_h, kernel_w = n_element_tuple(3, kernel_size)
stride_d, stride_h, stride_w = n_element_tuple(3, stride)
spatial_weight_shape = [1, kernel_h, kernel_w, num_filters_in, num_outputs]
weight_collection = contrib_slim_utils.get_variable_collections(
None, 'weights')
spatial_kernel = contrib_slim.model_variable(
'weights',
shape=spatial_weight_shape,
dtype=inputs.dtype.base_dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
collections=weight_collection,
trainable=trainable)
if length > 1:
# for now, set these to be the same
# (i.e., number of filters after 2D conv)
# though we could be more creative here and have
      # more/less filters intermediately.
c_in = num_filters_in
c_out = num_outputs
mixing_weights = contrib_slim.model_variable(
'soft-attn',
shape=[c_in * c_out, num],
initializer=tf.initializers.truncated_normal())
# N x L
with tf.variable_scope('tgm-f'):
k = get_filters(length, num, scope='tgm-f', init=0.1, dtype=dtype)
# apply mixing weights to gaussians
mw = tf.nn.softmax(mixing_weights, dim=1)
# now L x num_outputs
k = tf.transpose(tf.matmul(mw, k))
      # make this L x 1 x 1 x C_in x C_out
k = tf.cast(tf.reshape(k, (length, 1, 1, c_in, c_out)), dtype)
      # spatial kernel is 1 x kH x kW x C_in x C_out; broadcast-multiply
spatial_kernel = tf.cast(spatial_kernel, dtype)
k = spatial_kernel * k
outputs = tf.nn.conv3d(
inputs,
k,
strides=[1, stride_d, stride_h, stride_w, 1],
padding=padding)
else:
outputs = tf.nn.conv3d(
inputs,
spatial_kernel,
strides=[1, stride_d, stride_h, stride_w, 1],
padding=padding)
if normalizer_fn:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return contrib_slim_utils.collect_named_outputs(outputs_collection,
sc.original_name_scope,
outputs)
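if __name__ == '__main__':
  # Hedged usage sketch (assumption: TF1.x graph mode, as the contrib imports
  # above already require); all shapes and names here are illustrative.
  videos = tf.placeholder(tf.float32, [2, 16, 32, 32, 3])  # (N, T, H, W, C)
  features = tgm_3d_conv(videos, num_outputs=8, kernel_size=(5, 3, 3), num=4,
                         scope='demo_itgm')
  # With SAME padding and unit stride the spatio-temporal size is preserved.
  print(features.shape)  # (2, 16, 32, 32, 8)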
|
google-research/google-research
|
evanet/tgm_layer.py
|
Python
|
apache-2.0
| 7,365
|
[
"Gaussian"
] |
e7a296051d8f9b427fa1577f038f4add92ce50d644e8561e94574ad6ba016e18
|
'''
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
'''
from __future__ import print_function, division
import numpy as np
import unittest
from six.moves import range
from smt.problems import CantileverBeam, Sphere, ReducedProblem, RobotArm, Rosenbrock, Branin
from smt.problems import TensorProduct, TorsionVibration, WaterFlow, WeldedBeam, WingWeight
from smt.problems import NdimCantileverBeam, NdimRobotArm, NdimRosenbrock, NdimStepFunction
from smt.utils.sm_test_case import SMTestCase
class Test(SMTestCase):
def run_test(self, problem):
problem.options['return_complex'] = True
# Test xlimits
ndim = problem.options['ndim']
xlimits = problem.xlimits
self.assertEqual(xlimits.shape, (ndim, 2))
# Test evaluation of multiple points at once
x = np.zeros((10, ndim))
for ind in range(10):
x[ind, :] = 0.5 * (xlimits[:, 0] + xlimits[:, 1])
y = problem(x)
ny = y.shape[1]
self.assertEqual(x.shape[0], y.shape[0])
# Test derivatives
x = np.zeros((4, ndim), complex)
x[0, :] = 0.2 * xlimits[:, 0] + 0.8 * xlimits[:, 1]
x[1, :] = 0.4 * xlimits[:, 0] + 0.6 * xlimits[:, 1]
x[2, :] = 0.6 * xlimits[:, 0] + 0.4 * xlimits[:, 1]
x[3, :] = 0.8 * xlimits[:, 0] + 0.2 * xlimits[:, 1]
y0 = problem(x)
dydx_FD = np.zeros(4)
dydx_CS = np.zeros(4)
dydx_AN = np.zeros(4)
print()
h = 1e-5
ch = 1e-16
for iy in range(ny):
for idim in range(ndim):
x[:, idim] += h
y_FD = problem(x)
x[:, idim] -= h
x[:, idim] += complex(0, ch)
y_CS = problem(x)
x[:, idim] -= complex(0, ch)
dydx_FD[:] = (y_FD[:, iy] - y0[:, iy]) / h
dydx_CS[:] = np.imag(y_CS[:, iy]) / ch
dydx_AN[:] = problem(x, idim)[:, iy]
abs_rms_error_FD = np.linalg.norm(dydx_FD - dydx_AN)
rel_rms_error_FD = np.linalg.norm(dydx_FD - dydx_AN) / np.linalg.norm(dydx_FD)
abs_rms_error_CS = np.linalg.norm(dydx_CS - dydx_AN)
rel_rms_error_CS = np.linalg.norm(dydx_CS - dydx_AN) / np.linalg.norm(dydx_CS)
msg = '{:16s} iy {:2} dim {:2} of {:2} ' \
+ 'abs_FD {:16.9e} rel_FD {:16.9e} abs_CS {:16.9e} rel_CS {:16.9e}'
print(msg.format(
problem.options['name'], iy, idim, ndim,
abs_rms_error_FD, rel_rms_error_FD,
abs_rms_error_CS, rel_rms_error_CS,
))
self.assertTrue(rel_rms_error_FD < 1e-3 or abs_rms_error_FD < 1e-5)
def test_sphere(self):
self.run_test(Sphere(ndim=1))
self.run_test(Sphere(ndim=3))
def test_exp(self):
self.run_test(TensorProduct(name='TP-exp', ndim=1, func='exp'))
self.run_test(TensorProduct(name='TP-exp', ndim=3, func='exp'))
def test_tanh(self):
self.run_test(TensorProduct(name='TP-tanh', ndim=1, func='tanh'))
self.run_test(TensorProduct(name='TP-tanh', ndim=3, func='tanh'))
def test_cos(self):
self.run_test(TensorProduct(name='TP-cos', ndim=1, func='cos'))
self.run_test(TensorProduct(name='TP-cos', ndim=3, func='cos'))
def test_gaussian(self):
self.run_test(TensorProduct(name='TP-gaussian', ndim=1, func='gaussian'))
self.run_test(TensorProduct(name='TP-gaussian', ndim=3, func='gaussian'))
def test_branin(self):
self.run_test(Branin(ndim=2))
def test_rosenbrock(self):
self.run_test(Rosenbrock(ndim=2))
self.run_test(Rosenbrock(ndim=3))
def test_cantilever_beam(self):
self.run_test(CantileverBeam(ndim=3))
self.run_test(CantileverBeam(ndim=6))
self.run_test(CantileverBeam(ndim=9))
self.run_test(CantileverBeam(ndim=12))
def test_robot_arm(self):
self.run_test(RobotArm(ndim=2))
self.run_test(RobotArm(ndim=4))
self.run_test(RobotArm(ndim=6))
def test_torsion_vibration(self):
self.run_test(TorsionVibration(ndim=15))
self.run_test(ReducedProblem(TorsionVibration(ndim=15), dims=[5, 10, 12, 13]))
def test_water_flow(self):
self.run_test(WaterFlow(ndim=8))
self.run_test(ReducedProblem(WaterFlow(ndim=8), dims=[0, 1, 6]))
def test_welded_beam(self):
self.run_test(WeldedBeam(ndim=3))
def test_wing_weight(self):
self.run_test(WingWeight(ndim=10))
self.run_test(ReducedProblem(WingWeight(ndim=10), dims=[0, 2, 3, 5]))
def test_ndim_cantilever_beam(self):
self.run_test(NdimCantileverBeam(ndim=1))
self.run_test(NdimCantileverBeam(ndim=2))
def test_ndim_robot_arm(self):
self.run_test(NdimRobotArm(ndim=1))
self.run_test(NdimRobotArm(ndim=2))
def test_ndim_rosenbrock(self):
self.run_test(NdimRosenbrock(ndim=1))
self.run_test(NdimRosenbrock(ndim=2))
def test_ndim_step_function(self):
self.run_test(NdimStepFunction(ndim=1))
self.run_test(NdimStepFunction(ndim=2))
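# Hedged illustration (added sketch, not part of the SMT suite): the
# complex-step derivative used in run_test above, shown on a scalar function
# where the analytic answer is known.
def _complex_step_demo(x=0.3, h=1e-16):
    """Returns (analytic, complex-step) derivatives of sin at x."""
    d_cs = np.imag(np.sin(x + 1j * h)) / h  # complex-step estimate
    d_an = np.cos(x)                        # analytic derivative
    return d_an, d_cs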
if __name__ == '__main__':
unittest.main()
|
hwangjt/SMT
|
smt/tests/test_problems.py
|
Python
|
bsd-3-clause
| 5,293
|
[
"Gaussian"
] |
948793fc039d35cf9d3ead1a442a6479b6dfc7afbc44d83a583e1d114cb5bbf0
|
#!/usr/bin/env python
# Copyright 2016 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import argparse, codecs, itertools, json, multiprocessing, os, optparse, re, subprocess, sys, tempfile, traceback
from collections import OrderedDict
import regent
_version = sys.version_info.major
if _version == 2: # Python 2.x:
def glob(path):
def visit(result, dirname, filenames):
for filename in filenames:
result.append(os.path.join(dirname, filename))
result = []
os.path.walk(path, visit, result)
return result
elif _version == 3: # Python 3.x:
def glob(path):
return [os.path.join(dirname, filename)
for dirname, _, filenames in os.walk(path)
for filename in filenames]
else:
raise Exception('Incompatible Python version')
class TestFailure(Exception):
def __init__(self, command, output):
Exception.__init__(self, command, output)
self.command = command
self.output = output
def run(filename, debug, verbose, flags, env):
args = ((['-mg'] if debug else []) +
[os.path.basename(filename)] + flags +
([] if verbose else ['-level', '5']))
if verbose: print('Running', ' '.join(args))
proc = regent.regent(
args,
stdout=None if verbose else subprocess.PIPE,
stderr=None if verbose else subprocess.STDOUT,
env=env,
cwd=os.path.dirname(os.path.abspath(filename)))
output, _ = proc.communicate()
retcode = proc.wait()
if retcode != 0:
raise TestFailure(' '.join(args), output.decode('utf-8') if output is not None else None)
def run_spy(logfile, verbose):
cmd = ['pypy', os.path.join(regent.root_dir(), 'tools', 'legion_spy.py'),
'--logical',
'--physical',
'--cycle',
# '--sanity', # FIXME: This breaks on several test cases.
'--leaks',
# '--geometry', # FIXME: This is *very* slow.
'--assert-error',
'--assert-warning',
logfile]
if verbose: print('Running', ' '.join(cmd))
proc = subprocess.Popen(
cmd,
stdout=None if verbose else subprocess.PIPE,
stderr=None if verbose else subprocess.STDOUT)
output, _ = proc.communicate()
retcode = proc.wait()
if retcode != 0:
raise TestFailure(' '.join(cmd), output.decode('utf-8') if output is not None else None)
_re_label = r'^[ \t\r]*--[ \t]+{label}:[ \t\r]*$\n((^[ \t\r]*--.*$\n)+)'
def find_labeled_text(filename, label):
re_label = re.compile(_re_label.format(label=label), re.MULTILINE)
with codecs.open(filename, 'r', encoding='utf-8') as f:
program_text = f.read()
match = re.search(re_label, program_text)
if match is None:
return None
match_lines = match.group(1).strip().split('\n')
match_text = '\n'.join([line.strip()[2:].strip() for line in match_lines])
return match_text
def find_labeled_flags(filename, prefix):
flags = [[]]
flags_text = find_labeled_text(filename, prefix)
if flags_text is not None:
flags = json.loads(flags_text)
assert isinstance(flags, list), "%s declaration must be a json-formatted nested list" % prefix
for flag in flags:
assert isinstance(flag, list), "%s declaration must be a json-formatted nested list" % prefix
return flags
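def _demo_find_labeled_flags():
    # Hedged sketch (added, not part of the original test driver): shows the
    # labeled-comment convention the helpers above expect in a Regent test
    # file. The flag values are illustrative only.
    import textwrap
    fd, path = tempfile.mkstemp(suffix='.rg')
    with os.fdopen(fd, 'w') as f:
        f.write(textwrap.dedent('''\
            -- runs-with:
            -- [["-ll:cpu", "2"], []]
            task main() end
            '''))
    try:
        # Parses the json-formatted nested list: [['-ll:cpu', '2'], []]
        return find_labeled_flags(path, 'runs-with')
    finally:
        os.remove(path)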
def test_compile_fail(filename, debug, verbose, flags, env):
expected_failure = find_labeled_text(filename, 'fails-with')
if expected_failure is None:
raise Exception('No fails-with declaration in compile_fail test')
runs_with = find_labeled_flags(filename, 'runs-with')
try:
for params in runs_with:
run(filename, debug, False, flags + params, env)
except TestFailure as e:
failure = e.output
lines = set(line.strip() for line in failure.strip().split('\n')
if len(line.strip()) > 0)
expected_lines = expected_failure.split('\n')
for expected_line in expected_lines:
if expected_line not in lines:
raise Exception('Expected failure:\n%s\n\nInstead got:\n%s' % (expected_failure, failure))
else:
raise Exception('Expected failure, but test passed')
def test_run_pass(filename, debug, verbose, flags, env):
runs_with = find_labeled_flags(filename, 'runs-with')
try:
for params in runs_with:
run(filename, debug, verbose, flags + params, env)
except TestFailure as e:
raise Exception('Command failed:\n%s\n\nOutput:\n%s' % (e.command, e.output))
def test_spy(filename, debug, verbose, flags, env):
spy_fd, spy_log = tempfile.mkstemp()
os.close(spy_fd)
spy_flags = ['-level', 'legion_spy=2', '-logfile', spy_log]
runs_with = find_labeled_flags(filename, 'runs-with')
try:
for params in runs_with:
run(filename, debug, verbose, flags + params + spy_flags, env)
run_spy(spy_log, verbose)
except TestFailure as e:
raise Exception('Command failed:\n%s\n\nOutput:\n%s' % (e.command, e.output))
finally:
os.remove(spy_log)
red = "\033[1;31m"
green = "\033[1;32m"
clear = "\033[0m"
PASS = 'pass'
FAIL = 'fail'
INTERRUPT = 'interrupt'
def test_runner(test_name, test_closure, debug, verbose, filename):
test_fn, test_args = test_closure
saved_temps = []
try:
test_fn(filename, debug, verbose, *test_args)
except KeyboardInterrupt:
return test_name, filename, [], INTERRUPT, None
# except driver.CompilerException as e:
# if verbose:
# return test_name, filename, e.saved_temps, FAIL, ''.join(traceback.format_exception(*sys.exc_info()))
# return test_name, filename, e.saved_temps, FAIL, None
except Exception as e:
if verbose:
return test_name, filename, [], FAIL, ''.join(traceback.format_exception(*sys.exc_info()))
return test_name, filename, [], FAIL, ''.join(traceback.format_exception_only(*sys.exc_info()[:2]))
else:
return test_name, filename, [], PASS, None
class Counter:
def __init__(self):
self.passed = 0
self.failed = 0
def get_test_specs(include_spy, extra_flags):
base = [
# FIXME: Move this flag into a per-test parameter so we don't use it everywhere.
# Don't include backtraces on those expected to fail
('compile_fail', (test_compile_fail, (['-fbounds-checks', '1'] + extra_flags, {})),
(os.path.join('tests', 'regent', 'compile_fail'),
os.path.join('tests', 'bishop', 'compile_fail'),
)),
('pretty', (test_run_pass, (['-fpretty', '1'] + extra_flags, {})),
(os.path.join('tests', 'regent', 'run_pass'),
os.path.join('tests', 'bishop', 'run_pass'),
os.path.join('examples'),
os.path.join('..', 'tutorial'),
)),
('run_pass', (test_run_pass, ([] + extra_flags, {'REALM_BACKTRACE': '1'})),
(os.path.join('tests', 'regent', 'run_pass'),
os.path.join('tests', 'bishop', 'run_pass'),
os.path.join('examples'),
os.path.join('..', 'tutorial'),
os.path.join('tests', 'runtime', 'bugs'),
)),
]
spy = [
('spy', (test_spy, ([] + extra_flags, {})),
(os.path.join('tests', 'regent', 'run_pass'),
os.path.join('tests', 'bishop', 'run_pass'),
os.path.join('examples'),
os.path.join('..', 'tutorial'),
)),
]
if include_spy:
return spy
else:
return base
def run_all_tests(thread_count, debug, spy, extra_flags, verbose, quiet,
only_patterns, skip_patterns):
thread_pool = multiprocessing.Pool(thread_count)
results = []
# Run tests asynchronously.
tests = get_test_specs(spy, extra_flags)
for test_name, test_fn, test_dirs in tests:
test_paths = []
for test_dir in test_dirs:
if os.path.isfile(test_dir):
test_paths.append(test_dir)
else:
test_paths.extend(
path for path in sorted(glob(test_dir))
if os.path.isfile(path) and os.path.splitext(path)[1] in ('.rg', '.md'))
for test_path in test_paths:
if only_patterns and not(any(re.search(p,test_path) for p in only_patterns)):
continue
if skip_patterns and any(re.search(p,test_path) for p in skip_patterns):
continue
results.append(thread_pool.apply_async(test_runner, (test_name, test_fn, debug, verbose, test_path)))
thread_pool.close()
test_counters = OrderedDict()
for test_name, test_fn, test_dirs in tests:
test_counter = Counter()
test_counters[test_name] = test_counter
all_saved_temps = []
try:
for result in results:
test_name, filename, saved_temps, outcome, output = result.get()
if len(saved_temps) > 0:
all_saved_temps.append((test_name, filename, saved_temps))
if outcome == PASS:
if quiet:
print('.', end='')
sys.stdout.flush()
else:
print('[%sPASS%s] (%s) %s' % (green, clear, test_name, filename))
if output is not None: print(output)
test_counters[test_name].passed += 1
elif outcome == FAIL:
if quiet: print()
print('[%sFAIL%s] (%s) %s' % (red, clear, test_name, filename))
if output is not None: print(output)
test_counters[test_name].failed += 1
else:
raise Exception('Unexpected test outcome %s' % outcome)
except KeyboardInterrupt:
raise
thread_pool.join()
global_counter = Counter()
for test_counter in test_counters.values():
global_counter.passed += test_counter.passed
global_counter.failed += test_counter.failed
global_total = global_counter.passed + global_counter.failed
if len(all_saved_temps) > 0:
print()
print('The following temporary files have been saved:')
print()
for test_name, filename, saved_temps in all_saved_temps:
print('[%sFAIL%s] (%s) %s' % (red, clear, test_name, filename))
for saved_temp in saved_temps:
print(' %s' % saved_temp)
if global_total > 0:
print()
print('Summary of test results by category:')
for test_name, test_counter in test_counters.items():
test_total = test_counter.passed + test_counter.failed
if test_total > 0:
print('%24s: Passed %3d of %3d tests (%5.1f%%)' % (
'%s' % test_name, test_counter.passed, test_total,
float(100*test_counter.passed)/test_total))
print(' ' + '~'*54)
print('%24s: Passed %3d of %3d tests (%5.1f%%)' % (
'total', global_counter.passed, global_total,
(float(100*global_counter.passed)/global_total)))
if not verbose and global_counter.failed > 0:
print()
print('For detailed information on test failures, run:')
print(' ./test.py -j1 -v')
sys.exit(1)
def test_driver(argv):
parser = argparse.ArgumentParser(description='Regent compiler test suite')
parser.add_argument('-j',
nargs='?',
type=int,
                        help='number of threads used to compile',
dest='thread_count')
parser.add_argument('--debug', '-g',
action='store_true',
help='enable debug mode',
dest='debug')
parser.add_argument('--spy', '-s',
action='store_true',
help='enable Legion Spy mode',
dest='spy')
parser.add_argument('--extra',
action='append',
required=False,
default=[],
help='extra flags to use for each test',
dest='extra_flags')
parser.add_argument('-v',
action='store_true',
help='display verbose output',
dest='verbose')
parser.add_argument('-q',
action='store_true',
help='suppress passing test results',
dest='quiet')
parser.add_argument('--only',
action='append',
default=[],
help='only run tests matching pattern',
dest='only_patterns')
parser.add_argument('--skip',
action='append',
default=[],
help='skip tests matching pattern',
dest='skip_patterns')
args = parser.parse_args(argv[1:])
run_all_tests(
args.thread_count,
args.debug,
args.spy,
args.extra_flags,
args.verbose,
args.quiet,
args.only_patterns,
args.skip_patterns)
if __name__ == '__main__':
test_driver(sys.argv)
|
chuckatkins/legion
|
language/test.py
|
Python
|
apache-2.0
| 13,891
|
[
"VisIt"
] |
a3517a5545598f94256e149f77a5a8115c5f3a92494fb951b9269072aba07ae4
|
#!/usr/bin/env python
#
# draw_gd_all_core.py
#
# (c) The James Hutton Institute 2013
# Author: Leighton Pritchard
#
# Contact:
# leighton.pritchard@hutton.ac.uk
#
# Leighton Pritchard,
# Information and Computing Sciences,
# James Hutton Institute,
# Errol Road,
# Invergowrie,
# Dundee,
# DD6 9LH,
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2010-2014 The James Hutton Institute
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Script to draw a simple pairwise GenomeDiagram figure with connectors,
based on i-ADHoRe data
"""
# builtins
import os
from itertools import chain
# Biopython
from Bio.Graphics import GenomeDiagram as gd
from Bio import SeqIO
from Bio.SeqFeature import SeqFeature, FeatureLocation
# Reportlab
from reportlab.lib.units import cm
from reportlab.lib import colors
# local
from ColorSpiral import get_color_dict, get_colors
from iadhore import IadhoreData
# Genome data: (id, GenBank file location)
# We list organisms in the order we want to present them, which is from
# 'outside-in' on the clade tree (GenomeDiagram orders from the bottom-up)
orgs = ('DPA2511', 'DPA703',
'DXXRW240', 'DXXDW0440',
'DZE3531', 'DZERW192', 'DZE586', 'DZE3532', 'DZEMK19', 'DZE2538',
'DCH1591', 'DCH3533', 'DCH516', 'DCH402',
'DXY569',
'DSOIPO2222', 'DSOGBBC2040', 'DSOMK10', 'DSOMK16',
'DXX3274', 'DXXMK7',
'DDA898', 'DDA2976', 'DDA3937', 'DDA3537',
'DDI3534', 'DDIIPO980', 'DDI453', 'DDIGBBC2039')
# List of organisms that need to be reverse complemented
reverse = ('DCH1591', 'DCH3533', 'DCH516', 'DCH402', 'DPA2511', 'DPA703')
gbkdir = '/mnt/synology_Dickeya_sequencing/Annotations/' +\
'20120813_local_genbank_annotation/'
refdir = '/mnt/synology_Dickeya_sequencing/NCBI_GenBank_reference/'
genome_data = {'DPA2511': (gbkdir, 'NCPPB_2511_draft.gbk'),
'DPA703': (refdir, 'NC_012880.gbk'),
'DXXRW240': (gbkdir, 'CSL_RW240_draft.gbk'),
'DXXDW0440': (gbkdir, 'DW_0440_draft.gbk'),
'DZE3531': (gbkdir, 'NCPPB_3531_draft.gbk'),
'DZERW192': (gbkdir, 'CSL_RW192_draft.gbk'),
'DZE586': (refdir, 'NC_013592.gbk'),
'DZE3532': (gbkdir, 'NCPPB_3532_draft.gbk'),
'DZEMK19': (gbkdir, 'MK19_draft.gbk'),
'DZE2538': (gbkdir, 'NCPPB_2538_draft.gbk'),
'DCH1591': (refdir, 'NC_012912.gbk'),
'DCH3533': (gbkdir, 'NCPPB_3533_draft.gbk'),
'DCH516': (gbkdir, 'NCPPB_516_draft.gbk'),
'DCH402': (gbkdir, 'NCPPB_402_draft.gbk'),
'DXY569': (gbkdir, 'NCPPB_569_draft.gbk'),
'DSOIPO2222': (gbkdir, 'IPO_2222_draft.gbk'),
'DSOGBBC2040': (gbkdir, 'GBBC2040_draft.gbk'),
'DSOMK10': (gbkdir, 'MK10_draft.gbk'),
'DSOMK16': (gbkdir, 'MK16_draft.gbk'),
'DXX3274': (gbkdir, 'NCPPB_3274_draft.gbk'),
'DXXMK7': (gbkdir, 'MK7_draft.gbk'),
'DDA898': (gbkdir, 'NCPPB_898_draft.gbk'),
'DDA2976': (gbkdir, 'NCPPB_2976_draft.gbk'),
'DDA3937': (refdir, 'NC_014500.gbk'),
'DDA3537': (gbkdir, 'NCPPB_3537_draft.gbk'),
'DDI3534': (gbkdir, 'NCPPB_3534_draft.gbk'),
'DDIIPO980': (gbkdir, 'IPO_980_draft.gbk'),
'DDIGBBC2039': (gbkdir, 'GBBC2039_draft.gbk'),
'DDI453': (gbkdir, 'NCPPB_453_draft.gbk')
}
# Create GenomeDiagram image
gdd = gd.Diagram("Dickeya core collinear regions", x=0.01, y=0.005)
tracks = {}
featuresets = {}
regionsets = {}
records = {}
track_level = 1
org_colours = get_color_dict(orgs, a=4)
for l, org in enumerate(orgs):
# Load data
filename = os.path.join(genome_data[org][0], genome_data[org][1])
print "Loading %s" % filename
records[org] = SeqIO.read(filename, 'genbank')
if org in reverse:
print "Reverse-complementing %s" % org
records[org] = records[org].reverse_complement(annotations=True,
id=True, name=True,
description=True)
# Set up tracks
tracks[org] = gdd.new_track(2*l, name=org, greytrack=True,
greytrack_labels=10,
height=0.5,
start=0, end=len(records[org]))
regionsets[org] = tracks[org].new_set(name="collinear regions")
# Convenience function for getting feature locations
def get_ft_loc(org, ft):
for f in records[org].features:
if f.type == 'CDS' and f.qualifiers['locus_tag'][0] == str(ft):
return f.location.nofuzzy_start, f.location.nofuzzy_end
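# Hedged note (the locus tag below is illustrative, not from the real
# annotation):
#   get_ft_loc('DDA3937', 'Dda3937_00001')  ->  (start, end) of that CDS,
# or None when no CDS with that locus_tag exists in the record.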
# Get data for crosslinks from i-ADHoRe results
data = IadhoreData(os.path.join('dickeya_all_output_params2',
'multiplicons.txt'),
os.path.join('dickeya_all_output_params2',
'segments.txt'))
full_leaves = data.get_multiplicons_at_level(29)
region_colours = list(get_colors(len(full_leaves), a=5, b=0.33,
jitter=0.25))
for midx, m in enumerate(full_leaves):
segments = data.get_multiplicon_segments(m)
# Loop over the pairs of consecutive genomes in the table, and add
# crosslinks for multiplicons
print "Cross-linking multiplicon %d" % m
for idx in range(1, len(orgs)):
org1, org2 = orgs[idx-1], orgs[idx]
org1loc = list(chain.from_iterable([get_ft_loc(org1, f) for f in
segments[org1]]))
org2loc = list(chain.from_iterable([get_ft_loc(org2, f) for f in
segments[org2]]))
org1ft = (tracks[org1], min(org1loc), max(org1loc))
org2ft = (tracks[org2], min(org2loc), max(org2loc))
# Need to create a colour rather than pass a tuple - unlike features.
# Raise this bug in Biopython!
c = colors.Color(region_colours[midx][0], region_colours[midx][1],
region_colours[midx][2])
crosslink = gd.CrossLink(org1ft, org2ft, c)
gdd.cross_track_links.append(crosslink)
# Add feature to track (with transparency)
# We add org1 here, then the final org when the looping's done
f = SeqFeature(FeatureLocation(min(org1loc), max(org1loc)),
strand=None)
regionsets[org1].add_feature(f, label=False, color=
colors.Color(region_colours[midx][0],
region_colours[midx][1],
region_colours[midx][2],
0.5))
# Finish off the cross-link features
f = SeqFeature(FeatureLocation(min(org2loc), max(org2loc)),
strand=None)
regionsets[org2].add_feature(f, label=False,
color=colors.Color(region_colours[midx][0],
region_colours[midx][1],
region_colours[midx][2],
0.5))
# Add annotated organism features
for l, org in enumerate(orgs):
print "Adding features for %s" % org
featuresets[org] = tracks[org].new_set(name="CDS features")
label_state = True
for feature in [f for f in records[org].features if f.type == 'CDS']:
label_state = not label_state
featuresets[org].add_feature(feature, color=org_colours[org],
label=False, sigil="ARROW",
label_size=3)
# Render image
print "Rendering"
gdd.draw(format='linear', orientation='landscape',
pagesize=(500*cm, (len(orgs)*91.4/29)*cm),
fragments=1)
gdd.write('dickeya_core_collinear.pdf', 'PDF')
gdd.write('dickeya_core_collinear.png', 'PNG')
|
widdowquinn/scripts
|
bioinformatics/draw_gd_all_core.py
|
Python
|
mit
| 9,049
|
[
"Biopython"
] |
2870fad33b33a6d6cab1108f8ef6bda15e960b576ea889c485c541eb5ee2f77a
|
import numpy as np #from numpy import mgrid, empty, sin, pi
import vtk
import pandas as pd
from scipy import special
def simple_grid():
# Generate some points.
x, y, z = np.mgrid[1:6:11j, 0:4:13j, 0:3:6j]
base = x[..., 0] + y[..., 0]
# Some interesting z values.
for i in range(z.shape[2]):
z[..., i] = base * 0.25 * i
return x, y, z
def uniform_grid(bounds, dims):
# Generate some points.
x, y, z = np.mgrid[bounds[0]:bounds[1]:(dims[0] * 1j),
bounds[2]:bounds[3]:(dims[1] * 1j),
bounds[4]:bounds[5]:(dims[2] * 1j)
]
#base = x[..., 0] + y[..., 0]
# Some interesting z values.
# for i in range(z.shape[2]):
# z[..., i] = base * 0.25 * i
return x, y, z
def reshape_pts(x,y,z):
# The actual points.
pts = np.empty(z.shape + (3,), dtype=float)
pts[..., 0] = x
pts[..., 1] = y
pts[..., 2] = z
# We reorder the points, scalars and vectors so this is as per VTK's
# requirement of x first, y next and z last.
pts = pts.transpose(2, 1, 0, 3).copy()
pts.shape = pts.size // 3, 3
return pts
def gen_data(x,y,z):
# Simple scalars.
scalars = x * x + y * y + z * z
# Some vectors
vectors = np.empty(z.shape + (3,), dtype=float)
vectors[..., 0] = (4 - y * 2)
vectors[..., 1] = (x * 3 - 12)
vectors[..., 2] = np.sin(z * np.pi)
scalars = scalars.T.copy()
vectors = vectors.transpose(2, 1, 0, 3).copy()
vectors.shape = vectors.size // 3, 3
return scalars, vectors
def test_uniform_grid(bounds, dims):
x,y,z = uniform_grid(bounds, dims)
pts = reshape_pts(x,y,z)
#print(pts.shape)
scalars, vectors = gen_data(x,y,z)
#print(pts.shape, scalars.shape, vectors.shape)
vtk_pts = vtk.vtkPoints()
for pt in pts:
#print(pt)
vtk_pts.InsertNextPoint(pt)
# Uncomment the following if you want to add some noise to the data.
#pts += np.random.randn(dims[0]*dims[1]*dims[2], 3)*0.04
sgrid = vtk.vtkStructuredGrid()
sgrid.SetDimensions(x.shape)
sgrid.SetPoints(vtk_pts)
scalar_arr = vtk.vtkDoubleArray()
scalar_arr.SetNumberOfComponents(1)
scalar_arr.SetName("distance")
vec_arr = vtk.vtkDoubleArray()
vec_arr.SetNumberOfComponents(3)
vec_arr.SetName("vector")
for idx, s_ in enumerate(scalars.ravel()):
scalar_arr.InsertNextTuple([s_])
vec_arr.InsertNextTuple(vectors[idx])
#print(s.shape)
sgrid.GetPointData().AddArray(scalar_arr)
sgrid.GetPointData().AddArray(vec_arr)
centers = vtk.vtkCellCenters()
centers.SetInputData(sgrid)
centers.VertexCellsOn()
centers.Update()
return sgrid, centers
# sgrid.point_data.scalars.name = 'scalars'
# Uncomment the next two lines to save the dataset to a VTK XML file.
# writer = vtk.vtkXMLStructuredGridWriter()
# writer.SetFileName("test_uniform.vts")
# writer.SetInputData(sgrid)
# writer.Write()
# writer2 = vtk.vtkXMLPolyDataWriter()
# writer2.SetFileName("test_uniform_centers.vtp")
# writer2.SetInputConnection(centers.GetOutputPort())
# writer2.Write()
# print("success")
def read_file ():
path = "/home/krs/code/python/Tools/vtk/c109-20001.anm"
with open(path, mode="r") as f :
data = pd.read_csv(f, sep='\s+', names=["n", "m", "a", "aj" ])
#print(data.head())
n = data["n"].to_numpy()
m = data["m"].to_numpy()
#print(n.shape[0])
coeff = np.empty((n.shape[0]), dtype=complex)
coeff.real = data["a"].to_numpy()
coeff.imag = data["aj"].to_numpy()
#print(coeff[0])
# with open(path, mode="r") as f :
# data = np.loadtxt(f, sep='\s+', names=["n", "m", "a", "aj" ])
# print(data)
return n, m, coeff
class Sphere(object):
def __init__(self, res=10):
res = (4 if res < 4 else res) # ternary
self.radius = 0.5
self.center = [0.0, 0.0, 0.0]
self.thetaResolution = int(res)
self.phiResolution = int(res)
self.startTheta = 0.0
self.endTheta = 360.0
self.startPhi = 0.0
self.endPhi = 180.0
self.LatLongTessellation = False
self.output = vtk.vtkPolyData()
def do_stuff(self):
x = [0.0, 0.0, 0.0]
n = [0.0, 0.0, 0.0]
pts = [0, 0, 0, 0]
numPoles = 0
localThetaResolution = self.thetaResolution
localStartTheta = self.startTheta
localEndTheta = self.endTheta
numPieces = self.thetaResolution
while(localEndTheta < localStartTheta):
localEndTheta += 360.0
deltaTheta = (localEndTheta - localStartTheta) / localThetaResolution
        # if you want to split this up into pieces, this part here allows that
start = 0 #piece * localThetaResolution / numPieces
end = numPieces #1 #localThetaResolution / numPieces
localEndTheta = localStartTheta + float(end)*deltaTheta
localStartTheta = localStartTheta + float(start)*deltaTheta
localThetaIndx = int(end - start)
numPts = self.phiResolution * localThetaIndx + 2
numPolys = self.phiResolution * 2 * localThetaIndx
newPoints = vtk.vtkPoints()
newPoints.Allocate(numPts)
newPolys = vtk.vtkCellArray()
#newPolys.AllocateEstimate(numPolys, 3)
newNormals = vtk.vtkDoubleArray()
newNormals.SetNumberOfComponents(3)
newNormals.Allocate(3 * numPts)
newNormals.SetName("Normals")
# Create sphere
# Create north pole if needed
if (self.startPhi <= 0.0):
x[0] = self.center[0]
x[1] = self.center[1]
x[2] = self.center[2] + self.radius
newPoints.InsertPoint(numPoles, x)
x[0] = 0.0
x[1] = 0.0
x[2] = 1.0
newNormals.InsertTuple(numPoles, x)
numPoles += 1
# Create south pole if needed
if (self.endPhi >= 180.0):
x[0] = self.center[0]
x[1] = self.center[1]
x[2] = self.center[2] - self.radius
newPoints.InsertPoint(numPoles, x)
x[0] = 0.0
x[1] = 0.0
x[2] = -1.0
newNormals.InsertTuple(numPoles, x)
numPoles += 1
# Check data, determine increments, and convert to radians
startTheta = (localStartTheta if localStartTheta < localEndTheta else localEndTheta)
startTheta *= vtk.vtkMath.Pi() / 180.0
endTheta = (localEndTheta if localEndTheta > localStartTheta else localStartTheta)
endTheta *= vtk.vtkMath.Pi() / 180.0
startPhi = (self.startPhi if self.startPhi < self.endPhi else self.endPhi)
startPhi *= vtk.vtkMath.Pi() / 180.0
endPhi = (self.endPhi if self.endPhi > self.startPhi else self.startPhi)
endPhi *= vtk.vtkMath.Pi() / 180.0
phiResolution = self.phiResolution - numPoles
deltaPhi = (endPhi - startPhi) / (self.phiResolution - 1)
thetaResolution = localThetaResolution
# check that it should return float versus int
if (abs(localStartTheta - localEndTheta) < 360.0):
localThetaResolution += 1
deltaTheta = (endTheta - startTheta) / thetaResolution
jStart = (1 if self.startPhi <= 0.0 else 0)
jEnd = (self.phiResolution - 1 if self.endPhi >= 180.0 else self.phiResolution)
# Create intermediate points
for i in range(localThetaResolution):
theta = localStartTheta * vtk.vtkMath.Pi() / 180.0 + i * deltaTheta
for j in range(jStart, jEnd):
phi = startPhi + j * deltaPhi
radius = self.radius * np.sin(phi)
n[0] = radius * np.cos(theta)
n[1] = radius * np.sin(theta)
n[2] = self.radius * np.cos(phi)
x[0] = n[0] + self.center[0]
x[1] = n[1] + self.center[1]
x[2] = n[2] + self.center[2]
newPoints.InsertNextPoint(x)
norm = vtk.vtkMath.Norm(n)
if (norm == 0.0):
norm = 1.0
n[0] /= norm
n[1] /= norm
n[2] /= norm
newNormals.InsertNextTuple(n)
# Generate mesh connectivity
base = phiResolution * localThetaResolution
# check if fabs is required
if (abs(localStartTheta - localEndTheta) < 360.0):
localThetaResolution -= 1
if (self.startPhi <= 0.0): # around north pole
for i in range(localThetaResolution):
pts[0] = (phiResolution * i + numPoles)
pts[1] = ((phiResolution * (i + 1) % base) + numPoles)
pts[2] = 0
newPolys.InsertNextCell(3, pts[:3])
if (self.endPhi >= 180.0): # around south pole
numOffset = phiResolution - 1 + numPoles
for i in range(localThetaResolution):
pts[0] = phiResolution * i + numOffset
pts[2] = ((phiResolution * (i + 1)) % base) + numOffset
pts[1] = numPoles - 1
newPolys.InsertNextCell(3, pts[:3])
# bands in-between poles
for i in range(localThetaResolution):
for j in range(phiResolution - 1):
pts[0] = phiResolution * i + j + numPoles
pts[1] = pts[0] + 1
pts[2] = ((phiResolution * (i + 1) + j) % base) + numPoles + 1
if (self.LatLongTessellation == True):
newPolys.InsertNextCell(3, pts[:3])
pts[1] = pts[2]
pts[2] = pts[1] - 1
newPolys.InsertNextCell(3, pts[:3])
else:
pts[3] = pts[2] - 1
newPolys.InsertNextCell(4, pts)
# Update ourselves and release memory
#
newPoints.Squeeze()
self.output.SetPoints(newPoints)
#newPoints.Delete()
newNormals.Squeeze()
self.output.GetPointData().SetNormals(newNormals)
#newNormals.Delete()
newPolys.Squeeze()
self.output.SetPolys(newPolys)
#newPolys.Delete()
writer2 = vtk.vtkXMLPolyDataWriter()
writer2.SetFileName("test_sphere.vtp")
writer2.SetInputData(self.output)
writer2.Write()
print("success")
class star_object(object):
def __init__(self, res=10):
res = (4 if res < 4 else res) # ternary
self.radius = 0.5
self.center = [0.0, 0.0, 0.0]
self.thetaResolution = int(res)
self.phiResolution = int(res)
self.startTheta = 0.0
self.endTheta = 360.0
self.startPhi = 0.0
self.endPhi = 180.0
self.LatLongTessellation = False
self.output = vtk.vtkPolyData()
self.tol = 1.0E-8
def read_file (self, file_path="/home/krs/code/python/Tools/vtk/c109-20001.anm"):
#path = "/home/krs/code/python/Tools/vtk/c109-20001.anm"
with open(file_path, mode="r") as f :
data = pd.read_csv(f, sep='\s+', names=["n", "m", "a", "aj" ])
#print(data.head())
self.n = data["n"].to_numpy()
self.m = data["m"].to_numpy()
#print(n.shape[0])
self.coeff = np.empty((self.n.shape[0]), dtype=complex)
self.coeff.real = data["a"].to_numpy()
self.coeff.imag = data["aj"].to_numpy()
#print(coeff[0])
# with open(path, mode="r") as f :
# data = np.loadtxt(f, sep='\s+', names=["n", "m", "a", "aj" ])
# print(data)
#return n, m, coeff
def do_stuff(self):
x = [0.0, 0.0, 0.0]
n = [0.0, 0.0, 0.0]
pts = [0, 0, 0, 0]
numPoles = 0
localThetaResolution = self.thetaResolution
localStartTheta = self.startTheta
localEndTheta = self.endTheta
numPieces = self.thetaResolution
while(localEndTheta < localStartTheta):
localEndTheta += 360.0
deltaTheta = (localEndTheta - localStartTheta) / localThetaResolution
        # if you want to split this up into pieces, this part here allows that
start = 0 #piece * localThetaResolution / numPieces
end = numPieces #1 #localThetaResolution / numPieces
localEndTheta = localStartTheta + float(end)*deltaTheta
localStartTheta = localStartTheta + float(start)*deltaTheta
localThetaIndx = int(end - start)
numPts = self.phiResolution * localThetaIndx + 2
numPolys = self.phiResolution * 2 * localThetaIndx
newPoints = vtk.vtkPoints()
newPoints.Allocate(numPts)
newPolys = vtk.vtkCellArray()
#newPolys.AllocateEstimate(numPolys, 3)
newNormals = vtk.vtkDoubleArray()
newNormals.SetNumberOfComponents(3)
newNormals.Allocate(3 * numPts)
newNormals.SetName("Normals")
# Create sphere
# Create north pole if needed
if (self.startPhi <= 0.0+self.tol):
radius = 0.0
for idx in range(self.n.shape[0]):
radius += self.coeff[idx] * special.sph_harm(self.m[idx], self.n[idx], 0.0, 0.0)
x[0] = self.center[0]
x[1] = self.center[1]
x[2] = self.center[2] + np.abs(radius) * self.radius
newPoints.InsertPoint(numPoles, x)
x[0] = 0.0
x[1] = 0.0
x[2] = 1.0
newNormals.InsertTuple(numPoles, x)
numPoles += 1
# Create south pole if needed
if (self.endPhi >= 180.0-self.tol):
radius = 0.0
print("got here")
for idx in range(self.n.shape[0]):
radius += self.coeff[idx] * special.sph_harm(self.m[idx], self.n[idx], 0.0, np.pi)
x[0] = self.center[0]
x[1] = self.center[1]
x[2] = self.center[2] - radius.real * self.radius
newPoints.InsertPoint(numPoles, x)
x[0] = 0.0
x[1] = 0.0
x[2] = -1.0
newNormals.InsertTuple(numPoles, x)
numPoles += 1
# Check data, determine increments, and convert to radians
startTheta = (localStartTheta if localStartTheta < localEndTheta else localEndTheta)
startTheta *= vtk.vtkMath.Pi() / 180.0
endTheta = (localEndTheta if localEndTheta > localStartTheta else localStartTheta)
endTheta *= vtk.vtkMath.Pi() / 180.0
startPhi = (self.startPhi if self.startPhi < self.endPhi else self.endPhi)
startPhi *= vtk.vtkMath.Pi() / 180.0
endPhi = (self.endPhi if self.endPhi > self.startPhi else self.startPhi)
endPhi *= vtk.vtkMath.Pi() / 180.0
phiResolution = self.phiResolution - numPoles
deltaPhi = (endPhi - startPhi) / (self.phiResolution - 1)
thetaResolution = localThetaResolution
# check that it should return float versus int
if (abs(localStartTheta - localEndTheta) < 360.0):
localThetaResolution += 1
deltaTheta = (endTheta - startTheta) / thetaResolution
jStart = (1 if self.startPhi <= 0.0 else 0)
jEnd = (self.phiResolution - 1 if self.endPhi >= 180.0 else self.phiResolution)
# Create intermediate points
for i in range(localThetaResolution):
theta = localStartTheta * vtk.vtkMath.Pi() / 180.0 + i * deltaTheta
for j in range(jStart, jEnd):
phi = startPhi + j * deltaPhi
# print(phi*180.0/np.pi)
radius = 0.0
for idx in range(self.n.shape[0]):
radius += self.coeff[idx] * special.sph_harm(self.m[idx], self.n[idx], theta, phi)
radius = self.radius*np.abs(radius) #radius scaling
#print(np.abs(radius))
#quit()
sinphi = np.sin(phi)
n[0] = radius * np.cos(theta) * sinphi
n[1] = radius * np.sin(theta) * sinphi
n[2] = radius * np.cos(phi)
x[0] = n[0] + self.center[0]
x[1] = n[1] + self.center[1]
x[2] = n[2] + self.center[2]
newPoints.InsertNextPoint(x)
norm = vtk.vtkMath.Norm(n)
if (norm == 0.0):
norm = 1.0
n[0] /= norm
n[1] /= norm
n[2] /= norm
newNormals.InsertNextTuple(n)
# Generate mesh connectivity
base = phiResolution * localThetaResolution
# check if fabs is required
if (abs(localStartTheta - localEndTheta) < 360.0):
localThetaResolution -= 1
if (self.startPhi <= 0.0): # around north pole
for i in range(localThetaResolution):
pts[0] = (phiResolution * i + numPoles)
pts[1] = ((phiResolution * (i + 1) % base) + numPoles)
pts[2] = 0
newPolys.InsertNextCell(3, pts[:3])
if (self.endPhi >= 180.0): # around south pole
numOffset = phiResolution - 1 + numPoles
for i in range(localThetaResolution):
pts[0] = phiResolution * i + numOffset
pts[2] = ((phiResolution * (i + 1)) % base) + numOffset
pts[1] = numPoles - 1
newPolys.InsertNextCell(3, pts[:3])
# bands in-between poles
for i in range(localThetaResolution):
for j in range(phiResolution - 1):
pts[0] = phiResolution * i + j + numPoles
pts[1] = pts[0] + 1
pts[2] = ((phiResolution * (i + 1) + j) % base) + numPoles + 1
if (self.LatLongTessellation == True):
newPolys.InsertNextCell(3, pts[:3])
pts[1] = pts[2]
pts[2] = pts[1] - 1
newPolys.InsertNextCell(3, pts[:3])
else:
pts[3] = pts[2] - 1
newPolys.InsertNextCell(4, pts)
# Update ourselves and release memory
#
newPoints.Squeeze()
self.output.SetPoints(newPoints)
#newPoints.Delete()
newNormals.Squeeze()
self.output.GetPointData().SetNormals(newNormals)
#newNormals.Delete()
newPolys.Squeeze()
self.output.SetPolys(newPolys)
#newPolys.Delete()
writer2 = vtk.vtkXMLPolyDataWriter()
writer2.SetFileName("test_star.vtp")
writer2.SetInputData(self.output)
writer2.Write()
print("success")
def gen_surface(n, m, coef):
theta = np.linspace(0.0, 2.0*np.pi, num=20, endpoint=False) # don't repeat the last part
phi = np.linspace(0.0, np.pi, num=20, endpoint=True)
T, P = np.meshgrid(theta, phi) # thete is 0-2pi, and phi is 0-pi
r = np.zeros(T.shape, dtype=complex)
for idx in range(n.shape[0]):
r += coef[idx] * special.sph_harm(m[idx], n[idx], T, P)
return r, T, P
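# Hedged check (added sketch): a single Y_0^0 term has constant magnitude
# 1/(2*sqrt(pi)), so gen_surface should return that value everywhere.
def _check_constant_surface():
    n = np.array([0])
    m = np.array([0])
    coef = np.array([1.0 + 0.0j])
    r, _T, _P = gen_surface(n, m, coef)
    return np.allclose(r, 1.0 / (2.0 * np.sqrt(np.pi)))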
def test_sphere_in_box():
bounds = [-10., 20., 20., 40., 0., 60.]
dims = (37, 23, 65)
sgrid, centers = test_uniform_grid(bounds, dims)
box_centroid = [ (bounds[j+1]+ bounds[j]) / 2.0 for j in range(0,6,2)]
box_extents = [ (bounds[j+1] - bounds[j]) for j in range(0,6,2)]
test = Sphere(res=20)
test.center = box_centroid
test.radius = np.array(box_extents).min() / 4.0
test.LatLongTessellation = False
test.do_stuff()
in_out = vtk.vtkUnsignedCharArray()
in_out.SetNumberOfComponents(1)
in_out.SetNumberOfTuples(centers.GetOutput().GetNumberOfCells())
in_out.Fill(0)
in_out.SetName("inside")
tree = vtk.vtkModifiedBSPTree()
tree.SetDataSet(test.output)
tree.BuildLocator()
#intersect the locator with the line
tolerance = 0.0000001
IntersectPoints = vtk.vtkPoints()
IntersectCells = vtk.vtkIdList()
hex_cen = vtk.vtkDoubleArray()
hex_cen.SetNumberOfComponents(3)
hex_cen.SetNumberOfTuples(centers.GetOutput().GetNumberOfCells())
hex_cen.SetName("centroid")
#pts_from_grid = sgrid.GetPoints()
for idx in range(centers.GetOutput().GetNumberOfCells()):
grid_pt = centers.GetOutput().GetPoint(idx)
hex_cen.SetTuple(idx, grid_pt)
code = tree.IntersectWithLine(box_centroid, grid_pt,
tolerance, IntersectPoints,
IntersectCells)
if (code == 0):
# no intersection
in_out.SetTuple(idx, [1])
sgrid.GetCellData().AddArray(in_out)
sgrid.GetCellData().AddArray(hex_cen)
# Uncomment the next two lines to save the dataset to a VTK XML file.
# writer = vtk.vtkXMLStructuredGridWriter()
# writer.SetFileName("test_inside.vts")
# writer.SetInputData(sgrid)
# writer.Write()
#threshold the grid by the inside hexahedrals
thresh = vtk.vtkThreshold()
thresh.ThresholdByUpper(0.5)
thresh.SetInputData(sgrid)
#for point data
#thresh.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, "distance");
#for cell data
thresh.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS, "inside")
thresh.Update()
# Uncomment the next two lines to save the dataset to a VTK XML file.
writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetFileName("clipped_cells.vtu")
writer.SetInputConnection(thresh.GetOutputPort())
writer.Write()
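# Note on the inside/outside test above: vtkModifiedBSPTree.IntersectWithLine
# returns 0 when the segment from the box centroid to a cell centre never
# crosses the closed sphere surface, i.e. the centre lies inside; centres
# outside hit the surface, keep "inside" = 0, and are dropped by the threshold.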
def test_star():
test = star_object()#res=20)
test.phiResolution = 120
test.thetaResolution = 80
test.read_file()
test.center = (0.0, 0.0, 0.0)
test.radius = 1.0
test.LatLongTessellation = False
test.do_stuff()
def main():
n, m, coeff = read_file()
print(n[0], m[0], coeff[0])
r, T, P = gen_surface(n, m, coeff)
print(r.shape)#, T, P)
#test_sphere_in_box()
test_star()
if __name__ == '__main__':
main()
|
kayarre/Tools
|
vtk/test_points.py
|
Python
|
bsd-2-clause
| 19,971
|
[
"VTK"
] |
6362b73612ddb74a469057a04c1efaa70803f8dafb872750a0e03ab82a902e5c
|
import scrapelib
import datetime
import os
import re
from collections import defaultdict
from pupa.scrape import Scraper, Bill, VoteEvent
from pupa.utils.generic import convert_pdf
import lxml.html
def action_type(action):
"""
Used to standardise the bill actions to the terms specified
@ https://opencivicdata.readthedocs.io/en/latest/scrape/bills.html
    :param action: the scraped action string
    :return: the opencivicdata action classification(s), or None if unmatched
"""
# http://www.scstatehouse.gov/actionsearch.php is very useful for this
classifiers = (('Adopted', 'passage'),
('Amended and adopted',
['passage', 'amendment-passage']),
('Amended', 'amendment-passage'),
('Certain items vetoed', 'executive-veto:line-item'),
('Committed to', 'referral-committee'),
('Committee Amendment Adopted', 'amendment-passage'),
('Committee Amendment Amended and Adopted',
['amendment-passage', 'amendment-amendment']),
('Committee Amendment Amended', 'amendment-amendment'),
('Committee Amendment Tabled', 'amendment-deferral'),
('Committee report: Favorable',
'committee-passage-favorable'),
('Committee report: Majority favorable',
'committee-passage'),
('House amendment amended', 'amendment-amendment'),
('Introduced and adopted',
['introduction', 'passage']),
('Introduced, adopted',
['introduction', 'passage']),
('Introduced and read first time', ['introduction', 'reading-1']),
('Introduced, read first time', ['introduction', 'reading-1']),
('Introduced', 'introduction'),
('Prefiled', 'filing'),
('Read second time', 'reading-2'),
('Read third time', ['passage', 'reading-3']),
('Recommitted to Committee', 'referral-committee'),
('Referred to Committee', 'referral-committee'),
('Rejected', 'failure'),
('Senate amendment amended', 'amendment-amendment'),
('Signed by governor', 'executive-signature'),
('Signed by Governor', 'executive-signature'),
('Tabled', 'failure'),
('Veto overridden', 'veto-override-passage'),
('Veto sustained', 'veto-override-failure'),
('Vetoed by Governor', 'executive-veto'),
)
for prefix, atype in classifiers:
if action.lower().startswith(prefix.lower()):
return atype
# otherwise
return None
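# Hedged, illustrative examples (synthetic action strings):
#   action_type('Introduced and read first time (House Journal-page 12)')
#       -> ['introduction', 'reading-1']
#   action_type('Referred to Committee on Judiciary') -> 'referral-committee'
#   action_type('Scrivener error corrected')          -> None  (unclassified)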
class SCBillScraper(Scraper):
"""
    Bill scraper that pulls down all legislation from the SC website.
    Used to pull in information regarding legislation and basic associated metadata,
    using XPath to find and obtain the information
"""
urls = {
'lower': {
'daily-bill-index': "http://www.scstatehouse.gov/hintro/hintros.php",
'prefile-index': "http://www.scstatehouse.gov/sessphp/prefil"
"{last_two_digits_of_session_year}.php",
},
'upper': {
'daily-bill-index': "http://www.scstatehouse.gov/sintro/sintros.php",
'prefile-index': "http://www.scstatehouse.gov/sessphp/prefil"
"{last_two_digits_of_session_year}.php",
}
}
_subjects = defaultdict(set)
def scrape_subjects(self, session):
"""
        Obtain bill subjects, which are saved into the _subjects mapping,
        to be added onto each bill later in the process.
        :param session:
"""
# only need to do it once
if self._subjects:
return
session_code = {
'2013-2014': '120',
'2015-2016': '121',
'2017-2018': '122',
}[session]
subject_search_url = 'http://www.scstatehouse.gov/subjectsearch.php'
data = self.post(subject_search_url,
data=dict((('GETINDEX', 'Y'), ('SESSION', session_code),
('INDEXCODE', '0'), ('INDEXTEXT', ''),
('AORB', 'B'), ('PAGETYPE', '0')))).text
doc = lxml.html.fromstring(data)
# skip first two subjects, filler options
for option in doc.xpath('//option')[2:]:
subject = option.text
code = option.get('value')
url = '%s?AORB=B&session=%s&indexcode=%s' % (subject_search_url,
session_code, code)
data = self.get(url).text
doc = lxml.html.fromstring(data)
for bill in doc.xpath('//span[@style="font-weight:bold;"]'):
                match = re.match(r'(?:H|S) \d{4}', bill.text)
if match:
# remove * and leading zeroes
bill_id = match.group().replace('*', ' ')
bill_id = re.sub(' 0*', ' ', bill_id)
self._subjects[bill_id].add(subject)
def scrape_vote_history(self, bill, vurl):
"""
Obtain the information on a vote and link it to the related Bill
:param bill: related bill
:param vurl: source for the voteEvent information.
:return: voteEvent object
"""
html = self.get(vurl).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(vurl)
# skip first two rows
for row in doc.xpath('//table/tr')[2:]:
tds = row.getchildren()
if len(tds) != 11:
self.warning('irregular vote row: %s' % vurl)
continue
timestamp, motion, vote, yeas, nays, nv, exc, pres, abst, total, result = tds
timestamp = timestamp.text.replace(u'\xa0', ' ')
timestamp = datetime.datetime.strptime(timestamp,
'%m/%d/%Y %H:%M %p')
yeas = int(yeas.text)
nays = int(nays.text)
others = int(nv.text) + int(exc.text) + int(abst.text) + int(pres.text)
assert yeas + nays + others == int(total.text)
if result.text == 'Passed':
passed = 'pass'
else:
passed = 'fail'
vote_link = vote.xpath('a')[0]
if '[H]' in vote_link.text:
chamber = 'lower'
else:
chamber = 'upper'
vote = VoteEvent(
chamber=chamber, # 'upper' or 'lower'
start_date=timestamp.strftime('%Y-%m-%d'), # 'YYYY-MM-DD' format
motion_text=motion.text,
result=passed,
classification='passage', # Can also be 'other'
# Provide a Bill instance to link with the VoteEvent...
bill=bill,
)
vote.set_count('yes', yeas)
vote.set_count('no', nays)
vote.set_count('other', others)
vote.add_source(vurl)
# obtain vote rollcall from pdf and add it to the VoteEvent object
rollcall_pdf = vote_link.get('href')
self.scrape_rollcall(vote, rollcall_pdf)
vote.add_source(rollcall_pdf)
yield vote
def scrape_rollcall(self, vote, vurl):
"""
Get text information from the pdf, containing the vote roll call
and add the information obtained to the related voteEvent object
:param vote: related voteEvent object
:param vurl: pdf source url
"""
(path, resp) = self.urlretrieve(vurl)
pdflines = convert_pdf(path, 'text')
os.remove(path)
current_vfunc = None
option = None
for line in pdflines.split(b'\n'):
line = line.strip().decode()
# change what is being recorded
if line.startswith('YEAS') or line.startswith('AYES'):
current_vfunc = vote.yes
elif line.startswith('NAYS'):
current_vfunc = vote.no
elif line.startswith('EXCUSED'):
current_vfunc = vote.vote
option = 'excused'
elif line.startswith('NOT VOTING'):
current_vfunc = vote.vote
option = 'excused'
elif line.startswith('ABSTAIN'):
current_vfunc = vote.vote
option = 'excused'
elif line.startswith('PAIRED'):
current_vfunc = vote.vote
option = 'paired'
# skip these
elif not line or line.startswith('Page '):
continue
# if a vfunc is active
elif current_vfunc:
# split names apart by 3 or more spaces
                names = re.split(r'\s{3,}', line)
for name in names:
if name:
if not option:
current_vfunc(name.strip())
else:
current_vfunc(option=option,
voter=name.strip())
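    # Hedged illustration of the roll-call text handled above (synthetic
    # lines, not from a real PDF):
    #   "YEAS - 98"               -> following names recorded as 'yes'
    #   "Alexander      Allison"  -> names split on runs of 3+ spaces
    #   "NOT VOTING - 2"          -> names recorded via vote.vote(option='excused')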
def scrape_details(self, bill_detail_url, session, chamber, bill_id):
"""
Create the Bill and add the information obtained from the provided bill_detail_url.
and then yield the bill object.
:param bill_detail_url:
:param session:
:param chamber:
:param bill_id:
:return:
"""
page = self.get(bill_detail_url).text
if 'INVALID BILL NUMBER' in page:
self.warning('INVALID BILL %s' % bill_detail_url)
return
doc = lxml.html.fromstring(page)
doc.make_links_absolute(bill_detail_url)
bill_div = doc.xpath('//div[@style="margin:0 0 40px 0;"]')[0]
bill_type = bill_div.xpath('span/text()')[0]
if 'General Bill' in bill_type:
bill_type = 'bill'
elif 'Concurrent Resolution' in bill_type:
bill_type = 'concurrent resolution'
elif 'Joint Resolution' in bill_type:
bill_type = 'joint resolution'
elif 'Resolution' in bill_type:
bill_type = 'resolution'
else:
raise ValueError('unknown bill type: %s' % bill_type)
# this is fragile, but less fragile than it was
b = bill_div.xpath('./b[text()="Summary:"]')[0]
bill_summary = b.getnext().tail.strip()
bill = Bill(
bill_id,
legislative_session=session, # session name metadata's `legislative_sessions`
chamber=chamber, # 'upper' or 'lower'
title=bill_summary,
classification=bill_type
)
subjects = list(self._subjects[bill_id])
for subject in subjects:
bill.add_subject(subject)
# sponsors
for sponsor in doc.xpath('//a[contains(@href, "member.php")]/text()'):
bill.add_sponsorship(
name=sponsor,
classification='primary',
primary=True,
entity_type='person'
)
for sponsor in doc.xpath('//a[contains(@href, "committee.php")]/text()'):
sponsor = sponsor.replace(u'\xa0', ' ').strip()
bill.add_sponsorship(
name=sponsor,
classification='primary',
primary=True,
entity_type='organization'
)
# find versions
version_url = doc.xpath('//a[text()="View full text"]/@href')[0]
version_html = self.get(version_url).text
version_doc = lxml.html.fromstring(version_html)
version_doc.make_links_absolute(version_url)
for version in version_doc.xpath('//a[contains(@href, "/prever/")]'):
# duplicate versions with same date, use first appearance
bill.add_version_link(
note=version.text, # Description of the version from the state;
# eg, 'As introduced', 'Amended', etc.
url=version.get('href'),
on_duplicate='ignore',
media_type='text/html' # Still a MIME type
)
# actions
for row in bill_div.xpath('table/tr'):
date_td, chamber_td, action_td = row.xpath('td')
date = datetime.datetime.strptime(date_td.text, "%m/%d/%y")
action_chamber = {'Senate': 'upper',
'House': 'lower',
None: 'other'}[chamber_td.text]
action = action_td.text_content()
action = action.split('(House Journal')[0]
action = action.split('(Senate Journal')[0].strip()
atype = action_type(action)
bill.add_action(
description=action, # Action description, from the state
date=date.strftime('%Y-%m-%d'), # `YYYY-MM-DD` format
chamber=action_chamber, # 'upper' or 'lower'
classification=atype # Options explained in the next section
)
# votes
vurl = doc.xpath('//a[text()="View Vote History"]/@href')
if vurl:
vurl = vurl[0]
yield from self.scrape_vote_history(bill, vurl)
bill.add_source(bill_detail_url)
yield bill
def scrape(self, chamber=None, session=None):
"""
Obtain the bill urls containing the bill information which will be used
by the scrape_details function to yield the desired Bill objects
:param chamber:
:param session:
"""
if session is None:
session = self.latest_session()
self.info('no session specified, using %s', session)
# start with subjects
self.scrape_subjects(session)
# get bill index
chambers = [chamber] if chamber else ['upper', 'lower']
for chamber in chambers:
index_url = self.urls[chamber]['daily-bill-index']
chamber_letter = 'S' if chamber == 'upper' else 'H'
page = self.get(index_url).text
doc = lxml.html.fromstring(page)
doc.make_links_absolute(index_url)
# visit each day and extract bill ids
days = doc.xpath('//div/b/a/@href')
for day_url in days:
try:
data = self.get(day_url).text
except scrapelib.HTTPError:
continue
doc = lxml.html.fromstring(data)
doc.make_links_absolute(day_url)
for bill_a in doc.xpath('//p/a[1]'):
bill_id = bill_a.text.replace('.', '')
if bill_id.startswith(chamber_letter):
yield from self.scrape_details(bill_a.get('href'), session, chamber,
bill_id)
prefile_url = self.urls[chamber]['prefile-index']\
.format(last_two_digits_of_session_year=session[2:4])
page = self.get(prefile_url).text
doc = lxml.html.fromstring(page)
doc.make_links_absolute(prefile_url)
# visit each day and extract bill ids
if chamber == 'lower':
days = doc.xpath('//dd[contains(text(),"House")]/a/@href')
else:
days = doc.xpath('//dd[contains(text(),"Senate")]/a/@href')
for day_url in days:
try:
data = self.get(day_url).text
except scrapelib.HTTPError:
continue
doc = lxml.html.fromstring(data)
doc.make_links_absolute(day_url)
for bill_a in doc.xpath('//p/a[1]'):
bill_id = bill_a.text.replace('.', '')
if bill_id.startswith(chamber_letter):
yield from self.scrape_details(bill_a.get('href'), session, chamber,
bill_id)
|
cliftonmcintosh/openstates
|
openstates/sc/bills.py
|
Python
|
gpl-3.0
| 16,332
|
[
"VisIt"
] |
463e9564b91e618ee3d7f19079959e0173709c21405ab3428821bca7169f390d
|
#Author: Miguel Molero <miguel.molero@gmail.com>
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from pcloudpy.gui.graphics.QVTKWidget import QVTKWidget
class QVTKMainWindow(QMainWindow):
def __init__(self, parent = None):
super(QVTKMainWindow, self).__init__(parent)
self.vtkWidget = QVTKWidget(self)
self.setCentralWidget(self.vtkWidget)
self.setWindowTitle("QVTKMainWindow")
self.setGeometry(50,50, 800,800)
if __name__ == "__main__":
from vtk import vtkConeSource
from vtk import vtkPolyDataMapper, vtkActor
app = QApplication(['QVTKWindow'])
win = QVTKMainWindow()
cone = vtkConeSource()
cone.SetResolution(8)
coneMapper = vtkPolyDataMapper()
    coneMapper.SetInputConnection(cone.GetOutputPort())  # VTK 6+ pipeline API
coneActor = vtkActor()
coneActor.SetMapper(coneMapper)
win.vtkWidget.renderer.AddActor(coneActor)
# show the widget
win.show()
# start event processing
app.exec_()
|
mmolero/pcloudpy
|
pcloudpy/gui/graphics/QVTKWindow.py
|
Python
|
bsd-3-clause
| 998
|
[
"VTK"
] |
46833b82c471edffb25360740fa5cdad55dafe35d43af7df5a4f46fb8ace9624
|
# Copyright (c) 2010 Howard Hughes Medical Institute.
# All rights reserved.
# Use is subject to Janelia Farm Research Campus Software Copyright 1.1 license terms.
# http://license.janelia.org/license/jfrc_copyright_1_1.html
import neuroptikon
import osgUtil
import os
from neuro_object import NeuroObject
class Arborization(NeuroObject):
def __init__(self, neurite, region, sendsOutput=None, receivesInput=None, *args, **keywords):
"""
Arborizations represent a neurite's arborization within a region.
You create an arborization by messaging a :meth:`neuron <Network.Neuron.Neuron.arborize>` or :meth:`neurite <Network.Neurite.Neurite.arborize>`:
>>> neuron1 = network.createNeuron()
>>> region1 = network.createRegion()
>>> arborization_1_1 = neuron1.arborize(region1)
"""
NeuroObject.__init__(self, neurite.network, *args, **keywords)
self.neurite = neurite
self.region = region
self.sendsOutput = sendsOutput # does the neurite send output to the arbor? None = unknown
self.receivesInput = receivesInput # does the neurite receive input from the arbor? None = unknown
def defaultName(self):
return str(self.neurite.neuron().name) + ' -> ' + str(self.region.name)
@classmethod
def _fromXMLElement(cls, network, xmlElement):
arborization = super(Arborization, cls)._fromXMLElement(network, xmlElement)
neuriteId = xmlElement.get('neuriteId')
arborization.neurite = network.objectWithId(neuriteId)
if arborization.neurite is None:
raise ValueError(gettext('Neurite with id "%s" does not exist') % (neuriteId))
arborization.neurite.arborization = arborization
regionId = xmlElement.get('regionId')
arborization.region = network.objectWithId(regionId)
if arborization.region is None:
raise ValueError(gettext('Region with id "%s" does not exist') % (regionId))
arborization.region.arborizations.append(arborization)
sends = xmlElement.get('sends')
if sends == 'true':
arborization.sendsOutput = True
elif sends == 'false':
arborization.sendsOutput = False
else:
arborization.sendsOutput = None
receives = xmlElement.get('receives')
if receives == 'true':
arborization.receivesInput = True
elif receives == 'false':
arborization.receivesInput = False
else:
arborization.receivesInput = None
return arborization
def _toXMLElement(self, parentElement):
arborizationElement = NeuroObject._toXMLElement(self, parentElement)
arborizationElement.set('neuriteId', str(self.neurite.networkId))
arborizationElement.set('regionId', str(self.region.networkId))
if self.sendsOutput is not None:
arborizationElement.set('sends', 'true' if self.sendsOutput else 'false')
if self.receivesInput is not None:
arborizationElement.set('receives', 'true' if self.receivesInput else 'false')
return arborizationElement
def _creationScriptMethod(self, scriptRefs):
if self.neurite.networkId in scriptRefs:
command = scriptRefs[self.neurite.networkId]
else:
command = scriptRefs[self.neurite.root.networkId]
return command + '.arborize'
def _creationScriptParams(self, scriptRefs):
args, keywords = NeuroObject._creationScriptParams(self, scriptRefs)
args.insert(0, scriptRefs[self.region.networkId])
if self.sendsOutput is not None:
keywords['sendsOutput'] = str(self.sendsOutput)
if self.receivesInput is not None:
keywords['receivesInput'] = str(self.receivesInput)
return (args, keywords)
def connections(self, recurse = True):
return NeuroObject.connections(self, recurse) + [self.neurite, self.region]
def inputs(self, recurse = True):
inputs = NeuroObject.inputs(self, recurse)
if self.sendsOutput:
inputs += [self.neurite]
if self.receivesInput:
inputs += [self.region]
return inputs
def outputs(self, recurse = True):
outputs = NeuroObject.outputs(self, recurse)
if self.sendsOutput:
outputs += [self.region]
if self.receivesInput:
outputs += [self.neurite]
return outputs
def disconnectFromNetwork(self):
self.neurite.arborization = None
self.region.arborizations.remove(self)
@classmethod
def _defaultVisualizationParams(cls):
params = NeuroObject._defaultVisualizationParams()
# NOTE: Fixed now that PolytopeIntersector works on windows.
# Used to default to cylinder on windows
params['shape'] = 'Line' if hasattr(osgUtil, 'PolytopeIntersector') else 'Cylinder' # and not os.name.startswith('nt'))
params['color'] = (0.0, 0.0, 0.0)
params['pathIsFixed'] = None
params['weight'] = 1.0
return params
def defaultVisualizationParams(self):
params = self.__class__._defaultVisualizationParams()
params['pathEndPoints'] = (self.neurite.neuron(), self.region)
params['flowTo'] = self.sendsOutput
params['flowFrom'] = self.receivesInput
return params
|
JaneliaSciComp/Neuroptikon
|
Source/network/arborization.py
|
Python
|
bsd-3-clause
| 5,522
|
[
"NEURON"
] |
7da27f94f360f1f0b7325469030c57026222a861f1656ed8277b71b76f3f57ca
|
""" A script for setting each Visit org from its parent Patient.
This is a manage.py command. Run with --help for documentation.
Example usage:
To run on localhost:
> manage.py setvisitorg
To run on production:
> manage.py setvisitorg --remote
NOTE: This should be no longer needed once the first initialization is done.
"""
import getpass
import logging
import settings
from django.core.management.base import BaseCommand, CommandError
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.ext import db
from optparse import make_option
from healthdb import models
def auth_func():
"""Get username and password (for access to localhost)"""
return raw_input('Username:'), getpass.getpass('Password:')
# Number of rows to read/write at once
ROWS_PER_BATCH = 50
def run():
count = 0
# TODO(dan): Shouldn't be necessary to fetch in batches, but if I don't it hangs
visits = models.Visit.all().order('__key__').fetch(ROWS_PER_BATCH)
visits_to_put = []
while visits:
for visit in visits:
if not visit.organization:
visit.organization = visit.get_patient().organization
visits_to_put.append(visit)
db.put(visits_to_put)
count += len(visits_to_put)  # count the batch before clearing it, otherwise nothing is ever counted
visits_to_put = []
logging.info('Updated %d visits' % count)
visits = models.Visit.all().order('__key__').filter(
'__key__ >', visits[-1].key()).fetch(ROWS_PER_BATCH)
db.put(visits_to_put)
count += len(visits_to_put)
logging.info('Updated %d visits. Done' % count)
# TODO(dan): Factor out app-id, host, etc.
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--app-id', dest='app_id', help='The app id'),
make_option('--host', dest='host', default='localhost:8080',
help='Specifies the URL of the local application. Use --remote '
'to modify the production site.'),
make_option('--remote', action='store_true', dest='remote', default=False,
help='Run against the production datastore, as described in the module docstring.'),
)
help = 'Sets visit orgs'
args = ''
def handle(self, *app_labels, **options):
# Turn off copious DEBUG logging
logging.getLogger().setLevel(logging.INFO)
# Note: this app is only supported for decisionapp
if len(app_labels) != 0:
raise CommandError("This command doesn't take a list of parameters"
"...it only runs against the 'childdb' app.")
app_id = options.get('app_id')
# app_id is optional for the local app
# if not app_id:
# raise CommandError('Must give --app-id')
# Configure local server to run against, if we're not --remote
# TODO(max): I couldn't get this to run against the correct local
# instance of the datastore, so we'll connect this way. It remains
# a TODO to just run this script directly, without this block.
remote = options.get('remote') # None==local, True==remote (production)
if not remote:
remote_api_url = settings.DATABASE_OPTIONS['remote_url']
host = options.get('host')
remote_api_stub.ConfigureRemoteDatastore(
app_id, remote_api_url, auth_func, host)
run()
|
avastjohn/maventy_new
|
healthdb/management/commands/setvisitorg.py
|
Python
|
bsd-3-clause
| 3,015
|
[
"VisIt"
] |
7dc43e09ad0ae5781acc1d2cfe0b09fa3e328092e5d7d2ab897eea670d86a9eb
|
import argparse
from collections import defaultdict
import pysam
def Parser():
the_parser = argparse.ArgumentParser()
the_parser.add_argument(
'--input', action="store", type=str, help="bam alignment file")
the_parser.add_argument(
'--minquery', type=int,
help="Minimum readsize of query reads (nt) - must be an integer")
the_parser.add_argument(
'--maxquery', type=int,
help="Maximum readsize of query reads (nt) - must be an integer")
the_parser.add_argument(
'--mintarget', type=int,
help="Minimum readsize of target reads (nt) - must be an integer")
the_parser.add_argument(
'--maxtarget', type=int,
help="Maximum readsize of target reads (nt) - must be an integer")
the_parser.add_argument(
'--overlap', type=int,
help="Overlap analyzed (nt) - must be an integer")
the_parser.add_argument(
'--output', action="store", type=str,
help="Pairable sequences")
args = the_parser.parse_args()
return args
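# Illustrative invocation (file names are hypothetical), combining the options above
# as they would typically be used for a 10-nt 5' overlap signature analysis:
#   python overlapping_reads.py --input aligned.bam \
#       --minquery 23 --maxquery 29 --mintarget 23 --maxtarget 29 \
#       --overlap 10 --output pairable_reads.fa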
class Map:
def __init__(self, bam_file, output, minquery=23, maxquery=29,
mintarget=23, maxtarget=29, overlap=10):
self.bam_object = pysam.AlignmentFile(bam_file, 'rb')
self.output = output
self.query_range = range(minquery, maxquery + 1)
self.target_range = range(mintarget, maxtarget + 1)
self.overlap = overlap
self.chromosomes = dict(zip(self.bam_object.references,
self.bam_object.lengths))
self.alignement_dic = self.index_alignments(self.bam_object)
self.all_query_positions = self.query_positions(self.bam_object,
overlap=self.overlap)
self.readdic = self.make_readdic(self.bam_object)
self.pairing()
def make_readdic(self, bam_object):
readdic = defaultdict(int)
for read in bam_object.fetch():
readdic[read.query_sequence] += 1
return readdic
def index_alignments(self, bam_object):
'''
dic[(chrom, pos, polarity)]: [readseq1, readseq2, ...]
the list value is further converted in set
'''
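# Illustrative (hypothetical) shape of the index, e.g. for reads on chr2L:
#   {('chr2L', 1200, 'F'): {'TGACATTCC...', 'TGACATGCC...'},
#    ('chr2L', 1209, 'R'): {'GGAATGTCA...'}}
# The coordinate stored for each read is the genomic position of its 5' end.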
dic = defaultdict(list)
for chrom in self.chromosomes:
for read in bam_object.fetch(chrom):
if read.is_reverse:
coord = read.reference_end-1
pol = 'R'
else:
coord = read.reference_start
pol = 'F'
dic[(chrom, coord, pol)].append(read.query_sequence)
for key in dic:
dic[key] = set(dic[key])
return dic
def query_positions(self, bam_object, overlap):
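'''
Return, per chromosome, the sorted forward-read 5' positions for which at least
one reverse read has its 5' end exactly `overlap - 1` nucleotides downstream,
i.e. the positions where a pair with the requested overlap can exist.
'''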
all_query_positions = defaultdict(list)
for genomicKey in self.alignement_dic.keys():
chrom, coord, pol = genomicKey
if pol == 'F' and len(self.alignement_dic[(chrom,
coord+overlap-1,
'R')]) > 0:
all_query_positions[chrom].append(coord)
for chrom in all_query_positions:
all_query_positions[chrom] = sorted(
list(set(all_query_positions[chrom])))
return all_query_positions
def countpairs(self, uppers, lowers):
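'''
Greedy pair counting at one position: keep only reads whose length falls in
the query or target size range, expand each distinct sequence by its read
count, then match every upper (forward) read with the first remaining lower
(reverse) read such that one is query-sized and the other target-sized, and
return the number of pairs formed.
'''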
query_range = self.query_range
target_range = self.target_range
uppers = [seq for seq in uppers if (len(seq) in query_range or len(seq)
in target_range)]
print(uppers)
uppers_expanded = []
for seq in uppers:
expand = [seq for i in range(self.readdic[seq])]
uppers_expanded.extend(expand)
print(uppers_expanded)
uppers = uppers_expanded
lowers = [seq for seq in lowers if (len(seq) in query_range or len(seq)
in target_range)]
lowers_expanded = []
for seq in lowers:
expand = [seq for i in range(self.readdic[seq])]
lowers_expanded.extend(expand)
lowers = lowers_expanded
paired = []
for upread in uppers:
for downread in lowers:
if (len(upread) in query_range and len(downread) in
target_range) or (len(upread) in target_range and
len(downread) in query_range):
paired.append(upread)
lowers.remove(downread)
break
return len(paired)
def pairing(self):
F = open(self.output, 'w')
query_range = self.query_range
target_range = self.target_range
overlap = self.overlap
stringresult = []
header_template = '>%s|coord=%s|strand %s|size=%s|nreads=%s\n%s\n'
total_pairs = 0
print('Chromosome\tNumber of pairs')
for chrom in sorted(self.chromosomes):
number_pairs = 0
for pos in self.all_query_positions[chrom]:
stringbuffer = []
uppers = self.alignement_dic[chrom, pos, 'F']
lowers = self.alignement_dic[chrom, pos+overlap-1, 'R']
number_pairs += self.countpairs(uppers, lowers)
total_pairs += number_pairs
if uppers and lowers:
for upread in uppers:
for downread in lowers:
if (len(upread) in query_range and len(downread) in
target_range) or (len(upread) in target_range
and len(downread) in
query_range):
stringbuffer.append(
header_template %
(chrom, pos+1, '+', len(upread),
self.readdic[upread], upread))
stringbuffer.append(
header_template %
(chrom, pos+overlap-len(downread)+1, '-',
len(downread), self.readdic[downread],
self.revcomp(downread)))
stringresult.extend(sorted(set(stringbuffer)))
print('%s\t%s' % (chrom, number_pairs))
print('Total number of pairs that can be simultaneously formed\t%s'
% total_pairs)
F.write(''.join(stringresult))
def revcomp(self, sequence):
antidict = {"A": "T", "T": "A", "G": "C", "C": "G", "N": "N"}
revseq = sequence[::-1]
return "".join([antidict[i] for i in revseq])
if __name__ == "__main__":
args = Parser()
mapobj = Map(args.input, args.output, args.minquery, args.maxquery,
args.mintarget, args.maxtarget, args.overlap)
|
drosofff/tools-artbio
|
tools/small_rna_signatures/overlapping_reads.py
|
Python
|
mit
| 6,930
|
[
"pysam"
] |
3eb471313209476d4fde2929179b0eaa024b246efe898fad8e12c9e8c987bfed
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************************
espressopp.integrator.Isokinetic
********************************
.. function:: espressopp.integrator.Isokinetic(system)
:param system: the system the thermostat extension acts on
:type system: espressopp.System
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_Isokinetic
class IsokineticLocal(ExtensionLocal, integrator_Isokinetic):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, integrator_Isokinetic, system)
if pmi.isController :
class Isokinetic(Extension):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.integrator.IsokineticLocal',
pmiproperty = [ 'temperature', 'coupling' ]
)
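# Minimal usage sketch (assumes an already configured espressopp system and a
# velocity-Verlet integrator; the property values below are placeholders, not
# recommendations):
#   isokinetic = espressopp.integrator.Isokinetic(system)
#   isokinetic.temperature = 1.0
#   isokinetic.coupling = 1
#   integrator.addExtension(isokinetic)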
|
fedepad/espressopp
|
src/integrator/Isokinetic.py
|
Python
|
gpl-3.0
| 1,738
|
[
"ESPResSo"
] |
686be5e5b0b0a075e20430ba9787c6c1374867e8fd8e57d610806901aa4461eb
|